diff --git a/.gitignore b/.gitignore index f8b6eb209..70c3c4095 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ cover/* .tox nosetests.xml .testrepository +.stestr # Translations *.mo diff --git a/.mailmap b/.mailmap index cc92f17b8..c7b4804d7 100644 --- a/.mailmap +++ b/.mailmap @@ -1,3 +1,6 @@ # Format is: # -# \ No newline at end of file +# + + + diff --git a/.stestr.conf b/.stestr.conf new file mode 100644 index 000000000..f42846f41 --- /dev/null +++ b/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./openstack/tests/unit +top_dir=./ diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 499b7a370..000000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./openstack/tests/unit} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list -group_regex=([^\.]+\.)+ diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 6fe312571..57b6bf563 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,16 +1,45 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: +.. _contributing: - https://docs.openstack.org/infra/manual/developers.html +=================================== +Contributing to python-openstacksdk +=================================== -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: +If you're interested in contributing to the python-openstacksdk project, +the following will help get you started. - https://docs.openstack.org/infra/manual/developers.html#development-workflow +Contributor License Agreement +----------------------------- +.. index:: + single: license; agreement + +In order to contribute to the python-openstacksdk project, you need to have +signed OpenStack's contributor's agreement. + +Please read `DeveloperWorkflow`_ before sending your first patch for review. Pull requests submitted through GitHub will be ignored. -Bugs should be filed on Launchpad, not GitHub: +.. seealso:: - https://bugs.launchpad.net/python-openstacksdk + * http://wiki.openstack.org/HowToContribute + * http://wiki.openstack.org/CLA + +.. _DeveloperWorkflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Project Hosting Details +------------------------- + +Project Documentation + http://docs.openstack.org/sdks/python/openstacksdk/ + +Bug tracker + https://bugs.launchpad.net/python-openstacksdk + +Mailing list (prefix subjects with ``[sdk]`` for faster responses) + http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev + +Code Hosting + https://git.openstack.org/cgit/openstack/python-openstacksdk + +Code Review + https://review.openstack.org/#/q/status:open+project:openstack/python-openstacksdk,n,z diff --git a/HACKING.rst b/HACKING.rst index e7627c519..6350ad49f 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -1,4 +1,49 @@ -python-openstacksdk Style Commandments -====================================== +openstacksdk Style Commandments +=============================== -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ +Read the OpenStack Style Commandments +http://docs.openstack.org/developer/hacking/ + +Indentation +----------- + +PEP-8 allows for 'visual' indentation. Do not use it. 
Visual indentation looks
like this: + +.. code-block:: python + + return_value = self.some_method(arg1, arg1, + arg3, arg4) + +Visual indentation makes refactoring the code base unnecessarily hard. + +Instead of visual indentation, use this: + +.. code-block:: python + + return_value = self.some_method( + arg1, arg1, arg3, arg4) + +That way, if some_method ever needs to be renamed, the only line that needs +to be touched is the line with some_method. Additionally, if you need to +line break at the top of a block, please indent the continuation line +an additional 4 spaces, like this: + +.. code-block:: python + + for val in self.some_method( + arg1, arg1, arg3, arg4): + self.do_something_awesome() + +Neither of these are 'mandated' by PEP-8. However, they are prevailing styles +within this code base. + +Unit Tests +---------- + +Unit tests should be virtually instant. If a unit test takes more than 1 second +to run, it is a bad unit test. Honestly, 1 second is too slow. + +All unit test classes should subclass `openstack.tests.unit.base.BaseTestCase`. The +base TestCase class takes care of properly creating `OpenStackCloud` objects +in a way that protects against the local environment. diff --git a/README.rst b/README.rst index 36ad09d38..5989a8aae 100644 --- a/README.rst +++ b/README.rst @@ -1,36 +1,119 @@ -OpenStack Python SDK -==================== +openstacksdk +============ -The ``python-openstacksdk`` is a collection of libraries for building -applications to work with OpenStack clouds. The project aims to provide -a consistent and complete set of interactions with OpenStack's many -services, along with complete documentation, examples, and tools. +openstacksdk is a client library for building applications to work +with OpenStack clouds. The project aims to provide a consistent and +complete set of interactions with OpenStack's many services, along with +complete documentation, examples, and tools. -This SDK is under active development, and in the interests of providing -a high-quality interface, the APIs provided in this release may differ -from those provided in future release. +It also contains a simple interface layer. Clouds can do many things, but +there are probably only about 10 of them that most people care about with any +regularity. If you want to do complicated things, the per-service oriented +portions of the SDK are for you. However, if what you want is to be able to +write an application that talks to clouds no matter what crazy choices the +deployer has made in an attempt to be more hipster than their self-entitled +narcissist peers, then the ``openstack.cloud`` layer is for you. -Usage ------ +A Brief History +--------------- -The following example simply connects to an OpenStack cloud and lists -the containers in the Object Store service.:: +openstacksdk started its life as three different libraries: shade, +os-client-config and python-openstacksdk. - from openstack import connection - conn = connection.Connection(auth_url="http://openstack:5000/v3", - project_name="big_project", - username="SDK_user", - password="Super5ecretPassw0rd") - for container in conn.object_store.containers(): - print(container.name) +``shade`` started its life as some code inside of OpenStack Infra's nodepool +project, and as some code inside of Ansible. Ansible had a bunch of different +OpenStack related modules, and there was a ton of duplicated code. 
Eventually, +between refactoring that duplication into an internal library, and adding logic +and features that the OpenStack Infra team had developed to run client +applications at scale, it turned out that we'd written nine-tenths of what we'd +need to have a standalone library. -Documentation ------------- +``os-client-config`` was a library for collecting client configuration for +using an OpenStack cloud in a consistent and comprehensive manner. +In parallel, the python-openstacksdk team was working on a library to expose +the OpenStack APIs to developers in a consistent and predictable manner. After +a while it became clear that there was value in both a high-level layer that +contains business logic, a lower-level SDK that exposes services and their +resources as Python objects, and also to be able to make direct REST calls +when needed with a properly configured Session or Adapter from python-requests. +This led to the merger of the three projects. -Documentation is available at -https://developer.openstack.org/sdks/python/openstacksdk/ +The contents of the shade library have been moved into ``openstack.cloud`` +and os-client-config has been moved in to ``openstack.config``. The next +release of shade will be a thin compatibility layer that subclasses the objects +from ``openstack.cloud`` and provides different argument defaults where needed +for compat. Similarly the next release of os-client-config will be a compat +layer shim around ``openstack.config``. -License ------- +openstack.config +================ -Apache 2.0 +``openstack.config`` will find cloud configuration for as few as one cloud and +as many as you want to put in a config file. It will read environment variables +and config files, and it also contains some vendor specific default values so +that you don't have to know extra info to use OpenStack. + +* If you have a config file, you will get the clouds listed in it +* If you have environment variables, you will get a cloud named `envvars` +* If you have neither, you will get a cloud named `defaults` with base defaults + +Sometimes an example is nice. + +Create a ``clouds.yaml`` file: + +.. code-block:: yaml + + clouds: + mordred: + region_name: Dallas + auth: + username: 'mordred' + password: XXXXXXX + project_name: 'shade' + auth_url: 'https://identity.example.com' + +Please note: ``openstack.config`` will look for a file called ``clouds.yaml`` +in the following locations: + +* Current Directory +* ``~/.config/openstack`` +* ``/etc/openstack`` + +More information at https://developer.openstack.org/sdks/python/openstacksdk/users/config + +openstack.cloud +=============== + +Create a server using objects configured with the ``clouds.yaml`` file: + +.. code-block:: python + + import openstack.cloud + + # Initialize and turn on debug logging + openstack.cloud.simple_logging(debug=True) + + # Initialize cloud + # Cloud configs are read with openstack.config + cloud = openstack.openstack_cloud(cloud='mordred') + + # Upload an image to the cloud + image = cloud.create_image( + 'ubuntu-trusty', filename='ubuntu-trusty.qcow2', wait=True) + + # Find a flavor with at least 512M of RAM + flavor = cloud.get_flavor_by_ram(512) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. 
+ cloud.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) + +Links +===== + +* `Issue Tracker <https://bugs.launchpad.net/python-openstacksdk>`_ +* `Code Review <https://review.openstack.org/#/q/status:open+project:openstack/python-openstacksdk,n,z>`_ +* `Documentation <https://developer.openstack.org/sdks/python/openstacksdk/>`_ +* `PyPI <https://pypi.org/project/python-openstacksdk/>`_ +* `Mailing list <http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev>`_ diff --git a/bindep.txt b/bindep.txt new file mode 100644 index 000000000..e5d10a394 --- /dev/null +++ b/bindep.txt @@ -0,0 +1,8 @@ +# This is a cross-platform list tracking distribution packages needed by tests; +# see http://docs.openstack.org/infra/bindep/ for additional information. + +build-essential [platform:dpkg] +python-dev [platform:dpkg] +python-devel [platform:rpm] +libffi-dev [platform:dpkg] +libffi-devel [platform:rpm] diff --git a/devstack/plugin.sh b/devstack/plugin.sh new file mode 100644 index 000000000..4a710af2f --- /dev/null +++ b/devstack/plugin.sh @@ -0,0 +1,54 @@ +# Install and configure **openstacksdk** library in devstack +# +# To enable openstacksdk in devstack add an entry to local.conf that looks like +# +# [[local|localrc]] +# enable_plugin openstacksdk git://git.openstack.org/openstack/python-openstacksdk + +function preinstall_openstacksdk { + : +} + +function install_openstacksdk { + if use_library_from_git "python-openstacksdk"; then + # don't clone, it'll be done by the plugin install + setup_dev_lib "python-openstacksdk" + else + pip_install "python-openstacksdk" + fi +} + +function configure_openstacksdk { + : +} + +function initialize_openstacksdk { + : +} + +function unstack_openstacksdk { + : +} + +function clean_openstacksdk { + : +} + +# This is the main for plugin.sh +if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then + preinstall_openstacksdk +elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_openstacksdk +elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + configure_openstacksdk +elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + initialize_openstacksdk +fi + +if [[ "$1" == "unstack" ]]; then + unstack_openstacksdk +fi + +if [[ "$1" == "clean" ]]; then + clean_openstacksdk +fi diff --git a/doc/source/conf.py b/doc/source/conf.py index 6c4cbcf16..a57f6fc87 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -19,18 +19,28 @@ import openstackdocstheme sys.path.insert(0, os.path.abspath('../..')) sys.path.insert(0, os.path.abspath('.')) + # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', + 'openstackdocstheme', 'enforcer' ] +# openstackdocstheme options +repository_name = 'openstack/python-openstacksdk' +bug_project = '760' +bug_tag = '' +html_last_updated_fmt = '%Y-%m-%d %H:%M' +html_theme = 'openstackdocs' + +# TODO(shade) Set this to true once the build-openstack-sphinx-docs job is +# updated to use sphinx-build. # When True, this will raise an exception that kills sphinx-build. -enforcer_warnings_as_errors = True +enforcer_warnings_as_errors = False # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. @@ -47,18 +57,7 @@ master_doc = 'index' # General information about the project. project = u'python-openstacksdk' -copyright = u'2015, OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# "version" and "release" are used by the "log-a-bug" feature -# -# The short X.Y version. 
-version = '1.0' -# The full version, including alpha/beta/rc tags. -release = '1.0' +copyright = u'2017, Various members of the OpenStack Foundation' # A few variables have to be set for the log-a-bug feature. # giturl: The location of conf.py on Git. Must be set manually. @@ -101,13 +100,6 @@ exclude_patterns = [] # -- Options for HTML output ---------------------------------------------- -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [openstackdocstheme.get_html_theme_path()] - # Don't let openstackdocstheme insert TOCs automatically. theme_include_auto_toc = False @@ -124,9 +116,5 @@ latex_documents = [ u'OpenStack Foundation', 'manual'), ] -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/3/': None, - 'http://docs.python-requests.org/en/master/': None} - # Include both the class and __init__ docstrings when describing the class autoclass_content = "both" diff --git a/doc/source/contributors/clouds.yaml b/doc/source/contributor/clouds.yaml similarity index 100% rename from doc/source/contributors/clouds.yaml rename to doc/source/contributor/clouds.yaml diff --git a/doc/source/contributor/coding.rst b/doc/source/contributor/coding.rst new file mode 100644 index 000000000..1cb45b9cc --- /dev/null +++ b/doc/source/contributor/coding.rst @@ -0,0 +1,114 @@ +======================================== +OpenStack SDK Developer Coding Standards +======================================== + +In the beginning, there were no guidelines. And it was good. But that +didn't last long. As more and more people added more and more code, +we realized that we needed a set of coding standards to make sure that +the openstacksdk API at least *attempted* to display some form of consistency. + +Thus, these coding standards/guidelines were developed. Note that not +all of openstacksdk adheres to these standards just yet. Some older code has +not been updated because we need to maintain backward compatibility. +Some of it just hasn't been changed yet. But be clear, all new code +*must* adhere to these guidelines. + +Below are the patterns that we expect openstacksdk developers to follow. + +Release Notes +============= + +openstacksdk uses `reno <http://docs.openstack.org/developer/reno/>`_ for +managing its release notes. A new release note should be added to +your contribution anytime you add new API calls, fix significant bugs, +add new functionality or parameters to existing API calls, or make any +other significant changes to the code base that we should draw attention +to for the user base. + +It is *not* necessary to add release notes for minor fixes, such as +correction of documentation typos, minor code cleanup or reorganization, +or any other change that a user would not notice through normal usage. + +Exceptions +========== + +Exceptions should NEVER be wrapped and re-raised inside of a new exception. +This removes important debug information from the user. All of the exceptions +should be raised correctly the first time. + +openstack.cloud API Methods +=========================== + +The `openstack.cloud` layer has some specific rules: + +- When an API call acts on a resource that has both a unique ID and a + name, that API call should accept either identifier with a name_or_id + parameter. 
+ +- All resources should adhere to the get/list/search interface that + control retrieval of those resources. E.g., `get_image()`, `list_images()`, + `search_images()`. + +- Resources should have `create_RESOURCE()`, `delete_RESOURCE()`, + `update_RESOURCE()` API methods (as it makes sense). + +- For those methods that should behave differently for omitted or None-valued + parameters, use the `_utils.valid_kwargs` decorator. Notably: all Neutron + `update_*` functions. + +- Deleting a resource should return True if the delete succeeded, or False + if the resource was not found. + +Returned Resources +------------------ + +Complex objects returned to the caller must be a `munch.Munch` type. The +`openstack.cloud._adapter.Adapter` class makes resources into `munch.Munch`. + +All objects should be normalized. It is shade's purpose in life to make +OpenStack consistent for end users, and this means not trusting the clouds +to return consistent objects. There should be a normalize function in +`openstack/cloud/_normalize.py` that is applied to objects before returning +them to the user. See :doc:`../user/model` for further details on object model +requirements. + +Fields should not be in the normalization contract if we cannot commit to +providing them to all users. + +Fields should be renamed in normalization to be consistent with +the rest of `openstack.cloud`. For instance, nothing in `openstack.cloud` +exposes the legacy OpenStack concept of "tenant" to a user, but instead uses +"project" even if the cloud in question uses tenant. + +Nova vs. Neutron +---------------- + +- Recognize that not all cloud providers support Neutron, so never + assume it will be present. If a task can be handled by either + Neutron or Nova, code it to be handled by either. + +- For methods that accept either a Nova pool or Neutron network, the + parameter should just refer to the network, but documentation of it + should explain about the pool. See: `create_floating_ip()` and + `available_floating_ip()` methods. + +Tests +===== + +- New API methods *must* have unit tests! + +- New unit tests should only mock at the REST layer using `requests_mock`. + Any mocking of openstacksdk itself should be considered legacy and to be + avoided. Exceptions to this rule can be made when attempting to test the + internals of a logical shim where the inputs and output of the method aren't + actually impacted by remote content. + +- Functional tests should be added, when possible. + +- In functional tests, always use unique names (for resources that have this + attribute) and use it for clean up (see next point). + +- In functional tests, always define cleanup functions to delete data added + by your test, should something go wrong. Data removal should be wrapped in + a try except block and try to delete as many entries added by the test as + possible. diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst new file mode 100644 index 000000000..b1cd2f37d --- /dev/null +++ b/doc/source/contributor/contributing.rst @@ -0,0 +1 @@ +.. 
include:: ../../../CONTRIBUTING.rst diff --git a/doc/source/contributors/create/examples/resource/fake.py b/doc/source/contributor/create/examples/resource/fake.py similarity index 100% rename from doc/source/contributors/create/examples/resource/fake.py rename to doc/source/contributor/create/examples/resource/fake.py diff --git a/doc/source/contributors/create/examples/resource/fake_service.py b/doc/source/contributor/create/examples/resource/fake_service.py similarity index 100% rename from doc/source/contributors/create/examples/resource/fake_service.py rename to doc/source/contributor/create/examples/resource/fake_service.py diff --git a/doc/source/contributors/create/resource.rst b/doc/source/contributor/create/resource.rst similarity index 100% rename from doc/source/contributors/create/resource.rst rename to doc/source/contributor/create/resource.rst diff --git a/doc/source/contributors/index.rst b/doc/source/contributor/index.rst similarity index 89% rename from doc/source/contributors/index.rst rename to doc/source/contributor/index.rst index 67c5b261f..d9e5ed723 100644 --- a/doc/source/contributors/index.rst +++ b/doc/source/contributor/index.rst @@ -13,6 +13,14 @@ software development kit for the programs which make up the OpenStack community. It is a set of Python-based libraries, documentation, examples, and tools released under the Apache 2 license. +Contribution Mechanics +---------------------- + +.. toctree:: + :maxdepth: 2 + + contributing + Contacting the Developers ------------------------- @@ -33,6 +41,17 @@ mailing list fields questions of all types on OpenStack. Using the ``[python-openstacksdk]`` filter to begin your email subject will ensure that the message gets to SDK developers. +Coding Standards +---------------- + +We are a bit stricter than usual in the coding standards department. It's a +good idea to read through the :doc:`coding <coding>` section. + +.. toctree:: + :maxdepth: 2 + + coding + Development Environment ----------------------- diff --git a/doc/source/contributors/layout.rst b/doc/source/contributor/layout.rst similarity index 100% rename from doc/source/contributors/layout.rst rename to doc/source/contributor/layout.rst diff --git a/doc/source/contributors/layout.txt b/doc/source/contributor/layout.txt similarity index 100% rename from doc/source/contributors/layout.txt rename to doc/source/contributor/layout.txt diff --git a/doc/source/contributors/local.conf b/doc/source/contributor/local.conf similarity index 100% rename from doc/source/contributors/local.conf rename to doc/source/contributor/local.conf diff --git a/doc/source/contributors/setup.rst b/doc/source/contributor/setup.rst similarity index 100% rename from doc/source/contributors/setup.rst rename to doc/source/contributor/setup.rst diff --git a/doc/source/contributors/testing.rst b/doc/source/contributor/testing.rst similarity index 100% rename from doc/source/contributors/testing.rst rename to doc/source/contributor/testing.rst diff --git a/doc/source/enforcer.py b/doc/source/enforcer.py index 40740e033..04bea4335 100644 --- a/doc/source/enforcer.py +++ b/doc/source/enforcer.py @@ -123,8 +123,11 @@ def build_finished(app, exception): app.info("ENFORCER: Found %d missing proxy methods " "in the output" % missing_count) - for name in sorted(missing): - app.warn("ENFORCER: %s was not included in the output" % name) + # TODO(shade) Remove the if DEBUG once the build-openstack-sphinx-docs + # has been updated to use sphinx-build. 
+ if DEBUG: + for name in sorted(missing): + app.info("ENFORCER: %s was not included in the output" % name) if app.config.enforcer_warnings_as_errors and missing_count > 0: raise EnforcementError( diff --git a/doc/source/history.rst b/doc/source/history.rst deleted file mode 100644 index 69ed4fe6c..000000000 --- a/doc/source/history.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../ChangeLog diff --git a/doc/source/index.rst b/doc/source/index.rst index e2c2bb526..df613dd65 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -4,7 +4,7 @@ Welcome to the OpenStack SDK! This documentation is split into two sections: one for :doc:`users <users/index>` looking to build applications which make use of OpenStack, and another for those looking to -:doc:`contribute <contributors/index>` to this project. +:doc:`contribute <contributor/index>` to this project. For Users --------- @@ -13,6 +13,10 @@ For Users :maxdepth: 2 users/index + install/index + user/index + +.. TODO(shade) merge users/index and user/index into user/index For Contributors ---------------- @@ -20,7 +24,9 @@ For Contributors .. toctree:: :maxdepth: 2 - contributors/index + contributor/index + +.. include:: ../../README.rst General Information ------------------- @@ -31,4 +37,4 @@ General information about the SDK including a glossary and release history. :maxdepth: 1 Glossary of Terms <glossary> - Release History <history> + Release Notes <releasenotes> diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst new file mode 100644 index 000000000..5b06c9812 --- /dev/null +++ b/doc/source/install/index.rst @@ -0,0 +1,12 @@ +============ +Installation +============ + +At the command line:: + + $ pip install python-openstacksdk + +Or, if you have virtualenv wrapper installed:: + + $ mkvirtualenv python-openstacksdk + $ pip install python-openstacksdk diff --git a/doc/source/releasenotes.rst b/doc/source/releasenotes.rst new file mode 100644 index 000000000..a61e4d3e3 --- /dev/null +++ b/doc/source/releasenotes.rst @@ -0,0 +1,6 @@ +============= +Release Notes +============= + +Release notes for `python-openstacksdk` can be found at +http://docs.openstack.org/releasenotes/python-openstacksdk/ diff --git a/doc/source/user/config/configuration.rst b/doc/source/user/config/configuration.rst new file mode 100644 index 000000000..df0b26659 --- /dev/null +++ b/doc/source/user/config/configuration.rst @@ -0,0 +1,303 @@ +=========================================== + Configuring os-client-config Applications +=========================================== + +Environment Variables +--------------------- + +`os-client-config` honors all of the normal `OS_*` variables. It does not +provide backwards compatibility to service-specific variables such as +`NOVA_USERNAME`. + +If you have OpenStack environment variables set, `os-client-config` will produce +a cloud config object named `envvars` containing your values from the +environment. If you don't like the name `envvars`, that's ok, you can override +it by setting `OS_CLOUD_NAME`. + +Service specific settings, like the nova service type, are set with the +default service type as a prefix. For instance, to set a special service_type +for trove, set + +.. code-block:: bash + + export OS_DATABASE_SERVICE_TYPE=rax:database + +Config Files +------------ + +`os-client-config` will look for a file called `clouds.yaml` in the following +locations: + +* Current Directory +* ~/.config/openstack +* /etc/openstack + +The first file found wins. 
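+
+If you are ever unsure which file was picked up, a quick sketch like the
+following (an illustrative check, not a packaged helper) will show what
+`os-client-config` actually discovered:
+
+.. code-block:: python
+
+    # Print the clouds found in the first clouds.yaml on the search path,
+    # plus the envvars/defaults clouds when applicable.
+    import openstack.config
+
+    config = openstack.config.OpenStackConfig()
+    for cloud in config.get_all_clouds():
+        print(cloud.name, cloud.region)
+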
+You can also set the environment variable `OS_CLIENT_CONFIG_FILE` to an +absolute path of a file to look for, and that location will be inserted at the +front of the file search list. + +The keys are all of the keys you'd expect from `OS_*` - except lower case +and without the OS prefix. So, region name is set with `region_name`. + +Service specific settings, like the nova service type, are set with the +default service type as a prefix. For instance, to set a special service_type +for trove (because you're using Rackspace), set: + +.. code-block:: yaml + + database_service_type: 'rax:database' + + +Site Specific File Locations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In addition to `~/.config/openstack` and `/etc/openstack` - some platforms +have other locations they like to put things. `os-client-config` will also +look in an OS specific config dir: + +* `USER_CONFIG_DIR` +* `SITE_CONFIG_DIR` + +`USER_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `~/.config/openstack` +* OSX: `~/Library/Application Support/openstack` +* Windows: `C:\\Users\\USERNAME\\AppData\\Local\\OpenStack\\openstack` + +`SITE_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `/etc/openstack` +* OSX: `/Library/Application Support/openstack` +* Windows: `C:\\ProgramData\\OpenStack\\openstack` + +An example config file is probably helpful: + +.. code-block:: yaml + + clouds: + mtvexx: + profile: vexxhost + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mordred@inaugust.com + region_name: ca-ymq-1 + dns_api_version: 1 + mordred: + region_name: RegionOne + auth: + username: 'mordred' + password: XXXXXXX + project_name: 'shade' + auth_url: 'https://montytaylor-sjc.openstack.blueboxgrid.com:5001/v2.0' + infra: + profile: rackspace + auth: + username: openstackci + password: XXXXXXXX + project_id: 610275 + regions: + - DFW + - ORD + - IAD + +You may note a few things. First, since `auth_url` settings are silly +and embarrassingly ugly, known cloud vendor profile information is included and +may be referenced by name. One of the benefits of that is that `auth_url` +isn't the only thing the vendor defaults contain. For instance, since +Rackspace lists `rax:database` as the service type for trove, `os-client-config` +knows that so that you don't have to. In case the cloud vendor profile is not +available, you can provide one called `clouds-public.yaml`, following the same +location rules previously mentioned for the config files. + +`regions` can be a list of regions. When you call `get_all_clouds`, +you'll get a cloud config object for each cloud/region combo. + +As seen with `dns_api_version`, any setting that makes sense to be per-service, +like `service_type` or `endpoint` or `api_version`, can be set by prefixing +the setting with the default service type. That might strike you funny when +setting `service_type` and it does me too - but that's just the world we live +in. + +Auth Settings +------------- + +Keystone has auth plugins - which means it's not possible to know ahead of time +which auth settings are needed. `os-client-config` sets the default plugin type +to `password`, which is how things worked before plugins came about. In +order to facilitate validation of values, all of the parameters that exist +as a result of a chosen plugin need to go into the auth dict. For password +auth, this includes `auth_url`, `username` and `password` as well as anything +related to domains, projects and trusts. 
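+
+For example, a minimal password-auth entry (all values here are illustrative
+placeholders) keeps every plugin-derived setting inside the auth dict:
+
+.. code-block:: yaml
+
+    clouds:
+      example:
+        auth_type: password
+        auth:
+          auth_url: 'https://identity.example.com/v3'
+          username: 'demo'
+          password: XXXXXXX
+          project_name: 'demo'
+          user_domain_name: 'Default'
+          project_domain_name: 'Default'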
+ +Splitting Secrets +----------------- + +In some scenarios, such as configuration management controlled environments, +it might be easier to have secrets in one file and non-secrets in another. +This is fully supported via an optional file `secure.yaml` which follows all +the same location rules as `clouds.yaml`. It can contain anything you put +in `clouds.yaml` and will take precedence over anything in the `clouds.yaml` +file. + +.. code-block:: yaml + + # clouds.yaml + clouds: + internap: + profile: internap + auth: + username: api-55f9a00fb2619 + project_name: inap-17037 + regions: + - ams01 + - nyj01 + # secure.yaml + clouds: + internap: + auth: + password: XXXXXXXXXXXXXXXXX + +SSL Settings +------------ + +When the access to a cloud is done via a secure connection, `os-client-config` +will always verify the SSL cert by default. This can be disabled by setting +`verify` to `False`. In case the cert is signed by an unknown CA, a specific +cacert can be provided via `cacert`. **WARNING:** `verify` will always have +precedence over `cacert`, so when setting a CA cert but disabling `verify`, the +cloud cert will never be validated. + +Client certs are also configurable. `cert` will be the client cert file +location. In case the cert key is not included within the client cert file, +its file location needs to be set via `key`. + +.. code-block:: yaml + + # clouds.yaml + clouds: + secure: + auth: ... + key: /home/myhome/client-cert.key + cert: /home/myhome/client-cert.crt + cacert: /home/myhome/ca.crt + insecure: + auth: ... + verify: False + +Cache Settings +-------------- + +Accessing a cloud is often expensive, so it's quite common to want to do some +client-side caching of those operations. To facilitate that, `os-client-config` +understands passing through cache settings to dogpile.cache, with the following +behaviors: + +* Listing no config settings means you get a null cache. +* `cache.expiration_time` and nothing else gets you memory cache. +* Otherwise, `cache.class` and `cache.arguments` are passed in. + +Different cloud behaviors are also differently expensive to deal with. If you +want to get really crazy and tweak stuff, you can specify different expiration +times on a per-resource basis by passing values, in seconds, to an expiration +mapping keyed on the singular name of the resource. A value of `-1` indicates +that the resource should never expire. + +`os-client-config` does not actually cache anything itself, but it collects +and presents the cache information so that your various applications that +are connecting to OpenStack can share a cache should you desire. + +.. code-block:: yaml + + cache: + class: dogpile.cache.pylibmc + expiration_time: 3600 + arguments: + url: + - 127.0.0.1 + expiration: + server: 5 + flavor: -1 + clouds: + mtvexx: + profile: vexxhost + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mordred@inaugust.com + region_name: ca-ymq-1 + dns_api_version: 1 + + +IPv6 +---- + +IPv6 is the future, and you should always use it if your cloud supports it and +if your local network supports it. Both of those are easily detectable and all +friendly software should do the right thing. However, sometimes you might +exist in a location where you have an IPv6 stack, but something evil has +caused it to not actually function. In that case, there is a config option +you can set to unbreak you: `force_ipv4`, or the `OS_FORCE_IPV4` boolean +environment variable. + +.. code-block:: yaml
+ + client: + force_ipv4: true + clouds: + mtvexx: + profile: vexxhost + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mordred@inaugust.com + region_name: ca-ymq-1 + dns_api_version: 1 + monty: + profile: rax + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mordred@inaugust.com + region_name: DFW + +The above snippet will tell client programs to prefer returning an IPv4 +address. + +Per-region settings +------------------- + +Sometimes you have a cloud provider that has config that is common to the +cloud, but also with some things you might want to express on a per-region +basis. For instance, Internap provides a public and private network specific +to the user in each region, and putting the values of those networks into +config can make consuming programs more efficient. + +To support this, the region list can actually be a list of dicts, and any +setting that can be set at the cloud level can be overridden for that +region. + +.. code-block:: yaml + + clouds: + internap: + profile: internap + auth: + password: XXXXXXXXXXXXXXXXX + username: api-55f9a00fb2619 + project_name: inap-17037 + regions: + - name: ams01 + values: + networks: + - name: inap-17037-WAN1654 + routes_externally: true + - name: inap-17037-LAN6745 + - name: nyj01 + values: + networks: + - name: inap-17037-WAN1654 + routes_externally: true + - name: inap-17037-LAN6745 diff --git a/doc/source/user/config/index.rst b/doc/source/user/config/index.rst new file mode 100644 index 000000000..d09b28351 --- /dev/null +++ b/doc/source/user/config/index.rst @@ -0,0 +1,12 @@ +======================== + Using os-client-config +======================== + +.. toctree:: + :maxdepth: 2 + + configuration + using + vendor-support + network-config + reference diff --git a/doc/source/user/config/network-config.rst b/doc/source/user/config/network-config.rst new file mode 100644 index 000000000..09571804e --- /dev/null +++ b/doc/source/user/config/network-config.rst @@ -0,0 +1,60 @@ +============== +Network Config +============== + +There are several different qualities that networks in OpenStack might have +that cannot always be automatically inferred from the available +metadata. To help users navigate more complex setups, `os-client-config` +allows configuring a list of network metadata. + +.. code-block:: yaml + + clouds: + amazing: + networks: + - name: blue + routes_externally: true + - name: purple + routes_externally: true + default_interface: true + - name: green + routes_externally: false + - name: yellow + routes_externally: false + nat_destination: true + - name: chartreuse + routes_externally: false + routes_ipv6_externally: true + - name: aubergine + routes_ipv4_externally: false + routes_ipv6_externally: true + +Every entry must have a name field, which can hold either the name or the id +of the network. + +`routes_externally` is a boolean field that labels the network as handling +north/south traffic off of the cloud. In a public cloud this might be thought +of as the "public" network, but in private clouds it's possible it might +be an RFC1918 address. In either case, it provides IPs to servers that +things not on the cloud can use. This value defaults to `false`, which +indicates only servers on the same network can talk to it. + +`routes_ipv4_externally` and `routes_ipv6_externally` are boolean fields to +help handle `routes_externally` in the case where a network has a split stack +with different values for IPv4 and IPv6. 
Either entry, if not given, defaults +to the value of `routes_externally`. + +`default_interface` is a boolean field that indicates that the network is the +one that programs should use. It defaults to false. An example of needing to +use this value is a cloud with two private networks, and where a user is +running Ansible in one of the servers to talk to other servers on the private +network. Because both networks are private, there would otherwise be no way +to determine which one should be used for the traffic. There can only be one +`default_interface` per cloud. + +`nat_destination` is a boolean field that indicates which network floating +ips should be attached to. It defaults to false. Normally this can be inferred +by looking for a network that has subnets that have a gateway_ip. But it's +possible to have more than one network that satisfies that condition, so the +user might want to tell programs which one to pick. There can be only one +`nat_destination` per cloud. diff --git a/doc/source/user/config/reference.rst b/doc/source/user/config/reference.rst new file mode 100644 index 000000000..1fc509baa --- /dev/null +++ b/doc/source/user/config/reference.rst @@ -0,0 +1,10 @@ +============= +API Reference +============= + +.. module:: openstack.config + :synopsis: OpenStack client configuration + +.. autoclass:: openstack.config.OpenStackConfig + :members: + :inherited-members: diff --git a/doc/source/user/config/using.rst b/doc/source/user/config/using.rst new file mode 100644 index 000000000..b21356329 --- /dev/null +++ b/doc/source/user/config/using.rst @@ -0,0 +1,141 @@ +======================================== +Using openstack.config in an Application +======================================== + +Usage +----- + +The simplest and least useful thing you can do is: + +.. code-block:: python + + python -m openstack.config.loader + +This will print out whatever it finds for your config. If you want to use +it from python, which is much more likely what you want to do, you can do +things like: + +Get a named cloud. + +.. code-block:: python + + import openstack.config + + cloud_config = openstack.config.OpenStackConfig().get_one_cloud( + 'internap', region_name='ams01') + print(cloud_config.name, cloud_config.region, cloud_config.config) + +Or, get all of the clouds. + +.. code-block:: python + + import openstack.config + + cloud_config = openstack.config.OpenStackConfig().get_all_clouds() + for cloud in cloud_config: + print(cloud.name, cloud.region, cloud.config) + +argparse +-------- + +If you're using `openstack.config` from a program that wants to process +command line options, there is a registration function to register the +arguments that both `openstack.config` and keystoneauth know how to deal +with - as well as a consumption argument. + +.. code-block:: python + + import argparse + import sys + + import openstack.config + + cloud_config = openstack.config.OpenStackConfig() + parser = argparse.ArgumentParser() + cloud_config.register_argparse_arguments(parser, sys.argv) + + options = parser.parse_args() + + cloud = cloud_config.get_one_cloud(argparse=options) + +Constructing a Connection object +-------------------------------- + +If what you want to do is get an `openstack.connection.Connection` and you +want it to do all the normal things related to clouds.yaml and `OS_` +environment variables, a helper function is provided. The following will get +you a fully configured `openstacksdk` instance. + +.. code-block:: python
+ + import openstack.config + + conn = openstack.config.make_connection() + +If you want to do the same thing but on a named cloud: + +.. code-block:: python + + import openstack.config + + conn = openstack.config.make_connection(cloud='mtvexx') + +If you want to do the same thing but also support command line parsing: + +.. code-block:: python + + import argparse + + import openstack.config + + conn = openstack.config.make_connection(options=argparse.ArgumentParser()) + +Constructing cloud objects +-------------------------- + +If what you want to do is get an +`openstack.cloud.openstackcloud.OpenStackCloud` object, a +helper function that honors clouds.yaml and `OS_` environment variables is +provided. The following will get you a fully configured `OpenStackCloud` +instance. + +.. code-block:: python + + import openstack.config + + cloud = openstack.config.make_cloud() + +If you want to do the same thing but on a named cloud: + +.. code-block:: python + + import openstack.config + + cloud = openstack.config.make_cloud(cloud='mtvexx') + +If you want to do the same thing but also support command line parsing: + +.. code-block:: python + + import argparse + + import openstack.config + + cloud = openstack.config.make_cloud(options=argparse.ArgumentParser()) + +Constructing REST API Clients +----------------------------- + +What if you want to make direct REST calls via a Session interface? You're +in luck. A similar interface is available as with `openstacksdk` and `shade`. +The main difference is that you need to specify which service you want to +talk to, and `make_rest_client` will return you a keystoneauth Session object +that is mounted on the endpoint for the service you're looking for. + +.. code-block:: python + + import openstack.config + + session = openstack.config.make_rest_client('compute', cloud='vexxhost') + + response = session.get('/servers') + server_list = response.json()['servers'] diff --git a/doc/source/user/config/vendor-support.rst b/doc/source/user/config/vendor-support.rst new file mode 100644 index 000000000..449fc5af8 --- /dev/null +++ b/doc/source/user/config/vendor-support.rst @@ -0,0 +1,337 @@ +============== +Vendor Support +============== + +OpenStack presents deployers with many options, some of which can expose +differences to end users. `os-client-config` tries its best to collect +information about various things a user would need to know. The following +is a text representation of the vendor related defaults `os-client-config` +knows about. + +Default Values +-------------- + +These are the default behaviors unless a cloud is configured differently. 
+ +* Identity uses `password` authentication +* Identity API Version is 2 +* Image API Version is 2 +* Volume API Version is 2 +* Images must be in `qcow2` format +* Images are uploaded using PUT interface +* Public IPv4 is directly routable via DHCP from Neutron +* IPv6 is not provided +* Floating IPs are not required +* Floating IPs are provided by Neutron +* Security groups are provided by Neutron +* Vendor specific agents are not used + +auro +---- + +https://api.auro.io:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +van1 Vancouver, BC +============== ================ + +* Public IPv4 is provided via NAT with Neutron Floating IP + +catalyst +-------- + +https://api.cloud.catalyst.net.nz:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +nz-por-1 Porirua, NZ +nz_wlg_2 Wellington, NZ +============== ================ + +* Image API Version is 1 +* Images must be in `raw` format +* Volume API Version is 1 + +citycloud +--------- + +https://identity1.citycloud.com:5000/v3/ + +============== ================ +Region Name Location +============== ================ +Buf1 Buffalo, NY +Fra1 Frankfurt, DE +Kna1 Karlskrona, SE +La1 Los Angeles, CA +Lon1 London, UK +Sto2 Stockholm, SE +============== ================ + +* Identity API Version is 3 +* Public IPv4 is provided via NAT with Neutron Floating IP +* Volume API Version is 1 + +conoha +------ + +https://identity.%(region_name)s.conoha.io + +============== ================ +Region Name Location +============== ================ +tyo1 Tokyo, JP +sin1 Singapore +sjc1 San Jose, CA +============== ================ + +* Image upload is not supported + +datacentred +----------- + +https://compute.datacentred.io:5000 + +============== ================ +Region Name Location +============== ================ +sal01 Manchester, UK +============== ================ + +* Image API Version is 1 + +dreamcompute +------------ + +https://iad2.dream.io:5000 + +============== ================ +Region Name Location +============== ================ +RegionOne Ashburn, VA +============== ================ + +* Identity API Version is 3 +* Images must be in `raw` format +* IPv6 is provided to every server + +dreamhost +--------- + +Deprecated, please use dreamcompute + +https://keystone.dream.io/v2.0 + +============== ================ +Region Name Location +============== ================ +RegionOne Ashburn, VA +============== ================ + +* Images must be in `raw` format +* Public IPv4 is provided via NAT with Neutron Floating IP +* IPv6 is provided to every server + +otc +--- + +https://iam.%(region_name)s.otc.t-systems.com/v3 + +============== ================ +Region Name Location +============== ================ +eu-de Germany +============== ================ + +* Identity API Version is 3 +* Images must be in `vhd` format +* Public IPv4 is provided via NAT with Neutron Floating IP + +elastx +------ + +https://ops.elastx.net:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +regionOne Stockholm, SE +============== ================ + +* Public IPv4 is provided via NAT with Neutron Floating IP + +entercloudsuite +--------------- + +https://api.entercloudsuite.com/v2.0 + +============== ================ +Region Name Location +============== ================ +nl-ams1 Amsterdam, NL +it-mil1 Milan, IT +de-fra1 Frankfurt, DE +============== ================ + +* Image API Version is 1 +* Volume API Version is 1 
+fuga +---- + +https://identity.api.fuga.io:5000 + +============== ================ +Region Name Location +============== ================ +cystack Netherlands +============== ================ + +* Identity API Version is 3 +* Volume API Version is 3 + +internap +-------- + +https://identity.api.cloud.iweb.com/v2.0 + +============== ================ +Region Name Location +============== ================ +ams01 Amsterdam, NL +da01 Dallas, TX +nyj01 New York, NY +sin01 Singapore +sjc01 San Jose, CA +============== ================ + +* Floating IPs are not supported + +ovh +--- + +https://auth.cloud.ovh.net/v2.0 + +============== ================ +Region Name Location +============== ================ +BHS1 Beauharnois, QC +SBG1 Strasbourg, FR +GRA1 Gravelines, FR +============== ================ + +* Images may be in `raw` format. The `qcow2` default is also supported +* Floating IPs are not supported + +rackspace +--------- + +https://identity.api.rackspacecloud.com/v2.0/ + +============== ================ +Region Name Location +============== ================ +DFW Dallas, TX +HKG Hong Kong +IAD Washington, D.C. +LON London, UK +ORD Chicago, IL +SYD Sydney, NSW +============== ================ + +* Database Service Type is `rax:database` +* Compute Service Name is `cloudServersOpenStack` +* Images must be in `vhd` format +* Images must be uploaded using the Glance Task Interface +* Floating IPs are not supported +* Public IPv4 is directly routable via static config by Nova +* IPv6 is provided to every server +* Security groups are not supported +* Uploaded Images need properties to not use vendor agent:: + :vm_mode: hvm + :xenapi_use_agent: False +* Volume API Version is 1 +* While passwords are recommended for use, API keys do work as well. + The `rackspaceauth` python package must be installed, and then the following + can be added to clouds.yaml:: + + auth: + username: myusername + api_key: myapikey + auth_type: rackspace_apikey + +switchengines +------------- + +https://keystone.cloud.switch.ch:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +LS Lausanne, CH +ZH Zurich, CH +============== ================ + +* Images must be in `raw` format +* Images must be uploaded using the Glance Task Interface +* Volume API Version is 1 + +ultimum +------- + +https://console.ultimum-cloud.com:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +RegionOne Prague, CZ +============== ================ + +* Volume API Version is 1 + +unitedstack +----------- + +https://identity.api.ustack.com/v3 + +============== ================ +Region Name Location +============== ================ +bj1 Beijing, CN +gd1 Guangdong, CN +============== ================ + +* Identity API Version is 3 +* Images must be in `raw` format +* Volume API Version is 1 + +vexxhost +-------- + +http://auth.vexxhost.net + +============== ================ +Region Name Location +============== ================ +ca-ymq-1 Montreal, QC +============== ================ + +* DNS API Version is 1 +* Identity API Version is 3 + +zetta +----- + +https://identity.api.zetta.io/v3 + +============== ================ +Region Name Location +============== ================ +no-osl1 Oslo, NO +============== ================ + +* DNS API Version is 2 +* Identity API Version is 3 diff --git a/doc/source/user/examples/cleanup-servers.py b/doc/source/user/examples/cleanup-servers.py new file mode 100644 index 000000000..628c8657f --- /dev/null +++ 
b/doc/source/user/examples/cleanup-servers.py @@ -0,0 +1,13 @@ +import openstack.cloud + +# Initialize and turn on debug logging +openstack.cloud.simple_logging(debug=True) + +for cloud_name, region_name in [ + ('my-vexxhost', 'ca-ymq-1'), + ('my-citycloud', 'Buf1'), + ('my-internap', 'ams01')]: + # Initialize cloud + cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name) + for server in cloud.search_servers('my-server'): + cloud.delete_server(server, wait=True, delete_ips=True) diff --git a/doc/source/user/examples/create-server-dict.py b/doc/source/user/examples/create-server-dict.py new file mode 100644 index 000000000..3f9fc8223 --- /dev/null +++ b/doc/source/user/examples/create-server-dict.py @@ -0,0 +1,22 @@ +import openstack.cloud + +# Initialize and turn on debug logging +openstack.cloud.simple_logging(debug=True) + +for cloud_name, region_name, image, flavor_id in [ + ('my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]', + '5cf64088-893b-46b5-9bb1-ee020277635d'), + ('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', + '0dab10b5-42a2-438e-be7b-505741a7ffcc'), + ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', + 'A1.4')]: + # Initialize cloud + cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. + server = cloud.create_server( + 'my-server', image=image, flavor=dict(id=flavor_id), + wait=True, auto_ip=True) + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) diff --git a/doc/source/user/examples/create-server-name-or-id.py b/doc/source/user/examples/create-server-name-or-id.py new file mode 100644 index 000000000..16c011aa1 --- /dev/null +++ b/doc/source/user/examples/create-server-name-or-id.py @@ -0,0 +1,25 @@ +import openstack.cloud + +# Initialize and turn on debug logging +openstack.cloud.simple_logging(debug=True) + +for cloud_name, region_name, image, flavor in [ + ('my-vexxhost', 'ca-ymq-1', + 'Ubuntu 16.04.1 LTS [2017-03-03]', 'v1-standard-4'), + ('my-citycloud', 'Buf1', + 'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'), + ('my-internap', 'ams01', + 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4')]: + # Initialize cloud + cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name) + cloud.delete_server('my-server', wait=True, delete_ips=True) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. 
+ server = cloud.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) + print(server.name) + print(server['name']) + cloud.pprint(server) + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) diff --git a/doc/source/user/examples/debug-logging.py b/doc/source/user/examples/debug-logging.py new file mode 100644 index 000000000..c0d91e125 --- /dev/null +++ b/doc/source/user/examples/debug-logging.py @@ -0,0 +1,6 @@ +import openstack.cloud +openstack.cloud.simple_logging(debug=True) + +cloud = openstack.openstack_cloud( + cloud='my-vexxhost', region_name='ca-ymq-1') +cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') diff --git a/doc/source/user/examples/find-an-image.py b/doc/source/user/examples/find-an-image.py new file mode 100644 index 000000000..74c666a60 --- /dev/null +++ b/doc/source/user/examples/find-an-image.py @@ -0,0 +1,7 @@ +import openstack.cloud +openstack.cloud.simple_logging() + +cloud = openstack.openstack_cloud(cloud='fuga', region_name='cystack') +cloud.pprint([ + image for image in cloud.list_images() + if 'ubuntu' in image.name.lower()]) diff --git a/doc/source/user/examples/http-debug-logging.py b/doc/source/user/examples/http-debug-logging.py new file mode 100644 index 000000000..eff9d7572 --- /dev/null +++ b/doc/source/user/examples/http-debug-logging.py @@ -0,0 +1,6 @@ +import openstack.cloud +openstack.cloud.simple_logging(http_debug=True) + +cloud = openstack.openstack_cloud( + cloud='my-vexxhost', region_name='ca-ymq-1') +cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') diff --git a/doc/source/user/examples/munch-dict-object.py b/doc/source/user/examples/munch-dict-object.py new file mode 100644 index 000000000..bfde7b41b --- /dev/null +++ b/doc/source/user/examples/munch-dict-object.py @@ -0,0 +1,7 @@ +import openstack.cloud +openstack.cloud.simple_logging(debug=True) + +cloud = openstack.openstack_cloud(cloud='ovh', region_name='SBG1') +image = cloud.get_image('Ubuntu 16.10') +print(image.name) +print(image['name']) diff --git a/doc/source/user/examples/normalization.py b/doc/source/user/examples/normalization.py new file mode 100644 index 000000000..22b9b0f26 --- /dev/null +++ b/doc/source/user/examples/normalization.py @@ -0,0 +1,7 @@ +import openstack.cloud +openstack.cloud.simple_logging() + +cloud = openstack.openstack_cloud(cloud='fuga', region_name='cystack') +image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') +cloud.pprint(image) diff --git a/doc/source/user/examples/server-information.py b/doc/source/user/examples/server-information.py new file mode 100644 index 000000000..5d9599d06 --- /dev/null +++ b/doc/source/user/examples/server-information.py @@ -0,0 +1,23 @@ +import openstack.cloud +openstack.cloud.simple_logging(debug=True) + +cloud = openstack.openstack_cloud(cloud='my-citycloud', region_name='Buf1') +server = None +try: + server = cloud.create_server( + 'my-server', image='Ubuntu 16.04 Xenial Xerus', + flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'), + wait=True, auto_ip=True) + + print("\n\nFull Server\n\n") + cloud.pprint(server) + + print("\n\nTurn Detailed Off\n\n") + cloud.pprint(cloud.get_server('my-server', detailed=False)) + + print("\n\nBare Server\n\n") + cloud.pprint(cloud.get_server('my-server', bare=True)) + +finally: + # Delete it - this is a demo; guard against create_server having failed + if server: + cloud.delete_server(server, wait=True, delete_ips=True) + diff --git a/doc/source/user/examples/service-conditional-overrides.py 
b/doc/source/user/examples/service-conditional-overrides.py new file mode 100644 index 000000000..845e7ab7c --- /dev/null +++ b/doc/source/user/examples/service-conditional-overrides.py @@ -0,0 +1,5 @@ +import openstack.cloud +openstack.cloud.simple_logging(debug=True) + +cloud = openstack.openstack_cloud(cloud='rax', region_name='DFW') +print(cloud.has_service('network')) diff --git a/doc/source/user/examples/service-conditionals.py b/doc/source/user/examples/service-conditionals.py new file mode 100644 index 000000000..9bb198cbc --- /dev/null +++ b/doc/source/user/examples/service-conditionals.py @@ -0,0 +1,6 @@ +import openstack.cloud +openstack.cloud.simple_logging(debug=True) + +cloud = openstack.openstack_cloud(cloud='kiss', region_name='region1') +print(cloud.has_service('network')) +print(cloud.has_service('container-orchestration')) diff --git a/doc/source/user/examples/strict-mode.py b/doc/source/user/examples/strict-mode.py new file mode 100644 index 000000000..251547160 --- /dev/null +++ b/doc/source/user/examples/strict-mode.py @@ -0,0 +1,8 @@ +import openstack.cloud +openstack.cloud.simple_logging() + +cloud = openstack.openstack_cloud( + cloud='fuga', region_name='cystack', strict=True) +image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') +cloud.pprint(image) diff --git a/doc/source/user/examples/upload-large-object.py b/doc/source/user/examples/upload-large-object.py new file mode 100644 index 000000000..6b5c392a8 --- /dev/null +++ b/doc/source/user/examples/upload-large-object.py @@ -0,0 +1,10 @@ +import openstack.cloud +openstack.cloud.simple_logging(debug=True) + +cloud = openstack.openstack_cloud(cloud='ovh', region_name='SBG1') +cloud.create_object( + container='my-container', name='my-object', + filename='/home/mordred/briarcliff.sh3d', + segment_size=1000000) +cloud.delete_object('my-container', 'my-object') +cloud.delete_container('my-container') diff --git a/doc/source/user/examples/upload-object.py b/doc/source/user/examples/upload-object.py new file mode 100644 index 000000000..6b5c392a8 --- /dev/null +++ b/doc/source/user/examples/upload-object.py @@ -0,0 +1,10 @@ +import openstack.cloud +openstack.cloud.simple_logging(debug=True) + +cloud = openstack.openstack_cloud(cloud='ovh', region_name='SBG1') +cloud.create_object( + container='my-container', name='my-object', + filename='/home/mordred/briarcliff.sh3d', + segment_size=1000000) +cloud.delete_object('my-container', 'my-object') +cloud.delete_container('my-container') diff --git a/doc/source/user/examples/user-agent.py b/doc/source/user/examples/user-agent.py new file mode 100644 index 000000000..094c91e1c --- /dev/null +++ b/doc/source/user/examples/user-agent.py @@ -0,0 +1,6 @@ +import openstack.cloud +openstack.cloud.simple_logging(http_debug=True) + +cloud = openstack.openstack_cloud( + cloud='datacentred', app_name='AmazingApp', app_version='1.0') +cloud.list_networks() diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst new file mode 100644 index 000000000..10513936f --- /dev/null +++ b/doc/source/user/index.rst @@ -0,0 +1,20 @@ +================== + Shade User Guide +================== + +.. toctree:: + :maxdepth: 2 + + config/index + usage + logging + model + microversions + +Presentations +============= + +.. 
toctree::
+   :maxdepth: 1
+
+   multi-cloud-demo
diff --git a/doc/source/user/logging.rst b/doc/source/user/logging.rst
new file mode 100644
index 000000000..0d6276e2e
--- /dev/null
+++ b/doc/source/user/logging.rst
@@ -0,0 +1,105 @@
+=======
+Logging
+=======
+
+.. note:: TODO(shade) This document is written from a shade POV. It needs to
+   be combined with the existing logging guide, but also the logging
+   systems need to be rationalized.
+
+`openstacksdk` uses `Python Logging`_. As `openstacksdk` is a library, it does
+not configure logging handlers automatically, leaving that to the consuming
+application.
+
+Simple Usage
+------------
+
+For consumers who just want a basic logging setup without thinking about it
+too deeply, there is a helper method. If used, it should be called before any
+other `shade` functionality.
+
+.. code-block:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging()
+
+`openstack.cloud.simple_logging` takes two optional boolean arguments:
+
+debug
+  Turns on debug logging.
+
+http_debug
+  Turns on debug logging as well as debug logging of the underlying HTTP
+  calls.
+
+`openstack.cloud.simple_logging` also sets up a few other loggers and
+squelches some warnings or log messages that are otherwise uninteresting or
+not actionable by an `openstack.cloud` user.
+
+Advanced Usage
+--------------
+
+`openstack.cloud` logs to a set of different named loggers.
+
+Most of the logging is set up to log to the root `openstack.cloud` logger.
+There are additional sub-loggers that are used at times, primarily so that a
+user can decide to turn on or off a specific type of logging. They are listed
+below.
+
+openstack.cloud.task_manager
+  `openstack.cloud` uses a Task Manager to perform remote calls. The
+  `openstack.cloud.task_manager` logger emits messages at the start and end
+  of each Task announcing what it is going to run and then what it ran and
+  how long it took. Logging `openstack.cloud.task_manager` is a good way to
+  get a trace of external actions `openstack.cloud` is taking without full
+  `HTTP Tracing`_.
+
+openstack.cloud.request_ids
+  The `openstack.cloud.request_ids` logger emits a log line at the end of
+  each HTTP interaction with the OpenStack Request ID associated with the
+  interaction. This can be useful for tracking actions taken on the
+  server side if one does not want `HTTP Tracing`_.
+
+openstack.cloud.exc
+  If `log_inner_exceptions` is set to True, `shade` will emit any wrapped
+  exception to the `openstack.cloud.exc` logger. Wrapped exceptions are
+  usually considered implementation details, but can be useful for debugging
+  problems.
+
+openstack.cloud.iterate_timeout
+  When `shade` needs to poll a resource, it does so in a loop that waits
+  between iterations and ultimately times out. The
+  `openstack.cloud.iterate_timeout` logger emits messages for each iteration
+  indicating it is waiting and for how long. These can be useful to watch
+  for long-running tasks, so that one knows things are not stuck, but they
+  can also be noisy.
+
+openstack.cloud.http
+  `shade` will sometimes log additional information about HTTP interactions
+  to the `openstack.cloud.http` logger. This can be verbose, as it sometimes
+  logs entire response bodies.
+
+openstack.cloud.fnmatch
+  `shade` will try to use `fnmatch`_ on given `name_or_id` arguments. It's a
+  best-effort attempt, so pattern misses are logged to
+  `openstack.cloud.fnmatch`. A user may not intend a `name_or_id` to be
+  treated as an fnmatch pattern - for instance, when looking for an image
+  literally named ``Fedora 24 [official]`` - so these messages are logged
+  separately.
+
+.. _fnmatch: https://pymotw.com/2/fnmatch/
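+
+For example, to get a trace of the remote actions being taken without full
+HTTP tracing, a consuming application might enable just the task_manager
+logger. This is a minimal sketch using only the standard library:
+
+.. code-block:: python
+
+  import logging
+
+  # Send openstack.cloud.task_manager messages to stderr at DEBUG level
+  logger = logging.getLogger('openstack.cloud.task_manager')
+  logger.addHandler(logging.StreamHandler())
+  logger.setLevel(logging.DEBUG)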
+
+HTTP Tracing
+------------
+
+HTTP interactions are handled by `keystoneauth`. If you want to enable HTTP
+tracing while using `shade` and are not using
+`openstack.cloud.simple_logging`, set the log level of the `keystoneauth`
+logger to `DEBUG`.
+
+Python Logging
+--------------
+
+Python logging is a standard feature of Python and is documented fully in the
+Python documentation, which varies by version of Python.
+
+For more information on Python Logging for Python v2, see
+https://docs.python.org/2/library/logging.html.
+
+For more information on Python Logging for Python v3, see
+https://docs.python.org/3/library/logging.html.
diff --git a/doc/source/user/microversions.rst b/doc/source/user/microversions.rst
new file mode 100644
index 000000000..ce821dace
--- /dev/null
+++ b/doc/source/user/microversions.rst
@@ -0,0 +1,75 @@
+=============
+Microversions
+=============
+
+As shade rolls out support for consuming microversions, it will do so on a
+call-by-call basis as needed. Just like with major versions, shade should
+have logic to handle each microversion for a given REST call it makes, with
+the following rules in mind:
+
+* If an activity shade performs can be done differently or more efficiently
+  with a new microversion, the support should be added to openstack.cloud.
+
+* shade should always attempt to use the latest microversion it is aware of
+  for a given call, unless a microversion removes important data.
+
+* Microversion selection should under no circumstances be exposed to the
+  user, except in the case of missing-feature error messages.
+
+* If a feature is only exposed for a given microversion and cannot be
+  simulated for older clouds without that microversion, it is OK to add it to
+  shade, but a clear error message should be given to the user that the given
+  feature is not available on their cloud. (A message such as "This cloud
+  only supports a maximum microversion of XXX for service YYY and this
+  feature only exists on clouds with microversion ZZZ. Please contact your
+  cloud provider for information about when this feature might be available")
+
+* When adding a feature to shade that only exists behind a new microversion,
+  every effort should be made to figure out how to provide the same
+  functionality if at all possible, even if doing so is inefficient. If an
+  inefficient workaround is employed, a warning should be provided to the
+  user. (The user's workaround to skip the inefficient behavior would be to
+  stop using that shade API call.)
+
+* If shade is aware of logic for more than one microversion, it should always
+  attempt to use the latest version available for the service for that call.
+
+* Objects returned from shade should always go through normalization and thus
+  should always conform to shade's documented data model. They should never
+  look different to the shade user regardless of the microversion used for
+  the REST call.
+
+* If a microversion adds new fields to an object, those fields should be
+  added to shade's data model contract for that object, and the data should
+  either be filled in by performing additional REST calls if the data is
+  available that way, or the field should have a default value of None which
+  the user can be expected to test for when attempting to use the new value.
+
+* If a microversion removes fields from an object that are part of shade's
+  existing data model contract, care should be taken to not use the new
+  microversion for that call unless forced to by lack of availability of the
+  old microversion on the cloud in question. In the case where an old
+  microversion is no longer available, care must be taken to either find the
+  data from another source and fill it in, or to put a value of None into the
+  field and document for the user that on some clouds the value may not
+  exist.
+
+* If a microversion removes a field and the outcome is particularly
+  intractable and impossible to work around without fundamentally breaking
+  shade's users, an issue should be raised with the service team in question.
+  Hopefully a resolution can be found during the period while clouds still
+  have the old microversion.
+
+* As new calls or objects are added to shade, it is important to check in
+  with the service team in question on the expected stability of the object.
+  If there are known changes expected in the future, even if they may be a
+  few years off, shade should take care to not add commitments to its data
+  model for those fields/features. It is OK for shade to not have something.
+
+  .. note::
+     shade does not currently have any sort of "experimental" opt-in API that
+     would allow shade to expose things to a user that may not be supportable
+     under shade's normal compatibility contract. If a conflict arises in the
+     future where there is a strong desire for a feature but also a lack of
+     certainty about its stability over time, an experimental API may want to
+     be explored ... but concrete use cases should arise before such a thing
+     is started.
diff --git a/doc/source/user/model.rst b/doc/source/user/model.rst
new file mode 100644
index 000000000..3293b0306
--- /dev/null
+++ b/doc/source/user/model.rst
@@ -0,0 +1,504 @@
+==========
+Data Model
+==========
+
+shade has a very strict policy of never breaking backwards compatibility.
+However, with the data structures returned from OpenStack, there are places
+where the resource structures from OpenStack are returned to the user somewhat
+directly, leaving a shade user open to changes/differences in result content.
+
+To combat that, shade 'normalizes' the return structure from OpenStack in many
+places, and the results of that normalization are listed below. Where shade
+performs normalization, a user can count on any fields declared in the docs
+as being completely safe to use - they are as much a part of shade's API
+contract as any other Python method.
+
+Some OpenStack objects allow for arbitrary attributes at the root of the
+object. shade will pass those through so as not to break anyone who may be
+counting on them, but as they are arbitrary shade can make no guarantees as
+to their existence. As part of normalization, shade will put any attribute
+from an OpenStack resource that is not in its data model contract into an
+attribute called 'properties'. The contents of properties are defined to be
+an arbitrary collection of key value pairs with no promises as to any
+particular key ever existing.
+
+If a user passes `strict=True` to the shade constructor, shade will not pass
+through arbitrary objects to the root of the resource, and will instead only
+put them in the properties dict. If a user is worried about accidentally
+writing code that depends on an attribute that is not part of the API
+contract, this can be a useful tool.
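+
+For example, a minimal sketch of the difference (the cloud name and image
+name here are illustrative assumptions, not part of the contract):
+
+.. code-block:: python
+
+  import openstack.cloud
+
+  # With strict=True, unknown attributes appear only under 'properties'
+  cloud = openstack.openstack_cloud(cloud='example-cloud', strict=True)
+  image = cloud.get_image('example-image')
+  print(image['properties'])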
+
+Keep in mind that all data can still be accessed via the properties dict, but
+any code touching anything in the properties dict should be aware that the
+keys found there are highly user/cloud specific. Any key that is transformed
+as part of the shade data model contract will not wind up with an entry in
+properties - only keys that are unknown.
+
+Location
+--------
+
+A Location defines where a resource lives. It includes a cloud name, a region
+name and an availability zone, as well as information about the project that
+owns the resource.
+
+The project information may contain a project id, or a combination of one or
+more of a project name with a domain name or id. If a project id is present,
+it should be considered correct.
+
+Some resources do not carry ownership information with them. For those, the
+project information will be filled in from the project the user currently
+has a token for.
+
+Some resources do not have information about availability zones, or may exist
+region wide. Those resources will have None as their availability zone.
+
+A Location is represented as follows:
+
+.. code-block:: python
+
+  Location = dict(
+      cloud=str(),
+      region=str(),
+      zone=str() or None,
+      project=dict(
+          id=str() or None,
+          name=str() or None,
+          domain_id=str() or None,
+          domain_name=str() or None))
+
+
+Flavor
+------
+
+A flavor for a Nova Server.
+
+.. code-block:: python
+
+  Flavor = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      is_public=bool(),
+      is_disabled=bool(),
+      ram=int(),
+      vcpus=int(),
+      disk=int(),
+      ephemeral=int(),
+      swap=int(),
+      rxtx_factor=float(),
+      extra_specs=dict(),
+      properties=dict())
+
+
+Flavor Access
+-------------
+
+An access entry for a Nova Flavor.
+
+.. code-block:: python
+
+  FlavorAccess = dict(
+      flavor_id=str(),
+      project_id=str())
+
+
+Image
+-----
+
+A Glance Image.
+
+.. code-block:: python
+
+  Image = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      min_ram=int(),
+      min_disk=int(),
+      size=int(),
+      virtual_size=int(),
+      container_format=str(),
+      disk_format=str(),
+      checksum=str(),
+      created_at=str(),
+      updated_at=str(),
+      owner=str(),
+      is_public=bool(),
+      is_protected=bool(),
+      visibility=str(),
+      status=str(),
+      locations=list(),
+      direct_url=str() or None,
+      tags=list(),
+      properties=dict())
+
+
+Keypair
+-------
+
+A keypair for a Nova Server.
+
+.. code-block:: python
+
+  Keypair = dict(
+      location=Location(),
+      name=str(),
+      id=str(),
+      public_key=str(),
+      fingerprint=str(),
+      type=str(),
+      user_id=str(),
+      private_key=str() or None,
+      properties=dict())
+
+
+Security Group
+--------------
+
+A Security Group from either Nova or Neutron.
+
+.. code-block:: python
+
+  SecurityGroup = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      description=str(),
+      security_group_rules=list(),
+      properties=dict())
+
+Security Group Rule
+-------------------
+
+A Security Group Rule from either Nova or Neutron.
+
+.. code-block:: python
+
+  SecurityGroupRule = dict(
+      location=Location(),
+      id=str(),
+      direction=str(),  # oneof('ingress', 'egress')
+      ethertype=str(),
+      port_range_min=int() or None,
+      port_range_max=int() or None,
+      protocol=str() or None,
+      remote_ip_prefix=str() or None,
+      security_group_id=str() or None,
+      remote_group_id=str() or None,
+      properties=dict())
+
+Server
+------
+
+A Server from Nova.
+
+.. code-block:: python
+
+  Server = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      image=dict() or str(),
+      flavor=dict(),
+      volumes=list(),  # Volume
+      interface_ip=str(),
+      has_config_drive=bool(),
+      accessIPv4=str(),
+      accessIPv6=str(),
+      addresses=dict(),  # string, list(Address)
+      created=str(),
+      key_name=str(),
+      metadata=dict(),  # string, string
+      private_v4=str(),
+      progress=int(),
+      public_v4=str(),
+      public_v6=str(),
+      security_groups=list(),  # SecurityGroup
+      status=str(),
+      updated=str(),
+      user_id=str(),
+      host_id=str() or None,
+      power_state=str() or None,
+      task_state=str() or None,
+      vm_state=str() or None,
+      launched_at=str() or None,
+      terminated_at=str() or None,
+      properties=dict())
+
+ComputeLimits
+-------------
+
+Limits and current usage for a project in Nova.
+
+.. code-block:: python
+
+  ComputeLimits = dict(
+      location=Location(),
+      max_personality=int(),
+      max_personality_size=int(),
+      max_server_group_members=int(),
+      max_server_groups=int(),
+      max_server_meta=int(),
+      max_total_cores=int(),
+      max_total_instances=int(),
+      max_total_keypairs=int(),
+      max_total_ram_size=int(),
+      total_cores_used=int(),
+      total_instances_used=int(),
+      total_ram_used=int(),
+      total_server_groups_used=int(),
+      properties=dict())
+
+ComputeUsage
+------------
+
+Current usage for a project in Nova.
+
+.. code-block:: python
+
+  ComputeUsage = dict(
+      location=Location(),
+      started_at=str(),
+      stopped_at=str(),
+      server_usages=list(),
+      max_personality=int(),
+      max_personality_size=int(),
+      max_server_group_members=int(),
+      max_server_groups=int(),
+      max_server_meta=int(),
+      max_total_cores=int(),
+      max_total_instances=int(),
+      max_total_keypairs=int(),
+      max_total_ram_size=int(),
+      total_cores_used=int(),
+      total_hours=int(),
+      total_instances_used=int(),
+      total_local_gb_usage=int(),
+      total_memory_mb_usage=int(),
+      total_ram_used=int(),
+      total_server_groups_used=int(),
+      total_vcpus_usage=int(),
+      properties=dict())
+
+ServerUsage
+-----------
+
+Current usage for a server in Nova.
+
+.. code-block:: python
+
+  ServerUsage = dict(
+      started_at=str(),
+      ended_at=str(),
+      flavor=str(),
+      hours=int(),
+      instance_id=str(),
+      local_gb=int(),
+      memory_mb=int(),
+      name=str(),
+      state=str(),
+      uptime=int(),
+      vcpus=int(),
+      properties=dict())
+
+Floating IP
+-----------
+
+A Floating IP from Neutron or Nova.
+
+.. code-block:: python
+
+  FloatingIP = dict(
+      location=Location(),
+      id=str(),
+      description=str(),
+      attached=bool(),
+      fixed_ip_address=str() or None,
+      floating_ip_address=str() or None,
+      network=str() or None,
+      port=str() or None,
+      router=str(),
+      status=str(),
+      created_at=str() or None,
+      updated_at=str() or None,
+      revision_number=int() or None,
+      properties=dict())
+
+Project
+-------
+
+A Project from Keystone (or a tenant if Keystone v2).
+
+Location information for Project has some specific semantics.
+
+If the project has a parent project, that will be in location.project.id,
+and if it doesn't that should be None. If the Project is associated with
+a domain that will be in location.project.domain_id regardless of the current
+user's token scope. location.project.name and location.project.domain_name
+will always be None. Finally, location.region_name will always be None as
+Projects are global to a cloud. If a deployer happens to deploy OpenStack
+in such a way that users and projects are not shared amongst regions, that
+necessitates treating each of those regions as separate clouds from shade's
+POV.
+
+.. code-block:: python
+
+  Project = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      description=str(),
+      is_enabled=bool(),
+      is_domain=bool(),
+      properties=dict())
+
+Volume
+------
+
+A Volume from Cinder.
+
+.. code-block:: python
+
+  Volume = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      description=str(),
+      size=int(),
+      attachments=list(),
+      status=str(),
+      migration_status=str() or None,
+      host=str() or None,
+      replication_driver=str() or None,
+      replication_status=str() or None,
+      replication_extended_status=str() or None,
+      snapshot_id=str() or None,
+      created_at=str(),
+      updated_at=str() or None,
+      source_volume_id=str() or None,
+      consistencygroup_id=str() or None,
+      volume_type=str() or None,
+      metadata=dict(),
+      is_bootable=bool(),
+      is_encrypted=bool(),
+      can_multiattach=bool(),
+      properties=dict())
+
+
+VolumeType
+----------
+
+A Volume Type from Cinder.
+
+.. code-block:: python
+
+  VolumeType = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      description=str() or None,
+      is_public=bool(),
+      qos_specs_id=str() or None,
+      extra_specs=dict(),
+      properties=dict())
+
+
+VolumeTypeAccess
+----------------
+
+A Volume Type Access from Cinder.
+
+.. code-block:: python
+
+  VolumeTypeAccess = dict(
+      location=Location(),
+      volume_type_id=str(),
+      project_id=str(),
+      properties=dict())
+
+
+ClusterTemplate
+---------------
+
+A Cluster Template from Magnum.
+
+.. code-block:: python
+
+  ClusterTemplate = dict(
+      location=Location(),
+      apiserver_port=int(),
+      cluster_distro=str(),
+      coe=str(),
+      created_at=str(),
+      dns_nameserver=str(),
+      docker_volume_size=int(),
+      external_network_id=str(),
+      fixed_network=str() or None,
+      flavor_id=str(),
+      http_proxy=str() or None,
+      https_proxy=str() or None,
+      id=str(),
+      image_id=str(),
+      insecure_registry=str(),
+      is_public=bool(),
+      is_registry_enabled=bool(),
+      is_tls_disabled=bool(),
+      keypair_id=str(),
+      labels=dict(),
+      master_flavor_id=str() or None,
+      name=str(),
+      network_driver=str(),
+      no_proxy=str() or None,
+      server_type=str(),
+      updated_at=str() or None,
+      volume_driver=str(),
+      properties=dict())
+
+MagnumService
+-------------
+
+A Magnum Service from Magnum.
+
+.. code-block:: python
+
+  MagnumService = dict(
+      location=Location(),
+      binary=str(),
+      created_at=str(),
+      disabled_reason=str() or None,
+      host=str(),
+      id=str(),
+      report_count=int(),
+      state=str(),
+      properties=dict())
+
+Stack
+-----
+
+A Stack from Heat.
+
+.. code-block:: python
+
+  Stack = dict(
+      location=Location(),
+      id=str(),
+      name=str(),
+      created_at=str(),
+      deleted_at=str(),
+      updated_at=str(),
+      description=str(),
+      action=str(),
+      identifier=str(),
+      is_rollback_enabled=bool(),
+      notification_topics=list(),
+      outputs=list(),
+      owner=str(),
+      parameters=dict(),
+      parent=str(),
+      stack_user_project_id=str(),
+      status=str(),
+      status_reason=str(),
+      tags=dict(),
+      template_description=str(),
+      timeout_mins=int(),
+      properties=dict())
diff --git a/doc/source/user/multi-cloud-demo.rst b/doc/source/user/multi-cloud-demo.rst
new file mode 100644
index 000000000..6d6914231
--- /dev/null
+++ b/doc/source/user/multi-cloud-demo.rst
@@ -0,0 +1,811 @@
+================
+Multi-Cloud Demo
+================
+
+This document contains a presentation in `presentty`_ format. If you want to
+walk through it like a presentation, install `presentty` and run:
+
+.. code:: bash
+
+  presentty doc/source/user/multi-cloud-demo.rst
+
+The content is hopefully helpful even if it's not being narrated, so it's
+being included in the `shade` docs.
+
+.. _presentty: https://pypi.python.org/pypi/presentty
+
+Using Multiple OpenStack Clouds Easily with Shade
+=================================================
+
+Who am I?
+=========
+
+Monty Taylor
+
+* OpenStack Infra Core
+* irc: mordred
+* twitter: @e_monty
+
+What are we going to talk about?
+================================
+
+`shade`
+
+* a task and end-user oriented Python library
+* abstracts deployment differences
+* designed for multi-cloud
+* simple to use
+* massive scale
+
+  * optional advanced features to handle 20k servers a day
+
+* Initial logic/design extracted from nodepool
+* Librified to re-use in Ansible
+
+shade is Free Software
+======================
+
+* https://git.openstack.org/cgit/openstack-infra/shade
+* openstack-dev@lists.openstack.org
+* #openstack-shade on freenode
+
+This talk is Free Software, too
+===============================
+
+* Written for presentty (https://pypi.python.org/pypi/presentty)
+* doc/source/multi-cloud-demo.rst
+* examples in doc/source/examples
+* Paths subject to change - this is the first presentation in tree!
+
+Complete Example
+================
+
+.. code:: python
+
+  import openstack.cloud
+
+  # Initialize and turn on debug logging
+  openstack.cloud.simple_logging(debug=True)
+
+  for cloud_name, region_name in [
+      ('my-vexxhost', 'ca-ymq-1'),
+      ('my-citycloud', 'Buf1'),
+      ('my-internap', 'ams01')]:
+      # Initialize cloud
+      cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name)
+
+      # Upload an image to the cloud
+      image = cloud.create_image(
+          'devuan-jessie', filename='devuan-jessie.qcow2', wait=True)
+
+      # Find a flavor with at least 512M of RAM
+      flavor = cloud.get_flavor_by_ram(512)
+
+      # Boot a server, wait for it to boot, and then do whatever is needed
+      # to get a public ip for it.
+      cloud.create_server(
+          'my-server', image=image, flavor=flavor, wait=True, auto_ip=True)
+
+Let's Take a Few Steps Back
+===========================
+
+Multi-cloud is easy, but you need to know a few things.
+
+* Terminology
+* Config
+* Shade API
+
+Cloud Terminology
+=================
+
+Let's define a few terms, so that we can use them with ease:
+
+* `cloud` - logically related collection of services
+* `region` - completely independent subset of a given cloud
+* `patron` - human who has an account
+* `user` - account on a cloud
+* `project` - logical collection of cloud resources
+* `domain` - collection of users and projects
+
+Cloud Terminology Relationships
+===============================
+
+* A `cloud` has one or more `regions`
+* A `patron` has one or more `users`
+* A `patron` has one or more `projects`
+* A `cloud` has one or more `domains`
+* In a `cloud` with one `domain` it is named "default"
+* Each `patron` may have their own `domain`
+* Each `user` is in one `domain`
+* Each `project` is in one `domain`
+* A `user` has one or more `roles` on one or more `projects`
+
+HTTP Sessions
+=============
+
+* HTTP interactions are authenticated via keystone
+* Authenticating returns a `token`
+* An authenticated HTTP Session is shared across a `region`
+
+Cloud Regions
+=============
+
+A `cloud region` is the basic unit of REST interaction.
+
+* A `cloud` has a `service catalog`
+* The `service catalog` is returned in the `token`
+* The `service catalog` lists an `endpoint` for each `service` in each `region`
+* A `region` is completely autonomous
+
+Users, Projects and Domains
+===========================
+
+In clouds with multiple domains, project and user names are only unique
+within a domain.
+ +* Names require `domain` information for uniqueness. IDs do not. +* Providing `domain` information when not needed is fine. +* `project_name` requires `project_domain_name` or `project_domain_id` +* `project_id` does not +* `username` requires `user_domain_name` or `user_domain_id` +* `user_id` does not + +Confused Yet? +============= + +Don't worry - you don't have to deal with most of that. + +Auth per cloud, select per region +================================= + +In general, the thing you need to know is: + +* Configure authentication per `cloud` +* Select config to use by `cloud` and `region` + +clouds.yaml +=========== + +Information about the clouds you want to connect to is stored in a file +called `clouds.yaml`. + +`clouds.yaml` can be in your homedir: `~/.config/openstack/clouds.yaml` +or system-wide: `/etc/openstack/clouds.yaml`. + +Information in your homedir, if it exists, takes precedence. + +Full docs on `clouds.yaml` are at +https://docs.openstack.org/developer/os-client-config/ + +What about Mac and Windows? +=========================== + +`USER_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `~/.config/openstack` +* OSX: `~/Library/Application Support/openstack` +* Windows: `C:\\Users\\USERNAME\\AppData\\Local\\OpenStack\\openstack` + +`SITE_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `/etc/openstack` +* OSX: `/Library/Application Support/openstack` +* Windows: `C:\\ProgramData\\OpenStack\\openstack` + +Config Terminology +================== + +For multi-cloud, think of two types: + +* `profile` - Facts about the `cloud` that are true for everyone +* `cloud` - Information specific to a given `user` + +Apologies for the use of `cloud` twice. + +Environment Variables and Simple Usage +====================================== + +* Environment variables starting with `OS_` go into a cloud called `envvars` +* If you only have one cloud, you don't have to specify it +* `OS_CLOUD` and `OS_REGION_NAME` are default values for + `cloud` and `region_name` + +TOO MUCH TALKING - NOT ENOUGH CODE +================================== + +basic clouds.yaml for the example code +====================================== + +Simple example of a clouds.yaml + +* Config for a named `cloud` "my-citycloud" +* Reference a well-known "named" profile: `citycloud` +* `os-client-config` has a built-in list of profiles at + https://docs.openstack.org/developer/os-client-config/vendor-support.html +* Vendor profiles contain various advanced config +* `cloud` name can match `profile` name (using different names for clarity) + +.. code:: yaml + + clouds: + my-citycloud: + profile: citycloud + auth: + username: mordred + project_id: 65222a4d09ea4c68934fa1028c77f394 + user_domain_id: d0919bd5e8d74e49adf0e145807ffc38 + project_domain_id: d0919bd5e8d74e49adf0e145807ffc38 + +Where's the password? + +secure.yaml +=========== + +* Optional additional file just like `clouds.yaml` +* Values overlaid on `clouds.yaml` +* Useful if you want to protect secrets more stringently + +Example secure.yaml +=================== + +* No, my password isn't XXXXXXXX +* `cloud` name should match `clouds.yaml` +* Optional - I actually keep mine in my `clouds.yaml` + +.. code:: yaml + + clouds: + my-citycloud: + auth: + password: XXXXXXXX + +more clouds.yaml +================ + +More information can be provided. + +* Use v3 of the `identity` API - even if others are present +* Use `https://image-ca-ymq-1.vexxhost.net/v2` for `image` API + instead of what's in the catalog + +.. 
code:: yaml
+
+  my-vexxhost:
+    identity_api_version: 3
+    image_endpoint_override: https://image-ca-ymq-1.vexxhost.net/v2
+    profile: vexxhost
+    auth:
+      user_domain_id: default
+      project_domain_id: default
+      project_name: d8af8a8f-a573-48e6-898a-af333b970a2d
+      username: 0b8c435b-cc4d-4e05-8a47-a2ada0539af1
+
+Much more complex clouds.yaml example
+=====================================
+
+* Not using a profile - all settings included
+* In the `ams01` `region` there are two networks with undiscoverable qualities
+* Each one is labeled here so choices can be made
+* Any of the settings can be specific to a `region` if needed
+* `region` settings override `cloud` settings
+* `cloud` does not support `floating-ips`
+
+.. code:: yaml
+
+  my-internap:
+    auth:
+      auth_url: https://identity.api.cloud.iweb.com
+      username: api-55f9a00fb2619
+      project_name: inap-17037
+    identity_api_version: 3
+    floating_ip_source: None
+    regions:
+    - name: ams01
+      values:
+        networks:
+        - name: inap-17037-WAN1654
+          routes_externally: true
+          default_interface: true
+        - name: inap-17037-LAN3631
+          routes_externally: false
+
+Complete Example Again
+======================
+
+.. code:: python
+
+  import openstack.cloud
+
+  # Initialize and turn on debug logging
+  openstack.cloud.simple_logging(debug=True)
+
+  for cloud_name, region_name in [
+      ('my-vexxhost', 'ca-ymq-1'),
+      ('my-citycloud', 'Buf1'),
+      ('my-internap', 'ams01')]:
+      # Initialize cloud
+      cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name)
+
+      # Upload an image to the cloud
+      image = cloud.create_image(
+          'devuan-jessie', filename='devuan-jessie.qcow2', wait=True)
+
+      # Find a flavor with at least 512M of RAM
+      flavor = cloud.get_flavor_by_ram(512)
+
+      # Boot a server, wait for it to boot, and then do whatever is needed
+      # to get a public ip for it.
+      cloud.create_server(
+          'my-server', image=image, flavor=flavor, wait=True, auto_ip=True)
+
+Step By Step
+============
+
+Import the library
+==================
+
+.. code:: python
+
+  import openstack.cloud
+
+Logging
+=======
+
+* `shade` uses standard Python logging
+* Special `openstack.cloud.request_ids` logger for API request IDs
+* `simple_logging` does easy defaults
+* Squelches some meaningless warnings
+
+  * `debug`
+
+    * Logs shade loggers at debug level
+    * Includes `openstack.cloud.request_ids` debug logging
+
+  * `http_debug` Implies `debug`, turns on HTTP tracing
+
+.. code:: python
+
+  # Initialize and turn on debug logging
+  openstack.cloud.simple_logging(debug=True)
+
+Example with Debug Logging
+==========================
+
+* doc/source/examples/debug-logging.py
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(debug=True)
+
+  cloud = openstack.openstack_cloud(
+      cloud='my-vexxhost', region_name='ca-ymq-1')
+  cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]')
+
+Example with HTTP Debug Logging
+===============================
+
+* doc/source/examples/http-debug-logging.py
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(http_debug=True)
+
+  cloud = openstack.openstack_cloud(
+      cloud='my-vexxhost', region_name='ca-ymq-1')
+  cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]')
+
+Cloud Regions
+=============
+
+* `cloud` constructor needs `cloud` and `region_name`
+* `openstack.openstack_cloud` is a helper factory function
+
+.. code:: python
+
+  for cloud_name, region_name in [
+      ('my-vexxhost', 'ca-ymq-1'),
+      ('my-citycloud', 'Buf1'),
+      ('my-internap', 'ams01')]:
+      # Initialize cloud
+      cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name)
+
+Upload an Image
+===============
+
+* Picks the correct upload mechanism
+* **SUGGESTION** Always upload your own base images
+
+.. code:: python
+
+  # Upload an image to the cloud
+  image = cloud.create_image(
+      'devuan-jessie', filename='devuan-jessie.qcow2', wait=True)
+
+Always Upload an Image
+======================
+
+Ok. You don't have to. But, for multi-cloud...
+
+* Images with the same content are named differently on different clouds
+* Images with the same name on different clouds can have different content
+* Upload your own to all clouds and both problems go away
+* Download from OS vendor or build with `diskimage-builder`
+
+Find a flavor
+=============
+
+* Flavors are all named differently on clouds
+* Flavors can be found via RAM
+* `get_flavor_by_ram` finds the smallest matching flavor
+
+.. code:: python
+
+  # Find a flavor with at least 512M of RAM
+  flavor = cloud.get_flavor_by_ram(512)
+
+Create a server
+===============
+
+* my-vexxhost
+
+  * Boot server
+  * Wait for `status==ACTIVE`
+
+* my-internap
+
+  * Boot server on network `inap-17037-WAN1654`
+  * Wait for `status==ACTIVE`
+
+* my-citycloud
+
+  * Boot server
+  * Wait for `status==ACTIVE`
+  * Find the `port` for the `fixed_ip` for `server`
+  * Create `floating-ip` on that `port`
+  * Wait for `floating-ip` to attach
+
+.. code:: python
+
+  # Boot a server, wait for it to boot, and then do whatever is needed
+  # to get a public ip for it.
+  cloud.create_server(
+      'my-server', image=image, flavor=flavor, wait=True, auto_ip=True)
+
+Wow. We didn't even deploy WordPress!
+=====================================
+
+Image and Flavor by Name or ID
+==============================
+
+* Pass string to image/flavor
+* Image/Flavor will be found by name or ID
+* Common pattern
+* doc/source/examples/create-server-name-or-id.py
+
+.. code:: python
+
+  import openstack.cloud
+
+  # Initialize and turn on debug logging
+  openstack.cloud.simple_logging(debug=True)
+
+  for cloud_name, region_name, image, flavor in [
+      ('my-vexxhost', 'ca-ymq-1',
+       'Ubuntu 16.04.1 LTS [2017-03-03]', 'v1-standard-4'),
+      ('my-citycloud', 'Buf1',
+       'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'),
+      ('my-internap', 'ams01',
+       'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4')]:
+      # Initialize cloud
+      cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name)
+
+      # Boot a server, wait for it to boot, and then do whatever is needed
+      # to get a public ip for it.
+      server = cloud.create_server(
+          'my-server', image=image, flavor=flavor, wait=True, auto_ip=True)
+      print(server.name)
+      print(server['name'])
+      cloud.pprint(server)
+      # Delete it - this is a demo
+      cloud.delete_server(server, wait=True, delete_ips=True)
+
+cloud.pprint method was just added this morning
+===============================================
+
+Delete Servers
+==============
+
+* `delete_ips` Delete any `floating_ips` the server may have
+
+.. code:: python
+
+  cloud.delete_server('my-server', wait=True, delete_ips=True)
+
+Image and Flavor by Dict
+========================
+
+* Pass dict to image/flavor
+* Useful if you know whether the value is a name or an ID
+* Common pattern
+* doc/source/examples/create-server-dict.py
+
+.. 
code:: python + + import openstack.cloud + + # Initialize and turn on debug logging + openstack.cloud.simple_logging(debug=True) + + for cloud_name, region_name, image, flavor_id in [ + ('my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]', + '5cf64088-893b-46b5-9bb1-ee020277635d'), + ('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', + '0dab10b5-42a2-438e-be7b-505741a7ffcc'), + ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', + 'A1.4')]: + # Initialize cloud + cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. + server = cloud.create_server( + 'my-server', image=image, flavor=dict(id=flavor_id), + wait=True, auto_ip=True) + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) + +Munch Objects +============= + +* Behave like a dict and an object +* doc/source/examples/munch-dict-object.py + +.. code:: python + + import openstack.cloud + openstack.cloud.simple_logging(debug=True) + + cloud = openstack.openstack_cloud(cloud='zetta', region_name='no-osl1') + image = cloud.get_image('Ubuntu 14.04 (AMD64) [Local Storage]') + print(image.name) + print(image['name']) + +API Organized by Logical Resource +================================= + +* list_servers +* search_servers +* get_server +* create_server +* delete_server +* update_server + +For other things, it's still {verb}_{noun} + +* attach_volume +* wait_for_server +* add_auto_ip + +Cleanup Script +============== + +* Sometimes my examples had bugs +* doc/source/examples/cleanup-servers.py + +.. code:: python + + import openstack.cloud + + # Initialize and turn on debug logging + openstack.cloud.simple_logging(debug=True) + + for cloud_name, region_name in [ + ('my-vexxhost', 'ca-ymq-1'), + ('my-citycloud', 'Buf1'), + ('my-internap', 'ams01')]: + # Initialize cloud + cloud = openstack.openstack_cloud(cloud=cloud_name, region_name=region_name) + for server in cloud.search_servers('my-server'): + cloud.delete_server(server, wait=True, delete_ips=True) + +Normalization +============= + +* https://docs.openstack.org/developer/shade/model.html#image +* doc/source/examples/normalization.py + +.. code:: python + + import openstack.cloud + openstack.cloud.simple_logging() + + cloud = openstack.openstack_cloud(cloud='fuga', region_name='cystack') + image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') + cloud.pprint(image) + +Strict Normalized Results +========================= + +* Return only the declared model +* doc/source/examples/strict-mode.py + +.. code:: python + + import openstack.cloud + openstack.cloud.simple_logging() + + cloud = openstack.openstack_cloud( + cloud='fuga', region_name='cystack', strict=True) + image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') + cloud.pprint(image) + +How Did I Find the Image Name for the Last Example? +=================================================== + +* I often make stupid little utility scripts +* doc/source/examples/find-an-image.py + +.. 
code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging()
+
+  cloud = openstack.openstack_cloud(cloud='fuga', region_name='cystack')
+  cloud.pprint([
+      image for image in cloud.list_images()
+      if 'ubuntu' in image.name.lower()])
+
+Added / Modified Information
+============================
+
+* Servers need extra help
+* Fetch addresses dict from neutron
+* Figure out which IPs are good
+* `detailed` - defaults to True, adds everything
+* `bare` - no extra calls - don't even fix broken things
+* `bare` is still normalized
+* doc/source/examples/server-information.py
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(debug=True)
+
+  cloud = openstack.openstack_cloud(cloud='my-citycloud', region_name='Buf1')
+  try:
+      server = cloud.create_server(
+          'my-server', image='Ubuntu 16.04 Xenial Xerus',
+          flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'),
+          wait=True, auto_ip=True)
+
+      print("\n\nFull Server\n\n")
+      cloud.pprint(server)
+
+      print("\n\nTurn Detailed Off\n\n")
+      cloud.pprint(cloud.get_server('my-server', detailed=False))
+
+      print("\n\nBare Server\n\n")
+      cloud.pprint(cloud.get_server('my-server', bare=True))
+
+  finally:
+      # Delete by name - this is a demo; works even if boot failed
+      cloud.delete_server('my-server', wait=True, delete_ips=True)
+
+Exceptions
+==========
+
+* All shade exceptions are subclasses of `OpenStackCloudException`
+* Direct REST calls throw `OpenStackCloudHTTPError`
+* `OpenStackCloudHTTPError` subclasses `OpenStackCloudException`
+  and `requests.exceptions.HTTPError`
+* `OpenStackCloudURINotFound` for 404
+* `OpenStackCloudBadRequest` for 400
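+
+A minimal sketch of catching these (the cloud name and the deliberately
+bogus image/flavor names are illustrative):
+
+.. code:: python
+
+  import openstack.cloud
+
+  cloud = openstack.openstack_cloud(cloud='my-vexxhost')
+  try:
+      cloud.create_server(
+          'my-server', image='no-such-image', flavor='no-such-flavor')
+  except openstack.OpenStackCloudURINotFound:
+      print("Got a 404 back from the cloud")
+  except openstack.OpenStackCloudException as e:
+      # Catch-all: every shade exception subclasses this
+      print("Something else went wrong: {0}".format(e))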
+
+User Agent Info
+===============
+
+* Set `app_name` and `app_version` for User Agents
+* (sssh ... `region_name` is optional if the cloud has one region)
+* doc/source/examples/user-agent.py
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(http_debug=True)
+
+  cloud = openstack.openstack_cloud(
+      cloud='datacentred', app_name='AmazingApp', app_version='1.0')
+  cloud.list_networks()
+
+Uploading Large Objects
+=======================
+
+* swift has a maximum object size
+* Large Objects are uploaded specially
+* shade figures this out and does it
+* multi-threaded
+* doc/source/examples/upload-object.py
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(debug=True)
+
+  cloud = openstack.openstack_cloud(cloud='ovh', region_name='SBG1')
+  cloud.create_object(
+      container='my-container', name='my-object',
+      filename='/home/mordred/briarcliff.sh3d')
+  cloud.delete_object('my-container', 'my-object')
+  cloud.delete_container('my-container')
+
+Uploading Large Objects
+=======================
+
+* Default max_file_size is 5G
+* This is a conference demo
+* Let's force a segment_size
+* One MILLION bytes
+* doc/source/examples/upload-object.py
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(debug=True)
+
+  cloud = openstack.openstack_cloud(cloud='ovh', region_name='SBG1')
+  cloud.create_object(
+      container='my-container', name='my-object',
+      filename='/home/mordred/briarcliff.sh3d',
+      segment_size=1000000)
+  cloud.delete_object('my-container', 'my-object')
+  cloud.delete_container('my-container')
+
+Service Conditionals
+====================
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(debug=True)
+
+  cloud = openstack.openstack_cloud(cloud='kiss', region_name='region1')
+  print(cloud.has_service('network'))
+  print(cloud.has_service('container-orchestration'))
+
+Service Conditional Overrides
+=============================
+
+* Sometimes clouds are weird and figuring that out won't work
+
+.. code:: python
+
+  import openstack.cloud
+  openstack.cloud.simple_logging(debug=True)
+
+  cloud = openstack.openstack_cloud(cloud='rax', region_name='DFW')
+  print(cloud.has_service('network'))
+
+.. code:: yaml
+
+  clouds:
+    rax:
+      profile: rackspace
+      auth:
+        username: mordred
+        project_id: 245018
+      # This is already in profile: rackspace
+      has_network: false
+
+Coming Soon
+===========
+
+* Completion of RESTification
+* Full version discovery support
+* Multi-cloud facade layer
+* Microversion support (talk tomorrow)
+* Completion of caching tier (talk tomorrow)
+* All of you helping hack on shade!!! (we're friendly)
diff --git a/doc/source/user/usage.rst b/doc/source/user/usage.rst
new file mode 100644
index 000000000..7ef63d511
--- /dev/null
+++ b/doc/source/user/usage.rst
@@ -0,0 +1,22 @@
+=====
+Usage
+=====
+
+To use `openstack.cloud` in a project:
+
+.. code-block:: python
+
+    import openstack.cloud
+
+.. note::
+   API methods that return a description of an OpenStack resource (e.g.,
+   server instance, image, volume, etc.) do so using a `munch.Munch` object
+   from the `Munch library <https://github.com/Infinidat/munch>`_. `Munch`
+   objects can be accessed using either dictionary or object notation
+   (e.g., ``server.id``, ``image.name`` and ``server['id']``,
+   ``image['name']``).
+
+.. autoclass:: openstack.OpenStackCloud
+   :members:
+
+.. autoclass:: openstack.OperatorCloud
+   :members:
diff --git a/doc/source/users/guides/connect_from_config.rst b/doc/source/users/guides/connect_from_config.rst
index ca99426d3..79a7927f7 100644
--- a/doc/source/users/guides/connect_from_config.rst
+++ b/doc/source/users/guides/connect_from_config.rst
@@ -18,7 +18,7 @@ Default Location
 To create a connection from a file you need a YAML file to contain the
 configuration.
 
-.. literalinclude:: ../../contributors/clouds.yaml
+.. literalinclude:: ../../contributor/clouds.yaml
    :language: yaml
 
 To use a configuration file called ``clouds.yaml`` in one of the default
@@ -33,7 +33,7 @@ function takes three optional arguments:
 * **cloud_name** allows you to specify a cloud from your ``clouds.yaml``
   file.
 * **cloud_config** allows you to pass in an existing
-  ``os_client_config.config.OpenStackConfig``` object.
+  ``openstack.config.loader.OpenStackConfig`` object.
 * **options** allows you to specify a namespace object with options to be
   added to the cloud config.
diff --git a/doc/source/users/guides/image.rst b/doc/source/users/guides/image.rst
index 6b78a06e5..10e28cc68 100644
--- a/doc/source/users/guides/image.rst
+++ b/doc/source/users/guides/image.rst
@@ -44,8 +44,7 @@ efficient method may be to iterate over a stream of the response data. By
 choosing to stream the response content, you determine the ``chunk_size`` that
 is appropriate for your needs, meaning only that many bytes of data are read
 for each iteration of the loop until all data has been consumed.
-See :meth:`requests.Response.iter_content` for more information, as well
-as Requests' :ref:`body-content-workflow`.
+See :meth:`requests.Response.iter_content` for more information.
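+
+As a rough sketch (assuming an existing
+:class:`~openstack.connection.Connection` named ``conn``, with the image
+name, chunk size, and file name purely illustrative), a streamed download
+might look like:
+
+.. code-block:: python
+
+    image = conn.image.find_image('example-image')
+    response = conn.image.download_image(image, stream=True)
+    with open('example-image.img', 'wb') as out:
+        # Consume the body a chunk at a time rather than all at once
+        for chunk in response.iter_content(chunk_size=64 * 1024):
+            out.write(chunk)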
When you choose to stream an image download, openstacksdk is no longer able to compute the checksum of the response data for you. This example diff --git a/examples/connect.py b/examples/connect.py index 07216ae16..7db375ca5 100644 --- a/examples/connect.py +++ b/examples/connect.py @@ -19,8 +19,7 @@ For a full guide see TODO(etoews):link to docs on developer.openstack.org import argparse import os -import os_client_config - +from openstack import config as occ from openstack import connection from openstack import profile from openstack import utils @@ -49,8 +48,8 @@ def _get_resource_value(resource_key, default): except KeyError: return default -occ = os_client_config.OpenStackConfig() -cloud = occ.get_one_cloud(TEST_CLOUD) +config = occ.OpenStackConfig() +cloud = config.get_one_cloud(TEST_CLOUD) SERVER_NAME = 'openstacksdk-example' IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.3.5-x86_64-disk') @@ -68,14 +67,14 @@ EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image' def create_connection_from_config(): opts = Opts(cloud_name=TEST_CLOUD) - occ = os_client_config.OpenStackConfig() - cloud = occ.get_one_cloud(opts.cloud) + config = occ.OpenStackConfig() + cloud = config.get_one_cloud(opts.cloud) return connection.from_config(cloud_config=cloud, options=opts) def create_connection_from_args(): parser = argparse.ArgumentParser() - config = os_client_config.OpenStackConfig() + config = occ.OpenStackConfig() config.register_argparse_arguments(parser, sys.argv[1:]) args = parser.parse_args() return connection.from_config(options=args) diff --git a/extras/delete-network.sh b/extras/delete-network.sh new file mode 100644 index 000000000..1d02959ea --- /dev/null +++ b/extras/delete-network.sh @@ -0,0 +1,14 @@ +neutron router-gateway-clear router1 +neutron router-interface-delete router1 +for subnet in private-subnet ipv6-private-subnet ; do + neutron router-interface-delete router1 $subnet + subnet_id=$(neutron subnet-show $subnet -f value -c id) + neutron port-list | grep $subnet_id | awk '{print $2}' | xargs -n1 neutron port-delete + neutron subnet-delete $subnet +done +neutron router-delete router1 +neutron net-delete private + +# Make the public network directly consumable +neutron subnet-update public-subnet --enable-dhcp=True +neutron net-update public --shared=True diff --git a/extras/install-tips.sh b/extras/install-tips.sh new file mode 100644 index 000000000..d96773ac4 --- /dev/null +++ b/extras/install-tips.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright (c) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
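+
+# For each library below, prefer the local Git checkout that CI lays out
+# under /opt/stack/new: drop the pip-installed release and install the
+# checkout in editable mode so tests run against each dependency's tip.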
+for lib in \
+    python-keystoneclient \
+    python-ironicclient \
+    os-client-config \
+    keystoneauth
+do
+    egg=$(echo $lib | tr '-' '_' | sed 's/python-//')
+    if [ -d /opt/stack/new/$lib ] ; then
+        tip_location="git+file:///opt/stack/new/$lib#egg=$egg"
+        echo "$(which pip) install -U -e $tip_location"
+        pip uninstall -y $lib
+        pip install -U -e $tip_location
+    else
+        echo "$lib not found in /opt/stack/new/$lib"
+    fi
+done
diff --git a/extras/run-ansible-tests.sh b/extras/run-ansible-tests.sh
new file mode 100755
index 000000000..bda6007c9
--- /dev/null
+++ b/extras/run-ansible-tests.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+#############################################################################
+# run-ansible-tests.sh
+#
+# Script used to set up a tox environment for running Ansible. This is meant
+# to be called by tox (via tox.ini). To run the Ansible tests, use:
+#
+#   tox -e ansible [TAG ...]
+# or
+#   tox -e ansible -- -c cloudX [TAG ...]
+# or to use the development version of Ansible:
+#   tox -e ansible -- -d -c cloudX [TAG ...]
+#
+# USAGE:
+#   run-ansible-tests.sh -e ENVDIR [-d] [-c CLOUD] [TAG ...]
+#
+# PARAMETERS:
+#   -d         Use Ansible source repo development branch.
+#   -e ENVDIR  Directory of the tox environment to use for testing.
+#   -c CLOUD   Name of the cloud to use for testing.
+#              Defaults to "devstack-admin".
+#   [TAG ...]  Optional list of space-separated tags to control which
+#              modules are tested.
+#
+# EXAMPLES:
+#   # Run all Ansible tests
+#   run-ansible-tests.sh -e ansible
+#
+#   # Run auth, keypair, and network tests against cloudX
+#   run-ansible-tests.sh -e ansible -c cloudX auth keypair network
+#############################################################################
+
+
+CLOUD="devstack-admin"
+ENVDIR=
+USE_DEV=0
+
+while getopts "c:de:" opt
+do
+    case $opt in
+    d) USE_DEV=1 ;;
+    c) CLOUD=${OPTARG} ;;
+    e) ENVDIR=${OPTARG} ;;
+    ?) echo "Invalid option: -${OPTARG}"
+       exit 1;;
+    esac
+done
+
+if [ -z ${ENVDIR} ]
+then
+    echo "Option -e is required"
+    exit 1
+fi
+
+shift $((OPTIND-1))
+TAGS=$( echo "$*" | tr ' ' , )
+
+# We need to source the current tox environment so that Ansible will
+# be set up for the correct Python environment.
+source $ENVDIR/bin/activate
+
+if [ ${USE_DEV} -eq 1 ]
+then
+    if [ -d ${ENVDIR}/ansible ]
+    then
+        echo "Using existing Ansible source repo"
+    else
+        echo "Installing Ansible source repo at $ENVDIR"
+        git clone --recursive https://github.com/ansible/ansible.git ${ENVDIR}/ansible
+    fi
+    source $ENVDIR/ansible/hacking/env-setup
+else
+    echo "Installing Ansible from pip"
+    pip install ansible
+fi
+
+# Run the shade Ansible tests
+tag_opt=""
+if [ ! -z ${TAGS} ]
+then
+    tag_opt="--tags ${TAGS}"
+fi
+
+# Until we have a module that lets us determine the image we want from
+# within a playbook, we have to find the image here and pass it in.
+# We use the openstack client instead of nova client since it can use
+# clouds.yaml.
+IMAGE=`openstack --os-cloud=${CLOUD} image list -f value -c Name | grep cirros | grep -v -e ramdisk -e kernel`
+if [ $? -ne 0 ]
+then
+    echo "Failed to find Cirros image"
+    exit 1
+fi
+
+ansible-playbook -vvv ./openstack/tests/ansible/run.yml -e "cloud=${CLOUD} image=${IMAGE}" ${tag_opt}
diff --git a/openstack/__init__.py b/openstack/__init__.py
index e69de29bb..a03c06908 100644
--- a/openstack/__init__.py
+++ b/openstack/__init__.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import warnings + +import keystoneauth1.exceptions +import pbr.version +import requestsexceptions + +from openstack import _log +from openstack.cloud.exc import * # noqa +from openstack.cloud.openstackcloud import OpenStackCloud +from openstack.cloud.operatorcloud import OperatorCloud + +__version__ = pbr.version.VersionInfo('openstacksdk').version_string() + +if requestsexceptions.SubjectAltNameWarning: + warnings.filterwarnings( + 'ignore', category=requestsexceptions.SubjectAltNameWarning) + + +def _get_openstack_config(app_name=None, app_version=None): + import openstack.config + # Protect against older versions of os-client-config that don't expose this + try: + return openstack.config.OpenStackConfig( + app_name=app_name, app_version=app_version) + except Exception: + return openstack.config.OpenStackConfig() + + +def simple_logging(debug=False, http_debug=False): + if http_debug: + debug = True + if debug: + log_level = logging.DEBUG + else: + log_level = logging.INFO + if http_debug: + # Enable HTTP level tracing + log = _log.setup_logging('keystoneauth') + log.addHandler(logging.StreamHandler()) + log.setLevel(log_level) + # We only want extra shade HTTP tracing in http debug mode + log = _log.setup_logging('openstack.cloud.http') + log.setLevel(log_level) + else: + # We only want extra shade HTTP tracing in http debug mode + log = _log.setup_logging('openstack.cloud.http') + log.setLevel(logging.WARNING) + log = _log.setup_logging('openstack.cloud') + log.addHandler(logging.StreamHandler()) + log.setLevel(log_level) + # Suppress warning about keystoneauth loggers + log = _log.setup_logging('keystoneauth.identity.base') + log = _log.setup_logging('keystoneauth.identity.generic.base') + + +# TODO(shade) Document this and add some examples +# TODO(shade) This wants to be renamed before we make a release. +def openstack_clouds( + config=None, debug=False, cloud=None, strict=False, + app_name=None, app_version=None): + if not config: + config = _get_openstack_config(app_name, app_version) + try: + if cloud is None: + return [ + OpenStackCloud( + cloud=f.name, debug=debug, + cloud_config=f, + strict=strict, + **f.config) + for f in config.get_all_clouds() + ] + else: + return [ + OpenStackCloud( + cloud=f.name, debug=debug, + cloud_config=f, + strict=strict, + **f.config) + for f in config.get_all_clouds() + if f.name == cloud + ] + except keystoneauth1.exceptions.auth_plugins.NoMatchingPlugin as e: + raise OpenStackCloudException( + "Invalid cloud configuration: {exc}".format(exc=str(e))) + + +# TODO(shade) This wants to be renamed before we make a release - there is +# ultimately no reason to have an openstack_cloud and a connect +# factory function - but we have a few steps to go first and this is used +# in the imported tests from shade. 
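+# Usage sketch (the cloud and region names here are illustrative):
+#
+#   cloud = openstack.openstack_cloud(cloud='mycloud', region_name='RegionOne')
+#   servers = cloud.list_servers()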
+def openstack_cloud( + config=None, strict=False, app_name=None, app_version=None, **kwargs): + if not config: + config = _get_openstack_config(app_name, app_version) + try: + cloud_config = config.get_one_cloud(**kwargs) + except keystoneauth1.exceptions.auth_plugins.NoMatchingPlugin as e: + raise OpenStackCloudException( + "Invalid cloud configuration: {exc}".format(exc=str(e))) + return OpenStackCloud(cloud_config=cloud_config, strict=strict) + + +# TODO(shade) This wants to be renamed before we make a release - there is +# ultimately no reason to have an operator_cloud and a connect +# factory function - but we have a few steps to go first and this is used +# in the imported tests from shade. +def operator_cloud( + config=None, strict=False, app_name=None, app_version=None, **kwargs): + if not config: + config = _get_openstack_config(app_name, app_version) + try: + cloud_config = config.get_one_cloud(**kwargs) + except keystoneauth1.exceptions.auth_plugins.NoMatchingPlugin as e: + raise OpenStackCloudException( + "Invalid cloud configuration: {exc}".format(exc=str(e))) + return OperatorCloud(cloud_config=cloud_config, strict=strict) diff --git a/openstack/_log.py b/openstack/_log.py new file mode 100644 index 000000000..ff2f2eac7 --- /dev/null +++ b/openstack/_log.py @@ -0,0 +1,28 @@ +# Copyright (c) 2015 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + + +class NullHandler(logging.Handler): + def emit(self, record): + pass + + +def setup_logging(name): + log = logging.getLogger(name) + if len(log.handlers) == 0: + h = NullHandler() + log.addHandler(h) + return log diff --git a/openstack/cloud/__init__.py b/openstack/cloud/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openstack/cloud/_adapter.py b/openstack/cloud/_adapter.py new file mode 100644 index 000000000..2929a195b --- /dev/null +++ b/openstack/cloud/_adapter.py @@ -0,0 +1,166 @@ +# Copyright (c) 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' Wrapper around keystoneauth Session to wrap calls in TaskManager ''' + +import functools +from keystoneauth1 import adapter +from six.moves import urllib + +from openstack import _log +from openstack.cloud import exc +from openstack.cloud import task_manager + + +def extract_name(url): + '''Produce a key name to use in logging/metrics from the URL path. + + We want to be able to logic/metric sane general things, so we pull + the url apart to generate names. 
The function returns a list because
+    there are two different ways in which the elements want to be combined
+    below (one for logging, one for statsd).
+
+    Some examples are likely useful:
+
+    /servers -> ['servers']
+    /servers/{id} -> ['servers']
+    /servers/{id}/os-security-groups -> ['servers', 'os-security-groups']
+    /v2.0/networks.json -> ['networks']
+    '''
+
+    url_path = urllib.parse.urlparse(url).path.strip()
+    # Remove / from the beginning to keep the list indexes of interesting
+    # things consistent
+    if url_path.startswith('/'):
+        url_path = url_path[1:]
+
+    # Special case for neutron, which puts .json on the end of urls
+    if url_path.endswith('.json'):
+        url_path = url_path[:-len('.json')]
+
+    url_parts = url_path.split('/')
+    if url_parts[-1] == 'detail':
+        # Special case detail calls
+        # GET /servers/detail
+        # returns ['servers', 'detail']
+        name_parts = url_parts[-2:]
+    else:
+        # Strip leading version piece so that
+        # GET /v2.0/networks
+        # returns ['networks']
+        if url_parts[0] in ('v1', 'v2', 'v2.0'):
+            url_parts = url_parts[1:]
+        name_parts = []
+        # Pull out every other URL portion - so that
+        # GET /servers/{id}/os-security-groups
+        # returns ['servers', 'os-security-groups']
+        for idx in range(0, len(url_parts)):
+            if not idx % 2 and url_parts[idx]:
+                name_parts.append(url_parts[idx])
+
+    # Keystone Token fetching is a special case, so we name it "tokens"
+    if url_path.endswith('tokens'):
+        name_parts = ['tokens']
+
+    # Getting the root of an endpoint is doing version discovery
+    if not name_parts:
+        name_parts = ['discovery']
+
+    # Strip out anything that's empty or None
+    return [part for part in name_parts if part]
+
+
+# TODO(shade) This adapter should go away in favor of the work merging
+# adapter with openstack.proxy.
+class ShadeAdapter(adapter.Adapter):
+
+    def __init__(self, shade_logger, manager, *args, **kwargs):
+        super(ShadeAdapter, self).__init__(*args, **kwargs)
+        self.shade_logger = shade_logger
+        self.manager = manager
+        self.request_log = _log.setup_logging('openstack.cloud.request_ids')
+
+    def _log_request_id(self, response, obj=None):
+        # Log the request id and object id in a specific logger. This way
+        # someone can turn it on if they're interested in this kind of tracing.
+        request_id = response.headers.get('x-openstack-request-id')
+        if not request_id:
+            return response
+        tmpl = "{meth} call to {service} for {url} used request id {req}"
+        kwargs = dict(
+            meth=response.request.method,
+            service=self.service_type,
+            url=response.request.url,
+            req=request_id)
+
+        if isinstance(obj, dict):
+            obj_id = obj.get('id', obj.get('uuid'))
+            if obj_id:
+                kwargs['obj_id'] = obj_id
+                tmpl += " returning object {obj_id}"
+        self.request_log.debug(tmpl.format(**kwargs))
+        return response
+
+    def _munch_response(self, response, result_key=None, error_message=None):
+        exc.raise_from_response(response, error_message=error_message)
+
+        if not response.content:
+            # This doesn't have any content
+            return self._log_request_id(response)
+
+        # Some REST calls do not return json content. Don't decode it.
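+        # (Object-store data and bare HTML error pages are examples of
+        # non-JSON bodies that are returned to the caller undecoded.)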
+        content_type = response.headers.get('Content-Type', '')
+        if 'application/json' not in content_type:
+            return self._log_request_id(response)
+
+        try:
+            result_json = response.json()
+            self._log_request_id(response, result_json)
+        except Exception:
+            return self._log_request_id(response)
+        return result_json
+
+    def request(
+            self, url, method, run_async=False, error_message=None,
+            *args, **kwargs):
+        name_parts = extract_name(url)
+        name = '.'.join([self.service_type, method] + name_parts)
+        class_name = "".join([
+            part.lower().capitalize() for part in name.split('.')])
+
+        request_method = functools.partial(
+            super(ShadeAdapter, self).request, url, method)
+
+        class RequestTask(task_manager.BaseTask):
+
+            def __init__(self, **kw):
+                super(RequestTask, self).__init__(**kw)
+                self.name = name
+                self.__class__.__name__ = str(class_name)
+                self.run_async = run_async
+
+            def main(self, client):
+                self.args.setdefault('raise_exc', False)
+                return request_method(**self.args)
+
+        response = self.manager.submit_task(RequestTask(**kwargs))
+        if run_async:
+            return response
+        else:
+            return self._munch_response(response, error_message=error_message)
+
+    def _version_matches(self, version):
+        api_version = self.get_api_major_version()
+        if api_version:
+            return api_version[0] == version
+        return False
diff --git a/openstack/cloud/_heat/__init__.py b/openstack/cloud/_heat/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/openstack/cloud/_heat/environment_format.py b/openstack/cloud/_heat/environment_format.py
new file mode 100644
index 000000000..ac60715ae
--- /dev/null
+++ b/openstack/cloud/_heat/environment_format.py
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import yaml
+
+from openstack.cloud._heat import template_format
+
+
+SECTIONS = (
+    PARAMETER_DEFAULTS, PARAMETERS, RESOURCE_REGISTRY,
+    ENCRYPTED_PARAM_NAMES, EVENT_SINKS,
+    PARAMETER_MERGE_STRATEGIES
+) = (
+    'parameter_defaults', 'parameters', 'resource_registry',
+    'encrypted_param_names', 'event_sinks',
+    'parameter_merge_strategies'
+)
+
+
+def parse(env_str):
+    """Takes a string and returns a dict containing the parsed structure.
+
+    This includes determination of whether the string is using the
+    YAML format.
+    """
+    try:
+        env = yaml.load(env_str, Loader=template_format.yaml_loader)
+    except yaml.YAMLError:
+        # NOTE(prazumovsky): we need to return a more informative error to
+        # the user, so use SafeLoader, which returns an error message with
+        # the template snippet where the error occurred.
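+        # Retrying with the pure-python SafeLoader is purely about error
+        # quality: it includes the offending snippet in its message, which
+        # is then surfaced to callers as a ValueError below.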
+ try: + env = yaml.load(env_str, Loader=yaml.SafeLoader) + except yaml.YAMLError as yea: + raise ValueError(yea) + else: + if env is None: + env = {} + elif not isinstance(env, dict): + raise ValueError( + 'The environment is not a valid YAML mapping data type.') + + for param in env: + if param not in SECTIONS: + raise ValueError('environment has wrong section "%s"' % param) + + return env diff --git a/openstack/cloud/_heat/event_utils.py b/openstack/cloud/_heat/event_utils.py new file mode 100644 index 000000000..bceec38af --- /dev/null +++ b/openstack/cloud/_heat/event_utils.py @@ -0,0 +1,98 @@ +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import time + +from openstack.cloud import meta + + +def get_events(cloud, stack_id, event_args, marker=None, limit=None): + # TODO(mordred) FIX THIS ONCE assert_calls CAN HANDLE QUERY STRINGS + params = collections.OrderedDict() + for k in sorted(event_args.keys()): + params[k] = event_args[k] + + if marker: + event_args['marker'] = marker + if limit: + event_args['limit'] = limit + + data = cloud._orchestration_client.get( + '/stacks/{id}/events'.format(id=stack_id), + params=params) + events = meta.get_and_munchify('events', data) + + # Show which stack the event comes from (for nested events) + for e in events: + e['stack_name'] = stack_id.split("/")[0] + return events + + +def poll_for_events( + cloud, stack_name, action=None, poll_period=5, marker=None): + """Continuously poll events and logs for performed action on stack.""" + + if action: + stop_status = ('%s_FAILED' % action, '%s_COMPLETE' % action) + stop_check = lambda a: a in stop_status + else: + stop_check = lambda a: a.endswith('_COMPLETE') or a.endswith('_FAILED') + + no_event_polls = 0 + msg_template = "\n Stack %(name)s %(status)s \n" + + def is_stack_event(event): + if event.get('resource_name', '') != stack_name: + return False + + phys_id = event.get('physical_resource_id', '') + links = dict((l.get('rel'), + l.get('href')) for l in event.get('links', [])) + stack_id = links.get('stack', phys_id).rsplit('/', 1)[-1] + return stack_id == phys_id + + while True: + events = get_events( + cloud, stack_id=stack_name, + event_args={'sort_dir': 'asc', 'marker': marker}) + + if len(events) == 0: + no_event_polls += 1 + else: + no_event_polls = 0 + # set marker to last event that was received. 
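+            # Events are fetched with sort_dir=asc, so the last entry is
+            # the newest one and its id is the marker for the next poll.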
+            marker = getattr(events[-1], 'id', None)
+
+            for event in events:
+                # check if stack event was also received
+                if is_stack_event(event):
+                    stack_status = getattr(event, 'resource_status', '')
+                    msg = msg_template % dict(
+                        name=stack_name, status=stack_status)
+                    if stop_check(stack_status):
+                        return stack_status, msg
+
+        if no_event_polls >= 2:
+            # after 2 polls with no events, fall back to a stack get
+            stack = cloud.get_stack(stack_name)
+            stack_status = stack['stack_status']
+            msg = msg_template % dict(
+                name=stack_name, status=stack_status)
+            if stop_check(stack_status):
+                return stack_status, msg
+            # go back to event polling again
+            no_event_polls = 0
+
+        time.sleep(poll_period)
diff --git a/openstack/cloud/_heat/template_format.py b/openstack/cloud/_heat/template_format.py
new file mode 100644
index 000000000..4bb6098dc
--- /dev/null
+++ b/openstack/cloud/_heat/template_format.py
@@ -0,0 +1,69 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import yaml
+
+if hasattr(yaml, 'CSafeLoader'):
+    yaml_loader = yaml.CSafeLoader
+else:
+    yaml_loader = yaml.SafeLoader
+
+if hasattr(yaml, 'CSafeDumper'):
+    yaml_dumper = yaml.CSafeDumper
+else:
+    yaml_dumper = yaml.SafeDumper
+
+
+def _construct_yaml_str(self, node):
+    # Override the default string handling function
+    # to always return unicode objects
+    return self.construct_scalar(node)
+yaml_loader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
+# Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type
+# datetime.date which causes problems in API layer when being processed by
+# openstack.common.jsonutils. Therefore, make a unicode string out of
+# timestamps until jsonutils can handle dates.
+yaml_loader.add_constructor(u'tag:yaml.org,2002:timestamp',
+                            _construct_yaml_str)
+
+
+def parse(tmpl_str):
+    """Takes a string and returns a dict containing the parsed structure.
+
+    This includes determination of whether the string is using the
+    JSON or YAML format.
+    """
+    # strip any whitespace before the check
+    tmpl_str = tmpl_str.strip()
+    if tmpl_str.startswith('{'):
+        tpl = json.loads(tmpl_str)
+    else:
+        try:
+            tpl = yaml.load(tmpl_str, Loader=yaml_loader)
+        except yaml.YAMLError:
+            # NOTE(prazumovsky): we need to return a more informative error
+            # to the user, so use SafeLoader, which returns an error message
+            # with the template snippet where the error occurred.
+ try: + tpl = yaml.load(tmpl_str, Loader=yaml.SafeLoader) + except yaml.YAMLError as yea: + raise ValueError(yea) + else: + if tpl is None: + tpl = {} + # Looking for supported version keys in the loaded template + if not ('HeatTemplateFormatVersion' in tpl + or 'heat_template_version' in tpl + or 'AWSTemplateFormatVersion' in tpl): + raise ValueError("Template format version not found.") + return tpl diff --git a/openstack/cloud/_heat/template_utils.py b/openstack/cloud/_heat/template_utils.py new file mode 100644 index 000000000..c56b76ea5 --- /dev/null +++ b/openstack/cloud/_heat/template_utils.py @@ -0,0 +1,314 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import json +import six +from six.moves.urllib import parse +from six.moves.urllib import request + +from openstack.cloud._heat import environment_format +from openstack.cloud._heat import template_format +from openstack.cloud._heat import utils +from openstack.cloud import exc + + +def get_template_contents(template_file=None, template_url=None, + template_object=None, object_request=None, + files=None, existing=False): + + is_object = False + tpl = None + + # Transform a bare file path to a file:// URL. + if template_file: + template_url = utils.normalise_file_path_to_url(template_file) + + if template_url: + tpl = request.urlopen(template_url).read() + + elif template_object: + is_object = True + template_url = template_object + tpl = object_request and object_request('GET', + template_object) + elif existing: + return {}, None + else: + raise exc.OpenStackCloudException( + 'Must provide one of template_file,' + ' template_url or template_object') + + if not tpl: + raise exc.OpenStackCloudException( + 'Could not fetch template from %s' % template_url) + + try: + if isinstance(tpl, six.binary_type): + tpl = tpl.decode('utf-8') + template = template_format.parse(tpl) + except ValueError as e: + raise exc.OpenStackCloudException( + 'Error parsing template %(url)s %(error)s' % + {'url': template_url, 'error': e}) + + tmpl_base_url = utils.base_url_for_url(template_url) + if files is None: + files = {} + resolve_template_get_files(template, files, tmpl_base_url, is_object, + object_request) + return files, template + + +def resolve_template_get_files(template, files, template_base_url, + is_object=False, object_request=None): + + def ignore_if(key, value): + if key != 'get_file' and key != 'type': + return True + if not isinstance(value, six.string_types): + return True + if (key == 'type' and + not value.endswith(('.yaml', '.template'))): + return True + return False + + def recurse_if(value): + return isinstance(value, (dict, list)) + + get_file_contents(template, files, template_base_url, + ignore_if, recurse_if, is_object, object_request) + + +def is_template(file_content): + try: + if isinstance(file_content, six.binary_type): + file_content = file_content.decode('utf-8') + template_format.parse(file_content) + except (ValueError, TypeError): + return False + return 
True + + +def get_file_contents(from_data, files, base_url=None, + ignore_if=None, recurse_if=None, + is_object=False, object_request=None): + + if recurse_if and recurse_if(from_data): + if isinstance(from_data, dict): + recurse_data = from_data.values() + else: + recurse_data = from_data + for value in recurse_data: + get_file_contents(value, files, base_url, ignore_if, recurse_if, + is_object, object_request) + + if isinstance(from_data, dict): + for key, value in from_data.items(): + if ignore_if and ignore_if(key, value): + continue + + if base_url and not base_url.endswith('/'): + base_url = base_url + '/' + + str_url = parse.urljoin(base_url, value) + if str_url not in files: + if is_object and object_request: + file_content = object_request('GET', str_url) + else: + file_content = utils.read_url_content(str_url) + if is_template(file_content): + if is_object: + template = get_template_contents( + template_object=str_url, files=files, + object_request=object_request)[1] + else: + template = get_template_contents( + template_url=str_url, files=files)[1] + file_content = json.dumps(template) + files[str_url] = file_content + # replace the data value with the normalised absolute URL + from_data[key] = str_url + + +def deep_update(old, new): + '''Merge nested dictionaries.''' + + # Prevents an error if in a previous iteration + # old[k] = None but v[k] = {...}, + if old is None: + old = {} + + for k, v in new.items(): + if isinstance(v, collections.Mapping): + r = deep_update(old.get(k, {}), v) + old[k] = r + else: + old[k] = new[k] + return old + + +def process_multiple_environments_and_files(env_paths=None, template=None, + template_url=None, + env_path_is_object=None, + object_request=None, + env_list_tracker=None): + """Reads one or more environment files. + + Reads in each specified environment file and returns a dictionary + of the filenames->contents (suitable for the files dict) + and the consolidated environment (after having applied the correct + overrides based on order). + + If a list is provided in the env_list_tracker parameter, the behavior + is altered to take advantage of server-side environment resolution. 
+ Specifically, this means: + + * Populating env_list_tracker with an ordered list of environment file + URLs to be passed to the server + * Including the contents of each environment file in the returned + files dict, keyed by one of the URLs in env_list_tracker + + :param env_paths: list of paths to the environment files to load; if + None, empty results will be returned + :type env_paths: list or None + :param template: unused; only included for API compatibility + :param template_url: unused; only included for API compatibility + :param env_list_tracker: if specified, environment filenames will be + stored within + :type env_list_tracker: list or None + :return: tuple of files dict and a dict of the consolidated environment + :rtype: tuple + """ + merged_files = {} + merged_env = {} + + # If we're keeping a list of environment files separately, include the + # contents of the files in the files dict + include_env_in_files = env_list_tracker is not None + + if env_paths: + for env_path in env_paths: + files, env = process_environment_and_files( + env_path=env_path, + template=template, + template_url=template_url, + env_path_is_object=env_path_is_object, + object_request=object_request, + include_env_in_files=include_env_in_files) + + # 'files' looks like {"filename1": contents, "filename2": contents} + # so a simple update is enough for merging + merged_files.update(files) + + # 'env' can be a deeply nested dictionary, so a simple update is + # not enough + merged_env = deep_update(merged_env, env) + + if env_list_tracker is not None: + env_url = utils.normalise_file_path_to_url(env_path) + env_list_tracker.append(env_url) + + return merged_files, merged_env + + +def process_environment_and_files(env_path=None, + template=None, + template_url=None, + env_path_is_object=None, + object_request=None, + include_env_in_files=False): + """Loads a single environment file. + + Returns an entry suitable for the files dict which maps the environment + filename to its contents. + + :param env_path: full path to the file to load + :type env_path: str or None + :param include_env_in_files: if specified, the raw environment file itself + will be included in the returned files dict + :type include_env_in_files: bool + :return: tuple of files dict and the loaded environment as a dict + :rtype: (dict, dict) + """ + files = {} + env = {} + + is_object = env_path_is_object and env_path_is_object(env_path) + + if is_object: + raw_env = object_request and object_request('GET', env_path) + env = environment_format.parse(raw_env) + env_base_url = utils.base_url_for_url(env_path) + + resolve_environment_urls( + env.get('resource_registry'), + files, + env_base_url, is_object=True, object_request=object_request) + + elif env_path: + env_url = utils.normalise_file_path_to_url(env_path) + env_base_url = utils.base_url_for_url(env_url) + raw_env = request.urlopen(env_url).read() + + env = environment_format.parse(raw_env) + + resolve_environment_urls( + env.get('resource_registry'), + files, + env_base_url) + + if include_env_in_files: + files[env_url] = json.dumps(env) + + return files, env + + +def resolve_environment_urls(resource_registry, files, env_base_url, + is_object=False, object_request=None): + """Handles any resource URLs specified in an environment. 
+ + :param resource_registry: mapping of type name to template filename + :type resource_registry: dict + :param files: dict to store loaded file contents into + :type files: dict + :param env_base_url: base URL to look in when loading files + :type env_base_url: str or None + """ + if resource_registry is None: + return + + rr = resource_registry + base_url = rr.get('base_url', env_base_url) + + def ignore_if(key, value): + if key == 'base_url': + return True + if isinstance(value, dict): + return True + if '::' in value: + # Built in providers like: "X::Compute::Server" + # don't need downloading. + return True + if key in ['hooks', 'restricted_actions']: + return True + + get_file_contents(rr, files, base_url, ignore_if, + is_object=is_object, object_request=object_request) + + for res_name, res_dict in rr.get('resources', {}).items(): + res_base_url = res_dict.get('base_url', base_url) + get_file_contents( + res_dict, files, res_base_url, ignore_if, + is_object=is_object, object_request=object_request) diff --git a/openstack/cloud/_heat/utils.py b/openstack/cloud/_heat/utils.py new file mode 100644 index 000000000..c916c8b63 --- /dev/null +++ b/openstack/cloud/_heat/utils.py @@ -0,0 +1,61 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import os + +from six.moves.urllib import error +from six.moves.urllib import parse +from six.moves.urllib import request + +from openstack.cloud import exc + + +def base_url_for_url(url): + parsed = parse.urlparse(url) + parsed_dir = os.path.dirname(parsed.path) + return parse.urljoin(url, parsed_dir) + + +def normalise_file_path_to_url(path): + if parse.urlparse(path).scheme: + return path + path = os.path.abspath(path) + return parse.urljoin('file:', request.pathname2url(path)) + + +def read_url_content(url): + try: + # TODO(mordred) Use requests + content = request.urlopen(url).read() + except error.URLError: + raise exc.OpenStackCloudException( + 'Could not fetch contents for %s' % url) + + if content: + try: + content.decode('utf-8') + except ValueError: + content = base64.encodestring(content) + return content + + +def resource_nested_identifier(rsrc): + nested_link = [l for l in rsrc.links or [] + if l.get('rel') == 'nested'] + if nested_link: + nested_href = nested_link[0].get('href') + nested_identifier = nested_href.split("/")[-2:] + return "/".join(nested_identifier) diff --git a/openstack/cloud/_normalize.py b/openstack/cloud/_normalize.py new file mode 100644 index 000000000..cf80627bc --- /dev/null +++ b/openstack/cloud/_normalize.py @@ -0,0 +1,1095 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(shade) The normalize functions here should get merged into
+# the sdk resource objects.
+
+import datetime
+import munch
+import six
+
+_IMAGE_FIELDS = (
+    'checksum',
+    'container_format',
+    'direct_url',
+    'disk_format',
+    'file',
+    'id',
+    'name',
+    'owner',
+    'virtual_size',
+)
+
+_SERVER_FIELDS = (
+    'accessIPv4',
+    'accessIPv6',
+    'addresses',
+    'adminPass',
+    'created',
+    'key_name',
+    'metadata',
+    'networks',
+    'private_v4',
+    'public_v4',
+    'public_v6',
+    'status',
+    'updated',
+    'user_id',
+)
+
+_KEYPAIR_FIELDS = (
+    'fingerprint',
+    'name',
+    'private_key',
+    'public_key',
+    'user_id',
+)
+
+_KEYPAIR_USELESS_FIELDS = (
+    'deleted',
+    'deleted_at',
+    'id',
+    'updated_at',
+)
+
+_COMPUTE_LIMITS_FIELDS = (
+    ('maxPersonality', 'max_personality'),
+    ('maxPersonalitySize', 'max_personality_size'),
+    ('maxServerGroupMembers', 'max_server_group_members'),
+    ('maxServerGroups', 'max_server_groups'),
+    ('maxServerMeta', 'max_server_meta'),
+    ('maxTotalCores', 'max_total_cores'),
+    ('maxTotalInstances', 'max_total_instances'),
+    ('maxTotalKeypairs', 'max_total_keypairs'),
+    ('maxTotalRAMSize', 'max_total_ram_size'),
+    ('totalCoresUsed', 'total_cores_used'),
+    ('totalInstancesUsed', 'total_instances_used'),
+    ('totalRAMUsed', 'total_ram_used'),
+    ('totalServerGroupsUsed', 'total_server_groups_used'),
+)
+
+
+_pushdown_fields = {
+    'project': [
+        'domain_id'
+    ]
+}
+
+
+def _split_filters(obj_name='', filters=None, **kwargs):
+    # Handle jmespath filters
+    if not filters:
+        filters = {}
+    if not isinstance(filters, dict):
+        return {}, filters
+    # Filter out None values from extra kwargs, because those are
+    # defaults. If you want to search for things with None values,
+    # they're going to need to go into the filters dict
+    for (key, value) in kwargs.items():
+        if value is not None:
+            filters[key] = value
+    pushdown = {}
+    client = {}
+    for (key, value) in filters.items():
+        if key in _pushdown_fields.get(obj_name, {}):
+            pushdown[key] = value
+        else:
+            client[key] = value
+    return pushdown, client
+
+
+def _to_bool(value):
+    if isinstance(value, six.string_types):
+        if not value:
+            return False
+        prospective = value.lower().capitalize()
+        return prospective == 'True'
+    return bool(value)
+
+
+def _pop_int(resource, key):
+    return int(resource.pop(key, 0) or 0)
+
+
+def _pop_float(resource, key):
+    return float(resource.pop(key, 0) or 0)
+
+
+def _pop_or_get(resource, key, default, strict):
+    if strict:
+        return resource.pop(key, default)
+    else:
+        return resource.get(key, default)
+
+
+class Normalizer(object):
+    '''Mix-in class to provide the normalization functions.
+
+    This is in a separate class just for on-disk source code organization
+    reasons.
+    '''
+
+    def _normalize_compute_limits(self, limits, project_id=None):
+        """ Normalize a limits object.
+
+        Limits are modified in this method and shouldn't be modified
+        afterwards.
+        """
+
+        # Copy incoming limits because of shared dicts in unittests
+        limits = limits['absolute'].copy()
+
+        new_limits = munch.Munch()
+        new_limits['location'] = self._get_current_location(
+            project_id=project_id)
+
+        for field in _COMPUTE_LIMITS_FIELDS:
+            new_limits[field[1]] = limits.pop(field[0], None)
+
+        new_limits['properties'] = limits.copy()
+
+        return new_limits
+
+    def _remove_novaclient_artifacts(self, item):
+        # Remove novaclient artifacts
+        item.pop('links', None)
+        item.pop('NAME_ATTR', None)
+        item.pop('HUMAN_ID', None)
+        item.pop('human_id', None)
+        item.pop('request_ids', None)
+        item.pop('x_openstack_request_ids', None)
+
+    def _normalize_flavors(self, flavors):
+        """ Normalize a list of flavor objects """
+        ret = []
+        for flavor in flavors:
+            ret.append(self._normalize_flavor(flavor))
+        return ret
+
+    def _normalize_flavor(self, flavor):
+        """ Normalize a flavor object """
+        new_flavor = munch.Munch()
+
+        # Copy incoming flavor because of shared dicts in unittests
+        flavor = flavor.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(flavor)
+        flavor.pop('links', None)
+
+        ephemeral = int(_pop_or_get(
+            flavor, 'OS-FLV-EXT-DATA:ephemeral', 0, self.strict_mode))
+        ephemeral = flavor.pop('ephemeral', ephemeral)
+        is_public = _to_bool(_pop_or_get(
+            flavor, 'os-flavor-access:is_public', True, self.strict_mode))
+        is_public = _to_bool(flavor.pop('is_public', is_public))
+        is_disabled = _to_bool(_pop_or_get(
+            flavor, 'OS-FLV-DISABLED:disabled', False, self.strict_mode))
+        extra_specs = _pop_or_get(
+            flavor, 'OS-FLV-WITH-EXT-SPECS:extra_specs', {}, self.strict_mode)
+        extra_specs = flavor.pop('extra_specs', extra_specs)
+        extra_specs = munch.Munch(extra_specs)
+
+        new_flavor['location'] = self.current_location
+        new_flavor['id'] = flavor.pop('id')
+        new_flavor['name'] = flavor.pop('name')
+        new_flavor['is_public'] = is_public
+        new_flavor['is_disabled'] = is_disabled
+        new_flavor['ram'] = _pop_int(flavor, 'ram')
+        new_flavor['vcpus'] = _pop_int(flavor, 'vcpus')
+        new_flavor['disk'] = _pop_int(flavor, 'disk')
+        new_flavor['ephemeral'] = ephemeral
+        new_flavor['swap'] = _pop_int(flavor, 'swap')
+        new_flavor['rxtx_factor'] = _pop_float(flavor, 'rxtx_factor')
+
+        new_flavor['properties'] = flavor.copy()
+        new_flavor['extra_specs'] = extra_specs
+
+        # Backwards compat with nova - passthrough values
+        if not self.strict_mode:
+            for (k, v) in new_flavor['properties'].items():
+                new_flavor.setdefault(k, v)
+
+        return new_flavor
+
+    def _normalize_keypairs(self, keypairs):
+        """Normalize Nova Keypairs"""
+        ret = []
+        for keypair in keypairs:
+            ret.append(self._normalize_keypair(keypair))
+        return ret
+
+    def _normalize_keypair(self, keypair):
+        """Normalize a Nova keypair"""
+
+        new_keypair = munch.Munch()
+        keypair = keypair.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(keypair)
+
+        new_keypair['location'] = self.current_location
+        for key in _KEYPAIR_FIELDS:
+            new_keypair[key] = keypair.pop(key, None)
+        # These are completely meaningless fields
+        for key in _KEYPAIR_USELESS_FIELDS:
+            keypair.pop(key, None)
+        new_keypair['type'] = keypair.pop('type', 'ssh')
+        # created_at isn't returned from the keypair creation. (what?)
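+        # Fall back to the current time so that callers can rely on the
+        # field always being present.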
+        new_keypair['created_at'] = keypair.pop(
+            'created_at', datetime.datetime.now().isoformat())
+        # Don't even get me started on this
+        new_keypair['id'] = new_keypair['name']
+
+        new_keypair['properties'] = keypair.copy()
+
+        return new_keypair
+
+    def _normalize_images(self, images):
+        ret = []
+        for image in images:
+            ret.append(self._normalize_image(image))
+        return ret
+
+    def _normalize_image(self, image):
+        new_image = munch.Munch(
+            location=self._get_current_location(project_id=image.get('owner')))
+
+        # This copy is to keep things from getting epically weird in tests
+        image = image.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(image)
+
+        # If someone made a property called "properties" that contains a
+        # string (this has happened at least one time in the wild), the
+        # rest of the normalization here goes belly up.
+        properties = image.pop('properties', {})
+        if not isinstance(properties, dict):
+            properties = {'properties': properties}
+
+        visibility = image.pop('visibility', None)
+        protected = _to_bool(image.pop('protected', False))
+
+        if visibility:
+            is_public = (visibility == 'public')
+        else:
+            is_public = image.pop('is_public', False)
+            visibility = 'public' if is_public else 'private'
+
+        new_image['size'] = image.pop('OS-EXT-IMG-SIZE:size', 0)
+        new_image['size'] = image.pop('size', new_image['size'])
+
+        new_image['min_ram'] = image.pop('minRam', 0)
+        new_image['min_ram'] = image.pop('min_ram', new_image['min_ram'])
+
+        new_image['min_disk'] = image.pop('minDisk', 0)
+        new_image['min_disk'] = image.pop('min_disk', new_image['min_disk'])
+
+        new_image['created_at'] = image.pop('created', '')
+        new_image['created_at'] = image.pop(
+            'created_at', new_image['created_at'])
+
+        new_image['updated_at'] = image.pop('updated', '')
+        new_image['updated_at'] = image.pop(
+            'updated_at', new_image['updated_at'])
+
+        for field in _IMAGE_FIELDS:
+            new_image[field] = image.pop(field, None)
+
+        new_image['tags'] = image.pop('tags', [])
+        new_image['status'] = image.pop('status').lower()
+        for field in ('min_ram', 'min_disk', 'size', 'virtual_size'):
+            new_image[field] = _pop_int(new_image, field)
+        new_image['is_protected'] = protected
+        new_image['locations'] = image.pop('locations', [])
+
+        metadata = image.pop('metadata', {})
+        for key, val in metadata.items():
+            properties.setdefault(key, val)
+
+        for key, val in image.items():
+            properties.setdefault(key, val)
+        new_image['properties'] = properties
+        new_image['is_public'] = is_public
+        new_image['visibility'] = visibility
+
+        # Backwards compat with glance
+        if not self.strict_mode:
+            for key, val in properties.items():
+                if key != 'properties':
+                    new_image[key] = val
+            new_image['protected'] = protected
+            new_image['metadata'] = properties
+            new_image['created'] = new_image['created_at']
+            new_image['updated'] = new_image['updated_at']
+            new_image['minDisk'] = new_image['min_disk']
+            new_image['minRam'] = new_image['min_ram']
+        return new_image
+
+    def _normalize_secgroups(self, groups):
+        """Normalize the structure of security groups
+
+        This makes security group dicts, as returned from nova, look like the
+        security group dicts as returned from neutron. This does not make them
+        look exactly the same, but it's pretty close.
+
+        :param list groups: A list of security group dicts.
+
+        :returns: A list of normalized dicts.
+        """
+        ret = []
+        for group in groups:
+            ret.append(self._normalize_secgroup(group))
+        return ret
+
+    def _normalize_secgroup(self, group):
+
+        ret = munch.Munch()
+        # Copy incoming group because of shared dicts in unittests
+        group = group.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(group)
+
+        rules = self._normalize_secgroup_rules(
+            group.pop('security_group_rules', group.pop('rules', [])))
+        project_id = group.pop('tenant_id', '')
+        project_id = group.pop('project_id', project_id)
+
+        ret['location'] = self._get_current_location(project_id=project_id)
+        ret['id'] = group.pop('id')
+        ret['name'] = group.pop('name')
+        ret['security_group_rules'] = rules
+        ret['description'] = group.pop('description')
+        ret['properties'] = group
+
+        # Backwards compat with Neutron
+        if not self.strict_mode:
+            ret['tenant_id'] = project_id
+            ret['project_id'] = project_id
+            for key, val in ret['properties'].items():
+                ret.setdefault(key, val)
+
+        return ret
+
+    def _normalize_secgroup_rules(self, rules):
+        """Normalize the structure of nova security group rules
+
+        Note that nova uses -1 for non-specific port values, but neutron
+        represents these with None.
+
+        :param list rules: A list of security group rule dicts.
+
+        :returns: A list of normalized dicts.
+        """
+        ret = []
+        for rule in rules:
+            ret.append(self._normalize_secgroup_rule(rule))
+        return ret
+
+    def _normalize_secgroup_rule(self, rule):
+        ret = munch.Munch()
+        # Copy incoming rule because of shared dicts in unittests
+        rule = rule.copy()
+
+        ret['id'] = rule.pop('id')
+        ret['direction'] = rule.pop('direction', 'ingress')
+        ret['ethertype'] = rule.pop('ethertype', 'IPv4')
+        port_range_min = rule.get(
+            'port_range_min', rule.pop('from_port', None))
+        if port_range_min == -1:
+            port_range_min = None
+        if port_range_min is not None:
+            port_range_min = int(port_range_min)
+        ret['port_range_min'] = port_range_min
+        port_range_max = rule.pop(
+            'port_range_max', rule.pop('to_port', None))
+        if port_range_max == -1:
+            port_range_max = None
+        if port_range_max is not None:
+            port_range_max = int(port_range_max)
+        ret['port_range_max'] = port_range_max
+        ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None))
+        ret['remote_ip_prefix'] = rule.pop(
+            'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None))
+        ret['security_group_id'] = rule.pop(
+            'security_group_id', rule.pop('parent_group_id', None))
+        ret['remote_group_id'] = rule.pop('remote_group_id', None)
+        project_id = rule.pop('tenant_id', '')
+        project_id = rule.pop('project_id', project_id)
+        ret['location'] = self._get_current_location(project_id=project_id)
+        ret['properties'] = rule
+
+        # Backwards compat with Neutron
+        if not self.strict_mode:
+            ret['tenant_id'] = project_id
+            ret['project_id'] = project_id
+            for key, val in ret['properties'].items():
+                ret.setdefault(key, val)
+        return ret
+
+    def _normalize_servers(self, servers):
+        # Here instead of _utils because we need access to region and cloud
+        # name from the cloud object
+        ret = []
+        for server in servers:
+            ret.append(self._normalize_server(server))
+        return ret
+
+    def _normalize_server(self, server):
+        ret = munch.Munch()
+        # Copy incoming server because of shared dicts in unittests
+        server = server.copy()
+
+        self._remove_novaclient_artifacts(server)
+
+        ret['id'] = server.pop('id')
+        ret['name'] = server.pop('name')
+
+        server['flavor'].pop('links', None)
+        ret['flavor'] = server.pop('flavor')
+
+        # OpenStack can return image as a string when you've booted
+        # from volume
+        if str(server['image']) != server['image']:
+            server['image'].pop('links', None)
+        ret['image'] = server.pop('image')
+
+        project_id = server.pop('tenant_id', '')
+        project_id = server.pop('project_id', project_id)
+
+        az = _pop_or_get(
+            server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode)
+        ret['location'] = self._get_current_location(
+            project_id=project_id, zone=az)
+
+        # Ensure volumes is always in the server dict, even if empty
+        ret['volumes'] = _pop_or_get(
+            server, 'os-extended-volumes:volumes_attached',
+            [], self.strict_mode)
+
+        config_drive = server.pop('config_drive', False)
+        ret['has_config_drive'] = _to_bool(config_drive)
+
+        host_id = server.pop('hostId', None)
+        ret['host_id'] = host_id
+
+        ret['progress'] = _pop_int(server, 'progress')
+
+        # Leave these in so that the general properties handling works
+        ret['disk_config'] = _pop_or_get(
+            server, 'OS-DCF:diskConfig', None, self.strict_mode)
+        for key in (
+                'OS-EXT-STS:power_state',
+                'OS-EXT-STS:task_state',
+                'OS-EXT-STS:vm_state',
+                'OS-SRV-USG:launched_at',
+                'OS-SRV-USG:terminated_at'):
+            short_key = key.split(':')[1]
+            ret[short_key] = _pop_or_get(server, key, None, self.strict_mode)
+
+        # Protect against security_groups being None
+        ret['security_groups'] = server.pop('security_groups', None) or []
+
+        for field in _SERVER_FIELDS:
+            ret[field] = server.pop(field, None)
+        if not ret['networks']:
+            ret['networks'] = {}
+
+        ret['interface_ip'] = ''
+
+        ret['properties'] = server.copy()
+
+        # Backwards compat
+        if not self.strict_mode:
+            ret['hostId'] = host_id
+            ret['config_drive'] = config_drive
+            ret['project_id'] = project_id
+            ret['tenant_id'] = project_id
+            ret['region'] = self.region_name
+            ret['cloud'] = self.name
+            ret['az'] = az
+            for key, val in ret['properties'].items():
+                ret.setdefault(key, val)
+        return ret
+
+    def _normalize_floating_ips(self, ips):
+        """Normalize the structure of floating IPs
+
+        Unfortunately, not all the Neutron floating_ip attributes are
+        available with Nova, and not all Nova floating_ip attributes are
+        available with Neutron. This function extracts the attributes that
+        are common to the Nova and Neutron floating IP resources.
+
+        If the whole structure is needed inside shade, shade provides private
+        methods that return "original" objects (e.g.
+        _neutron_allocate_floating_ip)
+
+        :param list ips: A list of Neutron floating IPs.
+
+        :returns:
+            A list of normalized dicts with the following attributes::
+
+                [
+                    {
+                        "id": "this-is-a-floating-ip-id",
+                        "fixed_ip_address": "192.0.2.10",
+                        "floating_ip_address": "198.51.100.10",
+                        "network": "this-is-a-net-or-pool-id",
+                        "attached": True,
+                        "status": "ACTIVE"
+                    }, ...
+                ]
+
+        """
+        return [
+            self._normalize_floating_ip(ip) for ip in ips
+        ]
+
+    def _normalize_floating_ip(self, ip):
+        ret = munch.Munch()
+
+        # Copy incoming floating ip because of shared dicts in unittests
+        ip = ip.copy()
+
+        fixed_ip_address = ip.pop('fixed_ip_address', ip.pop('fixed_ip', None))
+        floating_ip_address = ip.pop('floating_ip_address', ip.pop('ip', None))
+        network_id = ip.pop(
+            'floating_network_id', ip.pop('network', ip.pop('pool', None)))
+        project_id = ip.pop('tenant_id', '')
+        project_id = ip.pop('project_id', project_id)
+
+        instance_id = ip.pop('instance_id', None)
+        router_id = ip.pop('router_id', None)
+        id = ip.pop('id')
+        port_id = ip.pop('port_id', None)
+        created_at = ip.pop('created_at', None)
+        updated_at = ip.pop('updated_at', None)
+        # Note - description may not always be on the underlying cloud.
+ # Normalizing it here is easy - what do we do when people want to + # set a description? + description = ip.pop('description', '') + revision_number = ip.pop('revision_number', None) + + if self._use_neutron_floating(): + attached = bool(port_id) + status = ip.pop('status', 'UNKNOWN') + else: + attached = bool(instance_id) + # In neutron's terms, Nova floating IPs are always ACTIVE + status = 'ACTIVE' + + ret = munch.Munch( + attached=attached, + fixed_ip_address=fixed_ip_address, + floating_ip_address=floating_ip_address, + id=id, + location=self._get_current_location(project_id=project_id), + network=network_id, + port=port_id, + router=router_id, + status=status, + created_at=created_at, + updated_at=updated_at, + description=description, + revision_number=revision_number, + properties=ip.copy(), + ) + # Backwards compat + if not self.strict_mode: + ret['port_id'] = port_id + ret['router_id'] = router_id + ret['project_id'] = project_id + ret['tenant_id'] = project_id + ret['floating_network_id'] = network_id + for key, val in ret['properties'].items(): + ret.setdefault(key, val) + + return ret + + def _normalize_projects(self, projects): + """Normalize the structure of projects + + This makes tenants from keystone v2 look like projects from v3. + + :param list projects: A list of projects to normalize + + :returns: A list of normalized dicts. + """ + ret = [] + for project in projects: + ret.append(self._normalize_project(project)) + return ret + + def _normalize_project(self, project): + + # Copy incoming project because of shared dicts in unittests + project = project.copy() + + # Discard noise + self._remove_novaclient_artifacts(project) + + # In both v2 and v3 + project_id = project.pop('id') + name = project.pop('name', '') + description = project.pop('description', '') + is_enabled = project.pop('enabled', True) + + # Projects are global - strip region + location = self._get_current_location(project_id=project_id) + location['region_name'] = None + + # v3 additions + domain_id = project.pop('domain_id', 'default') + parent_id = project.pop('parent_id', None) + is_domain = project.pop('is_domain', False) + + # Projects have a special relationship with location + location['project']['domain_id'] = domain_id + location['project']['domain_name'] = None + location['project']['name'] = None + location['project']['id'] = parent_id + + ret = munch.Munch( + location=location, + id=project_id, + name=name, + description=description, + is_enabled=is_enabled, + is_domain=is_domain, + properties=project.copy() + ) + + # Backwards compat + if not self.strict_mode: + ret['enabled'] = is_enabled + ret['domain_id'] = domain_id + ret['parent_id'] = parent_id + for key, val in ret['properties'].items(): + ret.setdefault(key, val) + + return ret + + def _normalize_volume_type_access(self, volume_type_access): + + volume_type_access = volume_type_access.copy() + + volume_type_id = volume_type_access.pop('volume_type_id') + project_id = volume_type_access.pop('project_id') + ret = munch.Munch( + location=self.current_location, + project_id=project_id, + volume_type_id=volume_type_id, + properties=volume_type_access.copy(), + ) + return ret + + def _normalize_volume_type_accesses(self, volume_type_accesses): + ret = [] + for volume_type_access in volume_type_accesses: + ret.append(self._normalize_volume_type_access(volume_type_access)) + return ret + + def _normalize_volume_type(self, volume_type): + + volume_type = volume_type.copy() + + volume_id = volume_type.pop('id') + description = 
volume_type.pop('description', None)
+        name = volume_type.pop('name', None)
+        old_is_public = volume_type.pop('os-volume-type-access:is_public',
+                                        False)
+        is_public = volume_type.pop('is_public', old_is_public)
+        qos_specs_id = volume_type.pop('qos_specs_id', None)
+        extra_specs = volume_type.pop('extra_specs', {})
+        ret = munch.Munch(
+            location=self.current_location,
+            is_public=is_public,
+            id=volume_id,
+            name=name,
+            description=description,
+            qos_specs_id=qos_specs_id,
+            extra_specs=extra_specs,
+            properties=volume_type.copy(),
+        )
+        return ret
+
+    def _normalize_volume_types(self, volume_types):
+        ret = []
+        for volume in volume_types:
+            ret.append(self._normalize_volume_type(volume))
+        return ret
+
+    def _normalize_volumes(self, volumes):
+        """Normalize the structure of volumes
+
+        This makes volumes from cinder v1 look like volumes from v2.
+
+        :param list volumes: A list of volumes to normalize
+
+        :returns: A list of normalized dicts.
+        """
+        ret = []
+        for volume in volumes:
+            ret.append(self._normalize_volume(volume))
+        return ret
+
+    def _normalize_volume(self, volume):
+
+        volume = volume.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(volume)
+
+        volume_id = volume.pop('id')
+
+        name = volume.pop('display_name', None)
+        name = volume.pop('name', name)
+
+        description = volume.pop('display_description', None)
+        description = volume.pop('description', description)
+
+        is_bootable = _to_bool(volume.pop('bootable', True))
+        is_encrypted = _to_bool(volume.pop('encrypted', False))
+        can_multiattach = _to_bool(volume.pop('multiattach', False))
+
+        project_id = _pop_or_get(
+            volume, 'os-vol-tenant-attr:tenant_id', None, self.strict_mode)
+        az = volume.pop('availability_zone', None)
+
+        location = self._get_current_location(project_id=project_id, zone=az)
+
+        host = _pop_or_get(
+            volume, 'os-vol-host-attr:host', None, self.strict_mode)
+        replication_extended_status = _pop_or_get(
+            volume, 'os-volume-replication:extended_status',
+            None, self.strict_mode)
+
+        migration_status = _pop_or_get(
+            volume, 'os-vol-mig-status-attr:migstat', None, self.strict_mode)
+        migration_status = volume.pop('migration_status', migration_status)
+        _pop_or_get(volume, 'user_id', None, self.strict_mode)
+        source_volume_id = _pop_or_get(
+            volume, 'source_volid', None, self.strict_mode)
+        replication_driver = _pop_or_get(
+            volume, 'os-volume-replication:driver_data',
+            None, self.strict_mode)
+
+        ret = munch.Munch(
+            location=location,
+            id=volume_id,
+            name=name,
+            description=description,
+            size=_pop_int(volume, 'size'),
+            attachments=volume.pop('attachments', []),
+            status=volume.pop('status'),
+            migration_status=migration_status,
+            host=host,
+            replication_driver=replication_driver,
+            replication_status=volume.pop('replication_status', None),
+            replication_extended_status=replication_extended_status,
+            snapshot_id=volume.pop('snapshot_id', None),
+            created_at=volume.pop('created_at'),
+            updated_at=volume.pop('updated_at', None),
+            source_volume_id=source_volume_id,
+            consistencygroup_id=volume.pop('consistencygroup_id', None),
+            volume_type=volume.pop('volume_type', None),
+            metadata=volume.pop('metadata', {}),
+            is_bootable=is_bootable,
+            is_encrypted=is_encrypted,
+            can_multiattach=can_multiattach,
+            properties=volume.copy(),
+        )
+
+        # Backwards compat
+        if not self.strict_mode:
+            ret['display_name'] = name
+            ret['display_description'] = description
+            ret['bootable'] = is_bootable
+            ret['encrypted'] = is_encrypted
+            ret['multiattach'] = can_multiattach
+            ret['availability_zone'] = az
+            for key, val in ret['properties'].items():
+                ret.setdefault(key, val)
+        return ret
+
+    def _normalize_volume_attachment(self, attachment):
+        """ Normalize a volume attachment object"""
+
+        attachment = attachment.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(attachment)
+        return munch.Munch(**attachment)
+
+    def _normalize_volume_backups(self, backups):
+        ret = []
+        for backup in backups:
+            ret.append(self._normalize_volume_backup(backup))
+        return ret
+
+    def _normalize_volume_backup(self, backup):
+        """ Normalize a volume backup object"""
+
+        backup = backup.copy()
+        # Discard noise
+        self._remove_novaclient_artifacts(backup)
+        return munch.Munch(**backup)
+
+    def _normalize_compute_usage(self, usage):
+        """ Normalize a compute usage object """
+
+        usage = usage.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(usage)
+        project_id = usage.pop('tenant_id', None)
+
+        ret = munch.Munch(
+            location=self._get_current_location(project_id=project_id),
+        )
+        for key in (
+                'max_personality',
+                'max_personality_size',
+                'max_server_group_members',
+                'max_server_groups',
+                'max_server_meta',
+                'max_total_cores',
+                'max_total_instances',
+                'max_total_keypairs',
+                'max_total_ram_size',
+                'total_cores_used',
+                'total_hours',
+                'total_instances_used',
+                'total_local_gb_usage',
+                'total_memory_mb_usage',
+                'total_ram_used',
+                'total_server_groups_used',
+                'total_vcpus_usage'):
+            ret[key] = usage.pop(key, 0)
+        ret['started_at'] = usage.pop('start', None)
+        ret['stopped_at'] = usage.pop('stop', None)
+        ret['server_usages'] = self._normalize_server_usages(
+            usage.pop('server_usages', []))
+        ret['properties'] = usage
+        return ret
+
+    def _normalize_server_usage(self, server_usage):
+        """ Normalize a server usage object """
+
+        server_usage = server_usage.copy()
+        # TODO(mordred) Right now there is already a location on the usage
+        # object. Including one here seems verbose.
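+        # tenant_id is dropped; the parent compute usage record already
+        # carries the project in its location.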
+ server_usage.pop('tenant_id') + ret = munch.Munch() + + ret['ended_at'] = server_usage.pop('ended_at', None) + ret['started_at'] = server_usage.pop('started_at', None) + for key in ( + 'flavor', + 'instance_id', + 'name', + 'state'): + ret[key] = server_usage.pop(key, '') + for key in ( + 'hours', + 'local_gb', + 'memory_mb', + 'uptime', + 'vcpus'): + ret[key] = server_usage.pop(key, 0) + ret['properties'] = server_usage + return ret + + def _normalize_server_usages(self, server_usages): + ret = [] + for server_usage in server_usages: + ret.append(self._normalize_server_usage(server_usage)) + return ret + + def _normalize_cluster_templates(self, cluster_templates): + ret = [] + for cluster_template in cluster_templates: + ret.append(self._normalize_cluster_template(cluster_template)) + return ret + + def _normalize_cluster_template(self, cluster_template): + """Normalize Magnum cluster_templates.""" + cluster_template = cluster_template.copy() + + # Discard noise + cluster_template.pop('links', None) + cluster_template.pop('human_id', None) + # model_name is a magnumclient-ism + cluster_template.pop('model_name', None) + + ct_id = cluster_template.pop('uuid') + + ret = munch.Munch( + id=ct_id, + location=self._get_current_location(), + ) + ret['is_public'] = cluster_template.pop('public') + ret['is_registry_enabled'] = cluster_template.pop('registry_enabled') + ret['is_tls_disabled'] = cluster_template.pop('tls_disabled') + # pop floating_ip_enabled since we want to hide it in a future patch + fip_enabled = cluster_template.pop('floating_ip_enabled', None) + if not self.strict_mode: + ret['uuid'] = ct_id + if fip_enabled is not None: + ret['floating_ip_enabled'] = fip_enabled + ret['public'] = ret['is_public'] + ret['registry_enabled'] = ret['is_registry_enabled'] + ret['tls_disabled'] = ret['is_tls_disabled'] + + # Optional keys + for (key, default) in ( + ('fixed_network', None), + ('fixed_subnet', None), + ('http_proxy', None), + ('https_proxy', None), + ('labels', {}), + ('master_flavor_id', None), + ('no_proxy', None)): + if key in cluster_template: + ret[key] = cluster_template.pop(key, default) + + for key in ( + 'apiserver_port', + 'cluster_distro', + 'coe', + 'created_at', + 'dns_nameserver', + 'docker_volume_size', + 'external_network_id', + 'flavor_id', + 'image_id', + 'insecure_registry', + 'keypair_id', + 'name', + 'network_driver', + 'server_type', + 'updated_at', + 'volume_driver'): + ret[key] = cluster_template.pop(key) + + ret['properties'] = cluster_template + return ret + + def _normalize_magnum_services(self, magnum_services): + ret = [] + for magnum_service in magnum_services: + ret.append(self._normalize_magnum_service(magnum_service)) + return ret + + def _normalize_magnum_service(self, magnum_service): + """Normalize Magnum magnum_services.""" + magnum_service = magnum_service.copy() + + # Discard noise + magnum_service.pop('links', None) + magnum_service.pop('human_id', None) + # model_name is a magnumclient-ism + magnum_service.pop('model_name', None) + + ret = munch.Munch(location=self._get_current_location()) + + for key in ( + 'binary', + 'created_at', + 'disabled_reason', + 'host', + 'id', + 'report_count', + 'state', + 'updated_at'): + ret[key] = magnum_service.pop(key) + ret['properties'] = magnum_service + return ret + + def _normalize_stacks(self, stacks): + """Normalize Heat Stacks""" + ret = [] + for stack in stacks: + ret.append(self._normalize_stack(stack)) + return ret + + def _normalize_stack(self, stack): + """Normalize Heat Stack""" + stack = 
stack.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(stack)
+
+        # Discard things heatclient adds that aren't in the REST
+        stack.pop('action', None)
+        stack.pop('status', None)
+        stack.pop('identifier', None)
+
+        stack_status = stack.pop('stack_status')
+        (action, status) = stack_status.split('_', 1)
+
+        ret = munch.Munch(
+            id=stack.pop('id'),
+            location=self._get_current_location(),
+            action=action,
+            status=status,
+        )
+        if not self.strict_mode:
+            ret['stack_status'] = stack_status
+
+        for (new_name, old_name) in (
+                ('name', 'stack_name'),
+                ('created_at', 'creation_time'),
+                ('deleted_at', 'deletion_time'),
+                ('updated_at', 'updated_time'),
+                ('description', 'description'),
+                ('is_rollback_enabled', 'disable_rollback'),
+                ('parent', 'parent'),
+                ('notification_topics', 'notification_topics'),
+                ('parameters', 'parameters'),
+                ('outputs', 'outputs'),
+                ('owner', 'stack_owner'),
+                ('status_reason', 'stack_status_reason'),
+                ('stack_user_project_id', 'stack_user_project_id'),
+                ('template_description', 'template_description'),
+                ('timeout_mins', 'timeout_mins'),
+                ('tags', 'tags')):
+            value = stack.pop(old_name, None)
+            ret[new_name] = value
+            if not self.strict_mode:
+                ret[old_name] = value
+        ret['identifier'] = '{name}/{id}'.format(
+            name=ret['name'], id=ret['id'])
+        ret['properties'] = stack
+        return ret
+
+    def _normalize_machines(self, machines):
+        """Normalize Ironic Machines"""
+        ret = []
+        for machine in machines:
+            ret.append(self._normalize_machine(machine))
+        return ret
+
+    def _normalize_machine(self, machine):
+        """Normalize Ironic Machine"""
+        machine = machine.copy()
+
+        # Discard noise
+        self._remove_novaclient_artifacts(machine)
+
+        # TODO(mordred) Normalize this resource
+
+        return machine
diff --git a/openstack/cloud/_tasks.py b/openstack/cloud/_tasks.py
new file mode 100644
index 000000000..294fae3ba
--- /dev/null
+++ b/openstack/cloud/_tasks.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
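+
+# Every class below follows the same shape: a Task subclass whose main()
+# receives the cloud client and forwards self.args to a single
+# python-ironicclient call. A minimal sketch of how such a task is driven
+# (illustrative; assumes the Task/TaskManager behavior used elsewhere in
+# this series, where Task stores its keyword arguments as self.args):
+#
+#     task = MachineNodeGet(node_id='...')
+#     node = cloud.manager.submit_task(task)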
+ +from openstack.cloud import task_manager + + +class MachineCreate(task_manager.Task): + def main(self, client): + return client.ironic_client.node.create(**self.args) + + +class MachineDelete(task_manager.Task): + def main(self, client): + return client.ironic_client.node.delete(**self.args) + + +class MachinePatch(task_manager.Task): + def main(self, client): + return client.ironic_client.node.update(**self.args) + + +class MachinePortGet(task_manager.Task): + def main(self, client): + return client.ironic_client.port.get(**self.args) + + +class MachinePortGetByAddress(task_manager.Task): + def main(self, client): + return client.ironic_client.port.get_by_address(**self.args) + + +class MachinePortCreate(task_manager.Task): + def main(self, client): + return client.ironic_client.port.create(**self.args) + + +class MachinePortDelete(task_manager.Task): + def main(self, client): + return client.ironic_client.port.delete(**self.args) + + +class MachinePortList(task_manager.Task): + def main(self, client): + return client.ironic_client.port.list() + + +class MachineNodeGet(task_manager.Task): + def main(self, client): + return client.ironic_client.node.get(**self.args) + + +class MachineNodeList(task_manager.Task): + def main(self, client): + return client.ironic_client.node.list(**self.args) + + +class MachineNodePortList(task_manager.Task): + def main(self, client): + return client.ironic_client.node.list_ports(**self.args) + + +class MachineNodeUpdate(task_manager.Task): + def main(self, client): + return client.ironic_client.node.update(**self.args) + + +class MachineNodeValidate(task_manager.Task): + def main(self, client): + return client.ironic_client.node.validate(**self.args) + + +class MachineSetMaintenance(task_manager.Task): + def main(self, client): + return client.ironic_client.node.set_maintenance(**self.args) + + +class MachineSetPower(task_manager.Task): + def main(self, client): + return client.ironic_client.node.set_power_state(**self.args) + + +class MachineSetProvision(task_manager.Task): + def main(self, client): + return client.ironic_client.node.set_provision_state(**self.args) diff --git a/openstack/cloud/_utils.py b/openstack/cloud/_utils.py new file mode 100644 index 000000000..ac8ff471b --- /dev/null +++ b/openstack/cloud/_utils.py @@ -0,0 +1,713 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import fnmatch +import inspect +import jmespath +import munch +import netifaces +import re +import six +import sre_constants +import sys +import time +import uuid + +from decorator import decorator + +from openstack import _log +from openstack.cloud import exc +from openstack.cloud import meta + +_decorated_methods = [] + + +def _exc_clear(): + """Because sys.exc_clear is gone in py3 and is not in six.""" + if sys.version_info[0] == 2: + sys.exc_clear() + + +def _iterate_timeout(timeout, message, wait=2): + """Iterate and raise an exception on timeout. 
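+
+    For example (an illustrative sketch; server_is_ready is hypothetical)::
+
+        for count in _iterate_timeout(60, "Timed out waiting for server"):
+            if server_is_ready():
+                break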
+
+    This is a generator that will continually yield and sleep for
+    wait seconds, and if the timeout is reached, will raise an exception
+    with the given message.
+
+    """
+    log = _log.setup_logging('openstack.cloud.iterate_timeout')
+
+    try:
+        # None as a wait winds up flowing well in the per-resource cache
+        # flow. We could spread this logic around to all of the calling
+        # points, but just having this treat None as "I don't have a value"
+        # seems friendlier
+        if wait is None:
+            wait = 2
+        elif wait == 0:
+            # wait should be < timeout, unless timeout is None
+            wait = 0.1 if timeout is None else min(0.1, timeout)
+        wait = float(wait)
+    except ValueError:
+        raise exc.OpenStackCloudException(
+            "Wait value must be an int or float value. {wait} given"
+            " instead".format(wait=wait))
+
+    start = time.time()
+    count = 0
+    while (timeout is None) or (time.time() < start + timeout):
+        count += 1
+        yield count
+        log.debug('Waiting %s seconds', wait)
+        time.sleep(wait)
+    raise exc.OpenStackCloudTimeout(message)
+
+
+def _make_unicode(input):
+    """Turn an input into unicode unconditionally
+
+    :param input:
+       A unicode, string or other object
+    """
+    try:
+        if isinstance(input, unicode):
+            return input
+        if isinstance(input, str):
+            return input.decode('utf-8')
+        else:
+            # int, for example
+            return unicode(input)
+    except NameError:
+        # python3!
+        return str(input)
+
+
+def _dictify_resource(resource):
+    if isinstance(resource, list):
+        return [_dictify_resource(r) for r in resource]
+    else:
+        if hasattr(resource, 'toDict'):
+            return resource.toDict()
+        else:
+            return resource
+
+
+def _filter_list(data, name_or_id, filters):
+    """Filter a list by name/ID and arbitrary meta data.
+
+    :param list data:
+        The list of dictionary data to filter. It is expected that
+        each dictionary contains an 'id' and 'name'
+        key if a value for name_or_id is given.
+    :param string name_or_id:
+        The name or ID of the entity being filtered. Can be a glob pattern,
+        such as 'nb01*'.
+    :param filters:
+        A dictionary of meta data to use for further filtering. Elements
+        of this dictionary may, themselves, be dictionaries. Example::
+
+            {
+              'last_name': 'Smith',
+              'other': {
+                  'gender': 'Female'
+              }
+            }
+        OR
+        A string containing a jmespath expression for further filtering.
+    """
+    # The logger is openstack.cloud.fnmatch to allow a user/operator to
+    # configure logging not to communicate about fnmatch misses
+    # (they shouldn't be too spammy, but one never knows)
+    log = _log.setup_logging('openstack.cloud.fnmatch')
+    if name_or_id:
+        # name_or_id might already be unicode
+        name_or_id = _make_unicode(name_or_id)
+        identifier_matches = []
+        bad_pattern = False
+        try:
+            fn_reg = re.compile(fnmatch.translate(name_or_id))
+        except sre_constants.error:
+            # If the fnmatch re doesn't compile, then we don't care,
+            # but log it in case the user DID pass a pattern but did
+            # it poorly and wants to know what went wrong with their
+            # search
+            fn_reg = None
+        for e in data:
+            e_id = _make_unicode(e.get('id', None))
+            e_name = _make_unicode(e.get('name', None))
+
+            if ((e_id and e_id == name_or_id) or
+                    (e_name and e_name == name_or_id)):
+                identifier_matches.append(e)
+            else:
+                # Only try fnmatch if we don't match exactly
+                if not fn_reg:
+                    # If we don't have a pattern, skip this, but set the flag
+                    # so that we log the bad pattern
+                    bad_pattern = True
+                    continue
+                if ((e_id and fn_reg.match(e_id)) or
+                        (e_name and fn_reg.match(e_name))):
+                    identifier_matches.append(e)
+        if not identifier_matches and bad_pattern:
+            log.debug("Bad pattern passed to fnmatch", exc_info=True)
+        data = identifier_matches
+
+    if not filters:
+        return data
+
+    if isinstance(filters, six.string_types):
+        return jmespath.search(filters, data)
+
+    def _dict_filter(f, d):
+        if not d:
+            return False
+        for key in f.keys():
+            if isinstance(f[key], dict):
+                if not _dict_filter(f[key], d.get(key, None)):
+                    return False
+            elif d.get(key, None) != f[key]:
+                return False
+        return True
+
+    filtered = []
+    for e in data:
+        filtered.append(e)
+        for key in filters.keys():
+            if isinstance(filters[key], dict):
+                if not _dict_filter(filters[key], e.get(key, None)):
+                    filtered.pop()
+                    break
+            elif e.get(key, None) != filters[key]:
+                filtered.pop()
+                break
+    return filtered
+
+
+def _get_entity(cloud, resource, name_or_id, filters, **kwargs):
+    """Return a single entity from the list returned by a given method.
+
+    :param object cloud:
+        The controller class (Example: the main OpenStackCloud object).
+    :param string or callable resource:
+        The string that identifies the resource to use to lookup the
+        get_<resource>_by_id or search_<resource>s methods
+        (Example: network), or a callable to invoke.
+    :param string name_or_id:
+        The name or ID of the entity being filtered or a dict
+    :param filters:
+        A dictionary of meta data to use for further filtering.
+        OR
+        A string containing a jmespath expression for further filtering.
+        Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"
+    """
+
+    # Sometimes in the control flow of shade, we already have an object
+    # fetched. Rather than then needing to pull the name or id out of that
+    # object, pass it in here and rely on caching to prevent us from making
+    # an additional call, it's simple enough to test to see if we got an
+    # object and just short-circuit and return it.
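+    #
+    # For example (illustrative): _get_entity(cloud, 'network', 'pub*', None)
+    # ends up calling cloud.search_networks('pub*', None) and returns the
+    # single match, raising if the glob matches more than one network.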
+
+    if hasattr(name_or_id, 'id'):
+        return name_or_id
+
+    # If a uuid is passed, short-circuit by calling the
+    # get_<resource>_by_id method
+    if getattr(cloud, 'use_direct_get', False) and _is_uuid_like(name_or_id):
+        get_resource = getattr(cloud, 'get_%s_by_id' % resource, None)
+        if get_resource:
+            return get_resource(name_or_id)
+
+    search = resource if callable(resource) else getattr(
+        cloud, 'search_%ss' % resource, None)
+    if search:
+        entities = search(name_or_id, filters, **kwargs)
+        if entities:
+            if len(entities) > 1:
+                raise exc.OpenStackCloudException(
+                    "Multiple matches found for %s" % name_or_id)
+            return entities[0]
+    return None
+
+
+def normalize_keystone_services(services):
+    """Normalize the structure of keystone services
+
+    In keystone v2, there is a field called "service_type". In v3, it's
+    "type". Just make the returned dict have both.
+
+    :param list services: A list of keystone service dicts
+
+    :returns: A list of normalized dicts.
+    """
+    ret = []
+    for service in services:
+        service_type = service.get('type', service.get('service_type'))
+        new_service = {
+            'id': service['id'],
+            'name': service['name'],
+            'description': service.get('description', None),
+            'type': service_type,
+            'service_type': service_type,
+            'enabled': service['enabled']
+        }
+        ret.append(new_service)
+    return meta.obj_list_to_munch(ret)
+
+
+def localhost_supports_ipv6():
+    """Determine whether the local host supports IPv6
+
+    We look for a default route that supports the IPv6 address family,
+    and assume that if it is present, this host has globally routable
+    IPv6 connectivity.
+    """
+
+    try:
+        return netifaces.AF_INET6 in netifaces.gateways()['default']
+    except AttributeError:
+        return False
+
+
+def normalize_users(users):
+    ret = [
+        dict(
+            id=user.get('id'),
+            email=user.get('email'),
+            name=user.get('name'),
+            username=user.get('username'),
+            default_project_id=user.get('default_project_id',
+                                        user.get('tenantId')),
+            domain_id=user.get('domain_id'),
+            enabled=user.get('enabled'),
+            description=user.get('description')
+        ) for user in users
+    ]
+    return meta.obj_list_to_munch(ret)
+
+
+def normalize_domains(domains):
+    ret = [
+        dict(
+            id=domain.get('id'),
+            name=domain.get('name'),
+            description=domain.get('description'),
+            enabled=domain.get('enabled'),
+        ) for domain in domains
+    ]
+    return meta.obj_list_to_munch(ret)
+
+
+def normalize_groups(groups):
+    """Normalize Identity groups."""
+    ret = [
+        dict(
+            id=group.get('id'),
+            name=group.get('name'),
+            description=group.get('description'),
+            domain_id=group.get('domain_id'),
+        ) for group in groups
+    ]
+    return meta.obj_list_to_munch(ret)
+
+
+def normalize_role_assignments(assignments):
+    """Put role_assignments into a form that works with search/get interface.
+
+    Role assignments have the structure::
+
+        [
+            {
+                "role": {
+                    "id": "--role-id--"
+                },
+                "scope": {
+                    "domain": {
+                        "id": "--domain-id--"
+                    }
+                },
+                "user": {
+                    "id": "--user-id--"
+                }
+            },
+        ]
+
+    Which is hard to work with in the rest of our interface. Map this to be::
+
+        [
+            {
+                "id": "--role-id--",
+                "domain": "--domain-id--",
+                "user": "--user-id--",
+            }
+        ]
+
+    Scope can be "domain" or "project" and "user" can also be "group".
+
+    :param list assignments: A list of dictionaries of role assignments.
+
+    :returns: A list of flattened/normalized role assignment dicts.
+ """ + new_assignments = [] + for assignment in assignments: + new_val = munch.Munch({'id': assignment['role']['id']}) + for scope in ('project', 'domain'): + if scope in assignment['scope']: + new_val[scope] = assignment['scope'][scope]['id'] + for assignee in ('user', 'group'): + if assignee in assignment: + new_val[assignee] = assignment[assignee]['id'] + new_assignments.append(new_val) + return new_assignments + + +def normalize_roles(roles): + """Normalize Identity roles.""" + ret = [ + dict( + id=role.get('id'), + name=role.get('name'), + ) for role in roles + ] + return meta.obj_list_to_munch(ret) + + +def normalize_flavor_accesses(flavor_accesses): + """Normalize Flavor access list.""" + return [munch.Munch( + dict( + flavor_id=acl.get('flavor_id'), + project_id=acl.get('project_id') or acl.get('tenant_id'), + ) + ) for acl in flavor_accesses + ] + + +def valid_kwargs(*valid_args): + # This decorator checks if argument passed as **kwargs to a function are + # present in valid_args. + # + # Typically, valid_kwargs is used when we want to distinguish between + # None and omitted arguments and we still want to validate the argument + # list. + # + # Example usage: + # + # @valid_kwargs('opt_arg1', 'opt_arg2') + # def my_func(self, mandatory_arg1, mandatory_arg2, **kwargs): + # ... + # + @decorator + def func_wrapper(func, *args, **kwargs): + argspec = inspect.getargspec(func) + for k in kwargs: + if k not in argspec.args[1:] and k not in valid_args: + raise TypeError( + "{f}() got an unexpected keyword argument " + "'{arg}'".format(f=inspect.stack()[1][3], arg=k)) + return func(*args, **kwargs) + return func_wrapper + + +def cache_on_arguments(*cache_on_args, **cache_on_kwargs): + _cache_name = cache_on_kwargs.pop('resource', None) + + def _inner_cache_on_arguments(func): + def _cache_decorator(obj, *args, **kwargs): + the_method = obj._get_cache(_cache_name).cache_on_arguments( + *cache_on_args, **cache_on_kwargs)( + func.__get__(obj, type(obj))) + return the_method(*args, **kwargs) + + def invalidate(obj, *args, **kwargs): + return obj._get_cache( + _cache_name).cache_on_arguments()(func).invalidate( + *args, **kwargs) + + _cache_decorator.invalidate = invalidate + _cache_decorator.func = func + _decorated_methods.append(func.__name__) + + return _cache_decorator + return _inner_cache_on_arguments + + +@contextlib.contextmanager +def shade_exceptions(error_message=None): + """Context manager for dealing with shade exceptions. + + :param string error_message: String to use for the exception message + content on non-OpenStackCloudExceptions. + + Useful for avoiding wrapping shade OpenStackCloudException exceptions + within themselves. Code called from within the context may throw such + exceptions without having to catch and reraise them. + + Non-OpenStackCloudException exceptions thrown within the context will + be wrapped and the exception message will be appended to the given error + message. + """ + try: + yield + except exc.OpenStackCloudException: + raise + except Exception as e: + if error_message is None: + error_message = str(e) + raise exc.OpenStackCloudException(error_message) + + +def safe_dict_min(key, data): + """Safely find the minimum for a given key in a list of dict objects. + + This will find the minimum integer value for specific dictionary key + across a list of dictionaries. The values for the given key MUST be + integers, or string representations of an integer. 
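+
+    For example (illustrative), ``safe_dict_min('size', [{'size': '10'},
+    {'size': 5}, {}])`` returns 5: string values are coerced, and dicts
+    missing the key are skipped.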
+
+    The dictionary key does not have to be present in all (or any)
+    of the elements/dicts within the data set.
+
+    :param string key: The dictionary key to search for the minimum value.
+    :param list data: List of dicts to use for the data set.
+
+    :returns: None if the field was not found in any elements, or
+        the minimum value for the field otherwise.
+    """
+    min_value = None
+    for d in data:
+        if (key in d) and (d[key] is not None):
+            try:
+                val = int(d[key])
+            except ValueError:
+                raise exc.OpenStackCloudException(
+                    "Search for minimum value failed. "
+                    "Value for {key} is not an integer: {value}".format(
+                        key=key, value=d[key])
+                )
+            if (min_value is None) or (val < min_value):
+                min_value = val
+    return min_value
+
+
+def safe_dict_max(key, data):
+    """Safely find the maximum for a given key in a list of dict objects.
+
+    This will find the maximum integer value for specific dictionary key
+    across a list of dictionaries. The values for the given key MUST be
+    integers, or string representations of an integer.
+
+    The dictionary key does not have to be present in all (or any)
+    of the elements/dicts within the data set.
+
+    :param string key: The dictionary key to search for the maximum value.
+    :param list data: List of dicts to use for the data set.
+
+    :returns: None if the field was not found in any elements, or
+        the maximum value for the field otherwise.
+    """
+    max_value = None
+    for d in data:
+        if (key in d) and (d[key] is not None):
+            try:
+                val = int(d[key])
+            except ValueError:
+                raise exc.OpenStackCloudException(
+                    "Search for maximum value failed. "
+                    "Value for {key} is not an integer: {value}".format(
+                        key=key, value=d[key])
+                )
+            if (max_value is None) or (val > max_value):
+                max_value = val
+    return max_value
+
+
+def parse_range(value):
+    """Parse a numerical range string.
+
+    Break down a range expression into its operator and numerical parts.
+    This expression must be a string. Valid values must be an integer string,
+    optionally preceded by one of the following operators::
+
+        - "<"  : Less than
+        - ">"  : Greater than
+        - "<=" : Less than or equal to
+        - ">=" : Greater than or equal to
+
+    Some examples of valid values and function return values::
+
+        - "1024"  : returns (None, 1024)
+        - "<5"    : returns ("<", 5)
+        - ">=100" : returns (">=", 100)
+
+    :param string value: The range expression to be parsed.
+
+    :returns: A tuple with the operator string (or None if no operator
+        was given) and the integer value. None is returned if parsing failed.
+    """
+    if value is None:
+        return None
+
+    range_exp = re.match(r'(<|>|<=|>=){0,1}(\d+)$', value)
+    if range_exp is None:
+        return None
+
+    op = range_exp.group(1)
+    num = int(range_exp.group(2))
+    return (op, num)
+
+
+def range_filter(data, key, range_exp):
+    """Filter a list by a single range expression.
+
+    :param list data: List of dictionaries to be searched.
+    :param string key: Key name to search within the data set.
+    :param string range_exp: The expression describing the range of values.
+
+    :returns: A list subset of the original data set.
+    :raises: OpenStackCloudException on invalid range expressions.
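+
+    For example (illustrative), ``range_filter(flavors, 'ram', '<=8192')``
+    keeps only the entries whose ``ram`` value is 8192 or less, while
+    ``range_filter(flavors, 'ram', 'MIN')`` keeps those tied for the
+    smallest ``ram`` value.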
+ """ + filtered = [] + range_exp = str(range_exp).upper() + + if range_exp == "MIN": + key_min = safe_dict_min(key, data) + if key_min is None: + return [] + for d in data: + if int(d[key]) == key_min: + filtered.append(d) + return filtered + elif range_exp == "MAX": + key_max = safe_dict_max(key, data) + if key_max is None: + return [] + for d in data: + if int(d[key]) == key_max: + filtered.append(d) + return filtered + + # Not looking for a min or max, so a range or exact value must + # have been supplied. + val_range = parse_range(range_exp) + + # If parsing the range fails, it must be a bad value. + if val_range is None: + raise exc.OpenStackCloudException( + "Invalid range value: {value}".format(value=range_exp)) + + op = val_range[0] + if op: + # Range matching + for d in data: + d_val = int(d[key]) + if op == '<': + if d_val < val_range[1]: + filtered.append(d) + elif op == '>': + if d_val > val_range[1]: + filtered.append(d) + elif op == '<=': + if d_val <= val_range[1]: + filtered.append(d) + elif op == '>=': + if d_val >= val_range[1]: + filtered.append(d) + return filtered + else: + # Exact number match + for d in data: + if int(d[key]) == val_range[1]: + filtered.append(d) + return filtered + + +def generate_patches_from_kwargs(operation, **kwargs): + """Given a set of parameters, returns a list with the + valid patch values. + + :param string operation: The operation to perform. + :param list kwargs: Dict of parameters. + + :returns: A list with the right patch values. + """ + patches = [] + for k, v in kwargs.items(): + patch = {'op': operation, + 'value': v, + 'path': '/%s' % k} + patches.append(patch) + return sorted(patches) + + +class FileSegment(object): + """File-like object to pass to requests.""" + + def __init__(self, filename, offset, length): + self.filename = filename + self.offset = offset + self.length = length + self.pos = 0 + self._file = open(filename, 'rb') + self.seek(0) + + def tell(self): + return self._file.tell() - self.offset + + def seek(self, offset, whence=0): + if whence == 0: + self._file.seek(self.offset + offset, whence) + elif whence == 1: + self._file.seek(offset, whence) + elif whence == 2: + self._file.seek(self.offset + self.length - offset, 0) + + def read(self, size=-1): + remaining = self.length - self.pos + if remaining <= 0: + return b'' + + to_read = remaining if size < 0 else min(size, remaining) + chunk = self._file.read(to_read) + self.pos += len(chunk) + + return chunk + + def reset(self): + self._file.seek(self.offset, 0) + + +def _format_uuid_string(string): + return (string.replace('urn:', '') + .replace('uuid:', '') + .strip('{}') + .replace('-', '') + .lower()) + + +def _is_uuid_like(val): + """Returns validation of a value as a UUID. + + :param val: Value to verify + :type val: string + :returns: bool + + .. versionchanged:: 1.1.1 + Support non-lowercase UUIDs. + """ + try: + return str(uuid.UUID(val)).replace('-', '') == _format_uuid_string(val) + except (TypeError, ValueError, AttributeError): + return False diff --git a/openstack/cloud/cmd/__init__.py b/openstack/cloud/cmd/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openstack/cloud/cmd/inventory.py b/openstack/cloud/cmd/inventory.py new file mode 100755 index 000000000..c7bc09d97 --- /dev/null +++ b/openstack/cloud/cmd/inventory.py @@ -0,0 +1,70 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import json +import sys +import yaml + +import openstack.cloud +import openstack.cloud.inventory + + +def output_format_dict(data, use_yaml): + if use_yaml: + return yaml.safe_dump(data, default_flow_style=False) + else: + return json.dumps(data, sort_keys=True, indent=2) + + +def parse_args(): + parser = argparse.ArgumentParser(description='OpenStack Inventory Module') + parser.add_argument('--refresh', action='store_true', + help='Refresh cached information') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specific host') + parser.add_argument('--private', action='store_true', default=False, + help='Use private IPs for interface_ip') + parser.add_argument('--cloud', default=None, + help='Return data for one cloud only') + parser.add_argument('--yaml', action='store_true', default=False, + help='Output data in nicely readable yaml') + parser.add_argument('--debug', action='store_true', default=False, + help='Enable debug output') + return parser.parse_args() + + +def main(): + args = parse_args() + try: + openstack.cloud.simple_logging(debug=args.debug) + inventory = openstack.cloud.inventory.OpenStackInventory( + refresh=args.refresh, private=args.private, + cloud=args.cloud) + if args.list: + output = inventory.list_hosts() + elif args.host: + output = inventory.get_host(args.host) + print(output_format_dict(output, args.yaml)) + except openstack.OpenStackCloudException as e: + sys.stderr.write(e.message + '\n') + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/openstack/cloud/exc.py b/openstack/cloud/exc.py new file mode 100644 index 000000000..7635c2bfc --- /dev/null +++ b/openstack/cloud/exc.py @@ -0,0 +1,173 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
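+
+# Everything raised by this layer derives from OpenStackCloudException, so
+# callers can catch broadly or narrowly as needed (an illustrative sketch;
+# create_server lives on OpenStackCloud):
+#
+#     try:
+#         cloud.create_server(...)
+#     except OpenStackCloudTimeout:
+#         pass  # the operation timed out; maybe retry
+#     except OpenStackCloudException:
+#         raise  # anything else the cloud layer reports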
+
+import sys
+
+import munch
+from requests import exceptions as _rex
+
+from openstack import _log
+
+
+class OpenStackCloudException(Exception):
+
+    log_inner_exceptions = False
+
+    def __init__(self, message, extra_data=None, **kwargs):
+        args = [message]
+        if extra_data:
+            if isinstance(extra_data, munch.Munch):
+                extra_data = extra_data.toDict()
+            args.append("Extra: {0}".format(str(extra_data)))
+        super(OpenStackCloudException, self).__init__(*args, **kwargs)
+        self.extra_data = extra_data
+        self.inner_exception = sys.exc_info()
+        self.orig_message = message
+
+    def log_error(self, logger=None):
+        if not logger:
+            logger = _log.setup_logging('openstack.cloud.exc')
+        if self.inner_exception and self.inner_exception[1]:
+            logger.error(self.orig_message, exc_info=self.inner_exception)
+
+    def __str__(self):
+        message = Exception.__str__(self)
+        if (self.inner_exception and self.inner_exception[1]
+                and not self.orig_message.endswith(
+                    str(self.inner_exception[1]))):
+            message = "%s (Inner Exception: %s)" % (
+                message,
+                str(self.inner_exception[1]))
+        if self.log_inner_exceptions:
+            self.log_error()
+        return message
+
+
+class OpenStackCloudCreateException(OpenStackCloudException):
+
+    def __init__(self, resource, resource_id, extra_data=None, **kwargs):
+        super(OpenStackCloudCreateException, self).__init__(
+            message="Error creating {resource}: {resource_id}".format(
+                resource=resource, resource_id=resource_id),
+            extra_data=extra_data, **kwargs)
+        self.resource_id = resource_id
+
+
+class OpenStackCloudTimeout(OpenStackCloudException):
+    pass
+
+
+class OpenStackCloudUnavailableExtension(OpenStackCloudException):
+    pass
+
+
+class OpenStackCloudUnavailableFeature(OpenStackCloudException):
+    pass
+
+
+class OpenStackCloudHTTPError(OpenStackCloudException, _rex.HTTPError):
+
+    def __init__(self, *args, **kwargs):
+        OpenStackCloudException.__init__(self, *args, **kwargs)
+        _rex.HTTPError.__init__(self, *args, **kwargs)
+
+
+class OpenStackCloudBadRequest(OpenStackCloudHTTPError):
+    """There is something wrong with the request payload.
+
+    Possible reasons include malformed JSON or an invalid value passed to a
+    parameter, such as the flavorRef given to a server create.
+    """
+
+
+class OpenStackCloudURINotFound(OpenStackCloudHTTPError):
+    pass
+
+# Backwards compat
+OpenStackCloudResourceNotFound = OpenStackCloudURINotFound
+
+
+def _log_response_extras(response):
+    # Sometimes we get weird HTML errors. This is usually from load balancers
+    # or other things. Log them to a special logger so that they can be
+    # toggled independently - and at debug level so that a person logging
+    # openstack.cloud.* only gets them at debug.
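+    # To see these entries, enable that logger explicitly (illustrative):
+    #     logging.getLogger('openstack.cloud.http').setLevel(logging.DEBUG)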
+ if response.headers.get('content-type') != 'text/html': + return + try: + if int(response.headers.get('content-length', 0)) == 0: + return + except Exception: + return + logger = _log.setup_logging('openstack.cloud.http') + if response.reason: + logger.debug( + "Non-standard error '{reason}' returned from {url}:".format( + reason=response.reason, + url=response.url)) + else: + logger.debug( + "Non-standard error returned from {url}:".format( + url=response.url)) + for response_line in response.text.split('\n'): + logger.debug(response_line) + + +# Logic shamelessly stolen from requests +def raise_from_response(response, error_message=None): + msg = '' + if 400 <= response.status_code < 500: + source = "Client" + elif 500 <= response.status_code < 600: + source = "Server" + else: + return + + remote_error = "Error for url: {url}".format(url=response.url) + try: + details = response.json() + # Nova returns documents that look like + # {statusname: 'message': message, 'code': code} + detail_keys = list(details.keys()) + if len(detail_keys) == 1: + detail_key = detail_keys[0] + detail_message = details[detail_key].get('message') + if detail_message: + remote_error += " {message}".format(message=detail_message) + except ValueError: + if response.reason: + remote_error += " {reason}".format(reason=response.reason) + + _log_response_extras(response) + + if error_message: + msg = '{error_message}. ({code}) {source} {remote_error}'.format( + error_message=error_message, + source=source, + code=response.status_code, + remote_error=remote_error) + else: + msg = '({code}) {source} {remote_error}'.format( + code=response.status_code, + source=source, + remote_error=remote_error) + + # Special case 404 since we raised a specific one for neutron exceptions + # before + if response.status_code == 404: + raise OpenStackCloudURINotFound(msg, response=response) + elif response.status_code == 400: + raise OpenStackCloudBadRequest(msg, response=response) + if msg: + raise OpenStackCloudHTTPError(msg, response=response) diff --git a/openstack/cloud/inventory.py b/openstack/cloud/inventory.py new file mode 100644 index 000000000..c64f666c4 --- /dev/null +++ b/openstack/cloud/inventory.py @@ -0,0 +1,85 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
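+
+# A minimal usage sketch (illustrative):
+#
+#     from openstack.cloud.inventory import OpenStackInventory
+#
+#     inventory = OpenStackInventory()
+#     for host in inventory.list_hosts():
+#         print(host['name'], host['interface_ip'])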
+ +import functools + +import openstack.cloud +import openstack.config +from openstack.cloud import _utils + + +class OpenStackInventory(object): + + # Put this here so the capability can be detected with hasattr on the class + extra_config = None + + def __init__( + self, config_files=None, refresh=False, private=False, + config_key=None, config_defaults=None, cloud=None, + use_direct_get=False): + if config_files is None: + config_files = [] + config = openstack.config.loader.OpenStackConfig( + config_files=openstack.config.loader.CONFIG_FILES + config_files) + self.extra_config = config.get_extra_config( + config_key, config_defaults) + + if cloud is None: + self.clouds = [ + openstack.OpenStackCloud(cloud_config=cloud_config) + for cloud_config in config.get_all_clouds() + ] + else: + try: + self.clouds = [ + openstack.OpenStackCloud( + cloud_config=config.get_one_cloud(cloud)) + ] + except openstack.config.exceptions.OpenStackConfigException as e: + raise openstack.OpenStackCloudException(e) + + if private: + for cloud in self.clouds: + cloud.private = True + + # Handle manual invalidation of entire persistent cache + if refresh: + for cloud in self.clouds: + cloud._cache.invalidate() + + def list_hosts(self, expand=True, fail_on_cloud_config=True): + hostvars = [] + + for cloud in self.clouds: + try: + # Cycle on servers + for server in cloud.list_servers(detailed=expand): + hostvars.append(server) + except openstack.OpenStackCloudException: + # Don't fail on one particular cloud as others may work + if fail_on_cloud_config: + raise + + return hostvars + + def search_hosts(self, name_or_id=None, filters=None, expand=True): + hosts = self.list_hosts(expand=expand) + return _utils._filter_list(hosts, name_or_id, filters) + + def get_host(self, name_or_id, filters=None, expand=True): + if expand: + func = self.search_hosts + else: + func = functools.partial(self.search_hosts, expand=False) + return _utils._get_entity(self, func, name_or_id, filters) diff --git a/openstack/cloud/meta.py b/openstack/cloud/meta.py new file mode 100644 index 000000000..001eb616b --- /dev/null +++ b/openstack/cloud/meta.py @@ -0,0 +1,590 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import munch +import ipaddress +import six +import socket + +from openstack import _log +from openstack.cloud import exc + + +NON_CALLABLES = (six.string_types, bool, dict, int, float, list, type(None)) + + +def find_nova_interfaces(addresses, ext_tag=None, key_name=None, version=4, + mac_addr=None): + ret = [] + for (k, v) in iter(addresses.items()): + if key_name is not None and k != key_name: + # key_name is specified and it doesn't match the current network. + # Continue with the next one + continue + + for interface_spec in v: + if ext_tag is not None: + if 'OS-EXT-IPS:type' not in interface_spec: + # ext_tag is specified, but this interface has no tag + # We could actually return right away as this means that + # this cloud doesn't support OS-EXT-IPS. 
Nevertheless,
+                    # it would be better to perform an explicit check. e.g.:
+                    # cloud._has_nova_extension('OS-EXT-IPS')
+                    # But this needs cloud to be passed to this function.
+                    continue
+                elif interface_spec['OS-EXT-IPS:type'] != ext_tag:
+                    # Type doesn't match, continue with next one
+                    continue
+
+            if mac_addr is not None:
+                if 'OS-EXT-IPS-MAC:mac_addr' not in interface_spec:
+                    # mac_addr is specified, but this interface has no
+                    # mac_addr. We could actually return right away as this
+                    # means that this cloud doesn't support OS-EXT-IPS-MAC.
+                    # Nevertheless, it would be better to perform an explicit
+                    # check. e.g.:
+                    # cloud._has_nova_extension('OS-EXT-IPS-MAC')
+                    # But this needs cloud to be passed to this function.
+                    continue
+                elif interface_spec['OS-EXT-IPS-MAC:mac_addr'] != mac_addr:
+                    # MAC doesn't match, continue with next one
+                    continue
+
+            if interface_spec['version'] == version:
+                ret.append(interface_spec)
+    return ret
+
+
+def find_nova_addresses(addresses, ext_tag=None, key_name=None, version=4,
+                        mac_addr=None):
+    interfaces = find_nova_interfaces(addresses, ext_tag, key_name, version,
+                                      mac_addr)
+    addrs = [i['addr'] for i in interfaces]
+    return addrs
+
+
+def get_server_ip(server, public=False, cloud_public=True, **kwargs):
+    """Get an IP from the Nova addresses dict
+
+    :param server: The server to pull the address from
+    :param public: Whether the address we're looking for should be considered
+                   'public' and therefore reachability tests should be
+                   used. (defaults to False)
+    :param cloud_public: Whether the cloud has been configured to use private
+                         IPs from servers as the interface_ip. This inverts
+                         the public reachability logic, as in this case it's
+                         the private ip we expect shade to be able to reach
+    """
+    addrs = find_nova_addresses(server['addresses'], **kwargs)
+    return find_best_address(
+        addrs, socket.AF_INET, public=public, cloud_public=cloud_public)
+
+
+def get_server_private_ip(server, cloud=None):
+    """Find the private IP address
+
+    If Neutron is available, search for a port on a network where
+    `router:external` is False and `shared` is False. This combination
+    indicates a private network with private IP addresses. This port should
+    have the private IP.
+
+    If Neutron is not available, or something goes wrong communicating with
+    it, as a fallback, try the list of addresses associated with the server
+    dict, looking for an IP type tagged as 'fixed' in the network named
+    'private'.
+
+    Last resort, ignore the IP type and just look for an IP on the 'private'
+    network (e.g., Rackspace).
+    """
+    if cloud and not cloud.use_internal_network():
+        return None
+
+    # Try to get a floating IP interface. If we have one then return the
+    # private IP address associated with that floating IP for consistency.
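+    # The Nova addresses dict being searched here looks like (illustrative):
+    #     {'private': [{'version': 4, 'addr': '10.0.0.5',
+    #                   'OS-EXT-IPS:type': 'fixed',
+    #                   'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:00:00:01'}]}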
+ fip_ints = find_nova_interfaces(server['addresses'], ext_tag='floating') + fip_mac = None + if fip_ints: + fip_mac = fip_ints[0].get('OS-EXT-IPS-MAC:mac_addr') + + # Short circuit the ports/networks search below with a heavily cached + # and possibly pre-configured network name + if cloud: + int_nets = cloud.get_internal_ipv4_networks() + for int_net in int_nets: + int_ip = get_server_ip( + server, key_name=int_net['name'], + cloud_public=not cloud.private, + mac_addr=fip_mac) + if int_ip is not None: + return int_ip + + ip = get_server_ip( + server, ext_tag='fixed', key_name='private', mac_addr=fip_mac) + if ip: + return ip + + # Last resort, and Rackspace + return get_server_ip( + server, key_name='private') + + +def get_server_external_ipv4(cloud, server): + """Find an externally routable IP for the server. + + There are 5 different scenarios we have to account for: + + * Cloud has externally routable IP from neutron but neutron APIs don't + work (only info available is in nova server record) (rackspace) + * Cloud has externally routable IP from neutron (runabove, ovh) + * Cloud has externally routable IP from neutron AND supports optional + private tenant networks (vexxhost, unitedstack) + * Cloud only has private tenant network provided by neutron and requires + floating-ip for external routing (dreamhost, hp) + * Cloud only has private tenant network provided by nova-network and + requires floating-ip for external routing (auro) + + :param cloud: the cloud we're working with + :param server: the server dict from which we want to get an IPv4 address + :return: a string containing the IPv4 address or None + """ + + if not cloud.use_external_network(): + return None + + if server['accessIPv4']: + return server['accessIPv4'] + + # Short circuit the ports/networks search below with a heavily cached + # and possibly pre-configured network name + ext_nets = cloud.get_external_ipv4_networks() + for ext_net in ext_nets: + ext_ip = get_server_ip( + server, key_name=ext_net['name'], public=True, + cloud_public=not cloud.private) + if ext_ip is not None: + return ext_ip + + # Try to get a floating IP address + # Much as I might find floating IPs annoying, if it has one, that's + # almost certainly the one that wants to be used + ext_ip = get_server_ip( + server, ext_tag='floating', public=True, + cloud_public=not cloud.private) + if ext_ip is not None: + return ext_ip + + # The cloud doesn't support Neutron or Neutron can't be contacted. The + # server might have fixed addresses that are reachable from outside the + # cloud (e.g. 
Rax) or have plain ol' floating IPs + + # Try to get an address from a network named 'public' + ext_ip = get_server_ip( + server, key_name='public', public=True, + cloud_public=not cloud.private) + if ext_ip is not None: + return ext_ip + + # Nothing else works, try to find a globally routable IP address + for interfaces in server['addresses'].values(): + for interface in interfaces: + try: + ip = ipaddress.ip_address(interface['addr']) + except Exception: + # Skip any error, we're looking for a working ip - if the + # cloud returns garbage, it wouldn't be the first weird thing + # but it still doesn't meet the requirement of "be a working + # ip address" + continue + if ip.version == 4 and not ip.is_private: + return str(ip) + + return None + + +def find_best_address(addresses, family, public=False, cloud_public=True): + do_check = public == cloud_public + if not addresses: + return None + if len(addresses) == 1: + return addresses[0] + if len(addresses) > 1 and do_check: + # We only want to do this check if the address is supposed to be + # reachable. Otherwise we're just debug log spamming on every listing + # of private ip addresses + for address in addresses: + # Return the first one that is reachable + try: + connect_socket = socket.socket(family, socket.SOCK_STREAM, 0) + connect_socket.settimeout(1) + connect_socket.connect((address, 22, 0, 0)) + return address + except Exception: + pass + # Give up and return the first - none work as far as we can tell + if do_check: + log = _log.setup_logging('shade') + log.debug( + 'The cloud returned multiple addresses, and none of them seem' + ' to work. That might be what you wanted, but we have no clue' + " what's going on, so we just picked one at random") + return addresses[0] + + +def get_server_external_ipv6(server): + """ Get an IPv6 address reachable from outside the cloud. + + This function assumes that if a server has an IPv6 address, that address + is reachable from outside the cloud. + + :param server: the server from which we want to get an IPv6 address + :return: a string containing the IPv6 address or None + """ + if server['accessIPv6']: + return server['accessIPv6'] + addresses = find_nova_addresses(addresses=server['addresses'], version=6) + return find_best_address(addresses, socket.AF_INET6, public=True) + + +def get_server_default_ip(cloud, server): + """ Get the configured 'default' address + + It is possible in clouds.yaml to configure for a cloud a network that + is the 'default_interface'. This is the network that should be used + to talk to instances on the network. + + :param cloud: the cloud we're working with + :param server: the server dict from which we want to get the default + IPv4 address + :return: a string containing the IPv4 address or None + """ + ext_net = cloud.get_default_network() + if ext_net: + if (cloud._local_ipv6 and not cloud.force_ipv4): + # try 6 first, fall back to four + versions = [6, 4] + else: + versions = [4] + for version in versions: + ext_ip = get_server_ip( + server, key_name=ext_net['name'], version=version, public=True, + cloud_public=not cloud.private) + if ext_ip is not None: + return ext_ip + return None + + +def _get_interface_ip(cloud, server): + """ Get the interface IP for the server + + Interface IP is the IP that should be used for communicating with the + server. 
It is: + - the IP on the configured default_interface network + - if cloud.private, the private ip if it exists + - if the server has a public ip, the public ip + """ + default_ip = get_server_default_ip(cloud, server) + if default_ip: + return default_ip + + if cloud.private and server['private_v4']: + return server['private_v4'] + + if (server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4): + return server['public_v6'] + else: + return server['public_v4'] + + +def get_groups_from_server(cloud, server, server_vars): + groups = [] + + region = cloud.region_name + cloud_name = cloud.name + + # Create a group for the cloud + groups.append(cloud_name) + + # Create a group on region + groups.append(region) + + # And one by cloud_region + groups.append("%s_%s" % (cloud_name, region)) + + # Check if group metadata key in servers' metadata + group = server['metadata'].get('group') + if group: + groups.append(group) + + for extra_group in server['metadata'].get('groups', '').split(','): + if extra_group: + groups.append(extra_group) + + groups.append('instance-%s' % server['id']) + + for key in ('flavor', 'image'): + if 'name' in server_vars[key]: + groups.append('%s-%s' % (key, server_vars[key]['name'])) + + for key, value in iter(server['metadata'].items()): + groups.append('meta-%s_%s' % (key, value)) + + az = server_vars.get('az', None) + if az: + # Make groups for az, region_az and cloud_region_az + groups.append(az) + groups.append('%s_%s' % (region, az)) + groups.append('%s_%s_%s' % (cloud.name, region, az)) + return groups + + +def expand_server_vars(cloud, server): + """Backwards compatibility function.""" + return add_server_interfaces(cloud, server) + + +def _make_address_dict(fip, port): + address = dict(version=4, addr=fip['floating_ip_address']) + address['OS-EXT-IPS:type'] = 'floating' + address['OS-EXT-IPS-MAC:mac_addr'] = port['mac_address'] + return address + + +def _get_supplemental_addresses(cloud, server): + fixed_ip_mapping = {} + for name, network in server['addresses'].items(): + for address in network: + if address['version'] == 6: + continue + if address.get('OS-EXT-IPS:type') == 'floating': + # We have a floating IP that nova knows about, do nothing + return server['addresses'] + fixed_ip_mapping[address['addr']] = name + try: + # Don't bother doing this before the server is active, it's a waste + # of an API call while polling for a server to come up + if (cloud.has_service('network') and cloud._has_floating_ips() and + server['status'] == 'ACTIVE'): + for port in cloud.search_ports( + filters=dict(device_id=server['id'])): + for fip in cloud.search_floating_ips( + filters=dict(port_id=port['id'])): + # This SHOULD return one and only one FIP - but doing + # it as a search/list lets the logic work regardless + if fip['fixed_ip_address'] not in fixed_ip_mapping: + log = _log.setup_logging('shade') + log.debug( + "The cloud returned floating ip %(fip)s attached" + " to server %(server)s but the fixed ip associated" + " with the floating ip in the neutron listing" + " does not exist in the nova listing. 
Something" + " is exceptionally broken.", + dict(fip=fip['id'], server=server['id'])) + fixed_net = fixed_ip_mapping[fip['fixed_ip_address']] + server['addresses'][fixed_net].append( + _make_address_dict(fip, port)) + except exc.OpenStackCloudException: + # If something goes wrong with a cloud call, that's cool - this is + # an attempt to provide additional data and should not block forward + # progress + pass + return server['addresses'] + + +def add_server_interfaces(cloud, server): + """Add network interface information to server. + + Query the cloud as necessary to add information to the server record + about the network information needed to interface with the server. + + Ensures that public_v4, public_v6, private_v4, private_v6, interface_ip, + accessIPv4 and accessIPv6 are always set. + """ + # First, add an IP address. Set it to '' rather than None if it does + # not exist to remain consistent with the pre-existing missing values + server['addresses'] = _get_supplemental_addresses(cloud, server) + server['public_v4'] = get_server_external_ipv4(cloud, server) or '' + server['public_v6'] = get_server_external_ipv6(server) or '' + server['private_v4'] = get_server_private_ip(server, cloud) or '' + server['interface_ip'] = _get_interface_ip(cloud, server) or '' + + # Some clouds do not set these, but they're a regular part of the Nova + # server record. Since we know them, go ahead and set them. In the case + # where they were set previous, we use the values, so this will not break + # clouds that provide the information + if cloud.private and server['private_v4']: + server['accessIPv4'] = server['private_v4'] + else: + server['accessIPv4'] = server['public_v4'] + server['accessIPv6'] = server['public_v6'] + + return server + + +def expand_server_security_groups(cloud, server): + try: + groups = cloud.list_server_security_groups(server) + except exc.OpenStackCloudException: + groups = [] + server['security_groups'] = groups or [] + + +def get_hostvars_from_server(cloud, server, mounts=None): + """Expand additional server information useful for ansible inventory. + + Variables in this function may make additional cloud queries to flesh out + possibly interesting info, making it more expensive to call than + expand_server_vars if caching is not set up. If caching is set up, + the extra cost should be minimal. 
+    """
+    server_vars = add_server_interfaces(cloud, server)
+
+    flavor_id = server['flavor']['id']
+    flavor_name = cloud.get_flavor_name(flavor_id)
+    if flavor_name:
+        server_vars['flavor']['name'] = flavor_name
+
+    expand_server_security_groups(cloud, server)
+
+    # OpenStack can return image as a string when you've booted from volume
+    if str(server['image']) == server['image']:
+        image_id = server['image']
+        server_vars['image'] = dict(id=image_id)
+    else:
+        image_id = server['image'].get('id', None)
+        if image_id:
+            image_name = cloud.get_image_name(image_id)
+            if image_name:
+                server_vars['image']['name'] = image_name
+
+    volumes = []
+    if cloud.has_service('volume'):
+        try:
+            for volume in cloud.get_volumes(server):
+                # Make things easier to consume elsewhere
+                volume['device'] = volume['attachments'][0]['device']
+                volumes.append(volume)
+        except exc.OpenStackCloudException:
+            pass
+    server_vars['volumes'] = volumes
+    if mounts:
+        for mount in mounts:
+            for vol in server_vars['volumes']:
+                if vol['display_name'] == mount['display_name']:
+                    if 'mount' in mount:
+                        vol['mount'] = mount['mount']
+
+    return server_vars
+
+
+def _log_request_id(obj, request_id):
+    if request_id:
+        # Log the request id and object id in a specific logger. This way
+        # someone can turn it on if they're interested in this kind of
+        # tracing.
+        log = _log.setup_logging('openstack.cloud.request_ids')
+        obj_id = None
+        if isinstance(obj, dict):
+            obj_id = obj.get('id', obj.get('uuid'))
+        if obj_id:
+            log.debug("Retrieved object %(id)s. Request ID %(request_id)s",
+                      {'id': obj_id, 'request_id': request_id})
+        else:
+            log.debug("Retrieved a response. Request ID %(request_id)s",
+                      {'request_id': request_id})
+
+    return obj
+
+
+def obj_to_munch(obj):
+    """Turn an object with attributes into a dict suitable for serializing.
+
+    Some of the things that are returned in OpenStack are objects with
+    attributes. That's awesome - except when you want to expose them as JSON
+    structures. We use this as the basis of get_hostvars_from_server above so
+    that we can just have a plain dict of all of the values that exist in the
+    nova metadata for a server.
+    """
+    if obj is None:
+        return None
+    elif isinstance(obj, munch.Munch) or hasattr(obj, 'mock_add_spec'):
+        # If we obj_to_munch twice, don't fail, just return the munch
+        # Also, don't try to modify Mock objects - that way lies madness
+        return obj
+    elif isinstance(obj, dict):
+        # The new request-id tracking spec:
+        # https://specs.openstack.org/openstack/nova-specs/specs/juno/approved/log-request-id-mappings.html
+        # adds a request-ids attribute to returned objects. It does this even
+        # with dicts, which now become dict subclasses. So we want to convert
+        # the dict we get, but we also want it to fall through to object
+        # attribute processing so that we can also get the request_ids
+        # data into our resulting object.
+        instance = munch.Munch(obj)
+    else:
+        instance = munch.Munch()
+
+    for key in dir(obj):
+        try:
+            value = getattr(obj, key)
+        # some attributes can be defined as a @property, so we can't be
+        # sure we have a valid value
+        # e.g. id in python-novaclient/tree/novaclient/v2/quotas.py
+        except AttributeError:
+            continue
+        if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
+            instance[key] = value
+    return instance
+
+
+obj_to_dict = obj_to_munch
+
+
+def obj_list_to_munch(obj_list):
+    """Enumerate through lists of objects and return lists of dictionaries.
+
+    Some of the objects returned in OpenStack are actually lists of objects,
+    and in order to expose the data structures as JSON, we need to facilitate
+    the conversion to lists of dictionaries.
+    """
+    return [obj_to_munch(obj) for obj in obj_list]
+
+
+obj_list_to_dict = obj_list_to_munch
+
+
+def warlock_to_dict(obj):
+    # This function is unused in shade - but it is a public function, so
+    # removing it would be rude. We don't actually have to depend on warlock
+    # ourselves to keep this - so just leave it here.
+    #
+    # glanceclient v2 uses warlock to construct its objects. Warlock does
+    # deep black magic around attribute lookup to support validation, which
+    # means we cannot use normal obj_to_munch
+    obj_dict = munch.Munch()
+    for (key, value) in obj.items():
+        if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
+            obj_dict[key] = value
+    return obj_dict
+
+
+def get_and_munchify(key, data):
+    """Get the value associated with key and convert it.
+
+    The value will be converted into a Munch object or a list of Munch
+    objects based on its type.
+    """
+    result = data.get(key, []) if key else data
+    if isinstance(result, list):
+        return obj_list_to_munch(result)
+    elif isinstance(result, dict):
+        return obj_to_munch(result)
+    return result
diff --git a/openstack/cloud/openstackcloud.py b/openstack/cloud/openstackcloud.py
new file mode 100644
index 000000000..c6cf33089
--- /dev/null
+++ b/openstack/cloud/openstackcloud.py
@@ -0,0 +1,8564 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import collections
+import functools
+import hashlib
+import ipaddress
+import json
+import jsonpatch
+import keystoneauth1.session
+import operator
+import os
+import openstack.config
+import openstack.config.defaults
+import six
+import threading
+import time
+import warnings
+
+import dogpile.cache
+import munch
+import requestsexceptions
+from six.moves import urllib
+
+import keystoneauth1.exceptions
+
+import openstack
+from openstack import _log
+from openstack.cloud.exc import *  # noqa
+from openstack.cloud import _adapter
+from openstack.cloud._heat import event_utils
+from openstack.cloud._heat import template_utils
+from openstack.cloud import _normalize
+from openstack.cloud import meta
+from openstack.cloud import task_manager
+from openstack.cloud import _utils
+
+# TODO(shade) shade keys were x-object-meta-x-sdk-md5 - we need to add those
+#             to freshness checks so that a shade->sdk transition doesn't
+#             result in a re-upload
+OBJECT_MD5_KEY = 'x-object-meta-x-sdk-md5'
+OBJECT_SHA256_KEY = 'x-object-meta-x-sdk-sha256'
+# TODO(shade) shade keys were owner_specified.shade.md5 - we need to add those
+#             to freshness checks so that a shade->sdk transition doesn't
+#             result in a re-upload
+IMAGE_MD5_KEY = 'owner_specified.openstack.md5'
+IMAGE_SHA256_KEY = 'owner_specified.openstack.sha256'
+IMAGE_OBJECT_KEY = 'owner_specified.openstack.object'
+# Rackspace returns this for intermittent import errors
+IMAGE_ERROR_396 = "Image cannot be imported.
Error code: '396'" +DEFAULT_OBJECT_SEGMENT_SIZE = 1073741824 # 1GB +# This halves the current default for Swift +DEFAULT_MAX_FILE_SIZE = (5 * 1024 * 1024 * 1024 + 2) / 2 +DEFAULT_SERVER_AGE = 5 +DEFAULT_PORT_AGE = 5 +DEFAULT_FLOAT_AGE = 5 +_OCC_DOC_URL = "https://docs.openstack.org/developer/os-client-config" + + +OBJECT_CONTAINER_ACLS = { + 'public': '.r:*,.rlistings', + 'private': '', +} + + +def _no_pending_volumes(volumes): + """If there are any volumes not in a steady state, don't cache""" + for volume in volumes: + if volume['status'] not in ('available', 'error', 'in-use'): + return False + return True + + +def _no_pending_images(images): + """If there are any images not in a steady state, don't cache""" + for image in images: + if image.status not in ('active', 'deleted', 'killed'): + return False + return True + + +def _no_pending_stacks(stacks): + """If there are any stacks not in a steady state, don't cache""" + for stack in stacks: + status = stack['stack_status'] + if '_COMPLETE' not in status and '_FAILED' not in status: + return False + return True + + +class OpenStackCloud(_normalize.Normalizer): + """Represent a connection to an OpenStack Cloud. + + OpenStackCloud is the entry point for all cloud operations, regardless + of which OpenStack service those operations may ultimately come from. + The operations on an OpenStackCloud are resource oriented rather than + REST API operation oriented. For instance, one will request a Floating IP + and that Floating IP will be actualized either via neutron or via nova + depending on how this particular cloud has decided to arrange itself. + + :param TaskManager manager: Optional task manager to use for running + OpenStack API tasks. Unless you're doing + rate limiting client side, you almost + certainly don't need this. (optional) + :param bool log_inner_exceptions: Send wrapped exceptions to the error log. + Defaults to false, because there are a + number of wrapped exceptions that are + noise for normal usage. It's possible + that for a user that has python logging + configured properly, it's desirable to + have all of the wrapped exceptions be + emitted to the error log. This flag + will enable that behavior. + :param bool strict: Only return documented attributes for each resource + as per the Data Model contract. (Default False) + :param app_name: Name of the application to be appended to the user-agent + string. Optional, defaults to None. + :param app_version: Version of the application to be appended to the + user-agent string. Optional, defaults to None. + :param CloudConfig cloud_config: Cloud config object from os-client-config + In the future, this will be the only way + to pass in cloud configuration, but is + being phased in currently. 
+ """ + + def __init__( + self, + cloud_config=None, + manager=None, log_inner_exceptions=False, + strict=False, + app_name=None, + app_version=None, + use_direct_get=False, + **kwargs): + + if log_inner_exceptions: + OpenStackCloudException.log_inner_exceptions = True + + self.log = _log.setup_logging('openstack.cloud') + + if not cloud_config: + config = openstack.config.OpenStackConfig( + app_name=app_name, app_version=app_version) + + cloud_config = config.get_one_cloud(**kwargs) + + self.name = cloud_config.name + self.auth = cloud_config.get_auth_args() + self.region_name = cloud_config.region_name + self.default_interface = cloud_config.get_interface() + self.private = cloud_config.config.get('private', False) + self.api_timeout = cloud_config.config['api_timeout'] + self.image_api_use_tasks = cloud_config.config['image_api_use_tasks'] + self.secgroup_source = cloud_config.config['secgroup_source'] + self.force_ipv4 = cloud_config.force_ipv4 + self.strict_mode = strict + # TODO(shade) The openstack.cloud default for get_flavor_extra_specs + # should be changed and this should be removed completely + self._extra_config = cloud_config._openstack_config.get_extra_config( + 'shade', { + 'get_flavor_extra_specs': True, + }) + + if manager is not None: + self.manager = manager + else: + self.manager = task_manager.TaskManager( + name=':'.join([self.name, self.region_name]), client=self) + + self._external_ipv4_names = cloud_config.get_external_ipv4_networks() + self._internal_ipv4_names = cloud_config.get_internal_ipv4_networks() + self._external_ipv6_names = cloud_config.get_external_ipv6_networks() + self._internal_ipv6_names = cloud_config.get_internal_ipv6_networks() + self._nat_destination = cloud_config.get_nat_destination() + self._default_network = cloud_config.get_default_network() + + self._floating_ip_source = cloud_config.config.get( + 'floating_ip_source') + if self._floating_ip_source: + if self._floating_ip_source.lower() == 'none': + self._floating_ip_source = None + else: + self._floating_ip_source = self._floating_ip_source.lower() + + self._use_external_network = cloud_config.config.get( + 'use_external_network', True) + self._use_internal_network = cloud_config.config.get( + 'use_internal_network', True) + + # Work around older TaskManager objects that don't have submit_task + if not hasattr(self.manager, 'submit_task'): + self.manager.submit_task = self.manager.submitTask + + (self.verify, self.cert) = cloud_config.get_requests_verify_args() + # Turn off urllib3 warnings about insecure certs if we have + # explicitly configured requests to tell it we do not want + # cert verification + if not self.verify: + self.log.debug( + "Turning off Insecure SSL warnings since verify=False") + category = requestsexceptions.InsecureRequestWarning + if category: + # InsecureRequestWarning references a Warning class or is None + warnings.filterwarnings('ignore', category=category) + + self._disable_warnings = {} + self.use_direct_get = use_direct_get + + self._servers = None + self._servers_time = 0 + self._servers_lock = threading.Lock() + + self._ports = None + self._ports_time = 0 + self._ports_lock = threading.Lock() + + self._floating_ips = None + self._floating_ips_time = 0 + self._floating_ips_lock = threading.Lock() + + self._floating_network_by_router = None + self._floating_network_by_router_run = False + self._floating_network_by_router_lock = threading.Lock() + + self._networks_lock = threading.Lock() + self._reset_network_caches() + + cache_expiration_time = 
int(cloud_config.get_cache_expiration_time()) + cache_class = cloud_config.get_cache_class() + cache_arguments = cloud_config.get_cache_arguments() + + self._resource_caches = {} + + if cache_class != 'dogpile.cache.null': + self.cache_enabled = True + self._cache = self._make_cache( + cache_class, cache_expiration_time, cache_arguments) + expirations = cloud_config.get_cache_expiration() + for expire_key in expirations.keys(): + # Only build caches for things we have list operations for + if getattr( + self, 'list_{0}'.format(expire_key), None): + self._resource_caches[expire_key] = self._make_cache( + cache_class, expirations[expire_key], cache_arguments) + + self._SERVER_AGE = DEFAULT_SERVER_AGE + self._PORT_AGE = DEFAULT_PORT_AGE + self._FLOAT_AGE = DEFAULT_FLOAT_AGE + else: + self.cache_enabled = False + + def _fake_invalidate(unused): + pass + + class _FakeCache(object): + def invalidate(self): + pass + + # Don't cache list_servers if we're not caching things. + # Replace this with a more specific cache configuration + # soon. + self._SERVER_AGE = 0 + self._PORT_AGE = 0 + self._FLOAT_AGE = 0 + self._cache = _FakeCache() + # Undecorate cache decorated methods. Otherwise the call stacks + # wind up being stupidly long and hard to debug + for method in _utils._decorated_methods: + meth_obj = getattr(self, method, None) + if not meth_obj: + continue + if (hasattr(meth_obj, 'invalidate') + and hasattr(meth_obj, 'func')): + new_func = functools.partial(meth_obj.func, self) + new_func.invalidate = _fake_invalidate + setattr(self, method, new_func) + + # If server expiration time is set explicitly, use that. Otherwise + # fall back to whatever it was before + self._SERVER_AGE = cloud_config.get_cache_resource_expiration( + 'server', self._SERVER_AGE) + self._PORT_AGE = cloud_config.get_cache_resource_expiration( + 'port', self._PORT_AGE) + self._FLOAT_AGE = cloud_config.get_cache_resource_expiration( + 'floating_ip', self._FLOAT_AGE) + + self._container_cache = dict() + self._file_hash_cache = dict() + + self._keystone_session = None + + self._raw_clients = {} + + self._local_ipv6 = ( + _utils.localhost_supports_ipv6() if not self.force_ipv4 else False) + + self.cloud_config = cloud_config + + def _make_cache(self, cache_class, expiration_time, arguments): + return dogpile.cache.make_region( + function_key_generator=self._make_cache_key + ).configure( + cache_class, + expiration_time=expiration_time, + arguments=arguments) + + def _make_cache_key(self, namespace, fn): + fname = fn.__name__ + if namespace is None: + name_key = self.name + else: + name_key = '%s:%s' % (self.name, namespace) + + def generate_key(*args, **kwargs): + arg_key = ','.join(args) + kw_keys = sorted(kwargs.keys()) + kwargs_key = ','.join( + ['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache']) + ans = "_".join( + [str(name_key), fname, arg_key, kwargs_key]) + return ans + return generate_key + + def _get_cache(self, resource_name): + if resource_name and resource_name in self._resource_caches: + return self._resource_caches[resource_name] + else: + return self._cache + + def _get_client( + self, service_key, client_class=None, interface_key=None, + pass_version_arg=True, **kwargs): + try: + client = self.cloud_config.get_legacy_client( + service_key=service_key, client_class=client_class, + interface_key=interface_key, pass_version_arg=pass_version_arg, + **kwargs) + except Exception: + self.log.debug( + "Couldn't construct %(service)s object", + {'service': service_key}, exc_info=True) + raise + if client is 
None: + raise OpenStackCloudException( + "Failed to instantiate {service} client." + " This could mean that your credentials are wrong.".format( + service=service_key)) + return client + + def _get_major_version_id(self, version): + if isinstance(version, int): + return version + elif isinstance(version, six.string_types + (tuple,)): + return int(version[0]) + return version + + def _get_versioned_client( + self, service_type, min_version=None, max_version=None): + config_version = self.cloud_config.get_api_version(service_type) + config_major = self._get_major_version_id(config_version) + max_major = self._get_major_version_id(max_version) + min_major = self._get_major_version_id(min_version) + # TODO(shade) This should be replaced with use of Connection. However, + # we need to find a sane way to deal with this additional + # logic - or we need to give up on it. If we give up on it, + # we need to make sure we can still support it in the shade + # compat layer. + # NOTE(mordred) This logic for versions is slightly different + # than the ksa Adapter constructor logic. openstack.cloud knows the + # versions it knows, and uses them when it detects them. However, if + # a user requests a version, and it's not found, and a different one + # openstack.cloud does know about is found, that's a warning in + # openstack.cloud. + if config_version: + if min_major and config_major < min_major: + raise OpenStackCloudException( + "Version {config_version} requested for {service_type}" + " but shade understands a minimum of {min_version}".format( + config_version=config_version, + service_type=service_type, + min_version=min_version)) + elif max_major and config_major > max_major: + raise OpenStackCloudException( + "Version {config_version} requested for {service_type}" + " but openstack.cloud understands a maximum of" + " {max_version}".format( + config_version=config_version, + service_type=service_type, + max_version=max_version)) + request_min_version = config_version + request_max_version = '{version}.latest'.format( + version=config_major) + adapter = _adapter.ShadeAdapter( + session=self.keystone_session, + manager=self.manager, + service_type=self.cloud_config.get_service_type(service_type), + service_name=self.cloud_config.get_service_name(service_type), + interface=self.cloud_config.get_interface(service_type), + endpoint_override=self.cloud_config.get_endpoint(service_type), + region_name=self.cloud_config.region, + min_version=request_min_version, + max_version=request_max_version, + shade_logger=self.log) + if adapter.get_endpoint(): + return adapter + + adapter = _adapter.ShadeAdapter( + session=self.keystone_session, + manager=self.manager, + service_type=self.cloud_config.get_service_type(service_type), + service_name=self.cloud_config.get_service_name(service_type), + interface=self.cloud_config.get_interface(service_type), + endpoint_override=self.cloud_config.get_endpoint(service_type), + region_name=self.cloud_config.region, + min_version=min_version, + max_version=max_version, + shade_logger=self.log) + + # data.api_version can be None if no version was detected, such + # as with neutron + api_version = adapter.get_api_major_version( + endpoint_override=self.cloud_config.get_endpoint(service_type)) + api_major = self._get_major_version_id(api_version) + + # If we detect a different version that was configured, warn the user. + # shade still knows what to do - but if the user gave us an explicit + # version and we couldn't find it, they may want to investigate. 
+ if api_version and (api_major != config_major): + warning_msg = ( + '{service_type} is configured for {config_version}' + ' but only {api_version} is available. shade is happy' + ' with this version, but if you were trying to force an' + ' override, that did not happen. You may want to check' + ' your cloud, or remove the version specification from' + ' your config.'.format( + service_type=service_type, + config_version=config_version, + api_version='.'.join([str(f) for f in api_version]))) + self.log.debug(warning_msg) + warnings.warn(warning_msg) + return adapter + + # TODO(shade) This should be replaced with using openstack Connection + # object. + def _get_raw_client( + self, service_type, api_version=None, endpoint_override=None): + return _adapter.ShadeAdapter( + session=self.keystone_session, + manager=self.manager, + service_type=self.cloud_config.get_service_type(service_type), + service_name=self.cloud_config.get_service_name(service_type), + interface=self.cloud_config.get_interface(service_type), + endpoint_override=self.cloud_config.get_endpoint( + service_type) or endpoint_override, + region_name=self.cloud_config.region, + shade_logger=self.log) + + def _is_client_version(self, client, version): + client_name = '_{client}_client'.format(client=client) + client = getattr(self, client_name) + return client._version_matches(version) + + @property + def _application_catalog_client(self): + if 'application-catalog' not in self._raw_clients: + self._raw_clients['application-catalog'] = self._get_raw_client( + 'application-catalog') + return self._raw_clients['application-catalog'] + + @property + def _baremetal_client(self): + if 'baremetal' not in self._raw_clients: + client = self._get_raw_client('baremetal') + # Do this to force version discovery. We need to do that, because + # the endpoint-override trick we do for neutron because + # ironicclient just appends a /v1 won't work and will break + # keystoneauth - because ironic's versioned discovery endpoint + # is non-compliant and doesn't return an actual version dict. 
+ client = self._get_versioned_client( + 'baremetal', min_version=1, max_version='1.latest') + self._raw_clients['baremetal'] = client + return self._raw_clients['baremetal'] + + @property + def _container_infra_client(self): + if 'container-infra' not in self._raw_clients: + self._raw_clients['container-infra'] = self._get_raw_client( + 'container-infra') + return self._raw_clients['container-infra'] + + @property + def _compute_client(self): + # TODO(mordred) Deal with microversions + if 'compute' not in self._raw_clients: + self._raw_clients['compute'] = self._get_raw_client('compute') + return self._raw_clients['compute'] + + @property + def _database_client(self): + if 'database' not in self._raw_clients: + self._raw_clients['database'] = self._get_raw_client('database') + return self._raw_clients['database'] + + @property + def _dns_client(self): + if 'dns' not in self._raw_clients: + dns_client = self._get_versioned_client( + 'dns', min_version=2, max_version='2.latest') + self._raw_clients['dns'] = dns_client + return self._raw_clients['dns'] + + @property + def _identity_client(self): + if 'identity' not in self._raw_clients: + self._raw_clients['identity'] = self._get_versioned_client( + 'identity', min_version=2, max_version='3.latest') + return self._raw_clients['identity'] + + @property + def _raw_image_client(self): + if 'raw-image' not in self._raw_clients: + image_client = self._get_raw_client('image') + self._raw_clients['raw-image'] = image_client + return self._raw_clients['raw-image'] + + @property + def _image_client(self): + if 'image' not in self._raw_clients: + self._raw_clients['image'] = self._get_versioned_client( + 'image', min_version=1, max_version='2.latest') + return self._raw_clients['image'] + + @property + def _network_client(self): + if 'network' not in self._raw_clients: + client = self._get_raw_client('network') + # TODO(mordred) I don't care if this is what neutronclient does, + # fix this. + # Don't bother with version discovery - there is only one version + # of neutron. This is what neutronclient does, fwiw. 
+            endpoint = client.get_endpoint()
+            if not endpoint.rstrip('/').rsplit('/')[-1] == 'v2.0':
+                if not endpoint.endswith('/'):
+                    endpoint += '/'
+                endpoint = urllib.parse.urljoin(
+                    endpoint, 'v2.0')
+                client.endpoint_override = endpoint
+            self._raw_clients['network'] = client
+        return self._raw_clients['network']
+
+    @property
+    def _object_store_client(self):
+        if 'object-store' not in self._raw_clients:
+            raw_client = self._get_raw_client('object-store')
+            self._raw_clients['object-store'] = raw_client
+        return self._raw_clients['object-store']
+
+    @property
+    def _orchestration_client(self):
+        if 'orchestration' not in self._raw_clients:
+            raw_client = self._get_raw_client('orchestration')
+            self._raw_clients['orchestration'] = raw_client
+        return self._raw_clients['orchestration']
+
+    @property
+    def _volume_client(self):
+        if 'volume' not in self._raw_clients:
+            self._raw_clients['volume'] = self._get_raw_client('volume')
+        return self._raw_clients['volume']
+
+    def pprint(self, resource):
+        """Wrapper around pprint that groks munch objects"""
+        # import late since this is a utility function
+        import pprint
+        new_resource = _utils._dictify_resource(resource)
+        pprint.pprint(new_resource)
+
+    def pformat(self, resource):
+        """Wrapper around pformat that groks munch objects"""
+        # import late since this is a utility function
+        import pprint
+        new_resource = _utils._dictify_resource(resource)
+        return pprint.pformat(new_resource)
+
+    @property
+    def keystone_session(self):
+        if self._keystone_session is None:
+            try:
+                self._keystone_session = self.cloud_config.get_session()
+                if hasattr(self._keystone_session, 'additional_user_agent'):
+                    self._keystone_session.additional_user_agent.append(
+                        ('openstacksdk', openstack.__version__))
+            except Exception as e:
+                raise OpenStackCloudException(
+                    "Error authenticating to keystone: %s" % str(e))
+        return self._keystone_session
+
+    @property
+    def _keystone_catalog(self):
+        return self.keystone_session.auth.get_access(
+            self.keystone_session).service_catalog
+
+    @property
+    def service_catalog(self):
+        return self._keystone_catalog.catalog
+
+    def endpoint_for(self, service_type, interface='public'):
+        return self._keystone_catalog.url_for(
+            service_type=service_type, interface=interface)
+
+    @property
+    def auth_token(self):
+        # Keystone's session will reuse a token if it is still valid.
+        # We don't need to track validity here, just get_token() each time.
+        return self.keystone_session.get_token()
+
+    @property
+    def current_project_id(self):
+        """Get the current project ID.
+
+        Returns the project_id of the current token scope. None means that
+        the token is domain scoped or unscoped.
+
+        :raises keystoneauth1.exceptions.auth.AuthorizationFailure:
+            if a new token fetch fails.
+        :raises keystoneauth1.exceptions.auth_plugins.MissingAuthPlugin:
+            if a plugin is not available.
+        """
+        return self.keystone_session.get_project_id()
+
+    @property
+    def current_project(self):
+        """Return a ``munch.Munch`` describing the current project"""
+        return self._get_project_info()
+
+    def _get_project_info(self, project_id=None):
+        project_info = munch.Munch(
+            id=project_id,
+            name=None,
+            domain_id=None,
+            domain_name=None,
+        )
+        if not project_id or project_id == self.current_project_id:
+            # If we don't have a project_id parameter, it means a user is
+            # directly asking what the current state is.
+            # Alternately, if we have one, that means we're calling this
+            # from within a normalize function, which means the object has
+            # a project_id associated with it. If the project_id matches
+            # the project_id of our current token, that means we can
+            # supplement the info with human readable names if we have them.
+            # If they don't match, that means we're an admin who has pulled
+            # an object from a different project, so adding info from the
+            # current token would be wrong.
+            auth_args = self.cloud_config.config.get('auth', {})
+            project_info['id'] = self.current_project_id
+            project_info['name'] = auth_args.get('project_name')
+            project_info['domain_id'] = auth_args.get('project_domain_id')
+            project_info['domain_name'] = auth_args.get('project_domain_name')
+        return project_info
+
+    @property
+    def current_location(self):
+        """Return a ``munch.Munch`` explaining the current cloud location."""
+        return self._get_current_location()
+
+    def _get_current_location(self, project_id=None, zone=None):
+        return munch.Munch(
+            cloud=self.name,
+            region_name=self.region_name,
+            zone=zone,
+            project=self._get_project_info(project_id),
+        )
+
+    def _get_project_id_param_dict(self, name_or_id):
+        if name_or_id:
+            project = self.get_project(name_or_id)
+            if not project:
+                return {}
+            if self._is_client_version('identity', 3):
+                return {'default_project_id': project['id']}
+            else:
+                return {'tenant_id': project['id']}
+        else:
+            return {}
+
+    def _get_domain_id_param_dict(self, domain_id):
+        """Get a usable domain."""
+
+        # Keystone v3 requires domains for user and project creation. v2 does
+        # not. However, keystone v2 does not allow user creation by non-admin
+        # users, so we can raise an error that does not need to mention API
+        # versions.
+        if self._is_client_version('identity', 3):
+            if not domain_id:
+                raise OpenStackCloudException(
+                    "User or project creation requires an explicit"
+                    " domain_id argument.")
+            else:
+                return {'domain_id': domain_id}
+        else:
+            return {}
+
+    def _get_identity_params(self, domain_id=None, project=None):
+        """Get the domain and project/tenant parameters if needed.
+
+        Keystone v2 and v3 are divergent enough that we need to pass or not
+        pass project or tenant_id or domain or nothing in a sane manner.
+        """
+        ret = {}
+        ret.update(self._get_domain_id_param_dict(domain_id))
+        ret.update(self._get_project_id_param_dict(project))
+        return ret
+
+    def range_search(self, data, filters):
+        """Perform integer range searches across a list of dictionaries.
+
+        Given a list of dictionaries, search across the list using the given
+        dictionary keys and a range of integer values for each key. Only
+        dictionaries that match ALL search filters across the entire original
+        data set will be returned.
+
+        It is not a requirement that each dictionary contain the key used
+        for searching. Those without the key will be considered non-matching.
+
+        Each range value must be a string that is either a set of digits
+        representing an integer to match exactly, or a range operator
+        followed by a set of digits representing an integer to compare
+        against. If a range operator is not given, exact value matching will
+        be used. Valid operators are one of: <,>,<=,>=
+
+        :param data: List of dictionaries to be searched.
+        :param filters: Dict describing the one or more range searches to
+            perform. If more than one search is given, the result will be the
+            members of the original data set that match ALL searches. An
+            example of filtering by multiple ranges::
+
+                {"vcpus": "<=5", "ram": "<=2048", "disk": "1"}
+
+        :returns: A list subset of the original data set.
+        :raises: OpenStackCloudException on invalid range expressions.
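To make the range semantics concrete, a minimal sketch (assuming ``cloud`` is an ``OpenStackCloud`` instance; the flavor data is invented):

.. code-block:: python

    flavors = [
        {'name': 'm1.tiny', 'ram': 512, 'vcpus': 1},
        {'name': 'm1.small', 'ram': 2048, 'vcpus': 1},
        {'name': 'm1.large', 'ram': 8192, 'vcpus': 4},
    ]

    # Only entries matching ALL filters are returned: here m1.tiny and
    # m1.small; m1.large fails the ram filter.
    matches = cloud.range_search(flavors, {'ram': '<=2048', 'vcpus': '1'})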
+ """ + filtered = [] + + for key, range_value in filters.items(): + # We always want to operate on the full data set so that + # calculations for minimum and maximum are correct. + results = _utils.range_filter(data, key, range_value) + + if not filtered: + # First set of results + filtered = results + else: + # The combination of all searches should be the intersection of + # all result sets from each search. So adjust the current set + # of filtered data by computing its intersection with the + # latest result set. + filtered = [r for r in results for f in filtered if r == f] + + return filtered + + def _get_and_munchify(self, key, data): + """Wrapper around meta.get_and_munchify. + + Some of the methods expect a `meta` attribute to be passed in as + part of the method signature. In those methods the meta param is + overriding the meta module making the call to meta.get_and_munchify + to fail. + """ + return meta.get_and_munchify(key, data) + + @_utils.cache_on_arguments() + def list_projects(self, domain_id=None, name_or_id=None, filters=None): + """List projects. + + With no parameters, returns a full listing of all visible projects. + + :param domain_id: domain ID to scope the searched projects. + :param name_or_id: project name or ID. + :param filters: a dict containing additional filters to use + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: a list of ``munch.Munch`` containing the projects + + :raises: ``OpenStackCloudException``: if something goes wrong during + the OpenStack API call. + """ + kwargs = dict( + filters=filters, + domain_id=domain_id) + if self._is_client_version('identity', 3): + kwargs['obj_name'] = 'project' + + pushdown, filters = _normalize._split_filters(**kwargs) + + try: + if self._is_client_version('identity', 3): + key = 'projects' + else: + key = 'tenants' + data = self._identity_client.get( + '/{endpoint}'.format(endpoint=key), params=pushdown) + projects = self._normalize_projects( + self._get_and_munchify(key, data)) + except Exception as e: + self.log.debug("Failed to list projects", exc_info=True) + raise OpenStackCloudException(str(e)) + return _utils._filter_list(projects, name_or_id, filters) + + def search_projects(self, name_or_id=None, filters=None, domain_id=None): + '''Backwards compatibility method for search_projects + + search_projects originally had a parameter list that was name_or_id, + filters and list had domain_id first. This method exists in this form + to allow code written with positional parameter to still work. But + really, use keyword arguments. + ''' + return self.list_projects( + domain_id=domain_id, name_or_id=name_or_id, filters=filters) + + def get_project(self, name_or_id, filters=None, domain_id=None): + """Get exactly one project. + + :param name_or_id: project name or ID. + :param filters: a dict containing additional filters to use. + :param domain_id: domain ID (identity v3 only). + + :returns: a list of ``munch.Munch`` containing the project description. + + :raises: ``OpenStackCloudException``: if something goes wrong during + the OpenStack API call. 
+ """ + return _utils._get_entity(self, 'project', name_or_id, filters, + domain_id=domain_id) + + @_utils.valid_kwargs('description') + def update_project(self, name_or_id, enabled=None, domain_id=None, + **kwargs): + with _utils.shade_exceptions( + "Error in updating project {project}".format( + project=name_or_id)): + proj = self.get_project(name_or_id, domain_id=domain_id) + if not proj: + raise OpenStackCloudException( + "Project %s not found." % name_or_id) + if enabled is not None: + kwargs.update({'enabled': enabled}) + # NOTE(samueldmq): Current code only allow updates of description + # or enabled fields. + if self._is_client_version('identity', 3): + data = self._identity_client.patch( + '/projects/' + proj['id'], json={'project': kwargs}) + project = self._get_and_munchify('project', data) + else: + data = self._identity_client.post( + '/tenants/' + proj['id'], json={'tenant': kwargs}) + project = self._get_and_munchify('tenant', data) + project = self._normalize_project(project) + self.list_projects.invalidate(self) + return project + + def create_project( + self, name, description=None, domain_id=None, enabled=True): + """Create a project.""" + with _utils.shade_exceptions( + "Error in creating project {project}".format(project=name)): + project_ref = self._get_domain_id_param_dict(domain_id) + project_ref.update({'name': name, + 'description': description, + 'enabled': enabled}) + endpoint, key = ('tenants', 'tenant') + if self._is_client_version('identity', 3): + endpoint, key = ('projects', 'project') + data = self._identity_client.post( + '/{endpoint}'.format(endpoint=endpoint), + json={key: project_ref}) + project = self._normalize_project( + self._get_and_munchify(key, data)) + self.list_projects.invalidate(self) + return project + + def delete_project(self, name_or_id, domain_id=None): + """Delete a project. + + :param string name_or_id: Project name or ID. + :param string domain_id: Domain ID containing the project(identity v3 + only). + + :returns: True if delete succeeded, False if the project was not found. + + :raises: ``OpenStackCloudException`` if something goes wrong during + the OpenStack API call + """ + + with _utils.shade_exceptions( + "Error in deleting project {project}".format( + project=name_or_id)): + project = self.get_project(name_or_id, domain_id=domain_id) + if project is None: + self.log.debug( + "Project %s not found for deleting", name_or_id) + return False + + if self._is_client_version('identity', 3): + self._identity_client.delete('/projects/' + project['id']) + else: + self._identity_client.delete('/tenants/' + project['id']) + + return True + + @_utils.valid_kwargs('domain_id') + @_utils.cache_on_arguments() + def list_users(self, **kwargs): + """List users. + + :param domain_id: Domain ID. (v3) + + :returns: a list of ``munch.Munch`` containing the user description. + + :raises: ``OpenStackCloudException``: if something goes wrong during + the OpenStack API call. + """ + data = self._identity_client.get('/users', params=kwargs) + return _utils.normalize_users( + self._get_and_munchify('users', data)) + + @_utils.valid_kwargs('domain_id') + def search_users(self, name_or_id=None, filters=None, **kwargs): + """Search users. + + :param string name_or_id: user name or ID. + :param domain_id: Domain ID. (v3) + :param filters: a dict containing additional filters to use. + OR + A string containing a jmespath expression for further filtering. 
+ Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: a list of ``munch.Munch`` containing the users + + :raises: ``OpenStackCloudException``: if something goes wrong during + the OpenStack API call. + """ + users = self.list_users(**kwargs) + return _utils._filter_list(users, name_or_id, filters) + + @_utils.valid_kwargs('domain_id') + def get_user(self, name_or_id, filters=None, **kwargs): + """Get exactly one user. + + :param string name_or_id: user name or ID. + :param domain_id: Domain ID. (v3) + :param filters: a dict containing additional filters to use. + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: a single ``munch.Munch`` containing the user description. + + :raises: ``OpenStackCloudException``: if something goes wrong during + the OpenStack API call. + """ + return _utils._get_entity(self, 'user', name_or_id, filters, **kwargs) + + def get_user_by_id(self, user_id, normalize=True): + """Get a user by ID. + + :param string user_id: user ID + :param bool normalize: Flag to control dict normalization + + :returns: a single ``munch.Munch`` containing the user description + """ + data = self._identity_client.get( + '/users/{user}'.format(user=user_id), + error_message="Error getting user with ID {user_id}".format( + user_id=user_id)) + + user = self._get_and_munchify('user', data) + if user and normalize: + user = _utils.normalize_users(user) + return user + + # NOTE(Shrews): Keystone v2 supports updating only name, email and enabled. + @_utils.valid_kwargs('name', 'email', 'enabled', 'domain_id', 'password', + 'description', 'default_project') + def update_user(self, name_or_id, **kwargs): + self.list_users.invalidate(self) + user_kwargs = {} + if 'domain_id' in kwargs and kwargs['domain_id']: + user_kwargs['domain_id'] = kwargs['domain_id'] + user = self.get_user(name_or_id, **user_kwargs) + + # TODO(mordred) When this changes to REST, force interface=admin + # in the adapter call if it's an admin force call (and figure out how + # to make that disctinction) + if self._is_client_version('identity', 2): + # Do not pass v3 args to a v2 keystone. + kwargs.pop('domain_id', None) + kwargs.pop('description', None) + kwargs.pop('default_project', None) + password = kwargs.pop('password', None) + if password is not None: + with _utils.shade_exceptions( + "Error updating password for {user}".format( + user=name_or_id)): + error_msg = "Error updating password for user {}".format( + name_or_id) + data = self._identity_client.put( + '/users/{u}/OS-KSADM/password'.format(u=user['id']), + json={'user': {'password': password}}, + error_message=error_msg) + + # Identity v2.0 implements PUT. v3 PATCH. Both work as PATCH. + data = self._identity_client.put( + '/users/{user}'.format(user=user['id']), json={'user': kwargs}, + error_message="Error in updating user {}".format(name_or_id)) + else: + # NOTE(samueldmq): now this is a REST call and domain_id is dropped + # if None. keystoneclient drops keys with None values. 
+ if 'domain_id' in kwargs and kwargs['domain_id'] is None: + del kwargs['domain_id'] + data = self._identity_client.patch( + '/users/{user}'.format(user=user['id']), json={'user': kwargs}, + error_message="Error in updating user {}".format(name_or_id)) + + user = self._get_and_munchify('user', data) + self.list_users.invalidate(self) + return _utils.normalize_users([user])[0] + + def create_user( + self, name, password=None, email=None, default_project=None, + enabled=True, domain_id=None, description=None): + """Create a user.""" + params = self._get_identity_params(domain_id, default_project) + params.update({'name': name, 'password': password, 'email': email, + 'enabled': enabled}) + if self._is_client_version('identity', 3): + params['description'] = description + elif description is not None: + self.log.info( + "description parameter is not supported on Keystone v2") + + error_msg = "Error in creating user {user}".format(user=name) + data = self._identity_client.post('/users', json={'user': params}, + error_message=error_msg) + user = self._get_and_munchify('user', data) + + self.list_users.invalidate(self) + return _utils.normalize_users([user])[0] + + @_utils.valid_kwargs('domain_id') + def delete_user(self, name_or_id, **kwargs): + # TODO(mordred) Why are we invalidating at the TOP? + self.list_users.invalidate(self) + user = self.get_user(name_or_id, **kwargs) + if not user: + self.log.debug( + "User {0} not found for deleting".format(name_or_id)) + return False + + # TODO(mordred) Extra GET only needed to support keystoneclient. + # Can be removed as a follow-on. + user = self.get_user_by_id(user['id'], normalize=False) + self._identity_client.delete( + '/users/{user}'.format(user=user['id']), + error_message="Error in deleting user {user}".format( + user=name_or_id)) + + self.list_users.invalidate(self) + return True + + def _get_user_and_group(self, user_name_or_id, group_name_or_id): + user = self.get_user(user_name_or_id) + if not user: + raise OpenStackCloudException( + 'User {user} not found'.format(user=user_name_or_id)) + + group = self.get_group(group_name_or_id) + if not group: + raise OpenStackCloudException( + 'Group {user} not found'.format(user=group_name_or_id)) + + return (user, group) + + def add_user_to_group(self, name_or_id, group_name_or_id): + """Add a user to a group. + + :param string name_or_id: User name or ID + :param string group_name_or_id: Group name or ID + + :raises: ``OpenStackCloudException`` if something goes wrong during + the OpenStack API call + """ + user, group = self._get_user_and_group(name_or_id, group_name_or_id) + + error_msg = "Error adding user {user} to group {group}".format( + user=name_or_id, group=group_name_or_id) + self._identity_client.put( + '/groups/{g}/users/{u}'.format(g=group['id'], u=user['id']), + error_message=error_msg) + + def is_user_in_group(self, name_or_id, group_name_or_id): + """Check to see if a user is in a group. 
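Taken together, the user and group helpers above compose like this (a hedged sketch; the names, password, and ``domain_id`` value are invented and assume identity v3):

.. code-block:: python

    user = cloud.create_user(
        name='alice', password='secret', email='alice@example.com',
        domain_id='default')  # an explicit domain_id is required on v3
    cloud.add_user_to_group('alice', 'admins')
    assert cloud.is_user_in_group('alice', 'admins')
    cloud.remove_user_from_group('alice', 'admins')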
+
+        :param string name_or_id: User name or ID
+        :param string group_name_or_id: Group name or ID
+
+        :returns: True if user is in the group, False otherwise
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call
+        """
+        user, group = self._get_user_and_group(name_or_id, group_name_or_id)
+
+        try:
+            self._identity_client.head(
+                '/groups/{g}/users/{u}'.format(g=group['id'], u=user['id']))
+            return True
+        except OpenStackCloudURINotFound:
+            # NOTE(samueldmq): knowing this URI exists, let's interpret this
+            # as user not found in group rather than URI not found.
+            return False
+
+    def remove_user_from_group(self, name_or_id, group_name_or_id):
+        """Remove a user from a group.
+
+        :param string name_or_id: User name or ID
+        :param string group_name_or_id: Group name or ID
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call
+        """
+        user, group = self._get_user_and_group(name_or_id, group_name_or_id)
+
+        error_msg = "Error removing user {user} from group {group}".format(
+            user=name_or_id, group=group_name_or_id)
+        self._identity_client.delete(
+            '/groups/{g}/users/{u}'.format(g=group['id'], u=user['id']),
+            error_message=error_msg)
+
+    def get_template_contents(
+            self, template_file=None, template_url=None,
+            template_object=None, files=None):
+        try:
+            return template_utils.get_template_contents(
+                template_file=template_file, template_url=template_url,
+                template_object=template_object, files=files)
+        except Exception as e:
+            raise OpenStackCloudException(
+                "Error in processing template files: %s" % str(e))
+
+    def create_stack(
+            self, name,
+            template_file=None, template_url=None,
+            template_object=None, files=None,
+            rollback=True,
+            wait=False, timeout=3600,
+            environment_files=None,
+            **parameters):
+        """Create a stack.
+
+        :param string name: Name of the stack.
+        :param string template_file: Path to the template.
+        :param string template_url: URL of template.
+        :param string template_object: URL to retrieve template object.
+        :param dict files: dict of additional file content to include.
+        :param boolean rollback: Enable rollback on create failure.
+        :param boolean wait: Whether to wait for the create to finish.
+        :param int timeout: Stack create timeout in seconds.
+        :param environment_files: Paths to environment files to apply.
+
+        Other arguments will be passed as stack parameters which will take
+        precedence over any parameters specified in the environments.
+
+        Only one of template_file, template_url, template_object should be
+        specified.
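For instance, a hedged sketch of a blocking stack create (the stack name, template path, and the extra ``flavor`` parameter are invented):

.. code-block:: python

    stack = cloud.create_stack(
        'my-stack',
        template_file='templates/app.yaml',
        rollback=True,
        wait=True,           # poll Heat events until CREATE completes
        timeout=1800,
        flavor='m1.small',   # extra kwargs become stack parameters
    )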
+
+        :returns: a dict containing the stack description
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call
+        """
+        envfiles, env = template_utils.process_multiple_environments_and_files(
+            env_paths=environment_files)
+        tpl_files, template = template_utils.get_template_contents(
+            template_file=template_file,
+            template_url=template_url,
+            template_object=template_object,
+            files=files)
+        params = dict(
+            stack_name=name,
+            disable_rollback=not rollback,
+            parameters=parameters,
+            template=template,
+            files=dict(list(tpl_files.items()) + list(envfiles.items())),
+            environment=env,
+            timeout_mins=timeout // 60,
+        )
+        self._orchestration_client.post('/stacks', json=params)
+        if wait:
+            event_utils.poll_for_events(self, stack_name=name,
+                                        action='CREATE')
+        return self.get_stack(name)
+
+    def update_stack(
+            self, name_or_id,
+            template_file=None, template_url=None,
+            template_object=None, files=None,
+            rollback=True,
+            wait=False, timeout=3600,
+            environment_files=None,
+            **parameters):
+        """Update a stack.
+
+        :param string name_or_id: Name or ID of the stack to update.
+        :param string template_file: Path to the template.
+        :param string template_url: URL of template.
+        :param string template_object: URL to retrieve template object.
+        :param dict files: dict of additional file content to include.
+        :param boolean rollback: Enable rollback on update failure.
+        :param boolean wait: Whether to wait for the update to finish.
+        :param int timeout: Stack update timeout in seconds.
+        :param environment_files: Paths to environment files to apply.
+
+        Other arguments will be passed as stack parameters which will take
+        precedence over any parameters specified in the environments.
+
+        Only one of template_file, template_url, template_object should be
+        specified.
+
+        :returns: a dict containing the stack description
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API calls
+        """
+        envfiles, env = template_utils.process_multiple_environments_and_files(
+            env_paths=environment_files)
+        tpl_files, template = template_utils.get_template_contents(
+            template_file=template_file,
+            template_url=template_url,
+            template_object=template_object,
+            files=files)
+        params = dict(
+            disable_rollback=not rollback,
+            parameters=parameters,
+            template=template,
+            files=dict(list(tpl_files.items()) + list(envfiles.items())),
+            environment=env,
+            timeout_mins=timeout // 60,
+        )
+        if wait:
+            # find the last event to use as the marker
+            events = event_utils.get_events(
+                self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1})
+            marker = events[0].id if events else None
+
+        self._orchestration_client.put(
+            '/stacks/{name_or_id}'.format(name_or_id=name_or_id), json=params)
+        if wait:
+            event_utils.poll_for_events(self,
+                                        name_or_id,
+                                        action='UPDATE',
+                                        marker=marker)
+        return self.get_stack(name_or_id)
+
+    def delete_stack(self, name_or_id, wait=False):
+        """Delete a stack
+
+        :param string name_or_id: Stack name or ID.
+        :param boolean wait: Whether to wait for the delete to finish
+
+        :returns: True if delete succeeded, False if the stack was not found.
+ + :raises: ``OpenStackCloudException`` if something goes wrong during + the OpenStack API call + """ + stack = self.get_stack(name_or_id) + if stack is None: + self.log.debug("Stack %s not found for deleting", name_or_id) + return False + + if wait: + # find the last event to use as the marker + events = event_utils.get_events( + self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1}) + marker = events[0].id if events else None + + self._orchestration_client.delete( + '/stacks/{id}'.format(id=stack['id'])) + + if wait: + try: + event_utils.poll_for_events(self, + stack_name=name_or_id, + action='DELETE', + marker=marker) + except OpenStackCloudHTTPError: + pass + stack = self.get_stack(name_or_id) + if stack and stack['stack_status'] == 'DELETE_FAILED': + raise OpenStackCloudException( + "Failed to delete stack {id}: {reason}".format( + id=name_or_id, reason=stack['stack_status_reason'])) + + return True + + def get_name(self): + return self.name + + def get_region(self): + return self.region_name + + def get_flavor_name(self, flavor_id): + flavor = self.get_flavor(flavor_id, get_extra=False) + if flavor: + return flavor['name'] + return None + + def get_flavor_by_ram(self, ram, include=None, get_extra=True): + """Get a flavor based on amount of RAM available. + + Finds the flavor with the least amount of RAM that is at least + as much as the specified amount. If `include` is given, further + filter based on matching flavor name. + + :param int ram: Minimum amount of RAM. + :param string include: If given, will return a flavor whose name + contains this string as a substring. + """ + flavors = self.list_flavors(get_extra=get_extra) + for flavor in sorted(flavors, key=operator.itemgetter('ram')): + if (flavor['ram'] >= ram and + (not include or include in flavor['name'])): + return flavor + raise OpenStackCloudException( + "Could not find a flavor with {ram} and '{include}'".format( + ram=ram, include=include)) + + def get_session_endpoint(self, service_key): + try: + return self.cloud_config.get_session_endpoint(service_key) + except keystoneauth1.exceptions.catalog.EndpointNotFound as e: + self.log.debug( + "Endpoint not found in %s cloud: %s", self.name, str(e)) + endpoint = None + except OpenStackCloudException: + raise + except Exception as e: + raise OpenStackCloudException( + "Error getting {service} endpoint on {cloud}:{region}:" + " {error}".format( + service=service_key, + cloud=self.name, + region=self.region_name, + error=str(e))) + return endpoint + + def has_service(self, service_key): + if not self.cloud_config.config.get('has_%s' % service_key, True): + # TODO(mordred) add a stamp here so that we only report this once + if not (service_key in self._disable_warnings + and self._disable_warnings[service_key]): + self.log.debug( + "Disabling %(service_key)s entry in catalog" + " per config", {'service_key': service_key}) + self._disable_warnings[service_key] = True + return False + try: + endpoint = self.get_session_endpoint(service_key) + except OpenStackCloudException: + return False + if endpoint: + return True + else: + return False + + @_utils.cache_on_arguments() + def _nova_extensions(self): + extensions = set() + data = self._compute_client.get( + '/extensions', + error_message="Error fetching extension list for nova") + for extension in self._get_and_munchify('extensions', data): + extensions.add(extension['alias']) + return extensions + + def _has_nova_extension(self, extension_name): + return extension_name in self._nova_extensions() + + def 
search_keypairs(self, name_or_id=None, filters=None):
+        keypairs = self.list_keypairs()
+        return _utils._filter_list(keypairs, name_or_id, filters)
+
+    @_utils.cache_on_arguments()
+    def _neutron_extensions(self):
+        extensions = set()
+        data = self._network_client.get(
+            '/extensions.json',
+            error_message="Error fetching extension list for neutron")
+        for extension in self._get_and_munchify('extensions', data):
+            extensions.add(extension['alias'])
+        return extensions
+
+    def _has_neutron_extension(self, extension_alias):
+        return extension_alias in self._neutron_extensions()
+
+    def search_networks(self, name_or_id=None, filters=None):
+        """Search networks
+
+        :param name_or_id: Name or ID of the desired network.
+        :param filters: a dict containing additional filters to use. e.g.
+            {'router:external': True}
+
+        :returns: a list of ``munch.Munch`` containing the network
+            description.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call.
+        """
+        networks = self.list_networks(filters)
+        return _utils._filter_list(networks, name_or_id, filters)
+
+    def search_routers(self, name_or_id=None, filters=None):
+        """Search routers
+
+        :param name_or_id: Name or ID of the desired router.
+        :param filters: a dict containing additional filters to use. e.g.
+            {'admin_state_up': True}
+
+        :returns: a list of ``munch.Munch`` containing the router
+            description.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call.
+        """
+        routers = self.list_routers(filters)
+        return _utils._filter_list(routers, name_or_id, filters)
+
+    def search_subnets(self, name_or_id=None, filters=None):
+        """Search subnets
+
+        :param name_or_id: Name or ID of the desired subnet.
+        :param filters: a dict containing additional filters to use. e.g.
+            {'enable_dhcp': True}
+
+        :returns: a list of ``munch.Munch`` containing the subnet
+            description.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call.
+        """
+        subnets = self.list_subnets(filters)
+        return _utils._filter_list(subnets, name_or_id, filters)
+
+    def search_ports(self, name_or_id=None, filters=None):
+        """Search ports
+
+        :param name_or_id: Name or ID of the desired port.
+        :param filters: a dict containing additional filters to use. e.g.
+            {'device_id': '2711c67a-b4a7-43dd-ace7-6187b791c3f0'}
+
+        :returns: a list of ``munch.Munch`` containing the port description.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call.
+        """
+        # If port caching is enabled, do not push the filter down to
+        # neutron; get all the ports (potentially from the cache) and
+        # filter locally.
+        if self._PORT_AGE:
+            pushdown_filters = None
+        else:
+            pushdown_filters = filters
+        ports = self.list_ports(pushdown_filters)
+        return _utils._filter_list(ports, name_or_id, filters)
+
+    def search_qos_policies(self, name_or_id=None, filters=None):
+        """Search QoS policies
+
+        :param name_or_id: Name or ID of the desired policy.
+        :param filters: a dict containing additional filters to use. e.g.
+            {'shared': True}
+
+        :returns: a list of ``munch.Munch`` containing the QoS policy
+            description.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the OpenStack API call.
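All of these ``search_*`` helpers share one shape: fetch a listing via the corresponding ``list_*`` call, then filter client-side with ``_utils._filter_list``. A usage sketch (``cloud`` is an ``OpenStackCloud`` instance; the names and filters are examples):

.. code-block:: python

    # Dict filters match resource attributes.
    external = cloud.search_networks(filters={'router:external': True})

    # name_or_id narrows by name or ID on top of any filters.
    private = cloud.search_networks(name_or_id='private')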
+ """ + policies = self.list_qos_policies(filters) + return _utils._filter_list(policies, name_or_id, filters) + + def search_volumes(self, name_or_id=None, filters=None): + volumes = self.list_volumes() + return _utils._filter_list( + volumes, name_or_id, filters) + + def search_volume_snapshots(self, name_or_id=None, filters=None): + volumesnapshots = self.list_volume_snapshots() + return _utils._filter_list( + volumesnapshots, name_or_id, filters) + + def search_volume_backups(self, name_or_id=None, filters=None): + volume_backups = self.list_volume_backups() + return _utils._filter_list( + volume_backups, name_or_id, filters) + + def search_volume_types( + self, name_or_id=None, filters=None, get_extra=True): + volume_types = self.list_volume_types(get_extra=get_extra) + return _utils._filter_list(volume_types, name_or_id, filters) + + def search_flavors(self, name_or_id=None, filters=None, get_extra=True): + flavors = self.list_flavors(get_extra=get_extra) + return _utils._filter_list(flavors, name_or_id, filters) + + def search_security_groups(self, name_or_id=None, filters=None): + # `filters` could be a dict or a jmespath (str) + groups = self.list_security_groups( + filters=filters if isinstance(filters, dict) else None + ) + return _utils._filter_list(groups, name_or_id, filters) + + def search_servers( + self, name_or_id=None, filters=None, detailed=False, + all_projects=False, bare=False): + servers = self.list_servers( + detailed=detailed, all_projects=all_projects, bare=bare) + return _utils._filter_list(servers, name_or_id, filters) + + def search_server_groups(self, name_or_id=None, filters=None): + """Seach server groups. + + :param name: server group name or ID. + :param filters: a dict containing additional filters to use. + + :returns: a list of dicts containing the server groups + + :raises: ``OpenStackCloudException``: if something goes wrong during + the OpenStack API call. + """ + server_groups = self.list_server_groups() + return _utils._filter_list(server_groups, name_or_id, filters) + + def search_images(self, name_or_id=None, filters=None): + images = self.list_images() + return _utils._filter_list(images, name_or_id, filters) + + def search_floating_ip_pools(self, name=None, filters=None): + pools = self.list_floating_ip_pools() + return _utils._filter_list(pools, name, filters) + + # With Neutron, there are some cases in which full server side filtering is + # not possible (e.g. nested attributes or list of objects) so we also need + # to use the client-side filtering + # The same goes for all neutron-related search/get methods! + def search_floating_ips(self, id=None, filters=None): + # `filters` could be a jmespath expression which Neutron server doesn't + # understand, obviously. + if self._use_neutron_floating() and isinstance(filters, dict): + kwargs = {'filters': filters} + else: + kwargs = {} + floating_ips = self.list_floating_ips(**kwargs) + return _utils._filter_list(floating_ips, id, filters) + + def search_stacks(self, name_or_id=None, filters=None): + """Search stacks. + + :param name_or_id: Name or ID of the desired stack. + :param filters: a dict containing additional filters to use. e.g. + {'stack_status': 'CREATE_COMPLETE'} + + :returns: a list of ``munch.Munch`` containing the stack description. + + :raises: ``OpenStackCloudException`` if something goes wrong during the + OpenStack API call. 
+ """ + stacks = self.list_stacks() + return _utils._filter_list(stacks, name_or_id, filters) + + def list_keypairs(self): + """List all available keypairs. + + :returns: A list of ``munch.Munch`` containing keypair info. + + """ + data = self._compute_client.get( + '/os-keypairs', + error_message="Error fetching keypair list") + return self._normalize_keypairs([ + k['keypair'] for k in self._get_and_munchify('keypairs', data)]) + + def list_networks(self, filters=None): + """List all available networks. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of ``munch.Munch`` containing network info. + + """ + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + data = self._network_client.get("/networks.json", params=filters) + return self._get_and_munchify('networks', data) + + def list_routers(self, filters=None): + """List all available routers. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of router ``munch.Munch``. + + """ + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + data = self._network_client.get( + "/routers.json", params=filters, + error_message="Error fetching router list") + return self._get_and_munchify('routers', data) + + def list_subnets(self, filters=None): + """List all available subnets. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of subnet ``munch.Munch``. + + """ + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + data = self._network_client.get("/subnets.json", params=filters) + return self._get_and_munchify('subnets', data) + + def list_ports(self, filters=None): + """List all available ports. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of port ``munch.Munch``. + + """ + # If pushdown filters are specified, bypass local caching. + if filters: + return self._list_ports(filters) + # Translate None from search interface to empty {} for kwargs below + filters = {} + if (time.time() - self._ports_time) >= self._PORT_AGE: + # Since we're using cached data anyway, we don't need to + # have more than one thread actually submit the list + # ports task. Let the first one submit it while holding + # a lock, and the non-blocking acquire method will cause + # subsequent threads to just skip this and use the old + # data until it succeeds. + # Initially when we never got data, block to retrieve some data. + first_run = self._ports is None + if self._ports_lock.acquire(first_run): + try: + if not (first_run and self._ports is not None): + self._ports = self._list_ports(filters) + self._ports_time = time.time() + finally: + self._ports_lock.release() + return self._ports + + def _list_ports(self, filters): + data = self._network_client.get( + "/ports.json", params=filters, + error_message="Error fetching port list") + return self._get_and_munchify('ports', data) + + def list_qos_rule_types(self, filters=None): + """List all available QoS rule types. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of rule types ``munch.Munch``. 
+
+        """
+        if not self._has_neutron_extension('qos'):
+            raise OpenStackCloudUnavailableExtension(
+                'QoS extension is not available on target cloud')
+
+        # Translate None from search interface to empty {} for kwargs below
+        if not filters:
+            filters = {}
+        data = self._network_client.get(
+            "/qos/rule-types.json", params=filters,
+            error_message="Error fetching QoS rule types list")
+        return self._get_and_munchify('rule_types', data)
+
+    def get_qos_rule_type_details(self, rule_type, filters=None):
+        """Get QoS rule type details by rule type name.
+
+        :param string rule_type: Name of the QoS rule type.
+
+        :returns: A rule type details ``munch.Munch`` or None if
+            no matching rule type is found.
+
+        """
+        if not self._has_neutron_extension('qos'):
+            raise OpenStackCloudUnavailableExtension(
+                'QoS extension is not available on target cloud')
+
+        if not self._has_neutron_extension('qos-rule-type-details'):
+            raise OpenStackCloudUnavailableExtension(
+                'qos-rule-type-details extension is not available '
+                'on target cloud')
+
+        data = self._network_client.get(
+            "/qos/rule-types/{rule_type}.json".format(rule_type=rule_type),
+            error_message="Error fetching QoS details of {rule_type} "
+                          "rule type".format(rule_type=rule_type))
+        return self._get_and_munchify('rule_type', data)
+
+    def list_qos_policies(self, filters=None):
+        """List all available QoS policies.
+
+        :param filters: (optional) dict of filter conditions to push down
+        :returns: A list of policy ``munch.Munch``.
+
+        """
+        if not self._has_neutron_extension('qos'):
+            raise OpenStackCloudUnavailableExtension(
+                'QoS extension is not available on target cloud')
+        # Translate None from search interface to empty {} for kwargs below
+        if not filters:
+            filters = {}
+        data = self._network_client.get(
+            "/qos/policies.json", params=filters,
+            error_message="Error fetching QoS policies list")
+        return self._get_and_munchify('policies', data)
+
+    @_utils.cache_on_arguments(should_cache_fn=_no_pending_volumes)
+    def list_volumes(self, cache=True):
+        """List all available volumes.
+
+        :returns: A list of volume ``munch.Munch``.
+
+        """
+        def _list(data):
+            volumes.extend(data.get('volumes', []))
+            endpoint = None
+            for l in data.get('volumes_links', []):
+                if 'rel' in l and 'next' == l['rel']:
+                    endpoint = l['href']
+                    break
+            if endpoint:
+                try:
+                    _list(self._volume_client.get(endpoint))
+                except OpenStackCloudURINotFound:
+                    # Catch and re-raise here because we are making recursive
+                    # calls and we just have context for the log here
+                    self.log.debug(
+                        "While listing volumes, could not find next link"
+                        " {link}.".format(link=data))
+                    raise
+
+        if not cache:
+            warnings.warn('cache argument to list_volumes is deprecated. Use '
+                          'invalidate instead.')
+
+        # Fetching paginated volumes can fail for several reasons; if
+        # something goes wrong, we'll have to start fetching volumes from
+        # scratch.
+        attempts = 5
+        for _ in range(attempts):
+            volumes = []
+            data = self._volume_client.get('/volumes/detail')
+            if 'volumes_links' not in data:
+                # no pagination needed
+                volumes.extend(data.get('volumes', []))
+                break
+
+            try:
+                _list(data)
+                break
+            except OpenStackCloudURINotFound:
+                pass
+        else:
+            self.log.debug(
+                "List volumes failed to retrieve all volumes after"
+                " {attempts} attempts. Returning what we found.".format(
Returning what we found.".format( + attempts=attempts)) + # list volumes didn't complete succesfully so just return what + # we found + return self._normalize_volumes( + self._get_and_munchify(key=None, data=volumes)) + + @_utils.cache_on_arguments() + def list_volume_types(self, get_extra=True): + """List all available volume types. + + :returns: A list of volume ``munch.Munch``. + + """ + data = self._volume_client.get( + '/types', + params=dict(is_public='None'), + error_message='Error fetching volume_type list') + return self._normalize_volume_types( + self._get_and_munchify('volume_types', data)) + + @_utils.cache_on_arguments() + def list_availability_zone_names(self, unavailable=False): + """List names of availability zones. + + :param bool unavailable: Whether or not to include unavailable zones + in the output. Defaults to False. + + :returns: A list of availability zone names, or an empty list if the + list could not be fetched. + """ + try: + data = self._compute_client.get('/os-availability-zone') + except OpenStackCloudHTTPError: + self.log.debug( + "Availability zone list could not be fetched", + exc_info=True) + return [] + zones = self._get_and_munchify('availabilityZoneInfo', data) + ret = [] + for zone in zones: + if zone['zoneState']['available'] or unavailable: + ret.append(zone['zoneName']) + return ret + + @_utils.cache_on_arguments() + def list_flavors(self, get_extra=None): + """List all available flavors. + + :param get_extra: Whether or not to fetch extra specs for each flavor. + Defaults to True. Default behavior value can be + overridden in clouds.yaml by setting + openstack.cloud.get_extra_specs to False. + :returns: A list of flavor ``munch.Munch``. + + """ + if get_extra is None: + get_extra = self._extra_config['get_flavor_extra_specs'] + data = self._compute_client.get( + '/flavors/detail', params=dict(is_public='None'), + error_message="Error fetching flavor list") + flavors = self._normalize_flavors( + self._get_and_munchify('flavors', data)) + + for flavor in flavors: + if not flavor.extra_specs and get_extra: + endpoint = "/flavors/{id}/os-extra_specs".format( + id=flavor.id) + try: + data = self._compute_client.get( + endpoint, + error_message="Error fetching flavor extra specs") + flavor.extra_specs = self._get_and_munchify( + 'extra_specs', data) + except OpenStackCloudHTTPError as e: + flavor.extra_specs = {} + self.log.debug( + 'Fetching extra specs for flavor failed:' + ' %(msg)s', {'msg': str(e)}) + + return flavors + + @_utils.cache_on_arguments(should_cache_fn=_no_pending_stacks) + def list_stacks(self): + """List all stacks. + + :returns: a list of ``munch.Munch`` containing the stack description. + + :raises: ``OpenStackCloudException`` if something goes wrong during the + OpenStack API call. + """ + data = self._orchestration_client.get( + '/stacks', error_message="Error fetching stack list") + return self._normalize_stacks( + self._get_and_munchify('stacks', data)) + + def list_server_security_groups(self, server): + """List all security groups associated with the given server. + + :returns: A list of security group ``munch.Munch``. 
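+
+ A short sketch of typical use; the server name here is illustrative::
+
+     server = cloud.get_server('app-server-1')
+     for group in cloud.list_server_security_groups(server):
+         print(group['name'])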
+ """ + + # Don't even try if we're a cloud that doesn't have them + if not self._has_secgroups(): + return [] + + data = self._compute_client.get( + '/servers/{server_id}/os-security-groups'.format( + server_id=server['id'])) + return self._normalize_secgroups( + self._get_and_munchify('security_groups', data)) + + def _get_server_security_groups(self, server, security_groups): + if not self._has_secgroups(): + raise OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + if not isinstance(server, dict): + server = self.get_server(server, bare=True) + + if server is None: + self.log.debug('Server %s not found', server) + return None, None + + if not isinstance(security_groups, (list, tuple)): + security_groups = [security_groups] + + sec_group_objs = [] + + for sg in security_groups: + if not isinstance(sg, dict): + sg = self.get_security_group(sg) + + if sg is None: + self.log.debug('Security group %s not found for adding', + sg) + + return None, None + + sec_group_objs.append(sg) + + return server, sec_group_objs + + def add_server_security_groups(self, server, security_groups): + """Add security groups to a server. + + Add existing security groups to an existing server. If the security + groups are already present on the server this will continue unaffected. + + :returns: False if server or security groups are undefined, True + otherwise. + + :raises: ``OpenStackCloudException``, on operation error. + """ + server, security_groups = self._get_server_security_groups( + server, security_groups) + + if not (server and security_groups): + return False + + for sg in security_groups: + self._compute_client.post( + '/servers/%s/action' % server['id'], + json={'addSecurityGroup': {'name': sg.name}}) + + return True + + def remove_server_security_groups(self, server, security_groups): + """Remove security groups from a server + + Remove existing security groups from an existing server. If the + security groups are not present on the server this will continue + unaffected. + + :returns: False if server or security groups are undefined, True + otherwise. + + :raises: ``OpenStackCloudException``, on operation error. + """ + server, security_groups = self._get_server_security_groups( + server, security_groups) + + if not (server and security_groups): + return False + + ret = True + + for sg in security_groups: + try: + self._compute_client.post( + '/servers/%s/action' % server['id'], + json={'removeSecurityGroup': {'name': sg.name}}) + + except OpenStackCloudURINotFound: + # NOTE(jamielennox): Is this ok? If we remove something that + # isn't present should we just conclude job done or is that an + # error? Nova returns ok if you try to add a group twice. + self.log.debug( + "The security group %s was not present on server %s so " + "no action was performed", sg.name, server.name) + ret = False + + return ret + + def list_security_groups(self, filters=None): + """List all available security groups. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of security group ``munch.Munch``. + + """ + # Security groups not supported + if not self._has_secgroups(): + raise OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + if not filters: + filters = {} + + data = [] + # Handle neutron security groups + if self._use_neutron_secgroups(): + # Neutron returns dicts, so no need to convert objects here. 
+ data = self._network_client.get( + '/security-groups.json', params=filters, + error_message="Error fetching security group list") + return self._get_and_munchify('security_groups', data) + + # Handle nova security groups + else: + data = self._compute_client.get( + '/os-security-groups', params=filters) + return self._normalize_secgroups( + self._get_and_munchify('security_groups', data)) + + def list_servers(self, detailed=False, all_projects=False, bare=False): + """List all available servers. + + :param detailed: Whether or not to add detailed additional information. + Defaults to False. + :param all_projects: Whether to list servers from all projects or just + the current auth scoped project. + :param bare: Whether to skip adding any additional information to the + server record. Defaults to False, meaning the addresses + dict will be populated as needed from neutron. Setting + to True implies detailed = False. + + :returns: A list of server ``munch.Munch``. + + """ + if (time.time() - self._servers_time) >= self._SERVER_AGE: + # Since we're using cached data anyway, we don't need to + # have more than one thread actually submit the list + # servers task. Let the first one submit it while holding + # a lock, and the non-blocking acquire method will cause + # subsequent threads to just skip this and use the old + # data until it succeeds. + # Initially when we never got data, block to retrieve some data. + first_run = self._servers is None + if self._servers_lock.acquire(first_run): + try: + if not (first_run and self._servers is not None): + self._servers = self._list_servers( + detailed=detailed, + all_projects=all_projects, + bare=bare) + self._servers_time = time.time() + finally: + self._servers_lock.release() + return self._servers + + def _list_servers(self, detailed=False, all_projects=False, bare=False): + error_msg = "Error fetching server list on {cloud}:{region}:".format( + cloud=self.name, + region=self.region_name) + + params = {} + if all_projects: + params['all_tenants'] = True + data = self._compute_client.get( + '/servers/detail', params=params, error_message=error_msg) + servers = self._normalize_servers( + self._get_and_munchify('servers', data)) + return [ + self._expand_server(server, detailed, bare) + for server in servers + ] + + def list_server_groups(self): + """List all available server groups. + + :returns: A list of server group dicts. 
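+
+ A minimal sketch, assuming ``cloud`` is a connected
+ ``OpenStackCloud``::
+
+     for group in cloud.list_server_groups():
+         print(group['name'], group.get('policies'))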
+
+ """
+ data = self._compute_client.get(
+ '/os-server-groups',
+ error_message="Error fetching server group list")
+ return self._get_and_munchify('server_groups', data)
+
+ def get_compute_limits(self, name_or_id=None):
+ """ Get compute limits for a project
+
+ :param name_or_id: (optional) project name or ID to get limits for
+ if different from the current project
+ :raises: OpenStackCloudException if it's not a valid project
+
+ :returns: Munch object with the limits
+ """
+ params = {}
+ project_id = None
+ error_msg = "Failed to get limits"
+ if name_or_id:
+ proj = self.get_project(name_or_id)
+ if not proj:
+ raise OpenStackCloudException("project does not exist")
+ project_id = proj.id
+ params['tenant_id'] = project_id
+ error_msg = "{msg} for the project: {project} ".format(
+ msg=error_msg, project=name_or_id)
+
+ data = self._compute_client.get('/limits', params=params)
+ limits = self._get_and_munchify('limits', data)
+ return self._normalize_compute_limits(limits, project_id=project_id)
+
+ @_utils.cache_on_arguments(should_cache_fn=_no_pending_images)
+ def list_images(self, filter_deleted=True, show_all=False):
+ """Get available images.
+
+ :param filter_deleted: Control whether deleted images are returned.
+ :param show_all: Show all images, including images that are shared
+ but not accepted. (By default in glance v2, shared images that
+ have not been accepted are not shown.) show_all will override the
+ value of filter_deleted to False.
+ :returns: A list of glance images.
+ """
+ if show_all:
+ filter_deleted = False
+ # First, try to actually get images from glance; it's more efficient
+ images = []
+ params = {}
+ image_list = []
+ try:
+ if self._is_client_version('image', 2):
+ endpoint = '/images'
+ if show_all:
+ params['member_status'] = 'all'
+ else:
+ endpoint = '/images/detail'
+
+ response = self._image_client.get(endpoint, params=params)
+
+ except keystoneauth1.exceptions.catalog.EndpointNotFound:
+ # We didn't have glance, let's try nova
+ # If this doesn't work - we just let the exception propagate
+ response = self._compute_client.get('/images/detail')
+ while 'next' in response:
+ image_list.extend(meta.obj_list_to_munch(response['images']))
+ endpoint = response['next']
+ # Use the raw endpoint from the catalog not the one from
+ # version discovery so that the next links will work right
+ response = self._raw_image_client.get(endpoint)
+ if 'images' in response:
+ image_list.extend(meta.obj_list_to_munch(response['images']))
+ else:
+ image_list.extend(response)
+
+ for image in image_list:
+ # The cloud might return DELETED for invalid images.
+ # While that's cute and all, that's an implementation detail.
+ if not filter_deleted:
+ images.append(image)
+ elif image.status.lower() != 'deleted':
+ images.append(image)
+ return self._normalize_images(images)
+
+ def list_floating_ip_pools(self):
+ """List all available floating IP pools.
+
+ NOTE: This function supports the nova-net view of the world. nova-net
+ has been deprecated, so it's highly recommended to switch to using
+ neutron. `get_external_ipv4_floating_networks` is what you should
+ almost certainly be using.
+
+ :returns: A list of floating IP pool ``munch.Munch``.
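+
+ A hypothetical usage sketch::
+
+     for pool in cloud.list_floating_ip_pools():
+         print(pool['name'])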
+
+ """
+ if not self._has_nova_extension('os-floating-ip-pools'):
+ raise OpenStackCloudUnavailableExtension(
+ 'Floating IP pools extension is not available on target cloud')
+
+ data = self._compute_client.get(
+ '/os-floating-ip-pools',
+ error_message="Error fetching floating IP pool list")
+ pools = self._get_and_munchify('floating_ip_pools', data)
+ return [{'name': p['name']} for p in pools]
+
+ def _list_floating_ips(self, filters=None):
+ if self._use_neutron_floating():
+ try:
+ return self._normalize_floating_ips(
+ self._neutron_list_floating_ips(filters))
+ except OpenStackCloudURINotFound as e:
+ # Nova-network doesn't support server-side floating ip
+ # filtering, so it's safer to return an empty list than
+ # to fall back to Nova, which may return more results than
+ # expected.
+ if filters:
+ self.log.error(
+ "Neutron returned NotFound for floating IPs, which"
+ " means this cloud doesn't have neutron floating ips."
+ " shade can't fallback to trying Nova since nova"
+ " doesn't support server-side filtering when listing"
+ " floating ips and filters were given. If you do not"
+ " think shade should be attempting to list floating"
+ " ips on neutron, it is possible to control the"
+ " behavior by setting floating_ip_source to 'nova' or"
+ " None for cloud: %(cloud)s. If you are not already"
+ " using clouds.yaml to configure settings for your"
+ " cloud(s), and you want to configure this setting,"
+ " you will need a clouds.yaml file. For more"
+ " information, please see %(doc_url)s", {
+ 'cloud': self.name,
+ 'doc_url': _OCC_DOC_URL,
+ }
+ )
+ # We can't fall back to nova because we push down filters.
+ # We got a 404, which means neutron doesn't exist.
+ return []
+ self.log.debug(
+ "Something went wrong talking to neutron API: "
+ "'%(msg)s'. Trying with Nova.", {'msg': str(e)})
+ # Fall-through, trying with Nova
+ else:
+ if filters:
+ raise ValueError(
+ "Nova-network doesn't support server-side floating ip "
+ "filtering. Use the search_floating_ips method instead"
+ )
+
+ floating_ips = self._nova_list_floating_ips()
+ return self._normalize_floating_ips(floating_ips)
+
+ def list_floating_ips(self, filters=None):
+ """List all available floating IPs.
+
+ :param filters: (optional) dict of filter conditions to push down
+ :returns: A list of floating IP ``munch.Munch``.
+
+ """
+ # If pushdown filters are specified, bypass local caching.
+ if filters:
+ return self._list_floating_ips(filters)
+
+ if (time.time() - self._floating_ips_time) >= self._FLOAT_AGE:
+ # Since we're using cached data anyway, we don't need to
+ # have more than one thread actually submit the list
+ # floating ips task. Let the first one submit it while holding
+ # a lock, and the non-blocking acquire method will cause
+ # subsequent threads to just skip this and use the old
+ # data until it succeeds.
+ # Initially when we never got data, block to retrieve some data.
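+ # (list_ports and list_servers above use this same age/lock
+ # caching pattern.)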
+ first_run = self._floating_ips is None + if self._floating_ips_lock.acquire(first_run): + try: + if not (first_run and self._floating_ips is not None): + self._floating_ips = self._list_floating_ips() + self._floating_ips_time = time.time() + finally: + self._floating_ips_lock.release() + return self._floating_ips + + def _neutron_list_floating_ips(self, filters=None): + if not filters: + filters = {} + data = self._network_client.get('/floatingips.json', params=filters) + return self._get_and_munchify('floatingips', data) + + def _nova_list_floating_ips(self): + try: + data = self._compute_client.get('/os-floating-ips') + except OpenStackCloudURINotFound: + return [] + return self._get_and_munchify('floating_ips', data) + + def use_external_network(self): + return self._use_external_network + + def use_internal_network(self): + return self._use_internal_network + + def _reset_network_caches(self): + # Variables to prevent us from going through the network finding + # logic again if we've done it once. This is different from just + # the cached value, since "None" is a valid value to find. + with self._networks_lock: + self._external_ipv4_networks = [] + self._external_ipv4_floating_networks = [] + self._internal_ipv4_networks = [] + self._external_ipv6_networks = [] + self._internal_ipv6_networks = [] + self._nat_destination_network = None + self._default_network_network = None + self._network_list_stamp = False + + def _set_interesting_networks(self): + external_ipv4_networks = [] + external_ipv4_floating_networks = [] + internal_ipv4_networks = [] + external_ipv6_networks = [] + internal_ipv6_networks = [] + nat_destination = None + default_network = None + + all_subnets = None + + # Filter locally because we have an or condition + try: + # TODO(mordred): Rackspace exposes neutron but it does not + # work. I think that overriding what the service catalog + # reports should be a thing os-client-config should handle + # in a vendor profile - but for now it does not. That means + # this search_networks can just totally fail. If it does + # though, that's fine, clearly the neutron introspection is + # not going to work. 
+ all_networks = self.list_networks()
+ except OpenStackCloudException:
+ self._network_list_stamp = True
+ return
+
+ for network in all_networks:
+
+ # External IPv4 networks
+ if (network['name'] in self._external_ipv4_names
+ or network['id'] in self._external_ipv4_names):
+ external_ipv4_networks.append(network)
+ elif ((('router:external' in network
+ and network['router:external']) or
+ network.get('provider:physical_network')) and
+ network['name'] not in self._internal_ipv4_names and
+ network['id'] not in self._internal_ipv4_names):
+ external_ipv4_networks.append(network)
+
+ # External Floating IPv4 networks
+ if ('router:external' in network and network['router:external']):
+ external_ipv4_floating_networks.append(network)
+
+ # Internal IPv4 networks
+ if (network['name'] in self._internal_ipv4_names
+ or network['id'] in self._internal_ipv4_names):
+ internal_ipv4_networks.append(network)
+ elif (not network.get('router:external', False) and
+ not network.get('provider:physical_network') and
+ network['name'] not in self._external_ipv4_names and
+ network['id'] not in self._external_ipv4_names):
+ internal_ipv4_networks.append(network)
+
+ # External IPv6 networks
+ if (network['name'] in self._external_ipv6_names
+ or network['id'] in self._external_ipv6_names):
+ external_ipv6_networks.append(network)
+ elif (network.get('router:external') and
+ network['name'] not in self._internal_ipv6_names and
+ network['id'] not in self._internal_ipv6_names):
+ external_ipv6_networks.append(network)
+
+ # Internal IPv6 networks
+ if (network['name'] in self._internal_ipv6_names
+ or network['id'] in self._internal_ipv6_names):
+ internal_ipv6_networks.append(network)
+ elif (not network.get('router:external', False) and
+ network['name'] not in self._external_ipv6_names and
+ network['id'] not in self._external_ipv6_names):
+ internal_ipv6_networks.append(network)
+
+ # NAT Destination
+ if self._nat_destination in (
+ network['name'], network['id']):
+ if nat_destination:
+ raise OpenStackCloudException(
+ 'Multiple networks were found matching'
+ ' {nat_net} which is the network configured'
+ ' to be the NAT destination. Please check your'
+ ' cloud resources. It is probably a good idea'
+ ' to configure this network by ID rather than'
+ ' by name.'.format(
+ nat_net=self._nat_destination))
+ nat_destination = network
+ elif self._nat_destination is None:
+ # TODO(mordred) need a config value for floating
+ # ips for this cloud so that we can skip this
+ # No configured nat destination; we have to figure
+ # it out.
+ if all_subnets is None:
+ try:
+ all_subnets = self.list_subnets()
+ except OpenStackCloudException:
+ # Thanks to Rackspace's broken neutron
+ all_subnets = []
+
+ for subnet in all_subnets:
+ # TODO(mordred) trap for detecting more than
+ # one network with a gateway_ip without a config
+ if ('gateway_ip' in subnet and subnet['gateway_ip']
+ and network['id'] == subnet['network_id']):
+ nat_destination = network
+ break
+
+ # Default network
+ if self._default_network in (
+ network['name'], network['id']):
+ if default_network:
+ raise OpenStackCloudException(
+ 'Multiple networks were found matching'
+ ' {default_net} which is the network'
+ ' configured to be the default interface'
+ ' network. Please check your cloud resources.'
+ ' It is probably a good idea'
+ ' to configure this network by ID rather than'
+ ' by name.'.format(
+ default_net=self._default_network))
+ default_network = network
+
+ # Validate config vs. reality
+ for net_name in self._external_ipv4_names:
+ if net_name not in [net['name'] for net in external_ipv4_networks]:
+ raise OpenStackCloudException(
+ "Network {network} was provided for external IPv4"
+ " access but that network could not be found".format(
+ network=net_name))
+
+ for net_name in self._internal_ipv4_names:
+ if net_name not in [net['name'] for net in internal_ipv4_networks]:
+ raise OpenStackCloudException(
+ "Network {network} was provided for internal IPv4"
+ " access but that network could not be found".format(
+ network=net_name))
+
+ for net_name in self._external_ipv6_names:
+ if net_name not in [net['name'] for net in external_ipv6_networks]:
+ raise OpenStackCloudException(
+ "Network {network} was provided for external IPv6"
+ " access but that network could not be found".format(
+ network=net_name))
+
+ for net_name in self._internal_ipv6_names:
+ if net_name not in [net['name'] for net in internal_ipv6_networks]:
+ raise OpenStackCloudException(
+ "Network {network} was provided for internal IPv6"
+ " access but that network could not be found".format(
+ network=net_name))
+
+ if self._nat_destination and not nat_destination:
+ raise OpenStackCloudException(
+ 'Network {network} was configured to be the'
+ ' destination for inbound NAT but it could not be'
+ ' found'.format(
+ network=self._nat_destination))
+
+ if self._default_network and not default_network:
+ raise OpenStackCloudException(
+ 'Network {network} was configured to be the'
+ ' default network interface but it could not be'
+ ' found'.format(
+ network=self._default_network))
+
+ self._external_ipv4_networks = external_ipv4_networks
+ self._external_ipv4_floating_networks = external_ipv4_floating_networks
+ self._internal_ipv4_networks = internal_ipv4_networks
+ self._external_ipv6_networks = external_ipv6_networks
+ self._internal_ipv6_networks = internal_ipv6_networks
+ self._nat_destination_network = nat_destination
+ self._default_network_network = default_network
+
+ def _find_interesting_networks(self):
+ if self._networks_lock.acquire():
+ try:
+ if self._network_list_stamp:
+ return
+ if (not self._use_external_network
+ and not self._use_internal_network):
+ # Both have been flagged as skip - don't do a list
+ return
+ if not self.has_service('network'):
+ return
+ self._set_interesting_networks()
+ self._network_list_stamp = True
+ finally:
+ self._networks_lock.release()
+
+ def get_nat_destination(self):
+ """Return the network that is configured to be the NAT destination.
+
+ :returns: A network dict if one is found
+ """
+ self._find_interesting_networks()
+ return self._nat_destination_network
+
+ def get_default_network(self):
+ """Return the network that is configured to be the default interface.
+
+ :returns: A network dict if one is found
+ """
+ self._find_interesting_networks()
+ return self._default_network_network
+
+ def get_external_networks(self):
+ """Return the networks that are configured to route northbound.
+
+ This should be avoided in favor of the specific ipv4/ipv6 methods,
+ but is here for backwards compatibility.
+
+ :returns: A list of network ``munch.Munch`` if one is found
+ """
+ self._find_interesting_networks()
+ return list(
+ set(self._external_ipv4_networks) |
+ set(self._external_ipv6_networks))
+
+ def get_internal_networks(self):
+ """Return the networks that are configured to not route northbound.
+
+ This should be avoided in favor of the specific ipv4/ipv6 methods,
+ but is here for backwards compatibility.
+ + :returns: A list of network ``munch.Munch`` if one is found + """ + self._find_interesting_networks() + return list( + set(self._internal_ipv4_networks) | + set(self._internal_ipv6_networks)) + + def get_external_ipv4_networks(self): + """Return the networks that are configured to route northbound. + + :returns: A list of network ``munch.Munch`` if one is found + """ + self._find_interesting_networks() + return self._external_ipv4_networks + + def get_external_ipv4_floating_networks(self): + """Return the networks that are configured to route northbound. + + :returns: A list of network ``munch.Munch`` if one is found + """ + self._find_interesting_networks() + return self._external_ipv4_floating_networks + + def get_internal_ipv4_networks(self): + """Return the networks that are configured to not route northbound. + + :returns: A list of network ``munch.Munch`` if one is found + """ + self._find_interesting_networks() + return self._internal_ipv4_networks + + def get_external_ipv6_networks(self): + """Return the networks that are configured to route northbound. + + :returns: A list of network ``munch.Munch`` if one is found + """ + self._find_interesting_networks() + return self._external_ipv6_networks + + def get_internal_ipv6_networks(self): + """Return the networks that are configured to not route northbound. + + :returns: A list of network ``munch.Munch`` if one is found + """ + self._find_interesting_networks() + return self._internal_ipv6_networks + + def _has_floating_ips(self): + if not self._floating_ip_source: + return False + else: + return self._floating_ip_source in ('nova', 'neutron') + + def _use_neutron_floating(self): + return (self.has_service('network') + and self._floating_ip_source == 'neutron') + + def _has_secgroups(self): + if not self.secgroup_source: + return False + else: + return self.secgroup_source.lower() in ('nova', 'neutron') + + def _use_neutron_secgroups(self): + return (self.has_service('network') + and self.secgroup_source == 'neutron') + + def get_keypair(self, name_or_id, filters=None): + """Get a keypair by name or ID. + + :param name_or_id: Name or ID of the keypair. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A keypair ``munch.Munch`` or None if no matching keypair is + found. + """ + return _utils._get_entity(self, 'keypair', name_or_id, filters) + + def get_network(self, name_or_id, filters=None): + """Get a network by name or ID. + + :param name_or_id: Name or ID of the network. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A network ``munch.Munch`` or None if no matching network is + found. + + """ + return _utils._get_entity(self, 'network', name_or_id, filters) + + def get_network_by_id(self, id): + """ Get a network by ID + + :param id: ID of the network. + :returns: A network ``munch.Munch``. 
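+
+ A brief sketch, with a made-up UUID::
+
+     network = cloud.get_network_by_id(
+         '01234567-89ab-cdef-0123-456789abcdef')
+     print(network['name'])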
+ """ + data = self._network_client.get( + '/networks/{id}'.format(id=id), + error_message="Error getting network with ID {id}".format(id=id) + ) + network = self._get_and_munchify('network', data) + + return network + + def get_router(self, name_or_id, filters=None): + """Get a router by name or ID. + + :param name_or_id: Name or ID of the router. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A router ``munch.Munch`` or None if no matching router is + found. + + """ + return _utils._get_entity(self, 'router', name_or_id, filters) + + def get_subnet(self, name_or_id, filters=None): + """Get a subnet by name or ID. + + :param name_or_id: Name or ID of the subnet. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + :returns: A subnet ``munch.Munch`` or None if no matching subnet is + found. + + """ + return _utils._get_entity(self, 'subnet', name_or_id, filters) + + def get_subnet_by_id(self, id): + """ Get a subnet by ID + + :param id: ID of the subnet. + :returns: A subnet ``munch.Munch``. + """ + data = self._network_client.get( + '/subnets/{id}'.format(id=id), + error_message="Error getting subnet with ID {id}".format(id=id) + ) + subnet = self._get_and_munchify('subnet', data) + + return subnet + + def get_port(self, name_or_id, filters=None): + """Get a port by name or ID. + + :param name_or_id: Name or ID of the port. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A port ``munch.Munch`` or None if no matching port is found. + + """ + return _utils._get_entity(self, 'port', name_or_id, filters) + + def get_port_by_id(self, id): + """ Get a port by ID + + :param id: ID of the port. + :returns: A port ``munch.Munch``. + """ + data = self._network_client.get( + '/ports/{id}'.format(id=id), + error_message="Error getting port with ID {id}".format(id=id) + ) + port = self._get_and_munchify('port', data) + + return port + + def get_qos_policy(self, name_or_id, filters=None): + """Get a QoS policy by name or ID. + + :param name_or_id: Name or ID of the policy. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A policy ``munch.Munch`` or None if no matching network is + found. + + """ + return _utils._get_entity( + self, 'qos_policie', name_or_id, filters) + + def get_volume(self, name_or_id, filters=None): + """Get a volume by name or ID. + + :param name_or_id: Name or ID of the volume. 
+ :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A volume ``munch.Munch`` or None if no matching volume is + found. + + """ + return _utils._get_entity(self, 'volume', name_or_id, filters) + + def get_volume_by_id(self, id): + """ Get a volume by ID + + :param id: ID of the volume. + :returns: A volume ``munch.Munch``. + """ + data = self._volume_client.get( + '/volumes/{id}'.format(id=id), + error_message="Error getting volume with ID {id}".format(id=id) + ) + volume = self._normalize_volume( + self._get_and_munchify('volume', data)) + + return volume + + def get_volume_type(self, name_or_id, filters=None): + """Get a volume type by name or ID. + + :param name_or_id: Name or ID of the volume. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A volume ``munch.Munch`` or None if no matching volume is + found. + + """ + return _utils._get_entity( + self, 'volume_type', name_or_id, filters) + + def get_flavor(self, name_or_id, filters=None, get_extra=True): + """Get a flavor by name or ID. + + :param name_or_id: Name or ID of the flavor. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param get_extra: + Whether or not the list_flavors call should get the extra flavor + specs. + + :returns: A flavor ``munch.Munch`` or None if no matching flavor is + found. + + """ + search_func = functools.partial( + self.search_flavors, get_extra=get_extra) + return _utils._get_entity(self, search_func, name_or_id, filters) + + def get_flavor_by_id(self, id, get_extra=True): + """ Get a flavor by ID + + :param id: ID of the flavor. + :param get_extra: + Whether or not the list_flavors call should get the extra flavor + specs. + :returns: A flavor ``munch.Munch``. + """ + data = self._compute_client.get( + '/flavors/{id}'.format(id=id), + error_message="Error getting flavor with ID {id}".format(id=id) + ) + flavor = self._normalize_flavor( + self._get_and_munchify('flavor', data)) + + if get_extra is None: + get_extra = self._extra_config['get_flavor_extra_specs'] + + if not flavor.extra_specs and get_extra: + endpoint = "/flavors/{id}/os-extra_specs".format( + id=flavor.id) + try: + data = self._compute_client.get( + endpoint, + error_message="Error fetching flavor extra specs") + flavor.extra_specs = self._get_and_munchify( + 'extra_specs', data) + except OpenStackCloudHTTPError as e: + flavor.extra_specs = {} + self.log.debug( + 'Fetching extra specs for flavor failed:' + ' %(msg)s', {'msg': str(e)}) + + return flavor + + def get_security_group(self, name_or_id, filters=None): + """Get a security group by name or ID. 
+ + :param name_or_id: Name or ID of the security group. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A security group ``munch.Munch`` or None if no matching + security group is found. + + """ + return _utils._get_entity( + self, 'security_group', name_or_id, filters) + + def get_security_group_by_id(self, id): + """ Get a security group by ID + + :param id: ID of the security group. + :returns: A security group ``munch.Munch``. + """ + if not self._has_secgroups(): + raise OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + error_message = ("Error getting security group with" + " ID {id}".format(id=id)) + if self._use_neutron_secgroups(): + data = self._network_client.get( + '/security-groups/{id}'.format(id=id), + error_message=error_message) + else: + data = self._compute_client.get( + '/os-security-groups/{id}'.format(id=id), + error_message=error_message) + return self._normalize_secgroup( + self._get_and_munchify('security_group', data)) + + def get_server_console(self, server, length=None): + """Get the console log for a server. + + :param server: The server to fetch the console log for. Can be either + a server dict or the Name or ID of the server. + :param int length: The number of lines you would like to retrieve from + the end of the log. (optional, defaults to all) + + :returns: A string containing the text of the console log or an + empty string if the cloud does not support console logs. + :raises: OpenStackCloudException if an invalid server argument is given + or if something else unforseen happens + """ + + if not isinstance(server, dict): + server = self.get_server(server, bare=True) + + if not server: + raise OpenStackCloudException( + "Console log requested for invalid server") + + try: + return self._get_server_console_output(server['id'], length) + except OpenStackCloudBadRequest: + return "" + + def _get_server_console_output(self, server_id, length=None): + data = self._compute_client.post( + '/servers/{server_id}/action'.format(server_id=server_id), + json={'os-getConsoleOutput': {'length': length}}) + return self._get_and_munchify('output', data) + + def get_server( + self, name_or_id=None, filters=None, detailed=False, bare=False): + """Get a server by name or ID. + + :param name_or_id: Name or ID of the server. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param detailed: Whether or not to add detailed additional information. + Defaults to False. + :param bare: Whether to skip adding any additional information to the + server record. Defaults to False, meaning the addresses + dict will be populated as needed from neutron. Setting + to True implies detailed = False. + + :returns: A server ``munch.Munch`` or None if no matching server is + found. 
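+
+ A minimal sketch; the server name is illustrative::
+
+     server = cloud.get_server('app-server-1')
+     if server:
+         print(server['status'])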
+ + """ + searchfunc = functools.partial(self.search_servers, + detailed=detailed, bare=True) + server = _utils._get_entity(self, searchfunc, name_or_id, filters) + return self._expand_server(server, detailed, bare) + + def _expand_server(self, server, detailed, bare): + if bare or not server: + return server + elif detailed: + return meta.get_hostvars_from_server(self, server) + else: + return meta.add_server_interfaces(self, server) + + def get_server_by_id(self, id): + data = self._compute_client.get('/servers/{id}'.format(id=id)) + server = self._get_and_munchify('server', data) + return meta.add_server_interfaces(self, self._normalize_server(server)) + + def get_server_group(self, name_or_id=None, filters=None): + """Get a server group by name or ID. + + :param name_or_id: Name or ID of the server group. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'policy': 'affinity', + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A server groups dict or None if no matching server group + is found. + + """ + return _utils._get_entity(self, 'server_group', name_or_id, + filters) + + def get_image(self, name_or_id, filters=None): + """Get an image by name or ID. + + :param name_or_id: Name or ID of the image. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: An image ``munch.Munch`` or None if no matching image + is found + + """ + return _utils._get_entity(self, 'image', name_or_id, filters) + + def get_image_by_id(self, id): + """ Get a image by ID + + :param id: ID of the image. + :returns: An image ``munch.Munch``. + """ + data = self._image_client.get( + '/images/{id}'.format(id=id), + error_message="Error getting image with ID {id}".format(id=id) + ) + key = 'image' if 'image' in data else None + image = self._normalize_image( + self._get_and_munchify(key, data)) + + return image + + def download_image( + self, name_or_id, output_path=None, output_file=None, + chunk_size=1024): + """Download an image by name or ID + + :param str name_or_id: Name or ID of the image. + :param output_path: the output path to write the image to. Either this + or output_file must be specified + :param output_file: a file object (or file-like object) to write the + image data to. Only write() will be called on this object. Either + this or output_path must be specified + :param int chunk_size: size in bytes to read from the wire and buffer + at one time. 
Defaults to 1024 + + :raises: OpenStackCloudException in the event download_image is called + without exactly one of either output_path or output_file + :raises: OpenStackCloudResourceNotFound if no images are found matching + the name or ID provided + """ + if output_path is None and output_file is None: + raise OpenStackCloudException('No output specified, an output path' + ' or file object is necessary to ' + 'write the image data to') + elif output_path is not None and output_file is not None: + raise OpenStackCloudException('Both an output path and file object' + ' were provided, however only one ' + 'can be used at once') + + image = self.search_images(name_or_id) + if len(image) == 0: + raise OpenStackCloudResourceNotFound( + "No images with name or ID %s were found" % name_or_id, None) + if self._is_client_version('image', 2): + endpoint = '/images/{id}/file'.format(id=image[0]['id']) + else: + endpoint = '/images/{id}'.format(id=image[0]['id']) + + response = self._image_client.get(endpoint, stream=True) + + with _utils.shade_exceptions("Unable to download image"): + if output_path: + with open(output_path, 'wb') as fd: + for chunk in response.iter_content(chunk_size=chunk_size): + fd.write(chunk) + return + elif output_file: + for chunk in response.iter_content(chunk_size=chunk_size): + output_file.write(chunk) + return + + def get_floating_ip(self, id, filters=None): + """Get a floating IP by ID + + :param id: ID of the floating IP. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A floating IP ``munch.Munch`` or None if no matching floating + IP is found. + + """ + return _utils._get_entity(self, 'floating_ip', id, filters) + + def get_floating_ip_by_id(self, id): + """ Get a floating ip by ID + + :param id: ID of the floating ip. + :returns: A floating ip ``munch.Munch``. + """ + error_message = "Error getting floating ip with ID {id}".format(id=id) + + if self._use_neutron_floating(): + data = self._network_client.get( + '/floatingips/{id}'.format(id=id), + error_message=error_message) + return self._normalize_floating_ip( + self._get_and_munchify('floatingip', data)) + else: + data = self._compute_client.get( + '/os-floating-ips/{id}'.format(id=id), + error_message=error_message) + return self._normalize_floating_ip( + self._get_and_munchify('floating_ip', data)) + + def get_stack(self, name_or_id, filters=None): + """Get exactly one stack. + + :param name_or_id: Name or ID of the desired stack. + :param filters: a dict containing additional filters to use. e.g. + {'stack_status': 'CREATE_COMPLETE'} + + :returns: a ``munch.Munch`` containing the stack description + + :raises: ``OpenStackCloudException`` if something goes wrong during the + OpenStack API call or if multiple matches are found. + """ + + def _search_one_stack(name_or_id=None, filters=None): + # stack names are mandatory and enforced unique in the project + # so a StackGet can always be used for name or ID. 
+ try: + data = self._orchestration_client.get( + '/stacks/{name_or_id}'.format(name_or_id=name_or_id), + error_message="Error fetching stack") + stack = self._get_and_munchify('stack', data) + # Treat DELETE_COMPLETE stacks as a NotFound + if stack['stack_status'] == 'DELETE_COMPLETE': + return [] + except OpenStackCloudURINotFound: + return [] + stack = self._normalize_stack(stack) + return _utils._filter_list([stack], name_or_id, filters) + + return _utils._get_entity( + self, _search_one_stack, name_or_id, filters) + + def create_keypair(self, name, public_key=None): + """Create a new keypair. + + :param name: Name of the keypair being created. + :param public_key: Public key for the new keypair. + + :raises: OpenStackCloudException on operation error. + """ + keypair = { + 'name': name, + } + if public_key: + keypair['public_key'] = public_key + data = self._compute_client.post( + '/os-keypairs', + json={'keypair': keypair}, + error_message="Unable to create keypair {name}".format(name=name)) + return self._normalize_keypair( + self._get_and_munchify('keypair', data)) + + def delete_keypair(self, name): + """Delete a keypair. + + :param name: Name of the keypair to delete. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. + """ + try: + self._compute_client.delete('/os-keypairs/{name}'.format( + name=name)) + except OpenStackCloudURINotFound: + self.log.debug("Keypair %s not found for deleting", name) + return False + return True + + def create_network(self, name, shared=False, admin_state_up=True, + external=False, provider=None, project_id=None): + """Create a network. + + :param string name: Name of the network being created. + :param bool shared: Set the network as shared. + :param bool admin_state_up: Set the network administrative state to up. + :param bool external: Whether this network is externally accessible. + :param dict provider: A dict of network provider options. Example:: + + { 'network_type': 'vlan', 'segmentation_id': 'vlan1' } + :param string project_id: Specify the project ID this network + will be created on (admin-only). + + :returns: The network object. + :raises: OpenStackCloudException on operation error. + """ + network = { + 'name': name, + 'admin_state_up': admin_state_up, + } + + if shared: + network['shared'] = shared + + if project_id is not None: + network['tenant_id'] = project_id + + if provider: + if not isinstance(provider, dict): + raise OpenStackCloudException( + "Parameter 'provider' must be a dict") + # Only pass what we know + for attr in ('physical_network', 'network_type', + 'segmentation_id'): + if attr in provider: + arg = "provider:" + attr + network[arg] = provider[attr] + + # Do not send 'router:external' unless it is explicitly + # set since sending it *might* cause "Forbidden" errors in + # some situations. It defaults to False in the client, anyway. + if external: + network['router:external'] = True + + data = self._network_client.post("/networks.json", + json={'network': network}) + + # Reset cache so the new network is picked up + self._reset_network_caches() + return self._get_and_munchify('network', data) + + def delete_network(self, name_or_id): + """Delete a network. + + :param name_or_id: Name or ID of the network being deleted. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. 
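+
+ A short sketch; the network name is illustrative::
+
+     if not cloud.delete_network('my-app-net'):
+         print('network was already gone')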
+ """ + network = self.get_network(name_or_id) + if not network: + self.log.debug("Network %s not found for deleting", name_or_id) + return False + + self._network_client.delete( + "/networks/{network_id}.json".format(network_id=network['id'])) + + # Reset cache so the deleted network is removed + self._reset_network_caches() + + return True + + @_utils.valid_kwargs("name", "description", "shared", "default", + "project_id") + def create_qos_policy(self, **kwargs): + """Create a QoS policy. + + :param string name: Name of the QoS policy being created. + :param string description: Description of created QoS policy. + :param bool shared: Set the QoS policy as shared. + :param bool default: Set the QoS policy as default for project. + :param string project_id: Specify the project ID this QoS policy + will be created on (admin-only). + + :returns: The QoS policy object. + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + default = kwargs.pop("default", None) + if default is not None: + if self._has_neutron_extension('qos-default'): + kwargs['is_default'] = default + else: + self.log.debug("'qos-default' extension is not available on " + "target cloud") + + data = self._network_client.post("/qos/policies.json", + json={'policy': kwargs}) + return self._get_and_munchify('policy', data) + + @_utils.valid_kwargs("name", "description", "shared", "default", + "project_id") + def update_qos_policy(self, name_or_id, **kwargs): + """Update an existing QoS policy. + + :param string name_or_id: + Name or ID of the QoS policy to update. + :param string policy_name: + The new name of the QoS policy. + :param string description: + The new description of the QoS policy. + :param bool shared: + If True, the QoS policy will be set as shared. + :param bool default: + If True, the QoS policy will be set as default for project. + + :returns: The updated QoS policy object. + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + default = kwargs.pop("default", None) + if default is not None: + if self._has_neutron_extension('qos-default'): + kwargs['is_default'] = default + else: + self.log.debug("'qos-default' extension is not available on " + "target cloud") + + if not kwargs: + self.log.debug("No QoS policy data to update") + return + + curr_policy = self.get_qos_policy(name_or_id) + if not curr_policy: + raise OpenStackCloudException( + "QoS policy %s not found." % name_or_id) + + data = self._network_client.put( + "/qos/policies/{policy_id}.json".format( + policy_id=curr_policy['id']), + json={'policy': kwargs}) + return self._get_and_munchify('policy', data) + + def delete_qos_policy(self, name_or_id): + """Delete a QoS policy. + + :param name_or_id: Name or ID of the policy being deleted. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. 
+ """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + policy = self.get_qos_policy(name_or_id) + if not policy: + self.log.debug("QoS policy %s not found for deleting", name_or_id) + return False + + self._network_client.delete( + "/qos/policies/{policy_id}.json".format(policy_id=policy['id'])) + + return True + + def search_qos_bandwidth_limit_rules(self, policy_name_or_id, rule_id=None, + filters=None): + """Search QoS bandwidth limit rules + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rules should be associated. + :param string rule_id: ID of searched rule. + :param filters: a dict containing additional filters to use. e.g. + {'max_kbps': 1000} + + :returns: a list of ``munch.Munch`` containing the bandwidth limit + rule descriptions. + + :raises: ``OpenStackCloudException`` if something goes wrong during the + OpenStack API call. + """ + rules = self.list_qos_bandwidth_limit_rules(policy_name_or_id, filters) + return _utils._filter_list(rules, rule_id, filters) + + def list_qos_bandwidth_limit_rules(self, policy_name_or_id, filters=None): + """List all available QoS bandwith limit rules. + + :param string policy_name_or_id: Name or ID of the QoS policy from + from rules should be listed. + :param filters: (optional) dict of filter conditions to push down + :returns: A list of ``munch.Munch`` containing rule info. + + :raises: ``OpenStackCloudResourceNotFound`` if QoS policy will not be + found. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + + data = self._network_client.get( + "/qos/policies/{policy_id}/bandwidth_limit_rules.json".format( + policy_id=policy['id']), + params=filters, + error_message="Error fetching QoS bandwith limit rules from " + "{policy}".format(policy=policy['id'])) + return self._get_and_munchify('bandwidth_limit_rules', data) + + def get_qos_bandwidth_limit_rule(self, policy_name_or_id, rule_id): + """Get a QoS bandwidth limit rule by name or ID. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param rule_id: ID of the rule. + + :returns: A bandwidth limit rule ``munch.Munch`` or None if + no matching rule is found. + + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + data = self._network_client.get( + "/qos/policies/{policy_id}/bandwidth_limit_rules/{rule_id}.json". + format(policy_id=policy['id'], rule_id=rule_id), + error_message="Error fetching QoS bandwith limit rule {rule_id} " + "from {policy}".format(rule_id=rule_id, + policy=policy['id'])) + return self._get_and_munchify('bandwidth_limit_rule', data) + + @_utils.valid_kwargs("max_burst_kbps", "direction") + def create_qos_bandwidth_limit_rule(self, policy_name_or_id, max_kbps, + **kwargs): + """Create a QoS bandwidth limit rule. 
+
+ :param string policy_name_or_id: Name or ID of the QoS policy to which
+ rule should be associated.
+ :param int max_kbps: Maximum bandwidth limit value
+ (in kilobits per second).
+ :param int max_burst_kbps: Maximum burst value (in kilobits).
+ :param string direction: Ingress or egress.
+ The direction in which the traffic will be limited.
+
+ :returns: The QoS bandwidth limit rule.
+ :raises: OpenStackCloudException on operation error.
+ """
+ if not self._has_neutron_extension('qos'):
+ raise OpenStackCloudUnavailableExtension(
+ 'QoS extension is not available on target cloud')
+
+ policy = self.get_qos_policy(policy_name_or_id)
+ if not policy:
+ raise OpenStackCloudResourceNotFound(
+ "QoS policy {name_or_id} not Found.".format(
+ name_or_id=policy_name_or_id))
+
+ if kwargs.get("direction") is not None:
+ if not self._has_neutron_extension('qos-bw-limit-direction'):
+ kwargs.pop("direction")
+ self.log.debug(
+ "'qos-bw-limit-direction' extension is not available on "
+ "target cloud")
+
+ kwargs['max_kbps'] = max_kbps
+ data = self._network_client.post(
+ "/qos/policies/{policy_id}/bandwidth_limit_rules".format(
+ policy_id=policy['id']),
+ json={'bandwidth_limit_rule': kwargs})
+ return self._get_and_munchify('bandwidth_limit_rule', data)
+
+ @_utils.valid_kwargs("max_kbps", "max_burst_kbps", "direction")
+ def update_qos_bandwidth_limit_rule(self, policy_name_or_id, rule_id,
+ **kwargs):
+ """Update a QoS bandwidth limit rule.
+
+ :param string policy_name_or_id: Name or ID of the QoS policy to which
+ rule is associated.
+ :param string rule_id: ID of rule to update.
+ :param int max_kbps: Maximum bandwidth limit value
+ (in kilobits per second).
+ :param int max_burst_kbps: Maximum burst value (in kilobits).
+ :param string direction: Ingress or egress.
+ The direction in which the traffic will be limited.
+
+ :returns: The updated QoS bandwidth limit rule.
+ :raises: OpenStackCloudException on operation error.
+ """
+ if not self._has_neutron_extension('qos'):
+ raise OpenStackCloudUnavailableExtension(
+ 'QoS extension is not available on target cloud')
+
+ policy = self.get_qos_policy(policy_name_or_id)
+ if not policy:
+ raise OpenStackCloudResourceNotFound(
+ "QoS policy {name_or_id} not Found.".format(
+ name_or_id=policy_name_or_id))
+
+ if kwargs.get("direction") is not None:
+ if not self._has_neutron_extension('qos-bw-limit-direction'):
+ kwargs.pop("direction")
+ self.log.debug(
+ "'qos-bw-limit-direction' extension is not available on "
+ "target cloud")
+
+ if not kwargs:
+ self.log.debug("No QoS bandwidth limit rule data to update")
+ return
+
+ curr_rule = self.get_qos_bandwidth_limit_rule(
+ policy_name_or_id, rule_id)
+ if not curr_rule:
+ raise OpenStackCloudException(
+ "QoS bandwidth_limit_rule {rule_id} not found in policy "
+ "{policy_id}".format(rule_id=rule_id,
+ policy_id=policy['id']))
+
+ data = self._network_client.put(
+ "/qos/policies/{policy_id}/bandwidth_limit_rules/{rule_id}.json".
+ format(policy_id=policy['id'], rule_id=rule_id),
+ json={'bandwidth_limit_rule': kwargs})
+ return self._get_and_munchify('bandwidth_limit_rule', data)
+
+ def delete_qos_bandwidth_limit_rule(self, policy_name_or_id, rule_id):
+ """Delete a QoS bandwidth limit rule.
+
+ :param string policy_name_or_id: Name or ID of the QoS policy to which
+ rule is associated.
+ :param string rule_id: ID of rule to delete.
+
+ :raises: OpenStackCloudException on operation error.
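+
+ A sketch with made-up identifiers::
+
+     cloud.delete_qos_bandwidth_limit_rule(
+         'gold-tier-qos', 'a1b2c3d4-rule-id')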
+ """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + try: + self._network_client.delete( + "/qos/policies/{policy}/bandwidth_limit_rules/{rule}.json". + format(policy=policy['id'], rule=rule_id)) + except OpenStackCloudURINotFound: + self.log.debug( + "QoS bandwidth limit rule {rule_id} not found in policy " + "{policy_id}. Ignoring.".format(rule_id=rule_id, + policy_id=policy['id'])) + return False + + return True + + def search_qos_dscp_marking_rules(self, policy_name_or_id, rule_id=None, + filters=None): + """Search QoS DSCP marking rules + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rules should be associated. + :param string rule_id: ID of searched rule. + :param filters: a dict containing additional filters to use. e.g. + {'dscp_mark': 32} + + :returns: a list of ``munch.Munch`` containing the dscp marking + rule descriptions. + + :raises: ``OpenStackCloudException`` if something goes wrong during the + OpenStack API call. + """ + rules = self.list_qos_dscp_marking_rules(policy_name_or_id, filters) + return _utils._filter_list(rules, rule_id, filters) + + def list_qos_dscp_marking_rules(self, policy_name_or_id, filters=None): + """List all available QoS DSCP marking rules. + + :param string policy_name_or_id: Name or ID of the QoS policy from + from rules should be listed. + :param filters: (optional) dict of filter conditions to push down + :returns: A list of ``munch.Munch`` containing rule info. + + :raises: ``OpenStackCloudResourceNotFound`` if QoS policy will not be + found. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + + data = self._network_client.get( + "/qos/policies/{policy_id}/dscp_marking_rules.json".format( + policy_id=policy['id']), + params=filters, + error_message="Error fetching QoS DSCP marking rules from " + "{policy}".format(policy=policy['id'])) + return meta.get_and_munchify('dscp_marking_rules', data) + + def get_qos_dscp_marking_rule(self, policy_name_or_id, rule_id): + """Get a QoS DSCP marking rule by name or ID. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param rule_id: ID of the rule. + + :returns: A bandwidth limit rule ``munch.Munch`` or None if + no matching rule is found. + + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + data = self._network_client.get( + "/qos/policies/{policy_id}/dscp_marking_rules/{rule_id}.json". 
+ format(policy_id=policy['id'], rule_id=rule_id), + error_message="Error fetching QoS DSCP marking rule {rule_id} " + "from {policy}".format(rule_id=rule_id, + policy=policy['id'])) + return meta.get_and_munchify('dscp_marking_rule', data) + + def create_qos_dscp_marking_rule(self, policy_name_or_id, dscp_mark): + """Create a QoS DSCP marking rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param int dscp_mark: DSCP mark value + + :returns: The QoS DSCP marking rule. + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + body = { + 'dscp_mark': dscp_mark + } + data = self._network_client.post( + "/qos/policies/{policy_id}/dscp_marking_rules".format( + policy_id=policy['id']), + json={'dscp_marking_rule': body}) + return meta.get_and_munchify('dscp_marking_rule', data) + + @_utils.valid_kwargs("dscp_mark") + def update_qos_dscp_marking_rule(self, policy_name_or_id, rule_id, + **kwargs): + """Update a QoS DSCP marking rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + :param int dscp_mark: DSCP mark value + + :returns: The updated QoS bandwidth limit rule. + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + if not kwargs: + self.log.debug("No QoS DSCP marking rule data to update") + return + + curr_rule = self.get_qos_dscp_marking_rule( + policy_name_or_id, rule_id) + if not curr_rule: + raise OpenStackCloudException( + "QoS dscp_marking_rule {rule_id} not found in policy " + "{policy_id}".format(rule_id=rule_id, + policy_id=policy['id'])) + + data = self._network_client.put( + "/qos/policies/{policy_id}/dscp_marking_rules/{rule_id}.json". + format(policy_id=policy['id'], rule_id=rule_id), + json={'dscp_marking_rule': kwargs}) + return meta.get_and_munchify('dscp_marking_rule', data) + + def delete_qos_dscp_marking_rule(self, policy_name_or_id, rule_id): + """Delete a QoS DSCP marking rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + try: + self._network_client.delete( + "/qos/policies/{policy}/dscp_marking_rules/{rule}.json". + format(policy=policy['id'], rule=rule_id)) + except OpenStackCloudURINotFound: + self.log.debug( + "QoS DSCP marking rule {rule_id} not found in policy " + "{policy_id}. 
Ignoring.".format(rule_id=rule_id, + policy_id=policy['id'])) + return False + + return True + + def search_qos_minimum_bandwidth_rules(self, policy_name_or_id, + rule_id=None, filters=None): + """Search QoS minimum bandwidth rules + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rules should be associated. + :param string rule_id: ID of searched rule. + :param filters: a dict containing additional filters to use. e.g. + {'min_kbps': 1000} + + :returns: a list of ``munch.Munch`` containing the bandwidth limit + rule descriptions. + + :raises: ``OpenStackCloudException`` if something goes wrong during the + OpenStack API call. + """ + rules = self.list_qos_minimum_bandwidth_rules( + policy_name_or_id, filters) + return _utils._filter_list(rules, rule_id, filters) + + def list_qos_minimum_bandwidth_rules(self, policy_name_or_id, + filters=None): + """List all available QoS minimum bandwith rules. + + :param string policy_name_or_id: Name or ID of the QoS policy from + from rules should be listed. + :param filters: (optional) dict of filter conditions to push down + :returns: A list of ``munch.Munch`` containing rule info. + + :raises: ``OpenStackCloudResourceNotFound`` if QoS policy will not be + found. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + + data = self._network_client.get( + "/qos/policies/{policy_id}/minimum_bandwidth_rules.json".format( + policy_id=policy['id']), + params=filters, + error_message="Error fetching QoS minimum bandwith rules from " + "{policy}".format(policy=policy['id'])) + return self._get_and_munchify('minimum_bandwidth_rules', data) + + def get_qos_minimum_bandwidth_rule(self, policy_name_or_id, rule_id): + """Get a QoS minimum bandwidth rule by name or ID. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param rule_id: ID of the rule. + + :returns: A bandwidth limit rule ``munch.Munch`` or None if + no matching rule is found. + + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + data = self._network_client.get( + "/qos/policies/{policy_id}/minimum_bandwidth_rules/{rule_id}.json". + format(policy_id=policy['id'], rule_id=rule_id), + error_message="Error fetching QoS minimum_bandwith rule {rule_id} " + "from {policy}".format(rule_id=rule_id, + policy=policy['id'])) + return self._get_and_munchify('minimum_bandwidth_rule', data) + + @_utils.valid_kwargs("direction") + def create_qos_minimum_bandwidth_rule(self, policy_name_or_id, min_kbps, + **kwargs): + """Create a QoS minimum bandwidth limit rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param int min_kbps: Minimum bandwidth value (in kilobits per second). + :param string direction: Ingress or egress. + The direction in which the traffic will be available. 
+ + :returns: The QoS minimum bandwidth rule. + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + kwargs['min_kbps'] = min_kbps + data = self._network_client.post( + "/qos/policies/{policy_id}/minimum_bandwidth_rules".format( + policy_id=policy['id']), + json={'minimum_bandwidth_rule': kwargs}) + return self._get_and_munchify('minimum_bandwidth_rule', data) + + @_utils.valid_kwargs("min_kbps", "direction") + def update_qos_minimum_bandwidth_rule(self, policy_name_or_id, rule_id, + **kwargs): + """Update a QoS minimum bandwidth rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + :param int min_kbps: Minimum bandwidth value (in kilobits per second). + :param string direction: Ingress or egress. + The direction in which the traffic will be available. + + :returns: The updated QoS minimum bandwidth rule. + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + if not kwargs: + self.log.debug("No QoS minimum bandwidth rule data to update") + return + + curr_rule = self.get_qos_minimum_bandwidth_rule( + policy_name_or_id, rule_id) + if not curr_rule: + raise OpenStackCloudException( + "QoS minimum_bandwidth_rule {rule_id} not found in policy " + "{policy_id}".format(rule_id=rule_id, + policy_id=policy['id'])) + + data = self._network_client.put( + "/qos/policies/{policy_id}/minimum_bandwidth_rules/{rule_id}.json". + format(policy_id=policy['id'], rule_id=rule_id), + json={'minimum_bandwidth_rule': kwargs}) + return self._get_and_munchify('minimum_bandwidth_rule', data) + + def delete_qos_minimum_bandwidth_rule(self, policy_name_or_id, rule_id): + """Delete a QoS minimum bandwidth rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to delete. + + :raises: OpenStackCloudException on operation error. + """ + if not self._has_neutron_extension('qos'): + raise OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud') + + policy = self.get_qos_policy(policy_name_or_id) + if not policy: + raise OpenStackCloudResourceNotFound( + "QoS policy {name_or_id} not Found.".format( + name_or_id=policy_name_or_id)) + + try: + self._network_client.delete( + "/qos/policies/{policy}/minimum_bandwidth_rules/{rule}.json". + format(policy=policy['id'], rule=rule_id)) + except OpenStackCloudURINotFound: + self.log.debug( + "QoS minimum bandwidth rule {rule_id} not found in policy " + "{policy_id}. Ignoring.".format(rule_id=rule_id, + policy_id=policy['id'])) + return False + + return True + + def _build_external_gateway_info(self, ext_gateway_net_id, enable_snat, + ext_fixed_ips): + info = {} + if ext_gateway_net_id: + info['network_id'] = ext_gateway_net_id + # Only send enable_snat if it is different from the Neutron + # default of True. 
Sending it can cause a policy violation error + # on some clouds. + if enable_snat is not None and not enable_snat: + info['enable_snat'] = False + if ext_fixed_ips: + info['external_fixed_ips'] = ext_fixed_ips + if info: + return info + return None + + def add_router_interface(self, router, subnet_id=None, port_id=None): + """Attach a subnet to an internal router interface. + + Either a subnet ID or port ID must be specified for the internal + interface. Supplying both will result in an error. + + :param dict router: The dict object of the router being changed + :param string subnet_id: The ID of the subnet to use for the interface + :param string port_id: The ID of the port to use for the interface + + :returns: A ``munch.Munch`` with the router ID (ID), + subnet ID (subnet_id), port ID (port_id) and tenant ID + (tenant_id). + + :raises: OpenStackCloudException on operation error. + """ + json_body = {} + if subnet_id: + json_body['subnet_id'] = subnet_id + if port_id: + json_body['port_id'] = port_id + + return self._network_client.put( + "/routers/{router_id}/add_router_interface.json".format( + router_id=router['id']), + json=json_body, + error_message="Error attaching interface to router {0}".format( + router['id'])) + + def remove_router_interface(self, router, subnet_id=None, port_id=None): + """Detach a subnet from an internal router interface. + + At least one of subnet_id or port_id must be supplied. + + If you specify both subnet and port ID, the subnet ID must + correspond to the subnet ID of the first IP address on the port + specified by the port ID. Otherwise an error occurs. + + :param dict router: The dict object of the router being changed + :param string subnet_id: The ID of the subnet to use for the interface + :param string port_id: The ID of the port to use for the interface + + :returns: None on success + + :raises: OpenStackCloudException on operation error. + """ + json_body = {} + if subnet_id: + json_body['subnet_id'] = subnet_id + if port_id: + json_body['port_id'] = port_id + + if not json_body: + raise ValueError( + "At least one of subnet_id or port_id must be supplied.") + + self._network_client.put( + "/routers/{router_id}/remove_router_interface.json".format( + router_id=router['id']), + json=json_body, + error_message="Error detaching interface from router {0}".format( + router['id'])) + + def list_router_interfaces(self, router, interface_type=None): + """List all interfaces for a router. + + :param dict router: A router dict object. + :param string interface_type: One of None, "internal", or "external". + Controls whether all, internal interfaces or external interfaces + are returned. + + :returns: A list of port ``munch.Munch`` objects. + """ + # Find only router interface and gateway ports, ignore L3 HA ports etc. + router_interfaces = self.search_ports(filters={ + 'device_id': router['id'], + 'device_owner': 'network:router_interface'}) + router_gateways = self.search_ports(filters={ + 'device_id': router['id'], + 'device_owner': 'network:router_gateway'}) + ports = router_interfaces + router_gateways + + if interface_type: + if interface_type == 'internal': + return router_interfaces + if interface_type == 'external': + return router_gateways + return ports + + def create_router(self, name=None, admin_state_up=True, + ext_gateway_net_id=None, enable_snat=None, + ext_fixed_ips=None, project_id=None): + """Create a logical router. + + :param string name: The router name. + :param bool admin_state_up: The administrative state of the router. 
+ :param string ext_gateway_net_id: Network ID for the external gateway. + :param bool enable_snat: Enable Source NAT (SNAT) attribute. + :param ext_fixed_ips: + List of dictionaries of desired IP and/or subnet on the + external network. Example:: + + [ + { + "subnet_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", + "ip_address": "192.168.10.2" + } + ] + :param string project_id: Project ID for the router. + + :returns: The router object. + :raises: OpenStackCloudException on operation error. + """ + router = { + 'admin_state_up': admin_state_up + } + if project_id is not None: + router['tenant_id'] = project_id + if name: + router['name'] = name + ext_gw_info = self._build_external_gateway_info( + ext_gateway_net_id, enable_snat, ext_fixed_ips + ) + if ext_gw_info: + router['external_gateway_info'] = ext_gw_info + + data = self._network_client.post( + "/routers.json", json={"router": router}, + error_message="Error creating router {0}".format(name)) + return self._get_and_munchify('router', data) + + def update_router(self, name_or_id, name=None, admin_state_up=None, + ext_gateway_net_id=None, enable_snat=None, + ext_fixed_ips=None): + """Update an existing logical router. + + :param string name_or_id: The name or UUID of the router to update. + :param string name: The new router name. + :param bool admin_state_up: The administrative state of the router. + :param string ext_gateway_net_id: + The network ID for the external gateway. + :param bool enable_snat: Enable Source NAT (SNAT) attribute. + :param ext_fixed_ips: + List of dictionaries of desired IP and/or subnet on the + external network. Example:: + + [ + { + "subnet_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", + "ip_address": "192.168.10.2" + } + ] + + :returns: The router object. + :raises: OpenStackCloudException on operation error. + """ + router = {} + if name: + router['name'] = name + if admin_state_up is not None: + router['admin_state_up'] = admin_state_up + ext_gw_info = self._build_external_gateway_info( + ext_gateway_net_id, enable_snat, ext_fixed_ips + ) + if ext_gw_info: + router['external_gateway_info'] = ext_gw_info + + if not router: + self.log.debug("No router data to update") + return + + curr_router = self.get_router(name_or_id) + if not curr_router: + raise OpenStackCloudException( + "Router %s not found." % name_or_id) + + data = self._network_client.put( + "/routers/{router_id}.json".format(router_id=curr_router['id']), + json={"router": router}, + error_message="Error updating router {0}".format(name_or_id)) + return self._get_and_munchify('router', data) + + def delete_router(self, name_or_id): + """Delete a logical router. + + If a name, instead of a unique UUID, is supplied, it is possible + that we could find more than one matching router since names are + not required to be unique. An error will be raised in this case. + + :param name_or_id: Name or ID of the router being deleted. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. 
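+
+        A minimal usage sketch (illustrative only; assumes ``cloud`` is a
+        connected ``OpenStackCloud`` instance and ``ext-net`` is an existing
+        external network)::
+
+            network = cloud.get_network('ext-net')
+            router = cloud.create_router(
+                name='example-router', ext_gateway_net_id=network['id'])
+            cloud.delete_router(router['id'])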
+ """ + router = self.get_router(name_or_id) + if not router: + self.log.debug("Router %s not found for deleting", name_or_id) + return False + + self._network_client.delete( + "/routers/{router_id}.json".format(router_id=router['id']), + error_message="Error deleting router {0}".format(name_or_id)) + + return True + + def get_image_exclude(self, name_or_id, exclude): + for image in self.search_images(name_or_id): + if exclude: + if exclude not in image.name: + return image + else: + return image + return None + + def get_image_name(self, image_id, exclude=None): + image = self.get_image_exclude(image_id, exclude) + if image: + return image.name + return None + + def get_image_id(self, image_name, exclude=None): + image = self.get_image_exclude(image_name, exclude) + if image: + return image.id + return None + + def create_image_snapshot( + self, name, server, wait=False, timeout=3600, **metadata): + """Create an image by snapshotting an existing server. + + ..note:: + On most clouds this is a cold snapshot - meaning that the server + in question will be shutdown before taking the snapshot. It is + possible that it's a live snapshot - but there is no way to know + as a user, so caveat emptor. + + :param name: Name of the image to be created + :param server: Server name or ID or dict representing the server + to be snapshotted + :param wait: If true, waits for image to be created. + :param timeout: Seconds to wait for image creation. None is forever. + :param metadata: Metadata to give newly-created image entity + + :returns: A ``munch.Munch`` of the Image object + + :raises: OpenStackCloudException if there are problems uploading + """ + if not isinstance(server, dict): + server_obj = self.get_server(server, bare=True) + if not server_obj: + raise OpenStackCloudException( + "Server {server} could not be found and therefore" + " could not be snapshotted.".format(server=server)) + server = server_obj + response = self._compute_client.post( + '/servers/{server_id}/action'.format(server_id=server['id']), + json={ + "createImage": { + "name": name, + "metadata": metadata, + } + }) + # You won't believe it - wait, who am I kidding - of course you will! + # Nova returns the URL of the image created in the Location + # header of the response. (what?) But, even better, the URL it responds + # with has a very good chance of being wrong (it is built from + # nova.conf values that point to internal API servers in any cloud + # large enough to have both public and internal endpoints. + # However, nobody has ever noticed this because novaclient doesn't + # actually use that URL - it extracts the id from the end of + # the url, then returns the id. This leads us to question: + # a) why Nova is going to return a value in a header + # b) why it's going to return data that probably broken + # c) indeed the very nature of the fabric of reality + # Although it fills us with existential dread, we have no choice but + # to follow suit like a lemming being forced over a cliff by evil + # producers from Disney. + # TODO(mordred) Update this to consume json microversion when it is + # available. 
+ # blueprint:remove-create-image-location-header-response + image_id = response.headers['Location'].rsplit('/', 1)[1] + self.list_images.invalidate(self) + image = self.get_image(image_id) + + if not wait: + return image + return self.wait_for_image(image, timeout=timeout) + + def wait_for_image(self, image, timeout=3600): + image_id = image['id'] + for count in _utils._iterate_timeout( + timeout, "Timeout waiting for image to snapshot"): + self.list_images.invalidate(self) + image = self.get_image(image_id) + if not image: + continue + if image['status'] == 'active': + return image + elif image['status'] == 'error': + raise OpenStackCloudException( + 'Image {image} hit error state'.format(image=image_id)) + + def delete_image( + self, name_or_id, wait=False, timeout=3600, delete_objects=True): + """Delete an existing image. + + :param name_or_id: Name of the image to be deleted. + :param wait: If True, waits for image to be deleted. + :param timeout: Seconds to wait for image deletion. None is forever. + :param delete_objects: If True, also deletes uploaded swift objects. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException if there are problems deleting. + """ + image = self.get_image(name_or_id) + if not image: + return False + self._image_client.delete( + '/images/{id}'.format(id=image.id), + error_message="Error in deleting image") + self.list_images.invalidate(self) + + # Task API means an image was uploaded to swift + if self.image_api_use_tasks and IMAGE_OBJECT_KEY in image: + (container, objname) = image[IMAGE_OBJECT_KEY].split('/', 1) + self.delete_object(container=container, name=objname) + + if wait: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the image to be deleted."): + self._get_cache(None).invalidate() + if self.get_image(image.id) is None: + break + return True + + def _get_name_and_filename(self, name): + # See if name points to an existing file + if os.path.exists(name): + # Neat. Easy enough + return (os.path.splitext(os.path.basename(name))[0], name) + + # Try appending the disk format + name_with_ext = '.'.join(( + name, self.cloud_config.config['image_format'])) + if os.path.exists(name_with_ext): + return (os.path.basename(name), name_with_ext) + + raise OpenStackCloudException( + 'No filename parameter was given to create_image,' + ' and {name} was not the path to an existing file.' + ' Please provide either a path to an existing file' + ' or a name and a filename'.format(name=name)) + + def _hashes_up_to_date(self, md5, sha256, md5_key, sha256_key): + '''Compare md5 and sha256 hashes for being up to date + + md5 and sha256 are the current values. + md5_key and sha256_key are the previous values. + ''' + up_to_date = False + if md5 and md5_key == md5: + up_to_date = True + if sha256 and sha256_key == sha256: + up_to_date = True + if md5 and md5_key != md5: + up_to_date = False + if sha256 and sha256_key != sha256: + up_to_date = False + return up_to_date + + def create_image( + self, name, filename=None, container='images', + md5=None, sha256=None, + disk_format=None, container_format=None, + disable_vendor_agent=True, + wait=False, timeout=3600, + allow_duplicates=False, meta=None, volume=None, **kwargs): + """Upload an image. + + :param str name: Name of the image to create. If it is a pathname + of an image, the name will be constructed from the + extensionless basename of the path. + :param str filename: The path to the file to upload, if needed. 
+            (optional, defaults to None)
+        :param str container: Name of the container in swift where images
+                              should be uploaded for import if the cloud
+                              requires such a thing. (optional, defaults to
+                              'images')
+        :param str md5: md5 sum of the image file. If not given, an md5 will
+            be calculated.
+        :param str sha256: sha256 sum of the image file. If not given, a
+            sha256 will be calculated.
+        :param str disk_format: The disk format the image is in. (optional,
+                                defaults to the os-client-config config value
+                                for this cloud)
+        :param str container_format: The container format the image is in.
+                                     (optional, defaults to the
+                                     os-client-config config value for this
+                                     cloud)
+        :param bool disable_vendor_agent: Whether or not to append metadata
+                                          flags to the image to inform the
+                                          cloud in question to not expect a
+                                          vendor agent to be running.
+                                          (optional, defaults to True)
+        :param bool wait: If true, waits for image to be created. Defaults to
+                          true - however, be aware that one of the upload
+                          methods is always synchronous.
+        :param timeout: Seconds to wait for image creation. None is forever.
+        :param allow_duplicates: If true, skips checks that enforce unique
+                                 image name. (optional, defaults to False)
+        :param meta: A dict of key/value pairs to use for metadata that
+                     bypasses automatic type conversion.
+        :param volume: Name or ID or volume object of a volume to create an
+                       image from. Mutually exclusive with filename.
+                       (optional, defaults to None)
+
+        Additional kwargs will be passed to the image creation as additional
+        metadata for the image and will have all values converted to string
+        except for min_disk, min_ram, size and virtual_size which will be
+        converted to int.
+
+        If you are sure you have all of your data types correct or have an
+        advanced need to be explicit, use meta. If you are just a normal
+        consumer, using kwargs is likely the right choice.
+
+        If a value is in meta and kwargs, meta wins.
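+
+        A minimal usage sketch (illustrative only; assumes ``cloud`` is a
+        connected ``OpenStackCloud`` instance and that a local qcow2 file
+        named ``example.qcow2`` exists)::
+
+            image = cloud.create_image(
+                'example-image', filename='example.qcow2',
+                disk_format='qcow2', container_format='bare', wait=True)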
+
+        :returns: A ``munch.Munch`` of the Image object
+
+        :raises: OpenStackCloudException if there are problems uploading
+        """
+        if not meta:
+            meta = {}
+
+        if not disk_format:
+            disk_format = self.cloud_config.config['image_format']
+        if not container_format:
+            # https://docs.openstack.org/image-guide/image-formats.html
+            container_format = 'bare'
+
+        if volume:
+            if 'id' in volume:
+                volume_id = volume['id']
+            else:
+                volume_obj = self.get_volume(volume)
+                if not volume_obj:
+                    raise OpenStackCloudException(
+                        "Volume {volume} given to create_image could"
+                        " not be found".format(volume=volume))
+                volume_id = volume_obj['id']
+            return self._upload_image_from_volume(
+                name=name, volume_id=volume_id,
+                allow_duplicates=allow_duplicates,
+                container_format=container_format, disk_format=disk_format,
+                wait=wait, timeout=timeout)
+
+        # If there is no filename, see if name is actually the filename
+        if not filename:
+            name, filename = self._get_name_and_filename(name)
+        if not (md5 or sha256):
+            (md5, sha256) = self._get_file_hashes(filename)
+        if allow_duplicates:
+            current_image = None
+        else:
+            current_image = self.get_image(name)
+            if current_image:
+                md5_key = current_image.get(IMAGE_MD5_KEY, '')
+                sha256_key = current_image.get(IMAGE_SHA256_KEY, '')
+                up_to_date = self._hashes_up_to_date(
+                    md5=md5, sha256=sha256,
+                    md5_key=md5_key, sha256_key=sha256_key)
+                if up_to_date:
+                    self.log.debug(
+                        "image %(name)s exists and is up to date",
+                        {'name': name})
+                    return current_image
+        kwargs[IMAGE_MD5_KEY] = md5 or ''
+        kwargs[IMAGE_SHA256_KEY] = sha256 or ''
+        kwargs[IMAGE_OBJECT_KEY] = '/'.join([container, name])
+
+        if disable_vendor_agent:
+            kwargs.update(self.cloud_config.config['disable_vendor_agent'])
+
+        # We can never have nice things. Glance v1 took "is_public" as a
+        # boolean. Glance v2 takes "visibility". If the user gives us
+        # is_public, we know what they mean. If they give us visibility, they
+        # know what they mean.
+ if self._is_client_version('image', 2): + if 'is_public' in kwargs: + is_public = kwargs.pop('is_public') + if is_public: + kwargs['visibility'] = 'public' + else: + kwargs['visibility'] = 'private' + + try: + # This makes me want to die inside + if self.image_api_use_tasks: + return self._upload_image_task( + name, filename, container, + current_image=current_image, + wait=wait, timeout=timeout, + md5=md5, sha256=sha256, + meta=meta, **kwargs) + else: + # If a user used the v1 calling format, they will have + # passed a dict called properties along + properties = kwargs.pop('properties', {}) + kwargs.update(properties) + image_kwargs = dict(properties=kwargs) + if disk_format: + image_kwargs['disk_format'] = disk_format + if container_format: + image_kwargs['container_format'] = container_format + + return self._upload_image_put( + name, filename, meta=meta, + wait=wait, timeout=timeout, + **image_kwargs) + except OpenStackCloudException: + self.log.debug("Image creation failed", exc_info=True) + raise + except Exception as e: + raise OpenStackCloudException( + "Image creation failed: {message}".format(message=str(e))) + + def _make_v2_image_params(self, meta, properties): + ret = {} + for k, v in iter(properties.items()): + if k in ('min_disk', 'min_ram', 'size', 'virtual_size'): + ret[k] = int(v) + elif k == 'protected': + ret[k] = v + else: + if v is None: + ret[k] = None + else: + ret[k] = str(v) + ret.update(meta) + return ret + + def _upload_image_from_volume( + self, name, volume_id, allow_duplicates, + container_format, disk_format, wait, timeout): + data = self._volume_client.post( + '/volumes/{id}/action'.format(id=volume_id), + json={ + 'os-volume_upload_image': { + 'force': allow_duplicates, + 'image_name': name, + 'container_format': container_format, + 'disk_format': disk_format}}) + response = self._get_and_munchify('os-volume_upload_image', data) + + if not wait: + return self.get_image(response['image_id']) + try: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the image to finish."): + image_obj = self.get_image(response['image_id']) + if image_obj and image_obj.status not in ('queued', 'saving'): + return image_obj + except OpenStackCloudTimeout: + self.log.debug( + "Timeout waiting for image to become ready. 
Deleting.") + self.delete_image(response['image_id'], wait=True) + raise + + def _upload_image_put_v2(self, name, image_data, meta, **image_kwargs): + properties = image_kwargs.pop('properties', {}) + + image_kwargs.update(self._make_v2_image_params(meta, properties)) + image_kwargs['name'] = name + + data = self._image_client.post('/images', json=image_kwargs) + image = self._get_and_munchify(key=None, data=data) + + try: + self._image_client.put( + '/images/{id}/file'.format(id=image.id), + headers={'Content-Type': 'application/octet-stream'}, + data=image_data) + + except Exception: + self.log.debug("Deleting failed upload of image %s", name) + try: + self._image_client.delete( + '/images/{id}'.format(id=image.id)) + except OpenStackCloudHTTPError: + # We're just trying to clean up - if it doesn't work - shrug + self.log.debug( + "Failed deleting image after we failed uploading it.", + exc_info=True) + raise + + return image + + def _upload_image_put_v1( + self, name, image_data, meta, **image_kwargs): + + image_kwargs['properties'].update(meta) + image_kwargs['name'] = name + + image = self._image_client.post('/images', json=image_kwargs) + checksum = image_kwargs['properties'].get(IMAGE_MD5_KEY, '') + + try: + # Let us all take a brief moment to be grateful that this + # is not actually how OpenStack APIs work anymore + headers = { + 'x-glance-registry-purge-props': 'false', + } + if checksum: + headers['x-image-meta-checksum'] = checksum + + image = self._image_client.put( + '/images/{id}'.format(id=image.id), + headers=headers, data=image_data) + + except OpenStackCloudHTTPError: + self.log.debug("Deleting failed upload of image %s", name) + try: + self._image_client.delete('/images/{id}'.format(id=image.id)) + except OpenStackCloudHTTPError: + # We're just trying to clean up - if it doesn't work - shrug + self.log.debug( + "Failed deleting image after we failed uploading it.", + exc_info=True) + raise + return image + + def _upload_image_put( + self, name, filename, meta, wait, timeout, **image_kwargs): + image_data = open(filename, 'rb') + # Because reasons and crying bunnies + if self._is_client_version('image', 2): + image = self._upload_image_put_v2( + name, image_data, meta, **image_kwargs) + else: + image = self._upload_image_put_v1( + name, image_data, meta, **image_kwargs) + self._get_cache(None).invalidate() + if not wait: + return image + try: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the image to finish."): + image_obj = self.get_image(image.id) + if image_obj and image_obj.status not in ('queued', 'saving'): + return image_obj + except OpenStackCloudTimeout: + self.log.debug( + "Timeout waiting for image to become ready. 
Deleting.") + self.delete_image(image.id, wait=True) + raise + + def _upload_image_task( + self, name, filename, container, current_image, + wait, timeout, meta, md5=None, sha256=None, **image_kwargs): + + parameters = image_kwargs.pop('parameters', {}) + image_kwargs.update(parameters) + + self.create_object( + container, name, filename, + md5=md5, sha256=sha256, + **{'content-type': 'application/octet-stream'}) + if not current_image: + current_image = self.get_image(name) + # TODO(mordred): Can we do something similar to what nodepool does + # using glance properties to not delete then upload but instead make a + # new "good" image and then mark the old one as "bad" + task_args = dict( + type='import', input=dict( + import_from='{container}/{name}'.format( + container=container, name=name), + image_properties=dict(name=name))) + data = self._image_client.post('/tasks', json=task_args) + glance_task = self._get_and_munchify(key=None, data=data) + self.list_images.invalidate(self) + if wait: + start = time.time() + image_id = None + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the image to import."): + try: + if image_id is None: + data = self._image_client.get( + '/tasks/{id}'.format(id=glance_task.id)) + status = self._get_and_munchify('images', data=data) + except OpenStackCloudHTTPError as e: + if e.response.status_code == 503: + # Clear the exception so that it doesn't linger + # and get reported as an Inner Exception later + _utils._exc_clear() + # Intermittent failure - catch and try again + continue + raise + + if status.status == 'success': + image_id = status.result['image_id'] + try: + image = self.get_image(image_id) + except OpenStackCloudHTTPError as e: + if e.response.status_code == 503: + # Clear the exception so that it doesn't linger + # and get reported as an Inner Exception later + _utils._exc_clear() + # Intermittent failure - catch and try again + continue + raise + if image is None: + continue + self.update_image_properties( + image=image, meta=meta, **image_kwargs) + self.log.debug( + "Image Task %s imported %s in %s", + glance_task.id, image_id, (time.time() - start)) + return self.get_image(image_id) + if status.status == 'failure': + if status.message == IMAGE_ERROR_396: + glance_task = self._image_client.post( + '/tasks', data=task_args) + self.list_images.invalidate(self) + else: + raise OpenStackCloudException( + "Image creation failed: {message}".format( + message=status.message), + extra_data=status) + else: + return glance_task + + def update_image_properties( + self, image=None, name_or_id=None, meta=None, **properties): + if image is None: + image = self.get_image(name_or_id) + + if not meta: + meta = {} + + img_props = {} + for k, v in iter(properties.items()): + if v and k in ['ramdisk', 'kernel']: + v = self.get_image_id(v) + k = '{0}_id'.format(k) + img_props[k] = v + + # This makes me want to die inside + if self._is_client_version('image', 2): + return self._update_image_properties_v2(image, meta, img_props) + else: + return self._update_image_properties_v1(image, meta, img_props) + + def _update_image_properties_v2(self, image, meta, properties): + img_props = image.properties.copy() + for k, v in iter(self._make_v2_image_params(meta, properties).items()): + if image.get(k, None) != v: + img_props[k] = v + if not img_props: + return False + headers = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch'} + patch = sorted(list(jsonpatch.JsonPatch.from_diff( + image.properties, img_props)), 
key=operator.itemgetter('value')) + + # No need to fire an API call if there is an empty patch + if patch: + self._image_client.patch( + '/images/{id}'.format(id=image.id), + headers=headers, + data=json.dumps(patch)) + + self.list_images.invalidate(self) + return True + + def _update_image_properties_v1(self, image, meta, properties): + properties.update(meta) + img_props = {} + for k, v in iter(properties.items()): + if image.properties.get(k, None) != v: + img_props['x-image-meta-{key}'.format(key=k)] = v + if not img_props: + return False + self._image_client.put( + '/images/{id}'.format(image.id), headers=img_props) + self.list_images.invalidate(self) + return True + + def create_volume( + self, size, + wait=True, timeout=None, image=None, **kwargs): + """Create a volume. + + :param size: Size, in GB of the volume to create. + :param name: (optional) Name for the volume. + :param description: (optional) Name for the volume. + :param wait: If true, waits for volume to be created. + :param timeout: Seconds to wait for volume creation. None is forever. + :param image: (optional) Image name, ID or object from which to create + the volume + :param kwargs: Keyword arguments as expected for cinder client. + + :returns: The created volume object. + + :raises: OpenStackCloudTimeout if wait time exceeded. + :raises: OpenStackCloudException on operation error. + """ + if image: + image_obj = self.get_image(image) + if not image_obj: + raise OpenStackCloudException( + "Image {image} was requested as the basis for a new" + " volume, but was not found on the cloud".format( + image=image)) + kwargs['imageRef'] = image_obj['id'] + kwargs = self._get_volume_kwargs(kwargs) + kwargs['size'] = size + payload = dict(volume=kwargs) + if 'scheduler_hints' in kwargs: + payload['OS-SCH-HNT:scheduler_hints'] = kwargs.pop( + 'scheduler_hints', None) + data = self._volume_client.post( + '/volumes', + json=dict(payload), + error_message='Error in creating volume') + volume = self._get_and_munchify('volume', data) + self.list_volumes.invalidate(self) + + if volume['status'] == 'error': + raise OpenStackCloudException("Error in creating volume") + + if wait: + vol_id = volume['id'] + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the volume to be available."): + volume = self.get_volume(vol_id) + + if not volume: + continue + + if volume['status'] == 'available': + return volume + + if volume['status'] == 'error': + raise OpenStackCloudException("Error in creating volume") + + return self._normalize_volume(volume) + + def delete_volume(self, name_or_id=None, wait=True, timeout=None, + force=False): + """Delete a volume. + + :param name_or_id: Name or unique ID of the volume. + :param wait: If true, waits for volume to be deleted. + :param timeout: Seconds to wait for volume deletion. None is forever. + :param force: Force delete volume even if the volume is in deleting + or error_deleting state. + + :raises: OpenStackCloudTimeout if wait time exceeded. + :raises: OpenStackCloudException on operation error. 
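+
+        A minimal usage sketch (illustrative only; assumes ``cloud`` is a
+        connected ``OpenStackCloud`` instance)::
+
+            volume = cloud.create_volume(size=10, wait=True)
+            cloud.delete_volume(volume['id'], wait=True)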
+ """ + + self.list_volumes.invalidate(self) + volume = self.get_volume(name_or_id) + + if not volume: + self.log.debug( + "Volume %(name_or_id)s does not exist", + {'name_or_id': name_or_id}, + exc_info=True) + return False + + with _utils.shade_exceptions("Error in deleting volume"): + try: + if force: + self._volume_client.post( + 'volumes/{id}/action'.format(id=volume['id']), + json={'os-force_delete': None}) + else: + self._volume_client.delete( + 'volumes/{id}'.format(id=volume['id'])) + except OpenStackCloudURINotFound: + self.log.debug( + "Volume {id} not found when deleting. Ignoring.".format( + id=volume['id'])) + return False + + self.list_volumes.invalidate(self) + if wait: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the volume to be deleted."): + + if not self.get_volume(volume['id']): + break + + return True + + def get_volumes(self, server, cache=True): + volumes = [] + for volume in self.list_volumes(cache=cache): + for attach in volume['attachments']: + if attach['server_id'] == server['id']: + volumes.append(volume) + return volumes + + def get_volume_id(self, name_or_id): + volume = self.get_volume(name_or_id) + if volume: + return volume['id'] + return None + + def volume_exists(self, name_or_id): + return self.get_volume(name_or_id) is not None + + def get_volume_attach_device(self, volume, server_id): + """Return the device name a volume is attached to for a server. + + This can also be used to verify if a volume is attached to + a particular server. + + :param volume: Volume dict + :param server_id: ID of server to check + + :returns: Device name if attached, None if volume is not attached. + """ + for attach in volume['attachments']: + if server_id == attach['server_id']: + return attach['device'] + return None + + def detach_volume(self, server, volume, wait=True, timeout=None): + """Detach a volume from a server. + + :param server: The server dict to detach from. + :param volume: The volume dict to detach. + :param wait: If true, waits for volume to be detached. + :param timeout: Seconds to wait for volume detachment. None is forever. + + :raises: OpenStackCloudTimeout if wait time exceeded. + :raises: OpenStackCloudException on operation error. + """ + + self._compute_client.delete( + '/servers/{server_id}/os-volume_attachments/{volume_id}'.format( + server_id=server['id'], volume_id=volume['id']), + error_message=( + "Error detaching volume {volume} from server {server}".format( + volume=volume['id'], server=server['id']))) + + if wait: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for volume %s to detach." % volume['id']): + try: + vol = self.get_volume(volume['id']) + except Exception: + self.log.debug( + "Error getting volume info %s", volume['id'], + exc_info=True) + continue + + if vol['status'] == 'available': + return + + if vol['status'] == 'error': + raise OpenStackCloudException( + "Error in detaching volume %s" % volume['id'] + ) + + def attach_volume(self, server, volume, device=None, + wait=True, timeout=None): + """Attach a volume to a server. + + This will attach a volume, described by the passed in volume + dict (as returned by get_volume()), to the server described by + the passed in server dict (as returned by get_server()) on the + named device on the server. + + If the volume is already attached to the server, or generally not + available, then an exception is raised. To re-attach to a server, + but under a different device, the user must detach it first. 
+
+        :param server: The server dict to attach to.
+        :param volume: The volume dict to attach.
+        :param device: The device name where the volume will attach.
+        :param wait: If true, waits for volume to be attached.
+        :param timeout: Seconds to wait for volume attachment. None is forever.
+
+        :returns: a volume attachment object.
+
+        :raises: OpenStackCloudTimeout if wait time exceeded.
+        :raises: OpenStackCloudException on operation error.
+        """
+        dev = self.get_volume_attach_device(volume, server['id'])
+        if dev:
+            raise OpenStackCloudException(
+                "Volume %s already attached to server %s on device %s"
+                % (volume['id'], server['id'], dev)
+            )
+
+        if volume['status'] != 'available':
+            raise OpenStackCloudException(
+                "Volume %s is not available. Status is '%s'"
+                % (volume['id'], volume['status'])
+            )
+
+        payload = {'volumeId': volume['id']}
+        if device:
+            payload['device'] = device
+        data = self._compute_client.post(
+            '/servers/{server_id}/os-volume_attachments'.format(
+                server_id=server['id']),
+            json=dict(volumeAttachment=payload),
+            error_message="Error attaching volume {volume_id} to server "
+                          "{server_id}".format(volume_id=volume['id'],
+                                               server_id=server['id']))
+
+        if wait:
+            for count in _utils._iterate_timeout(
+                    timeout,
+                    "Timeout waiting for volume %s to attach." % volume['id']):
+                try:
+                    self.list_volumes.invalidate(self)
+                    vol = self.get_volume(volume['id'])
+                except Exception:
+                    self.log.debug(
+                        "Error getting volume info %s", volume['id'],
+                        exc_info=True)
+                    continue
+
+                if self.get_volume_attach_device(vol, server['id']):
+                    break
+
+                # TODO(Shrews) check to see if a volume can be in error status
+                #              and also attached. If so, we should move this
+                #              above the get_volume_attach_device call
+                if vol['status'] == 'error':
+                    raise OpenStackCloudException(
+                        "Error in attaching volume %s" % volume['id']
+                    )
+        return self._normalize_volume_attachment(
+            self._get_and_munchify('volumeAttachment', data))
+
+    def _get_volume_kwargs(self, kwargs):
+        name = kwargs.pop('name', kwargs.pop('display_name', None))
+        description = kwargs.pop('description',
+                                 kwargs.pop('display_description', None))
+        if name:
+            if self._is_client_version('volume', 2):
+                kwargs['name'] = name
+            else:
+                kwargs['display_name'] = name
+        if description:
+            if self._is_client_version('volume', 2):
+                kwargs['description'] = description
+            else:
+                kwargs['display_description'] = description
+        return kwargs
+
+    @_utils.valid_kwargs('name', 'display_name',
+                         'description', 'display_description')
+    def create_volume_snapshot(self, volume_id, force=False,
+                               wait=True, timeout=None, **kwargs):
+        """Create a snapshot of a volume.
+
+        :param volume_id: the ID of the volume to snapshot.
+        :param force: If set to True the snapshot will be created even if the
+            volume is attached to an instance, if False it will not
+        :param name: name of the snapshot, one will be generated if one is
+            not provided
+        :param description: description of the snapshot, one will be generated
+            if one is not provided
+        :param wait: If true, waits for volume snapshot to be created.
+        :param timeout: Seconds to wait for volume snapshot creation. None is
+            forever.
+
+        :returns: The created volume snapshot object.
+
+        :raises: OpenStackCloudTimeout if wait time exceeded.
+        :raises: OpenStackCloudException on operation error.
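+
+        A minimal usage sketch (illustrative only; assumes ``cloud`` is a
+        connected ``OpenStackCloud`` instance and ``example-volume`` exists)::
+
+            volume = cloud.get_volume('example-volume')
+            snapshot = cloud.create_volume_snapshot(
+                volume['id'], name='example-snapshot', wait=True)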
+ """ + + kwargs = self._get_volume_kwargs(kwargs) + payload = {'volume_id': volume_id, 'force': force} + payload.update(kwargs) + data = self._volume_client.post( + '/snapshots', + json=dict(snapshot=payload), + error_message="Error creating snapshot of volume " + "{volume_id}".format(volume_id=volume_id)) + snapshot = self._get_and_munchify('snapshot', data) + if wait: + snapshot_id = snapshot['id'] + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the volume snapshot to be available." + ): + snapshot = self.get_volume_snapshot_by_id(snapshot_id) + + if snapshot['status'] == 'available': + break + + if snapshot['status'] == 'error': + raise OpenStackCloudException( + "Error in creating volume snapshot") + + # TODO(mordred) need to normalize snapshots. We were normalizing them + # as volumes, which is an error. They need to be normalized as + # volume snapshots, which are completely different objects + return snapshot + + def get_volume_snapshot_by_id(self, snapshot_id): + """Takes a snapshot_id and gets a dict of the snapshot + that maches that ID. + + Note: This is more efficient than get_volume_snapshot. + + param: snapshot_id: ID of the volume snapshot. + + """ + data = self._volume_client.get( + '/snapshots/{snapshot_id}'.format(snapshot_id=snapshot_id), + error_message="Error getting snapshot " + "{snapshot_id}".format(snapshot_id=snapshot_id)) + return self._normalize_volume( + self._get_and_munchify('snapshot', data)) + + def get_volume_snapshot(self, name_or_id, filters=None): + """Get a volume by name or ID. + + :param name_or_id: Name or ID of the volume snapshot. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + { + 'last_name': 'Smith', + 'other': { + 'gender': 'Female' + } + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A volume ``munch.Munch`` or None if no matching volume is + found. + """ + return _utils._get_entity(self, 'volume_snapshot', name_or_id, + filters) + + def create_volume_backup(self, volume_id, name=None, description=None, + force=False, wait=True, timeout=None): + """Create a volume backup. + + :param volume_id: the ID of the volume to backup. + :param name: name of the backup, one will be generated if one is + not provided + :param description: description of the backup, one will be generated + if one is not provided + :param force: If set to True the backup will be created even if the + volume is attached to an instance, if False it will not + :param wait: If true, waits for volume backup to be created. + :param timeout: Seconds to wait for volume backup creation. None is + forever. + + :returns: The created volume backup object. + + :raises: OpenStackCloudTimeout if wait time exceeded. + :raises: OpenStackCloudException on operation error. 
+ """ + payload = { + 'name': name, + 'volume_id': volume_id, + 'description': description, + 'force': force, + } + + data = self._volume_client.post( + '/backups', json=dict(backup=payload), + error_message="Error creating backup of volume " + "{volume_id}".format(volume_id=volume_id)) + backup = self._get_and_munchify('backup', data) + + if wait: + backup_id = backup['id'] + msg = ("Timeout waiting for the volume backup {} to be " + "available".format(backup_id)) + for _ in _utils._iterate_timeout(timeout, msg): + backup = self.get_volume_backup(backup_id) + + if backup['status'] == 'available': + break + + if backup['status'] == 'error': + raise OpenStackCloudException( + "Error in creating volume backup {id}".format( + id=backup_id)) + + return backup + + def get_volume_backup(self, name_or_id, filters=None): + """Get a volume backup by name or ID. + + :returns: A backup ``munch.Munch`` or None if no matching backup is + found. + """ + return _utils._get_entity(self, 'volume_backup', name_or_id, + filters) + + def list_volume_snapshots(self, detailed=True, search_opts=None): + """List all volume snapshots. + + :returns: A list of volume snapshots ``munch.Munch``. + + """ + endpoint = '/snapshots/detail' if detailed else '/snapshots' + data = self._volume_client.get( + endpoint, + params=search_opts, + error_message="Error getting a list of snapshots") + return self._get_and_munchify('snapshots', data) + + def list_volume_backups(self, detailed=True, search_opts=None): + """ + List all volume backups. + + :param bool detailed: Also list details for each entry + :param dict search_opts: Search options + A dictionary of meta data to use for further filtering. Example:: + + { + 'name': 'my-volume-backup', + 'status': 'available', + 'volume_id': 'e126044c-7b4c-43be-a32a-c9cbbc9ddb56', + 'all_tenants': 1 + } + + :returns: A list of volume backups ``munch.Munch``. + """ + endpoint = '/backups/detail' if detailed else '/backups' + data = self._volume_client.get( + endpoint, params=search_opts, + error_message="Error getting a list of backups") + return self._get_and_munchify('backups', data) + + def delete_volume_backup(self, name_or_id=None, force=False, wait=False, + timeout=None): + """Delete a volume backup. + + :param name_or_id: Name or unique ID of the volume backup. + :param force: Allow delete in state other than error or available. + :param wait: If true, waits for volume backup to be deleted. + :param timeout: Seconds to wait for volume backup deletion. None is + forever. + + :raises: OpenStackCloudTimeout if wait time exceeded. + :raises: OpenStackCloudException on operation error. + """ + + volume_backup = self.get_volume_backup(name_or_id) + + if not volume_backup: + return False + + msg = "Error in deleting volume backup" + if force: + self._volume_client.post( + '/backups/{backup_id}/action'.format( + backup_id=volume_backup['id']), + json={'os-force_delete': None}, + error_message=msg) + else: + self._volume_client.delete( + '/backups/{backup_id}'.format( + backup_id=volume_backup['id']), + error_message=msg) + if wait: + msg = "Timeout waiting for the volume backup to be deleted." + for count in _utils._iterate_timeout(timeout, msg): + if not self.get_volume_backup(volume_backup['id']): + break + + return True + + def delete_volume_snapshot(self, name_or_id=None, wait=False, + timeout=None): + """Delete a volume snapshot. + + :param name_or_id: Name or unique ID of the volume snapshot. + :param wait: If true, waits for volume snapshot to be deleted. 
+ :param timeout: Seconds to wait for volume snapshot deletion. None is + forever. + + :raises: OpenStackCloudTimeout if wait time exceeded. + :raises: OpenStackCloudException on operation error. + """ + + volumesnapshot = self.get_volume_snapshot(name_or_id) + + if not volumesnapshot: + return False + + self._volume_client.delete( + '/snapshots/{snapshot_id}'.format( + snapshot_id=volumesnapshot['id']), + error_message="Error in deleting volume snapshot") + + if wait: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the volume snapshot to be deleted."): + if not self.get_volume_snapshot(volumesnapshot['id']): + break + + return True + + def get_server_id(self, name_or_id): + server = self.get_server(name_or_id, bare=True) + if server: + return server['id'] + return None + + def get_server_private_ip(self, server): + return meta.get_server_private_ip(server, self) + + def get_server_public_ip(self, server): + return meta.get_server_external_ipv4(self, server) + + def get_server_meta(self, server): + # TODO(mordred) remove once ansible has moved to Inventory interface + server_vars = meta.get_hostvars_from_server(self, server) + groups = meta.get_groups_from_server(self, server, server_vars) + return dict(server_vars=server_vars, groups=groups) + + def get_openstack_vars(self, server): + return meta.get_hostvars_from_server(self, server) + + def _expand_server_vars(self, server): + # Used by nodepool + # TODO(mordred) remove after these make it into what we + # actually want the API to be. + return meta.expand_server_vars(self, server) + + def _find_floating_network_by_router(self): + """Find the network providing floating ips by looking at routers.""" + + if self._floating_network_by_router_lock.acquire( + not self._floating_network_by_router_run): + if self._floating_network_by_router_run: + self._floating_network_by_router_lock.release() + return self._floating_network_by_router + try: + for router in self.list_routers(): + if router['admin_state_up']: + network_id = router.get( + 'external_gateway_info', {}).get('network_id') + if network_id: + self._floating_network_by_router = network_id + finally: + self._floating_network_by_router_run = True + self._floating_network_by_router_lock.release() + return self._floating_network_by_router + + def available_floating_ip(self, network=None, server=None): + """Get a floating IP from a network or a pool. + + Return the first available floating IP or allocate a new one. + + :param network: Name or ID of the network. + :param server: Server the IP is for if known + + :returns: a (normalized) structure with a floating IP address + description. + """ + if self._use_neutron_floating(): + try: + f_ips = self._normalize_floating_ips( + self._neutron_available_floating_ips( + network=network, server=server)) + return f_ips[0] + except OpenStackCloudURINotFound as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. 
Trying with Nova.", {'msg': str(e)}) + # Fall-through, trying with Nova + + f_ips = self._normalize_floating_ips( + self._nova_available_floating_ips(pool=network) + ) + return f_ips[0] + + def _get_floating_network_id(self): + # Get first existing external IPv4 network + networks = self.get_external_ipv4_floating_networks() + if networks: + floating_network_id = networks[0]['id'] + else: + floating_network = self._find_floating_network_by_router() + if floating_network: + floating_network_id = floating_network + else: + raise OpenStackCloudResourceNotFound( + "unable to find an external network") + return floating_network_id + + def _neutron_available_floating_ips( + self, network=None, project_id=None, server=None): + """Get a floating IP from a network. + + Return a list of available floating IPs or allocate a new one and + return it in a list of 1 element. + + :param network: A single network name or ID, or a list of them. + :param server: (server) Server the Floating IP is for + + :returns: a list of floating IP addresses. + + :raises: ``OpenStackCloudResourceNotFound``, if an external network + that meets the specified criteria cannot be found. + """ + if project_id is None: + # Make sure we are only listing floatingIPs allocated the current + # tenant. This is the default behaviour of Nova + project_id = self.current_project_id + + if network: + if isinstance(network, six.string_types): + network = [network] + + # Use given list to get first matching external network + floating_network_id = None + for net in network: + for ext_net in self.get_external_ipv4_floating_networks(): + if net in (ext_net['name'], ext_net['id']): + floating_network_id = ext_net['id'] + break + if floating_network_id: + break + + if floating_network_id is None: + raise OpenStackCloudResourceNotFound( + "unable to find external network {net}".format( + net=network) + ) + else: + floating_network_id = self._get_floating_network_id() + + filters = { + 'port': None, + 'network': floating_network_id, + 'location': {'project': {'id': project_id}}, + } + + floating_ips = self._list_floating_ips() + available_ips = _utils._filter_list( + floating_ips, name_or_id=None, filters=filters) + if available_ips: + return available_ips + + # No available IP found or we didn't try + # allocate a new Floating IP + f_ip = self._neutron_create_floating_ip( + network_id=floating_network_id, server=server) + + return [f_ip] + + def _nova_available_floating_ips(self, pool=None): + """Get available floating IPs from a floating IP pool. + + Return a list of available floating IPs or allocate a new one and + return it in a list of 1 element. + + :param pool: Nova floating IP pool name. + + :returns: a list of floating IP addresses. + + :raises: ``OpenStackCloudResourceNotFound``, if a floating IP pool + is not specified and cannot be found. + """ + + with _utils.shade_exceptions( + "Unable to create floating IP in pool {pool}".format( + pool=pool)): + if pool is None: + pools = self.list_floating_ip_pools() + if not pools: + raise OpenStackCloudResourceNotFound( + "unable to find a floating ip pool") + pool = pools[0]['name'] + + filters = { + 'instance_id': None, + 'pool': pool + } + + floating_ips = self._nova_list_floating_ips() + available_ips = _utils._filter_list( + floating_ips, name_or_id=None, filters=filters) + if available_ips: + return available_ips + + # No available IP found or we did not try. 
+ # Allocate a new Floating IP + f_ip = self._nova_create_floating_ip(pool=pool) + + return [f_ip] + + def create_floating_ip(self, network=None, server=None, + fixed_address=None, nat_destination=None, + port=None, wait=False, timeout=60): + """Allocate a new floating IP from a network or a pool. + + :param network: Name or ID of the network + that the floating IP should come from. + :param server: (optional) Server dict for the server to create + the IP for and to which it should be attached. + :param fixed_address: (optional) Fixed IP to attach the floating + ip to. + :param nat_destination: (optional) Name or ID of the network + that the fixed IP to attach the floating + IP to should be on. + :param port: (optional) The port ID that the floating IP should be + attached to. Specifying a port conflicts + with specifying a server, fixed_address or + nat_destination. + :param wait: (optional) Whether to wait for the IP to be active. + Defaults to False. Only applies if a server is + provided. + :param timeout: (optional) How long to wait for the IP to be active. + Defaults to 60. Only applies if a server is + provided. + + :returns: a floating IP address + + :raises: ``OpenStackCloudException``, on operation error. + """ + if self._use_neutron_floating(): + try: + return self._neutron_create_floating_ip( + network_name_or_id=network, server=server, + fixed_address=fixed_address, + nat_destination=nat_destination, + port=port, + wait=wait, timeout=timeout) + except OpenStackCloudURINotFound as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. Trying with Nova.", {'msg': str(e)}) + # Fall-through, trying with Nova + + if port: + raise OpenStackCloudException( + "This cloud uses nova-network which does not support" + " arbitrary floating-ip/port mappings. 
Please nudge" + " your cloud provider to upgrade the networking stack" + " to neutron, or alternately provide the server," + " fixed_address and nat_destination arguments as appropriate") + # Else, we are using Nova network + f_ips = self._normalize_floating_ips( + [self._nova_create_floating_ip(pool=network)]) + return f_ips[0] + + def _submit_create_fip(self, kwargs): + # Split into a method to aid in test mocking + data = self._network_client.post( + "/floatingips.json", json={"floatingip": kwargs}) + return self._normalize_floating_ip( + self._get_and_munchify('floatingip', data)) + + def _neutron_create_floating_ip( + self, network_name_or_id=None, server=None, + fixed_address=None, nat_destination=None, + port=None, + wait=False, timeout=60, network_id=None): + + if not network_id: + if network_name_or_id: + network = self.get_network(network_name_or_id) + if not network: + raise OpenStackCloudResourceNotFound( + "unable to find network for floating ips with ID " + "{0}".format(network_name_or_id)) + network_id = network['id'] + else: + network_id = self._get_floating_network_id() + kwargs = { + 'floating_network_id': network_id, + } + if not port: + if server: + (port_obj, fixed_ip_address) = self._nat_destination_port( + server, fixed_address=fixed_address, + nat_destination=nat_destination) + if port_obj: + port = port_obj['id'] + if fixed_ip_address: + kwargs['fixed_ip_address'] = fixed_ip_address + if port: + kwargs['port_id'] = port + + fip = self._submit_create_fip(kwargs) + fip_id = fip['id'] + + if port: + # The FIP is only going to become active in this context + # when we've attached it to something, which only occurs + # if we've provided a port as a parameter + if wait: + try: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for the floating IP" + " to be ACTIVE", + wait=self._FLOAT_AGE): + fip = self.get_floating_ip(fip_id) + if fip and fip['status'] == 'ACTIVE': + break + except OpenStackCloudTimeout: + self.log.error( + "Timed out on floating ip %(fip)s becoming active." + " Deleting", {'fip': fip_id}) + try: + self.delete_floating_ip(fip_id) + except Exception as e: + self.log.error( + "FIP LEAK: Attempted to delete floating ip " + "%(fip)s but received %(exc)s exception: " + "%(err)s", {'fip': fip_id, 'exc': e.__class__, + 'err': str(e)}) + raise + if fip['port_id'] != port: + if server: + raise OpenStackCloudException( + "Attempted to create FIP on port {port} for server" + " {server} but FIP has port {port_id}".format( + port=port, port_id=fip['port_id'], + server=server['id'])) + else: + raise OpenStackCloudException( + "Attempted to create FIP on port {port}" + " but something went wrong".format(port=port)) + return fip + + def _nova_create_floating_ip(self, pool=None): + with _utils.shade_exceptions( + "Unable to create floating IP in pool {pool}".format( + pool=pool)): + if pool is None: + pools = self.list_floating_ip_pools() + if not pools: + raise OpenStackCloudResourceNotFound( + "unable to find a floating ip pool") + pool = pools[0]['name'] + + data = self._compute_client.post( + '/os-floating-ips', json=dict(pool=pool)) + pool_ip = self._get_and_munchify('floating_ip', data) + # TODO(mordred) Remove this - it's just for compat + data = self._compute_client.get('/os-floating-ips/{id}'.format( + id=pool_ip['id'])) + return self._get_and_munchify('floating_ip', data) + + def delete_floating_ip(self, floating_ip_id, retry=1): + """Deallocate a floating IP from a project. + + :param floating_ip_id: a floating IP address ID. 
+ :param retry: number of times to retry. Optional, defaults to 1, + which is in addition to the initial delete call. + A value of 0 will also cause no checking of results to + occur. + + :returns: True if the IP address has been deleted, False if the IP + address was not found. + + :raises: ``OpenStackCloudException``, on operation error. + """ + for count in range(0, max(0, retry) + 1): + result = self._delete_floating_ip(floating_ip_id) + + if (retry == 0) or not result: + return result + + # Wait for the cached floating ip list to be regenerated + if self._FLOAT_AGE: + time.sleep(self._FLOAT_AGE) + + # neutron sometimes returns success when deleting a floating + # ip. That's awesome. SO - verify that the delete actually + # worked. Some clouds will set the status to DOWN rather than + # deleting the IP immediately. This is, of course, a bit absurd. + f_ip = self.get_floating_ip(id=floating_ip_id) + if not f_ip or f_ip['status'] == 'DOWN': + return True + + raise OpenStackCloudException( + "Attempted to delete Floating IP {ip} with ID {id} a total of" + " {retry} times. Although the cloud did not indicate any errors" + " the floating ip is still in existence. Aborting further" + " operations.".format( + id=floating_ip_id, ip=f_ip['floating_ip_address'], + retry=retry + 1)) + + def _delete_floating_ip(self, floating_ip_id): + if self._use_neutron_floating(): + try: + return self._neutron_delete_floating_ip(floating_ip_id) + except OpenStackCloudURINotFound as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. Trying with Nova.", {'msg': str(e)}) + return self._nova_delete_floating_ip(floating_ip_id) + + def _neutron_delete_floating_ip(self, floating_ip_id): + try: + self._network_client.delete( + "/floatingips/{fip_id}.json".format(fip_id=floating_ip_id), + error_message="unable to delete floating IP") + except OpenStackCloudResourceNotFound: + return False + except Exception as e: + raise OpenStackCloudException( + "Unable to delete floating IP ID {fip_id}: {msg}".format( + fip_id=floating_ip_id, msg=str(e))) + return True + + def _nova_delete_floating_ip(self, floating_ip_id): + try: + self._compute_client.delete( + '/os-floating-ips/{id}'.format(id=floating_ip_id), + error_message='Unable to delete floating IP {fip_id}'.format( + fip_id=floating_ip_id)) + except OpenStackCloudURINotFound: + return False + return True + + def delete_unattached_floating_ips(self, retry=1): + """Safely delete unattached floating ips. + + If the cloud can safely purge any unattached floating ips without + race conditions, do so. + + Safely here means a specific thing. It means that you are not running + this while another process that might do a two step create/attach + is running. You can safely run this method while another process + is creating servers and attaching floating IPs to them if either that + process is using add_auto_ip from shade, or is creating the floating + IPs by passing in a server to the create_floating_ip call. + + :param retry: number of times to retry. Optional, defaults to 1, + which is in addition to the initial delete call. + A value of 0 will also cause no checking of results to + occur. + + :returns: True if Floating IPs have been deleted, False if not + + :raises: ``OpenStackCloudException``, on operation error. 
+ """ + processed = [] + if self._use_neutron_floating(): + for ip in self.list_floating_ips(): + if not ip['attached']: + processed.append(self.delete_floating_ip( + floating_ip_id=ip['id'], retry=retry)) + return all(processed) if processed else False + + def _attach_ip_to_server( + self, server, floating_ip, + fixed_address=None, wait=False, + timeout=60, skip_attach=False, nat_destination=None): + """Attach a floating IP to a server. + + :param server: Server dict + :param floating_ip: Floating IP dict to attach + :param fixed_address: (optional) fixed address to which attach the + floating IP to. + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. + :param skip_attach: (optional) Skip the actual attach and just do + the wait. Defaults to False. + :param nat_destination: The fixed network the server's port for the + FIP to attach to will come from. + + :returns: The server ``munch.Munch`` + + :raises: OpenStackCloudException, on operation error. + """ + # Short circuit if we're asking to attach an IP that's already + # attached + ext_ip = meta.get_server_ip(server, ext_tag='floating', public=True) + if ext_ip == floating_ip['floating_ip_address']: + return server + + if self._use_neutron_floating(): + if not skip_attach: + try: + self._neutron_attach_ip_to_server( + server=server, floating_ip=floating_ip, + fixed_address=fixed_address, + nat_destination=nat_destination) + except OpenStackCloudURINotFound as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. Trying with Nova.", {'msg': str(e)}) + # Fall-through, trying with Nova + else: + # Nova network + self._nova_attach_ip_to_server( + server_id=server['id'], floating_ip_id=floating_ip['id'], + fixed_address=fixed_address) + + if wait: + # Wait for the address to be assigned to the server + server_id = server['id'] + for _ in _utils._iterate_timeout( + timeout, + "Timeout waiting for the floating IP to be attached.", + wait=self._SERVER_AGE): + server = self.get_server(server_id) + ext_ip = meta.get_server_ip( + server, ext_tag='floating', public=True) + if ext_ip == floating_ip['floating_ip_address']: + return server + return server + + def _nat_destination_port( + self, server, fixed_address=None, nat_destination=None): + """Returns server port that is on a nat_destination network + + Find a port attached to the server which is on a network which + has a subnet which can be the destination of NAT. Such a network + is referred to in shade as a "nat_destination" network. So this + then is a function which returns a port on such a network that is + associated with the given server. + + :param server: Server dict. + :param fixed_address: Fixed ip address of the port + :param nat_destination: Name or ID of the network of the port. + """ + # If we are caching port lists, we may not find the port for + # our server if the list is old. Try for at least 2 cache + # periods if that is the case. 
+ if self._PORT_AGE: + timeout = self._PORT_AGE * 2 + else: + timeout = None + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for port to show up in list", + wait=self._PORT_AGE): + try: + port_filter = {'device_id': server['id']} + ports = self.search_ports(filters=port_filter) + break + except OpenStackCloudTimeout: + ports = None + if not ports: + return (None, None) + port = None + if not fixed_address: + if len(ports) > 1: + if nat_destination: + nat_network = self.get_network(nat_destination) + if not nat_network: + raise OpenStackCloudException( + 'NAT Destination {nat_destination} was configured' + ' but not found on the cloud. Please check your' + ' config and your cloud and try again.'.format( + nat_destination=nat_destination)) + else: + nat_network = self.get_nat_destination() + + if not nat_network: + raise OpenStackCloudException( + 'Multiple ports were found for server {server}' + ' but none of the networks are a valid NAT' + ' destination, so it is impossible to add a' + ' floating IP. If you have a network that is a valid' + ' destination for NAT and we could not find it,' + ' please file a bug. But also configure the' + ' nat_destination property of the networks list in' + ' your clouds.yaml file. If you do not have a' + ' clouds.yaml file, please make one - your setup' + ' is complicated.'.format(server=server['id'])) + + maybe_ports = [] + for maybe_port in ports: + if maybe_port['network_id'] == nat_network['id']: + maybe_ports.append(maybe_port) + if not maybe_ports: + raise OpenStackCloudException( + 'No port on server {server} was found matching' + ' your NAT destination network {dest}. Please ' + ' check your config'.format( + server=server['id'], dest=nat_network['name'])) + ports = maybe_ports + + # Select the most recent available IPv4 address + # To do this, sort the ports in reverse order by the created_at + # field which is a string containing an ISO DateTime (which + # thankfully sort properly) This way the most recent port created, + # if there are more than one, will be the arbitrary port we + # select. + for port in sorted( + ports, + key=lambda p: p.get('created_at', 0), + reverse=True): + for address in port.get('fixed_ips', list()): + try: + ip = ipaddress.ip_address(address['ip_address']) + except Exception: + continue + if ip.version == 4: + fixed_address = address['ip_address'] + return port, fixed_address + raise OpenStackCloudException( + "unable to find a free fixed IPv4 address for server " + "{0}".format(server['id'])) + # unfortunately a port can have more than one fixed IP: + # we can't use the search_ports filtering for fixed_address as + # they are contained in a list. e.g. 
+ # + # "fixed_ips": [ + # { + # "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062", + # "ip_address": "172.24.4.2" + # } + # ] + # + # Search fixed_address + for p in ports: + for fixed_ip in p['fixed_ips']: + if fixed_address == fixed_ip['ip_address']: + return (p, fixed_address) + return (None, None) + + def _neutron_attach_ip_to_server( + self, server, floating_ip, fixed_address=None, + nat_destination=None): + + # Find an available port + (port, fixed_address) = self._nat_destination_port( + server, fixed_address=fixed_address, + nat_destination=nat_destination) + if not port: + raise OpenStackCloudException( + "unable to find a port for server {0}".format( + server['id'])) + + floating_ip_args = {'port_id': port['id']} + if fixed_address is not None: + floating_ip_args['fixed_ip_address'] = fixed_address + + return self._network_client.put( + "/floatingips/{fip_id}.json".format(fip_id=floating_ip['id']), + json={'floatingip': floating_ip_args}, + error_message=("Error attaching IP {ip} to " + "server {server_id}".format( + ip=floating_ip['id'], + server_id=server['id']))) + + def _nova_attach_ip_to_server(self, server_id, floating_ip_id, + fixed_address=None): + f_ip = self.get_floating_ip( + id=floating_ip_id) + if f_ip is None: + raise OpenStackCloudException( + "unable to find floating IP {0}".format(floating_ip_id)) + error_message = "Error attaching IP {ip} to instance {id}".format( + ip=floating_ip_id, id=server_id) + body = { + 'address': f_ip['floating_ip_address'] + } + if fixed_address: + body['fixed_address'] = fixed_address + return self._compute_client.post( + '/servers/{server_id}/action'.format(server_id=server_id), + json=dict(addFloatingIp=body), + error_message=error_message) + + def detach_ip_from_server(self, server_id, floating_ip_id): + """Detach a floating IP from a server. + + :param server_id: ID of a server. + :param floating_ip_id: Id of the floating IP to detach. + + :returns: True if the IP has been detached, or False if the IP wasn't + attached to any server. + + :raises: ``OpenStackCloudException``, on operation error. + """ + if self._use_neutron_floating(): + try: + return self._neutron_detach_ip_from_server( + server_id=server_id, floating_ip_id=floating_ip_id) + except OpenStackCloudURINotFound as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. 
Trying with Nova.", {'msg': str(e)}) + # Fall-through, trying with Nova + + # Nova network + self._nova_detach_ip_from_server( + server_id=server_id, floating_ip_id=floating_ip_id) + + def _neutron_detach_ip_from_server(self, server_id, floating_ip_id): + f_ip = self.get_floating_ip(id=floating_ip_id) + if f_ip is None or not f_ip['attached']: + return False + self._network_client.put( + "/floatingips/{fip_id}.json".format(fip_id=floating_ip_id), + json={"floatingip": {"port_id": None}}, + error_message=("Error detaching IP {ip} from " + "server {server_id}".format( + ip=floating_ip_id, server_id=server_id))) + + return True + + def _nova_detach_ip_from_server(self, server_id, floating_ip_id): + + f_ip = self.get_floating_ip(id=floating_ip_id) + if f_ip is None: + raise OpenStackCloudException( + "unable to find floating IP {0}".format(floating_ip_id)) + error_message = "Error detaching IP {ip} from instance {id}".format( + ip=floating_ip_id, id=server_id) + return self._compute_client.post( + '/servers/{server_id}/action'.format(server_id=server_id), + json=dict(removeFloatingIp=dict( + address=f_ip['floating_ip_address'])), + error_message=error_message) + + return True + + def _add_ip_from_pool( + self, server, network, fixed_address=None, reuse=True, + wait=False, timeout=60, nat_destination=None): + """Add a floating IP to a server from a given pool + + This method reuses available IPs, when possible, or allocate new IPs + to the current tenant. + The floating IP is attached to the given fixed address or to the + first server port/fixed address + + :param server: Server dict + :param network: Name or ID of the network. + :param fixed_address: a fixed address + :param reuse: Try to reuse existing ips. Defaults to True. + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. + :param nat_destination: (optional) the name of the network of the + port to associate with the floating ip. + + :returns: the updated server ``munch.Munch`` + """ + if reuse: + f_ip = self.available_floating_ip(network=network) + else: + start_time = time.time() + f_ip = self.create_floating_ip( + server=server, + network=network, nat_destination=nat_destination, + wait=wait, timeout=timeout) + timeout = timeout - (time.time() - start_time) + # Wait for cache invalidation time so that we don't try + # to attach the FIP a second time below + time.sleep(self._SERVER_AGE) + server = self.get_server(server.id) + + # We run attach as a second call rather than in the create call + # because there are code flows where we will not have an attached + # FIP yet. However, even if it was attached in the create, we run + # the attach function below to get back the server dict refreshed + # with the FIP information. + return self._attach_ip_to_server( + server=server, floating_ip=f_ip, fixed_address=fixed_address, + wait=wait, timeout=timeout, nat_destination=nat_destination) + + def add_ip_list( + self, server, ips, wait=False, timeout=60, + fixed_address=None): + """Attach a list of IPs to a server. + + :param server: a server object + :param ips: list of floating IP addresses or a single address + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. 
+        :param fixed_address: (optional) Fixed address of the server to
+                              attach the IP to
+
+        :returns: The updated server ``munch.Munch``
+
+        :raises: ``OpenStackCloudException``, on operation error.
+        """
+        if isinstance(ips, list):
+            ip = ips[0]
+        else:
+            ip = ips
+        f_ip = self.get_floating_ip(
+            id=None, filters={'floating_ip_address': ip})
+        return self._attach_ip_to_server(
+            server=server, floating_ip=f_ip, wait=wait, timeout=timeout,
+            fixed_address=fixed_address)
+
+    def add_auto_ip(self, server, wait=False, timeout=60, reuse=True):
+        """Add a floating IP to a server.
+
+        This method is intended for basic usage. For advanced network
+        architecture (e.g. multiple external networks or servers with multiple
+        interfaces), use other floating IP methods.
+
+        This method can reuse available IPs, or allocate new IPs to the
+        current project.
+
+        :param server: a server dictionary.
+        :param reuse: Whether or not to attempt to reuse existing floating
+                      ips. Defaults to True.
+        :param wait: (optional) Wait for the address to appear as assigned
+                     to the server. Defaults to False.
+        :param timeout: (optional) Seconds to wait, defaults to 60.
+                        See the ``wait`` parameter.
+
+        :returns: Floating IP address attached to server.
+
+        """
+        server = self._add_auto_ip(
+            server, wait=wait, timeout=timeout, reuse=reuse)
+        return server['interface_ip'] or None
+
+    def _add_auto_ip(self, server, wait=False, timeout=60, reuse=True):
+        skip_attach = False
+        created = False
+        if reuse:
+            f_ip = self.available_floating_ip()
+        else:
+            start_time = time.time()
+            f_ip = self.create_floating_ip(
+                server=server, wait=wait, timeout=timeout)
+            timeout = timeout - (time.time() - start_time)
+            if server:
+                # This gets passed in for both nova and neutron
+                # but is only meaningful for the neutron logic branch
+                skip_attach = True
+            created = True
+
+        try:
+            # We run attach as a second call rather than in the create call
+            # because there are code flows where we will not have an attached
+            # FIP yet. However, even if it was attached in the create, we run
+            # the attach function below to get back the server dict refreshed
+            # with the FIP information.
+            return self._attach_ip_to_server(
+                server=server, floating_ip=f_ip, wait=wait, timeout=timeout,
+                skip_attach=skip_attach)
+        except OpenStackCloudTimeout:
+            if self._use_neutron_floating() and created:
+                # We are here because we created an IP on the port
+                # It failed. Delete so as not to leak an unmanaged
+                # resource
+                self.log.error(
+                    "Timeout waiting for floating IP to become"
+                    " active. 
Floating IP %(ip)s:%(id)s was created for" + " server %(server)s but is being deleted due to" + " activation failure.", { + 'ip': f_ip['floating_ip_address'], + 'id': f_ip['id'], + 'server': server['id']}) + try: + self.delete_floating_ip(f_ip['id']) + except Exception as e: + self.log.error( + "FIP LEAK: Attempted to delete floating ip " + "%(fip)s but received %(exc)s exception: %(err)s", + {'fip': f_ip['id'], 'exc': e.__class__, 'err': str(e)}) + raise e + raise + + def add_ips_to_server( + self, server, auto_ip=True, ips=None, ip_pool=None, + wait=False, timeout=60, reuse=True, fixed_address=None, + nat_destination=None): + if ip_pool: + server = self._add_ip_from_pool( + server, ip_pool, reuse=reuse, wait=wait, timeout=timeout, + fixed_address=fixed_address, nat_destination=nat_destination) + elif ips: + server = self.add_ip_list( + server, ips, wait=wait, timeout=timeout, + fixed_address=fixed_address) + elif auto_ip: + if self._needs_floating_ip(server, nat_destination): + server = self._add_auto_ip( + server, wait=wait, timeout=timeout, reuse=reuse) + return server + + def _needs_floating_ip(self, server, nat_destination): + """Figure out if auto_ip should add a floating ip to this server. + + If the server has a public_v4 it does not need a floating ip. + + If the server does not have a private_v4 it does not need a + floating ip. + + If self.private then the server does not need a floating ip. + + If the cloud runs nova, and the server has a private_v4 and not + a public_v4, then the server needs a floating ip. + + If the server has a private_v4 and no public_v4 and the cloud has + a network from which floating IPs come that is connected via a + router to the network from which the private_v4 address came, + then the server needs a floating ip. + + If the server has a private_v4 and no public_v4 and the cloud + does not have a network from which floating ips come, or it has + one but that network is not connected to the network from which + the server's private_v4 address came via a router, then the + server does not need a floating ip. + """ + if not self._has_floating_ips(): + return False + + if server['public_v4']: + return False + + if not server['private_v4']: + return False + + if self.private: + return False + + if not self.has_service('network'): + return True + + # No floating ip network - no FIPs + try: + self._get_floating_network_id() + except OpenStackCloudException: + return False + + (port_obj, fixed_ip_address) = self._nat_destination_port( + server, nat_destination=nat_destination) + + if not port_obj or not fixed_ip_address: + return False + + return True + + def _get_boot_from_volume_kwargs( + self, image, boot_from_volume, boot_volume, volume_size, + terminate_volume, volumes, kwargs): + """Return block device mappings + + :param image: Image dict, name or id to boot with. + + """ + # TODO(mordred) We're only testing this in functional tests. We need + # to add unit tests for this too. 
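Before reading the assembly code below, it helps to know the target shape: each entry follows the compute API's block device mapping v2 format. An illustrative entry for ``boot_from_volume=True`` (all values made up):

.. code-block:: python

    # One BDM v2 entry: build the boot disk as a volume from an image.
    block_device_mapping_v2 = [{
        'boot_index': '0',               # this device is the boot disk
        'source_type': 'image',          # populate the volume from an image
        'destination_type': 'volume',    # attach it as a volume
        'uuid': '11111111-2222-3333-4444-555555555555',  # image ID
        'volume_size': '50',             # size in GiB
        'delete_on_termination': False,  # keep the volume after delete
    }]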
+        if boot_volume or boot_from_volume or volumes:
+            kwargs.setdefault('block_device_mapping_v2', [])
+        else:
+            return kwargs
+
+        # If we have boot_from_volume but no root volume, then we're
+        # booting an image from volume
+        if boot_volume:
+            volume = self.get_volume(boot_volume)
+            if not volume:
+                raise OpenStackCloudException(
+                    'Volume {boot_volume} is not a valid volume'
+                    ' in {cloud}:{region}'.format(
+                        boot_volume=boot_volume,
+                        cloud=self.name, region=self.region_name))
+            block_mapping = {
+                'boot_index': '0',
+                'delete_on_termination': terminate_volume,
+                'destination_type': 'volume',
+                'uuid': volume['id'],
+                'source_type': 'volume',
+            }
+            kwargs['block_device_mapping_v2'].append(block_mapping)
+            kwargs['imageRef'] = ''
+        elif boot_from_volume:
+
+            if isinstance(image, dict):
+                image_obj = image
+            else:
+                image_obj = self.get_image(image)
+            if not image_obj:
+                raise OpenStackCloudException(
+                    'Image {image} is not a valid image in'
+                    ' {cloud}:{region}'.format(
+                        image=image,
+                        cloud=self.name, region=self.region_name))
+
+            block_mapping = {
+                'boot_index': '0',
+                'delete_on_termination': terminate_volume,
+                'destination_type': 'volume',
+                'uuid': image_obj['id'],
+                'source_type': 'image',
+                'volume_size': volume_size,
+            }
+            kwargs['imageRef'] = ''
+            kwargs['block_device_mapping_v2'].append(block_mapping)
+        if volumes and kwargs['imageRef']:
+            # If we're attaching volumes on boot but booting from an image,
+            # we need to specify that in the BDM.
+            block_mapping = {
+                u'boot_index': 0,
+                u'delete_on_termination': True,
+                u'destination_type': u'local',
+                u'source_type': u'image',
+                u'uuid': kwargs['imageRef'],
+            }
+            kwargs['block_device_mapping_v2'].append(block_mapping)
+        for volume in volumes:
+            volume_obj = self.get_volume(volume)
+            if not volume_obj:
+                raise OpenStackCloudException(
+                    'Volume {volume} is not a valid volume'
+                    ' in {cloud}:{region}'.format(
+                        volume=volume,
+                        cloud=self.name, region=self.region_name))
+            block_mapping = {
+                'boot_index': '-1',
+                'delete_on_termination': False,
+                'destination_type': 'volume',
+                'uuid': volume_obj['id'],
+                'source_type': 'volume',
+            }
+            kwargs['block_device_mapping_v2'].append(block_mapping)
+        if boot_volume or boot_from_volume or volumes:
+            self.list_volumes.invalidate(self)
+        return kwargs
+
+    def _encode_server_userdata(self, userdata):
+        if hasattr(userdata, 'read'):
+            userdata = userdata.read()
+
+        if not isinstance(userdata, six.binary_type):
+            # The userdata is not bytes, so it must be a string we can
+            # encode; anything else is an error
+            if not isinstance(userdata, six.string_types):
+                raise TypeError("%s can't be encoded" % type(userdata))
+            # If it's not bytes, make it bytes
+            userdata = userdata.encode('utf-8', 'strict')
+
+        # Once we have base64 bytes, make them into a utf-8 string for REST
+        return base64.b64encode(userdata).decode('utf-8')
+
+    @_utils.valid_kwargs(
+        'meta', 'files', 'userdata',
+        'reservation_id', 'return_raw', 'min_count',
+        'max_count', 'security_groups', 'key_name',
+        'availability_zone', 'block_device_mapping',
+        'block_device_mapping_v2', 'nics', 'scheduler_hints',
+        'config_drive', 'admin_pass', 'disk_config')
+    def create_server(
+            self, name, image, flavor,
+            auto_ip=True, ips=None, ip_pool=None,
+            root_volume=None, terminate_volume=False,
+            wait=False, timeout=180, reuse_ips=True,
+            network=None, boot_from_volume=False, volume_size='50',
+            boot_volume=None, volumes=None, nat_destination=None,
+            **kwargs):
+        """Create a virtual server instance.
+
+        :param name: Something to name the server.
+        :param image: Image dict, name or ID to boot with.
+        :param flavor: Flavor dict, name or ID to boot onto.
+        :param auto_ip: Whether to take actions to find a routable IP for
+                        the server. (defaults to True)
+        :param ips: List of IPs to attach to the server (defaults to None)
+        :param ip_pool: Name of the network or floating IP pool to get an
+                        address from. (defaults to None)
+        :param root_volume: Name or ID of a volume to boot from
+                            (defaults to None - deprecated, use boot_volume)
+        :param boot_volume: Name or ID of a volume to boot from
+                            (defaults to None)
+        :param terminate_volume: If booting from a volume, whether it should
+                                 be deleted when the server is destroyed.
+                                 (defaults to False)
+        :param volumes: (optional) A list of volumes to attach to the server
+        :param meta: (optional) A dict of arbitrary key/value metadata to
+                     store for this server. Both keys and values must be
+                     <=255 characters.
+        :param files: (optional, deprecated) A dict of files to overwrite
+                      on the server upon boot. Keys are file names (i.e.
+                      ``/etc/passwd``) and values
+                      are the file contents (either as a string or as a
+                      file-like object). A maximum of five entries is allowed,
+                      and each file must be 10k or less.
+        :param reservation_id: a UUID for the set of servers being requested.
+        :param min_count: (optional extension) The minimum number of
+                          servers to launch.
+        :param max_count: (optional extension) The maximum number of
+                          servers to launch.
+        :param security_groups: A list of security group names
+        :param userdata: user data to pass to be exposed by the metadata
+                         server; this can be a file-like object or a
+                         string.
+        :param key_name: (optional extension) name of previously created
+                         keypair to inject into the instance.
+        :param availability_zone: Name of the availability zone for instance
+                                  placement.
+        :param block_device_mapping: (optional) A dict of block
+                                     device mappings for this server.
+        :param block_device_mapping_v2: (optional) A dict of block
+                                        device mappings for this server.
+        :param nics: (optional extension) an ordered list of nics to be
+                     added to this server, with information about
+                     connected networks, fixed IPs, port etc.
+        :param scheduler_hints: (optional extension) arbitrary key-value pairs
+                                specified by the client to help boot an
+                                instance
+        :param config_drive: (optional extension) value for config drive
+                             either boolean, or volume-id
+        :param disk_config: (optional extension) control how the disk is
+                            partitioned when the server is created. possible
+                            values are 'AUTO' or 'MANUAL'.
+        :param admin_pass: (optional extension) add a user supplied admin
+                           password.
+        :param wait: (optional) Wait for the address to appear as assigned
+                     to the server. Defaults to False.
+        :param timeout: (optional) Seconds to wait, defaults to 180.
+                        See the ``wait`` parameter.
+        :param reuse_ips: (optional) Whether to attempt to reuse pre-existing
+                          floating ips should a floating IP be
+                          needed (defaults to True)
+        :param network: (optional) Network dict or name or ID to attach the
+                        server to. Mutually exclusive with the nics parameter.
+                        Can also be a list of network names or IDs or
+                        network dicts.
+        :param boot_from_volume: Whether to boot from volume. 'boot_volume'
+                                 implies True, but boot_from_volume=True with
+                                 no boot_volume is valid and will create a
+                                 volume from the image and use that.
+        :param volume_size: When booting an image from volume, how big should
+                            the created volume be? Defaults to 50.
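Since this parameter list is long, a minimal invocation may help anchor it (sketch only; the ``cloud`` handle and all names are illustrative):

.. code-block:: python

    server = cloud.create_server(
        name='web-1',
        image='ubuntu-16.04',
        flavor='m1.small',
        network='private',    # resolved to a nics entry below
        auto_ip=True,         # attach a floating IP if one is needed
        wait=True,
        timeout=300)
    print(server['interface_ip'])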
+        :param nat_destination: Which network should a created floating IP
+                                be attached to, if it's not possible to
+                                infer from the cloud's configuration.
+                                (Optional, defaults to None)
+        :returns: A ``munch.Munch`` representing the created server.
+        :raises: OpenStackCloudException on operation error.
+        """
+        # TODO(mordred) Add support for description starting in 2.19
+        security_groups = kwargs.get('security_groups', [])
+        if security_groups and not isinstance(kwargs['security_groups'], list):
+            security_groups = [security_groups]
+        if security_groups:
+            kwargs['security_groups'] = []
+            for group in security_groups:
+                kwargs['security_groups'].append(dict(name=group))
+        if 'userdata' in kwargs:
+            user_data = kwargs.pop('userdata')
+            if user_data:
+                kwargs['user_data'] = self._encode_server_userdata(user_data)
+        for (desired, given) in (
+                ('OS-DCF:diskConfig', 'disk_config'),
+                ('os:scheduler_hints', 'scheduler_hints'),
+                ('config_drive', 'config_drive'),
+                ('key_name', 'key_name'),
+                ('metadata', 'meta'),
+                ('adminPass', 'admin_pass')):
+            value = kwargs.pop(given, None)
+            if value:
+                kwargs[desired] = value
+
+        kwargs.setdefault('max_count', 1)
+        kwargs.setdefault('min_count', 1)
+
+        if 'nics' in kwargs and not isinstance(kwargs['nics'], list):
+            if isinstance(kwargs['nics'], dict):
+                # Be nice and help the user out
+                kwargs['nics'] = [kwargs['nics']]
+            else:
+                raise OpenStackCloudException(
+                    'nics parameter to create_server takes a list of dicts.'
+                    ' Got: {nics}'.format(nics=kwargs['nics']))
+
+        if network and ('nics' not in kwargs or not kwargs['nics']):
+            nics = []
+            if not isinstance(network, list):
+                network = [network]
+            for net_name in network:
+                if isinstance(net_name, dict) and 'id' in net_name:
+                    network_obj = net_name
+                else:
+                    network_obj = self.get_network(name_or_id=net_name)
+                if not network_obj:
+                    raise OpenStackCloudException(
+                        'Network {network} is not a valid network in'
+                        ' {cloud}:{region}'.format(
+                            network=net_name,
+                            cloud=self.name, region=self.region_name))
+                nics.append({'net-id': network_obj['id']})
+
+            kwargs['nics'] = nics
+        if not network and ('nics' not in kwargs or not kwargs['nics']):
+            default_network = self.get_default_network()
+            if default_network:
+                kwargs['nics'] = [{'net-id': default_network['id']}]
+
+        networks = []
+        for nic in kwargs.pop('nics', []):
+            net = {}
+            if 'net-id' in nic:
+                # TODO(mordred) Make sure this is in uuid format
+                net['uuid'] = nic.pop('net-id')
+                # If there's a net-id, ignore net-name
+                nic.pop('net-name', None)
+            elif 'net-name' in nic:
+                nic_net = self.get_network(nic['net-name'])
+                if not nic_net:
+                    raise OpenStackCloudException(
+                        "Requested network {net} could not be found.".format(
+                            net=nic['net-name']))
+                net['uuid'] = nic_net['id']
+            # TODO(mordred) Add support for tag if server supports microversion
+            # 2.32-2.36 or >= 2.42
+            for key in ('port', 'fixed_ip'):
+                if key in nic:
+                    net[key] = nic.pop(key)
+            if 'port-id' in nic:
+                net['port'] = nic.pop('port-id')
+            if nic:
+                raise OpenStackCloudException(
+                    "Additional unsupported keys given for server network"
+                    " creation: {keys}".format(keys=nic.keys()))
+            networks.append(net)
+        if networks:
+            kwargs['networks'] = networks
+
+        if image:
+            if isinstance(image, dict):
+                kwargs['imageRef'] = image['id']
+            else:
+                kwargs['imageRef'] = self.get_image(image).id
+        if flavor and isinstance(flavor, dict):
+            kwargs['flavorRef'] = flavor['id']
+        else:
+            kwargs['flavorRef'] = self.get_flavor(flavor, get_extra=False).id
+
+        if 
volumes is None: + volumes = [] + + # nova cli calls this boot_volume. Let's be the same + if root_volume and not boot_volume: + boot_volume = root_volume + + kwargs = self._get_boot_from_volume_kwargs( + image=image, boot_from_volume=boot_from_volume, + boot_volume=boot_volume, volume_size=str(volume_size), + terminate_volume=terminate_volume, + volumes=volumes, kwargs=kwargs) + + kwargs['name'] = name + endpoint = '/servers' + # TODO(mordred) We're only testing this in functional tests. We need + # to add unit tests for this too. + if 'block_device_mapping_v2' in kwargs: + endpoint = '/os-volumes_boot' + with _utils.shade_exceptions("Error in creating instance"): + data = self._compute_client.post( + endpoint, json={'server': kwargs}) + server = self._get_and_munchify('server', data) + admin_pass = server.get('adminPass') or kwargs.get('admin_pass') + if not wait: + # This is a direct get call to skip the list_servers + # cache which has absolutely no chance of containing the + # new server. + # Only do this if we're not going to wait for the server + # to complete booting, because the only reason we do it + # is to get a server record that is the return value from + # get/list rather than the return value of create. If we're + # going to do the wait loop below, this is a waste of a call + server = self.get_server_by_id(server.id) + if server.status == 'ERROR': + raise OpenStackCloudCreationException( + resource='server', resource_id=server.id) + + if wait: + server = self.wait_for_server( + server, + auto_ip=auto_ip, ips=ips, ip_pool=ip_pool, + reuse=reuse_ips, timeout=timeout, + nat_destination=nat_destination, + ) + + server.adminPass = admin_pass + return server + + def wait_for_server( + self, server, auto_ip=True, ips=None, ip_pool=None, + reuse=True, timeout=180, nat_destination=None): + """ + Wait for a server to reach ACTIVE status. + """ + server_id = server['id'] + timeout_message = "Timeout waiting for the server to come up." + start_time = time.time() + + # There is no point in iterating faster than the list_servers cache + for count in _utils._iterate_timeout( + timeout, + timeout_message, + # if _SERVER_AGE is 0 we still want to wait a bit + # to be friendly with the server. + wait=self._SERVER_AGE or 2): + try: + # Use the get_server call so that the list_servers + # cache can be leveraged + server = self.get_server(server_id) + except Exception: + continue + if not server: + continue + + # We have more work to do, but the details of that are + # hidden from the user. So, calculate remaining timeout + # and pass it down into the IP stack. 
+ remaining_timeout = timeout - int(time.time() - start_time) + if remaining_timeout <= 0: + raise OpenStackCloudTimeout(timeout_message) + + server = self.get_active_server( + server=server, reuse=reuse, + auto_ip=auto_ip, ips=ips, ip_pool=ip_pool, + wait=True, timeout=remaining_timeout, + nat_destination=nat_destination) + + if server is not None and server['status'] == 'ACTIVE': + return server + + def get_active_server( + self, server, auto_ip=True, ips=None, ip_pool=None, + reuse=True, wait=False, timeout=180, nat_destination=None): + + if server['status'] == 'ERROR': + if 'fault' in server and 'message' in server['fault']: + raise OpenStackCloudException( + "Error in creating the server: {reason}".format( + reason=server['fault']['message']), + extra_data=dict(server=server)) + + raise OpenStackCloudException( + "Error in creating the server", extra_data=dict(server=server)) + + if server['status'] == 'ACTIVE': + if 'addresses' in server and server['addresses']: + return self.add_ips_to_server( + server, auto_ip, ips, ip_pool, reuse=reuse, + nat_destination=nat_destination, + wait=wait, timeout=timeout) + + self.log.debug( + 'Server %(server)s reached ACTIVE state without' + ' being allocated an IP address.' + ' Deleting server.', {'server': server['id']}) + try: + self._delete_server( + server=server, wait=wait, timeout=timeout) + except Exception as e: + raise OpenStackCloudException( + 'Server reached ACTIVE state without being' + ' allocated an IP address AND then could not' + ' be deleted: {0}'.format(e), + extra_data=dict(server=server)) + raise OpenStackCloudException( + 'Server reached ACTIVE state without being' + ' allocated an IP address.', + extra_data=dict(server=server)) + return None + + def rebuild_server(self, server_id, image_id, admin_pass=None, + detailed=False, bare=False, + wait=False, timeout=180): + kwargs = {} + if image_id: + kwargs['imageRef'] = image_id + if admin_pass: + kwargs['adminPass'] = admin_pass + + data = self._compute_client.post( + '/servers/{server_id}/action'.format(server_id=server_id), + error_message="Error in rebuilding instance", + json={'rebuild': kwargs}) + server = self._get_and_munchify('server', data) + if not wait: + return self._expand_server( + self._normalize_server(server), bare=bare, detailed=detailed) + + admin_pass = server.get('adminPass') or admin_pass + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for server {0} to " + "rebuild.".format(server_id), + wait=self._SERVER_AGE): + try: + server = self.get_server(server_id, bare=True) + except Exception: + continue + if not server: + continue + + if server['status'] == 'ERROR': + raise OpenStackCloudException( + "Error in rebuilding the server", + extra_data=dict(server=server)) + + if server['status'] == 'ACTIVE': + server.adminPass = admin_pass + break + + return self._expand_server(server, detailed=detailed, bare=bare) + + def set_server_metadata(self, name_or_id, metadata): + """Set metadata in a server instance. + + :param str name_or_id: The name or ID of the server instance + to update. + :param dict metadata: A dictionary with the key=value pairs + to set in the server instance. It only updates the key=value + pairs provided. Existing ones will remain untouched. + + :raises: OpenStackCloudException on operation error. 
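A usage sketch for the metadata pair defined here (hypothetical ``cloud`` handle and keys):

.. code-block:: python

    # Merge two keys into the server's metadata, then remove one of them.
    cloud.set_server_metadata('my-server', {'env': 'prod', 'owner': 'ops'})
    cloud.delete_server_metadata('my-server', ['owner'])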
+ """ + server = self.get_server(name_or_id, bare=True) + if not server: + raise OpenStackCloudException( + 'Invalid Server {server}'.format(server=name_or_id)) + + self._compute_client.post( + '/servers/{server_id}/metadata'.format(server_id=server['id']), + json={'metadata': metadata}, + error_message='Error updating server metadata') + + def delete_server_metadata(self, name_or_id, metadata_keys): + """Delete metadata from a server instance. + + :param str name_or_id: The name or ID of the server instance + to update. + :param metadata_keys: A list with the keys to be deleted + from the server instance. + + :raises: OpenStackCloudException on operation error. + """ + server = self.get_server(name_or_id, bare=True) + if not server: + raise OpenStackCloudException( + 'Invalid Server {server}'.format(server=name_or_id)) + + for key in metadata_keys: + error_message = 'Error deleting metadata {key} on {server}'.format( + key=key, server=name_or_id) + self._compute_client.delete( + '/servers/{server_id}/metadata/{key}'.format( + server_id=server['id'], + key=key), + error_message=error_message) + + def delete_server( + self, name_or_id, wait=False, timeout=180, delete_ips=False, + delete_ip_retry=1): + """Delete a server instance. + + :param name_or_id: name or ID of the server to delete + :param bool wait: If true, waits for server to be deleted. + :param int timeout: Seconds to wait for server deletion. + :param bool delete_ips: If true, deletes any floating IPs + associated with the instance. + :param int delete_ip_retry: Number of times to retry deleting + any floating ips, should the first try be unsuccessful. + + :returns: True if delete succeeded, False otherwise if the + server does not exist. + + :raises: OpenStackCloudException on operation error. + """ + # If delete_ips is True, we need the server to not be bare. + server = self.get_server(name_or_id, bare=True) + if not server: + return False + + # This portion of the code is intentionally left as a separate + # private method in order to avoid an unnecessary API call to get + # a server we already have. + return self._delete_server( + server, wait=wait, timeout=timeout, delete_ips=delete_ips, + delete_ip_retry=delete_ip_retry) + + def _delete_server_floating_ips(self, server, delete_ip_retry): + # Does the server have floating ips in its + # addresses dict? If not, skip this. + server_floats = meta.find_nova_interfaces( + server['addresses'], ext_tag='floating') + for fip in server_floats: + try: + ip = self.get_floating_ip(id=None, filters={ + 'floating_ip_address': fip['addr']}) + except OpenStackCloudURINotFound: + # We're deleting. If it doesn't exist - awesome + # NOTE(mordred) If the cloud is a nova FIP cloud but + # floating_ip_source is set to neutron, this + # can lead to a FIP leak. + continue + if not ip: + continue + deleted = self.delete_floating_ip( + ip['id'], retry=delete_ip_retry) + if not deleted: + raise OpenStackCloudException( + "Tried to delete floating ip {floating_ip}" + " associated with server {id} but there was" + " an error deleting it. 
Not deleting server.".format( + floating_ip=ip['floating_ip_address'], + id=server['id'])) + + def _delete_server( + self, server, wait=False, timeout=180, delete_ips=False, + delete_ip_retry=1): + if not server: + return False + + if delete_ips and self._has_floating_ips(): + self._delete_server_floating_ips(server, delete_ip_retry) + + try: + self._compute_client.delete( + '/servers/{id}'.format(id=server['id']), + error_message="Error in deleting server") + except OpenStackCloudURINotFound: + return False + except Exception: + raise + + if not wait: + return True + + # If the server has volume attachments, or if it has booted + # from volume, deleting it will change volume state so we will + # need to invalidate the cache. Avoid the extra API call if + # caching is not enabled. + reset_volume_cache = False + if (self.cache_enabled + and self.has_service('volume') + and self.get_volumes(server)): + reset_volume_cache = True + + for count in _utils._iterate_timeout( + timeout, + "Timed out waiting for server to get deleted.", + # if _SERVER_AGE is 0 we still want to wait a bit + # to be friendly with the server. + wait=self._SERVER_AGE or 2): + with _utils.shade_exceptions("Error in deleting server"): + server = self.get_server(server['id'], bare=True) + if not server: + break + + if reset_volume_cache: + self.list_volumes.invalidate(self) + + # Reset the list servers cache time so that the next list server + # call gets a new list + self._servers_time = self._servers_time - self._SERVER_AGE + return True + + @_utils.valid_kwargs( + 'name', 'description') + def update_server(self, name_or_id, detailed=False, bare=False, **kwargs): + """Update a server. + + :param name_or_id: Name of the server to be updated. + :param detailed: Whether or not to add detailed additional information. + Defaults to False. + :param bare: Whether to skip adding any additional information to the + server record. Defaults to False, meaning the addresses + dict will be populated as needed from neutron. Setting + to True implies detailed = False. + :name: New name for the server + :description: New description for the server + + :returns: a dictionary representing the updated server. + + :raises: OpenStackCloudException on operation error. + """ + server = self.get_server(name_or_id=name_or_id, bare=True) + if server is None: + raise OpenStackCloudException( + "failed to find server '{server}'".format(server=name_or_id)) + + data = self._compute_client.put( + '/servers/{server_id}'.format(server_id=server['id']), + error_message="Error updating server {0}".format(name_or_id), + json={'server': kwargs}) + server = self._normalize_server( + self._get_and_munchify('server', data)) + return self._expand_server(server, bare=bare, detailed=detailed) + + def create_server_group(self, name, policies): + """Create a new server group. + + :param name: Name of the server group being created + :param policies: List of policies for the server group. + + :returns: a dict representing the new server group. + + :raises: OpenStackCloudException on operation error. + """ + data = self._compute_client.post( + '/os-server-groups', + json={ + 'server_group': { + 'name': name, + 'policies': policies}}, + error_message="Unable to create server group {name}".format( + name=name)) + return self._get_and_munchify('server_group', data) + + def delete_server_group(self, name_or_id): + """Delete a server group. 
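Server groups are usually created, referenced from a boot call, and deleted afterwards; a sketch (handle and names illustrative):

.. code-block:: python

    # An anti-affinity group spreads its members across hypervisors.
    group = cloud.create_server_group('my-group', ['anti-affinity'])
    server = cloud.create_server(
        'web-1', image='ubuntu-16.04', flavor='m1.small',
        scheduler_hints={'group': group['id']}, wait=True)
    cloud.delete_server_group(group['id'])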
+ + :param name_or_id: Name or ID of the server group to delete + + :returns: True if delete succeeded, False otherwise + + :raises: OpenStackCloudException on operation error. + """ + server_group = self.get_server_group(name_or_id) + if not server_group: + self.log.debug("Server group %s not found for deleting", + name_or_id) + return False + + self._compute_client.delete( + '/os-server-groups/{id}'.format(id=server_group['id']), + error_message="Error deleting server group {name}".format( + name=name_or_id)) + + return True + + def list_containers(self, full_listing=True): + """List containers. + + :param full_listing: Ignored. Present for backwards compat + + :returns: list of Munch of the container objects + + :raises: OpenStackCloudException on operation error. + """ + return self._object_store_client.get('/', params=dict(format='json')) + + def get_container(self, name, skip_cache=False): + if skip_cache or name not in self._container_cache: + try: + container = self._object_store_client.head(name) + self._container_cache[name] = container.headers + except OpenStackCloudHTTPError as e: + if e.response.status_code == 404: + return None + raise + return self._container_cache[name] + + def create_container(self, name, public=False): + container = self.get_container(name) + if container: + return container + self._object_store_client.put(name) + if public: + self.set_container_access(name, 'public') + return self.get_container(name, skip_cache=True) + + def delete_container(self, name): + try: + self._object_store_client.delete(name) + return True + except OpenStackCloudHTTPError as e: + if e.response.status_code == 404: + return False + if e.response.status_code == 409: + raise OpenStackCloudException( + 'Attempt to delete container {container} failed. The' + ' container is not empty. Please delete the objects' + ' inside it before deleting the container'.format( + container=name)) + raise + + def update_container(self, name, headers): + self._object_store_client.post(name, headers=headers) + + def set_container_access(self, name, access): + if access not in OBJECT_CONTAINER_ACLS: + raise OpenStackCloudException( + "Invalid container access specified: %s. Must be one of %s" + % (access, list(OBJECT_CONTAINER_ACLS.keys()))) + header = {'x-container-read': OBJECT_CONTAINER_ACLS[access]} + self.update_container(name, header) + + def get_container_access(self, name): + container = self.get_container(name, skip_cache=True) + if not container: + raise OpenStackCloudException("Container not found: %s" % name) + acl = container.get('x-container-read', '') + for key, value in OBJECT_CONTAINER_ACLS.items(): + # Convert to string for the comparison because swiftclient + # returns byte values as bytes sometimes and apparently == + # on bytes doesn't work like you'd think + if str(acl) == str(value): + return key + raise OpenStackCloudException( + "Could not determine container access for ACL: %s." 
% acl) + + def _get_file_hashes(self, filename): + file_key = "{filename}:{mtime}".format( + filename=filename, + mtime=os.stat(filename).st_mtime) + if file_key not in self._file_hash_cache: + self.log.debug( + 'Calculating hashes for %(filename)s', {'filename': filename}) + md5 = hashlib.md5() + sha256 = hashlib.sha256() + with open(filename, 'rb') as file_obj: + for chunk in iter(lambda: file_obj.read(8192), b''): + md5.update(chunk) + sha256.update(chunk) + self._file_hash_cache[file_key] = dict( + md5=md5.hexdigest(), sha256=sha256.hexdigest()) + self.log.debug( + "Image file %(filename)s md5:%(md5)s sha256:%(sha256)s", + {'filename': filename, + 'md5': self._file_hash_cache[file_key]['md5'], + 'sha256': self._file_hash_cache[file_key]['sha256']}) + return (self._file_hash_cache[file_key]['md5'], + self._file_hash_cache[file_key]['sha256']) + + @_utils.cache_on_arguments() + def get_object_capabilities(self): + # The endpoint in the catalog has version and project-id in it + # To get capabilities, we have to disassemble and reassemble the URL + # This logic is taken from swiftclient + endpoint = urllib.parse.urlparse( + self._object_store_client.get_endpoint()) + url = "{scheme}://{netloc}/info".format( + scheme=endpoint.scheme, netloc=endpoint.netloc) + + return self._object_store_client.get(url) + + def get_object_segment_size(self, segment_size): + """Get a segment size that will work given capabilities""" + if segment_size is None: + segment_size = DEFAULT_OBJECT_SEGMENT_SIZE + min_segment_size = 0 + try: + caps = self.get_object_capabilities() + except OpenStackCloudHTTPError as e: + if e.response.status_code in (404, 412): + # Clear the exception so that it doesn't linger + # and get reported as an Inner Exception later + _utils._exc_clear() + server_max_file_size = DEFAULT_MAX_FILE_SIZE + self.log.info( + "Swift capabilities not supported. " + "Using default max file size.") + else: + raise + else: + server_max_file_size = caps.get('swift', {}).get('max_file_size', + 0) + min_segment_size = caps.get('slo', {}).get('min_segment_size', 0) + + if segment_size > server_max_file_size: + return server_max_file_size + if segment_size < min_segment_size: + return min_segment_size + return segment_size + + def is_object_stale( + self, container, name, filename, file_md5=None, file_sha256=None): + + metadata = self.get_object_metadata(container, name) + if not metadata: + self.log.debug( + "swift stale check, no object: {container}/{name}".format( + container=container, name=name)) + return True + + if not (file_md5 or file_sha256): + (file_md5, file_sha256) = self._get_file_hashes(filename) + md5_key = metadata.get(OBJECT_MD5_KEY, '') + sha256_key = metadata.get(OBJECT_SHA256_KEY, '') + up_to_date = self._hashes_up_to_date( + md5=file_md5, sha256=file_sha256, + md5_key=md5_key, sha256_key=sha256_key) + + if not up_to_date: + self.log.debug( + "swift checksum mismatch: " + " %(filename)s!=%(container)s/%(name)s", + {'filename': filename, 'container': container, 'name': name}) + return True + + self.log.debug( + "swift object up to date: %(container)s/%(name)s", + {'container': container, 'name': name}) + return False + + def create_object( + self, container, name, filename=None, + md5=None, sha256=None, segment_size=None, + use_slo=True, metadata=None, + **headers): + """Create a file object + + :param container: The name of the container to store the file in. + This container will be created if it does not exist already. + :param name: Name for the object within the container. 
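A minimal upload sketch using the container and object calls defined here (hypothetical handle and paths):

.. code-block:: python

    cloud.create_container('backups')

    # Small files go up in one PUT; large ones are segmented
    # automatically based on the cluster's advertised limits.
    cloud.create_object(
        container='backups',
        name='db/dump.sql.gz',
        filename='/tmp/dump.sql.gz',
        metadata={'source': 'cron'})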
+        :param filename: The path to the local file whose contents will be
+            uploaded.
+        :param md5: A hexadecimal md5 of the file. (Optional) If it is known
+            and passed here, it saves repeating the expensive md5
+            computation. It is assumed to be accurate.
+        :param sha256: A hexadecimal sha256 of the file. (Optional) See md5.
+        :param segment_size: Break the uploaded object into segments of this
+            many bytes. (Optional) Shade will attempt to discover the maximum
+            value for this from the server if it is not specified, or will use
+            a reasonable default.
+        :param headers: These will be passed through to the object creation
+            API as HTTP Headers.
+        :param use_slo: If the object is large enough to need to be a Large
+            Object, use a static rather than dynamic object. Static Objects
+            will delete segment objects when the manifest object is deleted.
+            (optional, defaults to True)
+        :param metadata: This dict will be converted into headers that set
+            the metadata of the object.
+
+        :raises: ``OpenStackCloudException`` on operation error.
+        """
+        if not metadata:
+            metadata = {}
+
+        if not filename:
+            filename = name
+
+        # segment_size gets used as a step value in a range call, so needs
+        # to be an int
+        if segment_size:
+            segment_size = int(segment_size)
+        segment_size = self.get_object_segment_size(segment_size)
+        file_size = os.path.getsize(filename)
+
+        if not (md5 or sha256):
+            (md5, sha256) = self._get_file_hashes(filename)
+        headers[OBJECT_MD5_KEY] = md5 or ''
+        headers[OBJECT_SHA256_KEY] = sha256 or ''
+        for (k, v) in metadata.items():
+            headers['x-object-meta-' + k] = v
+
+        # On some clouds this is not necessary; on others it is. Create the
+        # container unconditionally to be safe.
+        self.create_container(container)
+
+        if self.is_object_stale(container, name, filename, md5, sha256):
+
+            endpoint = '{container}/{name}'.format(
+                container=container, name=name)
+            self.log.debug(
+                "swift uploading %(filename)s to %(endpoint)s",
+                {'filename': filename, 'endpoint': endpoint})
+
+            if file_size <= segment_size:
+                self._upload_object(endpoint, filename, headers)
+            else:
+                self._upload_large_object(
+                    endpoint, filename, headers,
+                    file_size, segment_size, use_slo)
+
+    def _upload_object(self, endpoint, filename, headers):
+        return self._object_store_client.put(
+            endpoint, headers=headers, data=open(filename, 'rb'))
+
+    def _get_file_segments(self, endpoint, filename, file_size, segment_size):
+        # Use an ordered dict here so that testing can replicate things
+        segments = collections.OrderedDict()
+        for (index, offset) in enumerate(range(0, file_size, segment_size)):
+            remaining = file_size - (index * segment_size)
+            segment = _utils.FileSegment(
+                filename, offset,
+                segment_size if segment_size < remaining else remaining)
+            name = '{endpoint}/{index:0>6}'.format(
+                endpoint=endpoint, index=index)
+            segments[name] = segment
+        return segments
+
+    def _object_name_from_url(self, url):
+        '''Get container_name/object_name from the full URL called.
+
+        Remove the Swift endpoint from the front of the URL, and remove
+        the leading / that is left behind.'''
+        endpoint = self._object_store_client.get_endpoint()
+        object_name = url.replace(endpoint, '')
+        if object_name.startswith('/'):
+            object_name = object_name[1:]
+        return object_name
+
+    def _add_etag_to_manifest(self, segment_results, manifest):
+        for result in segment_results:
+            if 'Etag' not in result.headers:
+                continue
+            name = self._object_name_from_url(result.url)
+            for entry in manifest:
+                if entry['path'] == '/{name}'.format(name=name):
+                    entry['etag'] = result.headers['Etag']
+
+    def _upload_large_object(
+            self, endpoint, filename,
+            headers, file_size, segment_size, use_slo):
+        # If the object is big, we need to break it up into segments that
+        # are no larger than segment_size, upload each of them individually
+        # and then upload a manifest object. The segments can be uploaded in
+        # parallel, so we'll use the async feature of the TaskManager.
+
+        segment_futures = []
+        segment_results = []
+        retry_results = []
+        retry_futures = []
+        manifest = []
+
+        # Get an OrderedDict with keys being the swift location for the
+        # segment, the value a FileSegment file-like object that is a
+        # slice of the data for the segment.
+        segments = self._get_file_segments(
+            endpoint, filename, file_size, segment_size)
+
+        # Schedule the segments for upload
+        for name, segment in segments.items():
+            # Async call to put - schedules execution and returns a future
+            segment_future = self._object_store_client.put(
+                name, headers=headers, data=segment, run_async=True)
+            segment_futures.append(segment_future)
+            # TODO(mordred) Collect etags from results to add to this manifest
+            # dict. Then sort the list of dicts by path.
+            manifest.append(dict(
+                path='/{name}'.format(name=name),
+                size_bytes=segment.length))
+
+        # Try once and collect failed results to retry
+        segment_results, retry_results = task_manager.wait_for_futures(
+            segment_futures, raise_on_error=False)
+
+        self._add_etag_to_manifest(segment_results, manifest)
+
+        for result in retry_results:
+            # Grab the FileSegment for the failed upload so we can retry
+            name = self._object_name_from_url(result.url)
+            segment = segments[name]
+            segment.seek(0)
+            # Async call to put - schedules execution and returns a future
+            segment_future = self._object_store_client.put(
+                name, headers=headers, data=segment, run_async=True)
+            # TODO(mordred) Collect etags from results to add to this manifest
+            # dict. Then sort the list of dicts by path.
+            retry_futures.append(segment_future)
+
+        # If any segments fail the second time, just throw the error
+        segment_results, retry_results = task_manager.wait_for_futures(
+            retry_futures, raise_on_error=True)
+
+        self._add_etag_to_manifest(segment_results, manifest)
+
+        if use_slo:
+            return self._finish_large_object_slo(endpoint, headers, manifest)
+        else:
+            return self._finish_large_object_dlo(endpoint, headers)
+
+    def _finish_large_object_slo(self, endpoint, headers, manifest):
+        # TODO(mordred) send an etag of the manifest, which is the md5sum
+        # of the concatenation of the etags of the results
+        headers = headers.copy()
+        return self._object_store_client.put(
+            endpoint,
+            params={'multipart-manifest': 'put'},
+            headers=headers, data=json.dumps(manifest))
+
+    def _finish_large_object_dlo(self, endpoint, headers):
+        headers = headers.copy()
+        headers['X-Object-Manifest'] = endpoint
+        return self._object_store_client.put(endpoint, headers=headers)
+
+    def update_object(self, container, name, metadata=None, **headers):
+        """Update the metadata of an object
+
+        :param container: The name of the container the object is in
+        :param name: Name for the object within the container.
+        :param metadata: This dict will be converted into headers that set
+            the metadata of the object.
+        :param headers: These will be passed through to the object update
+            API as HTTP Headers.
+
+        :raises: ``OpenStackCloudException`` on operation error.
+        """
+        if not metadata:
+            metadata = {}
+
+        metadata_headers = {}
+
+        for (k, v) in metadata.items():
+            metadata_headers['x-object-meta-' + k] = v
+
+        headers = dict(headers, **metadata_headers)
+
+        return self._object_store_client.post(
+            '{container}/{object}'.format(
+                container=container, object=name),
+            headers=headers)
+
+    def list_objects(self, container, full_listing=True):
+        """List objects.
+
+        :param container: Name of the container to list objects in.
+        :param full_listing: Ignored. Present for backwards compatibility.
+
+        :returns: A list of ``munch.Munch`` objects describing the objects.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        return self._object_store_client.get(
+            container, params=dict(format='json'))
+
+    def delete_object(self, container, name):
+        """Delete an object from a container.
+
+        :param string container: Name of the container holding the object.
+        :param string name: Name of the object to delete.
+
+        :returns: True if delete succeeded, False if the object was not found.
+
+        :raises: OpenStackCloudException on operation error.
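+
+        A minimal usage sketch (illustrative only: ``cloud`` is assumed to
+        be an already-initialized cloud object, and the container and
+        object names are hypothetical)::
+
+            if cloud.delete_object('backups', 'db-dump.tar.gz'):
+                print('object removed')
+            else:
+                print('object was not found')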
+        """
+        # TODO(mordred) DELETE for swift returns status in text/plain format
+        # like so:
+        #   Number Deleted: 15
+        #   Number Not Found: 0
+        #   Response Body:
+        #   Response Status: 200 OK
+        #   Errors:
+        # We should ultimately do something with that
+        try:
+            meta = self.get_object_metadata(container, name)
+            if not meta:
+                return False
+            params = {}
+            if meta.get('X-Static-Large-Object', None) == 'True':
+                params['multipart-manifest'] = 'delete'
+            self._object_store_client.delete(
+                '{container}/{object}'.format(
+                    container=container, object=name),
+                params=params)
+            return True
+        except OpenStackCloudHTTPError:
+            return False
+
+    def get_object_metadata(self, container, name):
+        try:
+            return self._object_store_client.head(
+                '{container}/{object}'.format(
+                    container=container, object=name)).headers
+        except OpenStackCloudException as e:
+            if e.response.status_code == 404:
+                return None
+            raise
+
+    def get_object(self, container, obj, query_string=None,
+                   resp_chunk_size=1024, outfile=None):
+        """Get the headers and body of an object
+
+        :param string container: name of the container.
+        :param string obj: name of the object.
+        :param string query_string: query args for uri.
+            (delimiter, prefix, etc.)
+        :param int resp_chunk_size: chunk size of data to read. Only used
+            if the results are being written to a file. (optional, defaults
+            to 1k)
+        :param outfile: Write the object to a file instead of returning the
+            contents. If this option is given, body in the return tuple will
+            be None. outfile can either be a file path given as a string, or
+            a file-like object.
+
+        :returns: Tuple (headers, body) of the object, or None if the object
+            is not found (404)
+        :raises: OpenStackCloudException on operation error.
+        """
+        # TODO(mordred) implement resp_chunk_size
+        try:
+            endpoint = '{container}/{object}'.format(
+                container=container, object=obj)
+            if query_string:
+                endpoint = '{endpoint}?{query_string}'.format(
+                    endpoint=endpoint, query_string=query_string)
+            response = self._object_store_client.get(
+                endpoint, stream=True)
+            response_headers = {
+                k.lower(): v for k, v in response.headers.items()}
+            if outfile:
+                if isinstance(outfile, six.string_types):
+                    outfile_handle = open(outfile, 'wb')
+                else:
+                    outfile_handle = outfile
+                for chunk in response.iter_content(
+                        resp_chunk_size, decode_unicode=False):
+                    outfile_handle.write(chunk)
+                if isinstance(outfile, six.string_types):
+                    outfile_handle.close()
+                else:
+                    outfile_handle.flush()
+                return (response_headers, None)
+            else:
+                return (response_headers, response.text)
+        except OpenStackCloudHTTPError as e:
+            if e.response.status_code == 404:
+                return None
+            raise
+
+    def create_subnet(self, network_name_or_id, cidr=None, ip_version=4,
+                      enable_dhcp=False, subnet_name=None, tenant_id=None,
+                      allocation_pools=None,
+                      gateway_ip=None, disable_gateway_ip=False,
+                      dns_nameservers=None, host_routes=None,
+                      ipv6_ra_mode=None, ipv6_address_mode=None,
+                      use_default_subnetpool=False):
+        """Create a subnet on a specified network.
+
+        :param string network_name_or_id:
+            The unique name or ID of the attached network. If a non-unique
+            name is supplied, an exception is raised.
+        :param string cidr:
+            The CIDR (for example, ``192.168.199.0/24``).
+        :param int ip_version:
+            The IP version, which is 4 or 6.
+        :param bool enable_dhcp:
+            Set to ``True`` if DHCP is enabled and ``False`` if disabled.
+            Default is ``False``.
+        :param string subnet_name:
+            The name of the subnet.
+        :param string tenant_id:
+            The ID of the tenant who owns the network.
+            Only administrative users can specify a tenant ID other than
+            their own.
+        :param allocation_pools:
+            A list of dictionaries of the start and end addresses for the
+            allocation pools. For example::
+
+              [
+                {
+                  "start": "192.168.199.2",
+                  "end": "192.168.199.254"
+                }
+              ]
+
+        :param string gateway_ip:
+            The gateway IP address. When you specify both allocation_pools and
+            gateway_ip, you must ensure that the gateway IP does not overlap
+            with the specified allocation pools.
+        :param bool disable_gateway_ip:
+            Set to ``True`` if the gateway IP address is disabled and
+            ``False`` if enabled. It is not allowed with gateway_ip.
+            Default is ``False``.
+        :param dns_nameservers:
+            A list of DNS name servers for the subnet. For example::
+
+              [ "8.8.8.7", "8.8.8.8" ]
+
+        :param host_routes:
+            A list of host route dictionaries for the subnet. For example::
+
+              [
+                {
+                  "destination": "0.0.0.0/0",
+                  "nexthop": "123.45.67.89"
+                },
+                {
+                  "destination": "192.168.0.0/24",
+                  "nexthop": "192.168.0.1"
+                }
+              ]
+
+        :param string ipv6_ra_mode:
+            IPv6 Router Advertisement mode. Valid values are:
+            'dhcpv6-stateful', 'dhcpv6-stateless', or 'slaac'.
+        :param string ipv6_address_mode:
+            IPv6 address mode. Valid values are: 'dhcpv6-stateful',
+            'dhcpv6-stateless', or 'slaac'.
+        :param bool use_default_subnetpool:
+            Use the default subnetpool for ``ip_version`` to obtain a CIDR. It
+            is required to pass ``None`` to the ``cidr`` argument when enabling
+            this option.
+
+        :returns: The new subnet object.
+        :raises: OpenStackCloudException on operation error.
+        """
+
+        network = self.get_network(network_name_or_id)
+        if not network:
+            raise OpenStackCloudException(
+                "Network %s not found." % network_name_or_id)
+
+        if disable_gateway_ip and gateway_ip:
+            raise OpenStackCloudException(
+                'arg:disable_gateway_ip is not allowed with arg:gateway_ip')
+
+        if not cidr and not use_default_subnetpool:
+            raise OpenStackCloudException(
+                'arg:cidr is required when a subnetpool is not used')
+
+        if cidr and use_default_subnetpool:
+            raise OpenStackCloudException(
+                'arg:cidr must be set to None when use_default_subnetpool == '
+                'True')
+
+        # Be friendly on ip_version and allow strings
+        if isinstance(ip_version, six.string_types):
+            try:
+                ip_version = int(ip_version)
+            except ValueError:
+                raise OpenStackCloudException('ip_version must be an integer')
+
+        # The body of the neutron message for the subnet we wish to create.
+        # This includes attributes that are required or have defaults.
+        subnet = {
+            'network_id': network['id'],
+            'ip_version': ip_version,
+            'enable_dhcp': enable_dhcp
+        }
+
+        # Add optional attributes to the message.
+        if cidr:
+            subnet['cidr'] = cidr
+        if subnet_name:
+            subnet['name'] = subnet_name
+        if tenant_id:
+            subnet['tenant_id'] = tenant_id
+        if allocation_pools:
+            subnet['allocation_pools'] = allocation_pools
+        if gateway_ip:
+            subnet['gateway_ip'] = gateway_ip
+        if disable_gateway_ip:
+            subnet['gateway_ip'] = None
+        if dns_nameservers:
+            subnet['dns_nameservers'] = dns_nameservers
+        if host_routes:
+            subnet['host_routes'] = host_routes
+        if ipv6_ra_mode:
+            subnet['ipv6_ra_mode'] = ipv6_ra_mode
+        if ipv6_address_mode:
+            subnet['ipv6_address_mode'] = ipv6_address_mode
+        if use_default_subnetpool:
+            subnet['use_default_subnetpool'] = True
+
+        data = self._network_client.post("/subnets.json",
+                                         json={"subnet": subnet})
+
+        return self._get_and_munchify('subnet', data)
+
+    def delete_subnet(self, name_or_id):
+        """Delete a subnet.
+
+        If a name, instead of a unique UUID, is supplied, it is possible
+        that we could find more than one matching subnet since names are
+        not required to be unique. An error will be raised in this case.
+
+        :param name_or_id: Name or ID of the subnet being deleted.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        subnet = self.get_subnet(name_or_id)
+        if not subnet:
+            self.log.debug("Subnet %s not found for deleting", name_or_id)
+            return False
+
+        self._network_client.delete(
+            "/subnets/{subnet_id}.json".format(subnet_id=subnet['id']))
+        return True
+
+    def update_subnet(self, name_or_id, subnet_name=None, enable_dhcp=None,
+                      gateway_ip=None, disable_gateway_ip=None,
+                      allocation_pools=None, dns_nameservers=None,
+                      host_routes=None):
+        """Update an existing subnet.
+
+        :param string name_or_id:
+            Name or ID of the subnet to update.
+        :param string subnet_name:
+            The new name of the subnet.
+        :param bool enable_dhcp:
+            Set to ``True`` if DHCP is enabled and ``False`` if disabled.
+        :param string gateway_ip:
+            The gateway IP address. When you specify both allocation_pools and
+            gateway_ip, you must ensure that the gateway IP does not overlap
+            with the specified allocation pools.
+        :param bool disable_gateway_ip:
+            Set to ``True`` if the gateway IP address is disabled and
+            ``False`` if enabled. It is not allowed with gateway_ip.
+            Default is ``False``.
+        :param allocation_pools:
+            A list of dictionaries of the start and end addresses for the
+            allocation pools. For example::
+
+              [
+                {
+                  "start": "192.168.199.2",
+                  "end": "192.168.199.254"
+                }
+              ]
+
+        :param dns_nameservers:
+            A list of DNS name servers for the subnet. For example::
+
+              [ "8.8.8.7", "8.8.8.8" ]
+
+        :param host_routes:
+            A list of host route dictionaries for the subnet. For example::
+
+              [
+                {
+                  "destination": "0.0.0.0/0",
+                  "nexthop": "123.45.67.89"
+                },
+                {
+                  "destination": "192.168.0.0/24",
+                  "nexthop": "192.168.0.1"
+                }
+              ]
+
+        :returns: The updated subnet object.
+        :raises: OpenStackCloudException on operation error.
+        """
+        subnet = {}
+        if subnet_name:
+            subnet['name'] = subnet_name
+        if enable_dhcp is not None:
+            subnet['enable_dhcp'] = enable_dhcp
+        if gateway_ip:
+            subnet['gateway_ip'] = gateway_ip
+        if disable_gateway_ip:
+            subnet['gateway_ip'] = None
+        if allocation_pools:
+            subnet['allocation_pools'] = allocation_pools
+        if dns_nameservers:
+            subnet['dns_nameservers'] = dns_nameservers
+        if host_routes:
+            subnet['host_routes'] = host_routes
+
+        if not subnet:
+            self.log.debug("No subnet data to update")
+            return
+
+        if disable_gateway_ip and gateway_ip:
+            raise OpenStackCloudException(
+                'arg:disable_gateway_ip is not allowed with arg:gateway_ip')
+
+        curr_subnet = self.get_subnet(name_or_id)
+        if not curr_subnet:
+            raise OpenStackCloudException(
+                "Subnet %s not found." % name_or_id)
+
+        data = self._network_client.put(
+            "/subnets/{subnet_id}.json".format(subnet_id=curr_subnet['id']),
+            json={"subnet": subnet})
+        return self._get_and_munchify('subnet', data)
+
+    @_utils.valid_kwargs('name', 'admin_state_up', 'mac_address', 'fixed_ips',
+                         'subnet_id', 'ip_address', 'security_groups',
+                         'allowed_address_pairs', 'extra_dhcp_opts',
+                         'device_owner', 'device_id')
+    def create_port(self, network_id, **kwargs):
+        """Create a port
+
+        :param network_id: The ID of the network. (Required)
+        :param name: A symbolic name for the port.
(Optional) + :param admin_state_up: The administrative status of the port, + which is up (true, default) or down (false). (Optional) + :param mac_address: The MAC address. (Optional) + :param fixed_ips: List of ip_addresses and subnet_ids. See subnet_id + and ip_address. (Optional) + For example:: + + [ + { + "ip_address": "10.29.29.13", + "subnet_id": "a78484c4-c380-4b47-85aa-21c51a2d8cbd" + }, ... + ] + :param subnet_id: If you specify only a subnet ID, OpenStack Networking + allocates an available IP from that subnet to the port. (Optional) + If you specify both a subnet ID and an IP address, OpenStack + Networking tries to allocate the specified address to the port. + :param ip_address: If you specify both a subnet ID and an IP address, + OpenStack Networking tries to allocate the specified address to + the port. + :param security_groups: List of security group UUIDs. (Optional) + :param allowed_address_pairs: Allowed address pairs list (Optional) + For example:: + + [ + { + "ip_address": "23.23.23.1", + "mac_address": "fa:16:3e:c4:cd:3f" + }, ... + ] + :param extra_dhcp_opts: Extra DHCP options. (Optional). + For example:: + + [ + { + "opt_name": "opt name1", + "opt_value": "value1" + }, ... + ] + :param device_owner: The ID of the entity that uses this port. + For example, a DHCP agent. (Optional) + :param device_id: The ID of the device that uses this port. + For example, a virtual server. (Optional) + + :returns: a ``munch.Munch`` describing the created port. + + :raises: ``OpenStackCloudException`` on operation error. + """ + kwargs['network_id'] = network_id + + data = self._network_client.post( + "/ports.json", json={'port': kwargs}, + error_message="Error creating port for network {0}".format( + network_id)) + return self._get_and_munchify('port', data) + + @_utils.valid_kwargs('name', 'admin_state_up', 'fixed_ips', + 'security_groups', 'allowed_address_pairs', + 'extra_dhcp_opts', 'device_owner', 'device_id') + def update_port(self, name_or_id, **kwargs): + """Update a port + + Note: to unset an attribute use None value. To leave an attribute + untouched just omit it. + + :param name_or_id: name or ID of the port to update. (Required) + :param name: A symbolic name for the port. (Optional) + :param admin_state_up: The administrative status of the port, + which is up (true) or down (false). (Optional) + :param fixed_ips: List of ip_addresses and subnet_ids. (Optional) + If you specify only a subnet ID, OpenStack Networking allocates + an available IP from that subnet to the port. + If you specify both a subnet ID and an IP address, OpenStack + Networking tries to allocate the specified address to the port. + For example:: + + [ + { + "ip_address": "10.29.29.13", + "subnet_id": "a78484c4-c380-4b47-85aa-21c51a2d8cbd" + }, ... + ] + :param security_groups: List of security group UUIDs. (Optional) + :param allowed_address_pairs: Allowed address pairs list (Optional) + For example:: + + [ + { + "ip_address": "23.23.23.1", + "mac_address": "fa:16:3e:c4:cd:3f" + }, ... + ] + :param extra_dhcp_opts: Extra DHCP options. (Optional). + For example:: + + [ + { + "opt_name": "opt name1", + "opt_value": "value1" + }, ... + ] + :param device_owner: The ID of the entity that uses this port. + For example, a DHCP agent. (Optional) + :param device_id: The ID of the resource this port is attached to. + + :returns: a ``munch.Munch`` describing the updated port. + + :raises: OpenStackCloudException on operation error. 
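+
+        An illustrative sketch (the port name and the ``cloud`` object are
+        assumptions for the example, not values defined by this module)::
+
+            # Rename the port and take it administratively down
+            port = cloud.update_port(
+                'my-port',
+                name='my-port-renamed',
+                admin_state_up=False)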
+ """ + port = self.get_port(name_or_id=name_or_id) + if port is None: + raise OpenStackCloudException( + "failed to find port '{port}'".format(port=name_or_id)) + + data = self._network_client.put( + "/ports/{port_id}.json".format(port_id=port['id']), + json={"port": kwargs}, + error_message="Error updating port {0}".format(name_or_id)) + return self._get_and_munchify('port', data) + + def delete_port(self, name_or_id): + """Delete a port + + :param name_or_id: ID or name of the port to delete. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. + """ + port = self.get_port(name_or_id=name_or_id) + if port is None: + self.log.debug("Port %s not found for deleting", name_or_id) + return False + + self._network_client.delete( + "/ports/{port_id}.json".format(port_id=port['id']), + error_message="Error deleting port {0}".format(name_or_id)) + return True + + def create_security_group(self, name, description, project_id=None): + """Create a new security group + + :param string name: A name for the security group. + :param string description: Describes the security group. + :param string project_id: + Specify the project ID this security group will be created + on (admin-only). + + :returns: A ``munch.Munch`` representing the new security group. + + :raises: OpenStackCloudException on operation error. + :raises: OpenStackCloudUnavailableFeature if security groups are + not supported on this cloud. + """ + + # Security groups not supported + if not self._has_secgroups(): + raise OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + data = [] + security_group_json = { + 'security_group': { + 'name': name, 'description': description + }} + if project_id is not None: + security_group_json['security_group']['tenant_id'] = project_id + if self._use_neutron_secgroups(): + data = self._network_client.post( + '/security-groups.json', + json=security_group_json, + error_message="Error creating security group {0}".format(name)) + else: + data = self._compute_client.post( + '/os-security-groups', json=security_group_json) + return self._normalize_secgroup( + self._get_and_munchify('security_group', data)) + + def delete_security_group(self, name_or_id): + """Delete a security group + + :param string name_or_id: The name or unique ID of the security group. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. + :raises: OpenStackCloudUnavailableFeature if security groups are + not supported on this cloud. + """ + # Security groups not supported + if not self._has_secgroups(): + raise OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + # TODO(mordred): Let's come back and stop doing a GET before we do + # the delete. + secgroup = self.get_security_group(name_or_id) + if secgroup is None: + self.log.debug('Security group %s not found for deleting', + name_or_id) + return False + + if self._use_neutron_secgroups(): + self._network_client.delete( + '/security-groups/{sg_id}.json'.format(sg_id=secgroup['id']), + error_message="Error deleting security group {0}".format( + name_or_id) + ) + return True + + else: + self._compute_client.delete( + '/os-security-groups/{id}'.format(id=secgroup['id'])) + return True + + @_utils.valid_kwargs('name', 'description') + def update_security_group(self, name_or_id, **kwargs): + """Update a security group + + :param string name_or_id: Name or ID of the security group to update. 
+        :param string name: New name for the security group.
+        :param string description: New description for the security group.
+
+        :returns: A ``munch.Munch`` describing the updated security group.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        # Security groups not supported
+        if not self._has_secgroups():
+            raise OpenStackCloudUnavailableFeature(
+                "Unavailable feature: security groups"
+            )
+
+        group = self.get_security_group(name_or_id)
+
+        if group is None:
+            raise OpenStackCloudException(
+                "Security group %s not found." % name_or_id)
+
+        if self._use_neutron_secgroups():
+            data = self._network_client.put(
+                '/security-groups/{sg_id}.json'.format(sg_id=group['id']),
+                json={'security_group': kwargs},
+                error_message="Error updating security group {0}".format(
+                    name_or_id))
+        else:
+            for key in ('name', 'description'):
+                kwargs.setdefault(key, group[key])
+            data = self._compute_client.put(
+                '/os-security-groups/{id}'.format(id=group['id']),
+                json={'security_group': kwargs})
+        return self._normalize_secgroup(
+            self._get_and_munchify('security_group', data))
+
+    def create_security_group_rule(self,
+                                   secgroup_name_or_id,
+                                   port_range_min=None,
+                                   port_range_max=None,
+                                   protocol=None,
+                                   remote_ip_prefix=None,
+                                   remote_group_id=None,
+                                   direction='ingress',
+                                   ethertype='IPv4',
+                                   project_id=None):
+        """Create a new security group rule
+
+        :param string secgroup_name_or_id:
+            The security group name or ID to associate with this security
+            group rule. If a non-unique group name is given, an exception
+            is raised.
+        :param int port_range_min:
+            The minimum port number in the range that is matched by the
+            security group rule. If the protocol is TCP or UDP, this value
+            must be less than or equal to the port_range_max attribute value.
+            If nova is used by the cloud provider for security groups, then
+            a value of None will be transformed to -1.
+        :param int port_range_max:
+            The maximum port number in the range that is matched by the
+            security group rule. The port_range_min attribute constrains the
+            port_range_max attribute. If nova is used by the cloud provider
+            for security groups, then a value of None will be transformed
+            to -1.
+        :param string protocol:
+            The protocol that is matched by the security group rule. Valid
+            values are None, tcp, udp, and icmp.
+        :param string remote_ip_prefix:
+            The remote IP prefix to be associated with this security group
+            rule. This attribute matches the specified IP prefix as the
+            source IP address of the IP packet.
+        :param string remote_group_id:
+            The remote group ID to be associated with this security group
+            rule.
+        :param string direction:
+            Ingress or egress: The direction in which the security group
+            rule is applied. For a compute instance, an ingress security
+            group rule is applied to incoming (ingress) traffic for that
+            instance. An egress rule is applied to traffic leaving the
+            instance.
+        :param string ethertype:
+            Must be IPv4 or IPv6, and addresses represented in CIDR must
+            match the ingress or egress rules.
+        :param string project_id:
+            Specify the project ID this security group rule will be created
+            in (admin-only).
+
+        :returns: A ``munch.Munch`` representing the new security group rule.
+
+        :raises: OpenStackCloudException on operation error.
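+
+        For example, a rule allowing inbound SSH from anywhere could be
+        created as follows (an illustrative sketch; the group name and the
+        ``cloud`` object are assumptions for the example)::
+
+            cloud.create_security_group_rule(
+                'my-secgroup',
+                port_range_min=22,
+                port_range_max=22,
+                protocol='tcp',
+                remote_ip_prefix='0.0.0.0/0',
+                direction='ingress')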
+ """ + # Security groups not supported + if not self._has_secgroups(): + raise OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + secgroup = self.get_security_group(secgroup_name_or_id) + if not secgroup: + raise OpenStackCloudException( + "Security group %s not found." % secgroup_name_or_id) + + if self._use_neutron_secgroups(): + # NOTE: Nova accepts -1 port numbers, but Neutron accepts None + # as the equivalent value. + rule_def = { + 'security_group_id': secgroup['id'], + 'port_range_min': + None if port_range_min == -1 else port_range_min, + 'port_range_max': + None if port_range_max == -1 else port_range_max, + 'protocol': protocol, + 'remote_ip_prefix': remote_ip_prefix, + 'remote_group_id': remote_group_id, + 'direction': direction, + 'ethertype': ethertype + } + if project_id is not None: + rule_def['tenant_id'] = project_id + + data = self._network_client.post( + '/security-group-rules.json', + json={'security_group_rule': rule_def}, + error_message="Error creating security group rule") + else: + # NOTE: Neutron accepts None for protocol. Nova does not. + if protocol is None: + raise OpenStackCloudException('Protocol must be specified') + + if direction == 'egress': + self.log.debug( + 'Rule creation failed: Nova does not support egress rules' + ) + raise OpenStackCloudException('No support for egress rules') + + # NOTE: Neutron accepts None for ports, but Nova requires -1 + # as the equivalent value for ICMP. + # + # For TCP/UDP, if both are None, Neutron allows this and Nova + # represents this as all ports (1-65535). Nova does not accept + # None values, so to hide this difference, we will automatically + # convert to the full port range. If only a single port value is + # specified, it will error as normal. + if protocol == 'icmp': + if port_range_min is None: + port_range_min = -1 + if port_range_max is None: + port_range_max = -1 + elif protocol in ['tcp', 'udp']: + if port_range_min is None and port_range_max is None: + port_range_min = 1 + port_range_max = 65535 + + security_group_rule_dict = dict(security_group_rule=dict( + parent_group_id=secgroup['id'], + ip_protocol=protocol, + from_port=port_range_min, + to_port=port_range_max, + cidr=remote_ip_prefix, + group_id=remote_group_id + )) + if project_id is not None: + security_group_rule_dict[ + 'security_group_rule']['tenant_id'] = project_id + data = self._compute_client.post( + '/os-security-group-rules', json=security_group_rule_dict + ) + return self._normalize_secgroup_rule( + self._get_and_munchify('security_group_rule', data)) + + def delete_security_group_rule(self, rule_id): + """Delete a security group rule + + :param string rule_id: The unique ID of the security group rule. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. + :raises: OpenStackCloudUnavailableFeature if security groups are + not supported on this cloud. 
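+
+        Example (illustrative only; the rule ID shown is a hypothetical
+        placeholder)::
+
+            if not cloud.delete_security_group_rule(
+                    '1f990264-5dae-4c0c-ae3d-bfa7b0970123'):
+                print('rule was already gone')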
+        """
+        # Security groups not supported
+        if not self._has_secgroups():
+            raise OpenStackCloudUnavailableFeature(
+                "Unavailable feature: security groups"
+            )
+
+        if self._use_neutron_secgroups():
+            try:
+                self._network_client.delete(
+                    '/security-group-rules/{sg_id}.json'.format(sg_id=rule_id),
+                    error_message="Error deleting security group rule "
+                                  "{0}".format(rule_id))
+            except OpenStackCloudResourceNotFound:
+                return False
+            return True
+
+        else:
+            self._compute_client.delete(
+                '/os-security-group-rules/{id}'.format(id=rule_id))
+            return True
+
+    def list_zones(self):
+        """List all available zones.
+
+        :returns: A list of zone dicts.
+
+        """
+        data = self._dns_client.get(
+            "/zones",
+            error_message="Error fetching zones list")
+        return self._get_and_munchify('zones', data)
+
+    def get_zone(self, name_or_id, filters=None):
+        """Get a zone by name or ID.
+
+        :param name_or_id: Name or ID of the zone
+        :param filters:
+            A dictionary of meta data to use for further filtering
+            OR
+            A string containing a jmespath expression for further filtering.
+            Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"
+
+        :returns: A zone dict or None if no matching zone is found.
+
+        """
+        return _utils._get_entity(self, 'zone', name_or_id, filters)
+
+    def search_zones(self, name_or_id=None, filters=None):
+        zones = self.list_zones()
+        return _utils._filter_list(zones, name_or_id, filters)
+
+    def create_zone(self, name, zone_type=None, email=None, description=None,
+                    ttl=None, masters=None):
+        """Create a new zone.
+
+        :param name: Name of the zone being created.
+        :param zone_type: Type of the zone (primary/secondary)
+        :param email: Email of the zone owner (only
+                      applies if zone_type is primary)
+        :param description: Description of the zone
+        :param ttl: TTL (Time to live) value in seconds
+        :param masters: Master nameservers (only applies
+                        if zone_type is secondary)
+
+        :returns: a dict representing the created zone.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+
+        # We capitalize in case the user passes the type in lowercase, as
+        # the designate call expects PRIMARY/SECONDARY
+        if zone_type is not None:
+            zone_type = zone_type.upper()
+            if zone_type not in ('PRIMARY', 'SECONDARY'):
+                raise OpenStackCloudException(
+                    "Invalid type %s, valid choices are PRIMARY or SECONDARY" %
+                    zone_type)
+
+        zone = {
+            "name": name,
+            "email": email,
+            "description": description,
+        }
+        if ttl is not None:
+            zone["ttl"] = ttl
+
+        if zone_type is not None:
+            zone["type"] = zone_type
+
+        if masters is not None:
+            zone["masters"] = masters
+
+        data = self._dns_client.post(
+            "/zones", json=zone,
+            error_message="Unable to create zone {name}".format(name=name))
+        return self._get_and_munchify(key=None, data=data)
+
+    @_utils.valid_kwargs('email', 'description', 'ttl', 'masters')
+    def update_zone(self, name_or_id, **kwargs):
+        """Update a zone.
+
+        :param name_or_id: Name or ID of the zone being updated.
+        :param email: Email of the zone owner (only
+                      applies if zone_type is primary)
+        :param description: Description of the zone
+        :param ttl: TTL (Time to live) value in seconds
+        :param masters: Master nameservers (only applies
+                        if zone_type is secondary)
+
+        :returns: a dict representing the updated zone.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        zone = self.get_zone(name_or_id)
+        if not zone:
+            raise OpenStackCloudException(
+                "Zone %s not found." % name_or_id)
+
+        data = self._dns_client.patch(
+            "/zones/{zone_id}".format(zone_id=zone['id']), json=kwargs,
+            error_message="Error updating zone {0}".format(name_or_id))
+        return self._get_and_munchify(key=None, data=data)
+
+    def delete_zone(self, name_or_id):
+        """Delete a zone.
+
+        :param name_or_id: Name or ID of the zone being deleted.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+
+        zone = self.get_zone(name_or_id)
+        if zone is None:
+            self.log.debug("Zone %s not found for deleting", name_or_id)
+            return False
+
+        self._dns_client.delete(
+            "/zones/{zone_id}".format(zone_id=zone['id']),
+            error_message="Error deleting zone {0}".format(name_or_id))
+
+        return True
+
+    def list_recordsets(self, zone):
+        """List all available recordsets.
+
+        :param zone: Name or ID of the zone managing the recordset
+
+        :returns: A list of recordsets.
+
+        """
+        return self._dns_client.get(
+            "/zones/{zone_id}/recordsets".format(zone_id=zone),
+            error_message="Error fetching recordsets list")
+
+    def get_recordset(self, zone, name_or_id):
+        """Get a recordset by name or ID.
+
+        :param zone: Name or ID of the zone managing the recordset
+        :param name_or_id: Name or ID of the recordset
+
+        :returns: A recordset dict or None if no matching recordset is
+            found.
+
+        """
+        try:
+            return self._dns_client.get(
+                "/zones/{zone_id}/recordsets/{recordset_id}".format(
+                    zone_id=zone, recordset_id=name_or_id),
+                error_message="Error fetching recordset")
+        except Exception:
+            return None
+
+    def search_recordsets(self, zone, name_or_id=None, filters=None):
+        recordsets = self.list_recordsets(zone=zone)
+        return _utils._filter_list(recordsets, name_or_id, filters)
+
+    def create_recordset(self, zone, name, recordset_type, records,
+                         description=None, ttl=None):
+        """Create a recordset.
+
+        :param zone: Name or ID of the zone managing the recordset
+        :param name: Name of the recordset
+        :param recordset_type: Type of the recordset
+        :param records: List of the recordset definitions
+        :param description: Description of the recordset
+        :param ttl: TTL value of the recordset
+
+        :returns: a dict representing the created recordset.
+
+        :raises: OpenStackCloudException on operation error.
+
+        """
+        if self.get_zone(zone) is None:
+            raise OpenStackCloudException(
+                "Zone %s not found." % zone)
+
+        # We capitalize the type in case the user sends it in lowercase
+        recordset_type = recordset_type.upper()
+
+        body = {
+            'name': name,
+            'type': recordset_type,
+            'records': records
+        }
+
+        if description:
+            body['description'] = description
+
+        if ttl:
+            body['ttl'] = ttl
+
+        return self._dns_client.post(
+            "/zones/{zone_id}/recordsets".format(zone_id=zone),
+            json=body,
+            error_message="Error creating recordset {name}".format(name=name))
+
+    @_utils.valid_kwargs('description', 'ttl', 'records')
+    def update_recordset(self, zone, name_or_id, **kwargs):
+        """Update a recordset.
+
+        :param zone: Name or ID of the zone managing the recordset
+        :param name_or_id: Name or ID of the recordset being updated.
+        :param records: List of the recordset definitions
+        :param description: Description of the recordset
+        :param ttl: TTL (Time to live) value in seconds of the recordset
+
+        :returns: a dict representing the updated recordset.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        zone_obj = self.get_zone(zone)
+        if zone_obj is None:
+            raise OpenStackCloudException(
+                "Zone %s not found." % zone)
+
+        recordset_obj = self.get_recordset(zone, name_or_id)
+        if recordset_obj is None:
+            raise OpenStackCloudException(
+                "Recordset %s not found." % name_or_id)
+
+        new_recordset = self._dns_client.put(
+            "/zones/{zone_id}/recordsets/{recordset_id}".format(
+                zone_id=zone_obj['id'], recordset_id=name_or_id), json=kwargs,
+            error_message="Error updating recordset {0}".format(name_or_id))
+
+        return new_recordset
+
+    def delete_recordset(self, zone, name_or_id):
+        """Delete a recordset.
+
+        :param zone: Name or ID of the zone managing the recordset.
+        :param name_or_id: Name or ID of the recordset being deleted.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+
+        zone_obj = self.get_zone(zone)
+        if zone_obj is None:
+            self.log.debug("Zone %s not found for deleting", zone)
+            return False
+
+        recordset = self.get_recordset(zone_obj['id'], name_or_id)
+        if recordset is None:
+            self.log.debug("Recordset %s not found for deleting", name_or_id)
+            return False
+
+        self._dns_client.delete(
+            "/zones/{zone_id}/recordsets/{recordset_id}".format(
+                zone_id=zone_obj['id'], recordset_id=name_or_id),
+            error_message="Error deleting recordset {0}".format(name_or_id))
+
+        return True
+
+    @_utils.cache_on_arguments()
+    def list_cluster_templates(self, detail=False):
+        """List cluster templates.
+
+        :param bool detail: Ignored. Included for backwards compat.
+            ClusterTemplates are always returned with full details.
+
+        :returns: a list of dicts containing the cluster template details.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the OpenStack API call.
+        """
+        with _utils.shade_exceptions("Error fetching cluster template list"):
+            data = self._container_infra_client.get(
+                '/baymodels/detail')
+        return self._normalize_cluster_templates(
+            self._get_and_munchify('baymodels', data))
+    list_baymodels = list_cluster_templates
+
+    def search_cluster_templates(
+            self, name_or_id=None, filters=None, detail=False):
+        """Search cluster templates.
+
+        :param name_or_id: cluster template name or ID.
+        :param filters: a dict containing additional filters to use.
+        :param detail: a boolean to control if we need summarized or
+            detailed output.
+
+        :returns: a list of dicts containing the cluster templates
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the OpenStack API call.
+        """
+        cluster_templates = self.list_cluster_templates(detail=detail)
+        return _utils._filter_list(
+            cluster_templates, name_or_id, filters)
+    search_baymodels = search_cluster_templates
+
+    def get_cluster_template(self, name_or_id, filters=None, detail=False):
+        """Get a cluster template by name or ID.
+
+        :param name_or_id: Name or ID of the cluster template.
+        :param filters:
+            A dictionary of meta data to use for further filtering. Elements
+            of this dictionary may, themselves, be dictionaries. Example::
+
+                {
+                  'last_name': 'Smith',
+                  'other': {
+                      'gender': 'Female'
+                  }
+                }
+
+            OR
+            A string containing a jmespath expression for further filtering.
+            Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]"
+
+        :returns: A cluster template dict or None if no matching
+            cluster template is found.
+        """
+        return _utils._get_entity(self, 'cluster_template', name_or_id,
+                                  filters=filters, detail=detail)
+    get_baymodel = get_cluster_template
+
+    def create_cluster_template(
+            self, name, image_id=None, keypair_id=None, coe=None, **kwargs):
+        """Create a cluster template.
+
+        :param string name: Name of the cluster template.
+ :param string image_id: Name or ID of the image to use. + :param string keypair_id: Name or ID of the keypair to use. + :param string coe: Name of the coe for the cluster template. + + Other arguments will be passed in kwargs. + + :returns: a dict containing the cluster template description + + :raises: ``OpenStackCloudException`` if something goes wrong during + the OpenStack API call + """ + error_message = ("Error creating cluster template of name" + " {cluster_template_name}".format( + cluster_template_name=name)) + with _utils.shade_exceptions(error_message): + body = kwargs.copy() + body['name'] = name + body['image_id'] = image_id + body['keypair_id'] = keypair_id + body['coe'] = coe + + cluster_template = self._container_infra_client.post( + '/baymodels', json=body) + + self.list_cluster_templates.invalidate(self) + return cluster_template + create_baymodel = create_cluster_template + + def delete_cluster_template(self, name_or_id): + """Delete a cluster template. + + :param name_or_id: Name or unique ID of the cluster template. + :returns: True if the delete succeeded, False if the + cluster template was not found. + + :raises: OpenStackCloudException on operation error. + """ + + cluster_template = self.get_cluster_template(name_or_id) + + if not cluster_template: + self.log.debug( + "Cluster template %(name_or_id)s does not exist", + {'name_or_id': name_or_id}, + exc_info=True) + return False + + with _utils.shade_exceptions("Error in deleting cluster template"): + self._container_infra_client.delete( + '/baymodels/{id}'.format(id=cluster_template['id'])) + self.list_cluster_templates.invalidate(self) + + return True + delete_baymodel = delete_cluster_template + + @_utils.valid_kwargs('name', 'image_id', 'flavor_id', 'master_flavor_id', + 'keypair_id', 'external_network_id', 'fixed_network', + 'dns_nameserver', 'docker_volume_size', 'labels', + 'coe', 'http_proxy', 'https_proxy', 'no_proxy', + 'network_driver', 'tls_disabled', 'public', + 'registry_enabled', 'volume_driver') + def update_cluster_template(self, name_or_id, operation, **kwargs): + """Update a cluster template. + + :param name_or_id: Name or ID of the cluster template being updated. + :param operation: Operation to perform - add, remove, replace. + + Other arguments will be passed with kwargs. + + :returns: a dict representing the updated cluster template. + + :raises: OpenStackCloudException on operation error. + """ + self.list_cluster_templates.invalidate(self) + cluster_template = self.get_cluster_template(name_or_id) + if not cluster_template: + raise OpenStackCloudException( + "Cluster template %s not found." 
% name_or_id) + + if operation not in ['add', 'replace', 'remove']: + raise TypeError( + "%s operation not in 'add', 'replace', 'remove'" % operation) + + patches = _utils.generate_patches_from_kwargs(operation, **kwargs) + # No need to fire an API call if there is an empty patch + if not patches: + return cluster_template + + with _utils.shade_exceptions( + "Error updating cluster template {0}".format(name_or_id)): + self._container_infra_client.patch( + '/baymodels/{id}'.format(id=cluster_template['id']), + json=patches) + + new_cluster_template = self.get_cluster_template(name_or_id) + return new_cluster_template + update_baymodel = update_cluster_template diff --git a/openstack/cloud/operatorcloud.py b/openstack/cloud/operatorcloud.py new file mode 100644 index 000000000..0ff6cf302 --- /dev/null +++ b/openstack/cloud/operatorcloud.py @@ -0,0 +1,2419 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import iso8601 +import jsonpatch +import munch + +from openstack.cloud.exc import * # noqa +from openstack.cloud import openstackcloud +from openstack.cloud import _tasks +from openstack.cloud import _utils + + +class OperatorCloud(openstackcloud.OpenStackCloud): + """Represent a privileged/operator connection to an OpenStack Cloud. + + `OperatorCloud` is the entry point for all admin operations, regardless + of which OpenStack service those operations are for. + + See the :class:`OpenStackCloud` class for a description of most options. + """ + + # TODO(shade) Finish porting ironic to REST/sdk + ironic_client = None + + def list_nics(self): + with _utils.shade_exceptions("Error fetching machine port list"): + return self.manager.submit_task(_tasks.MachinePortList()) + + def list_nics_for_machine(self, uuid): + with _utils.shade_exceptions( + "Error fetching port list for node {node_id}".format( + node_id=uuid)): + return self.manager.submit_task( + _tasks.MachineNodePortList(node_id=uuid)) + + def get_nic_by_mac(self, mac): + """Get Machine by MAC address""" + # TODO(shade) Finish porting ironic to REST/sdk + # try: + # return self.manager.submit_task( + # _tasks.MachineNodePortGet(port_id=mac)) + # except ironic_exceptions.ClientException: + # return None + + def list_machines(self): + return self._normalize_machines( + self.manager.submit_task(_tasks.MachineNodeList())) + + def get_machine(self, name_or_id): + """Get Machine by name or uuid + + Search the baremetal host out by utilizing the supplied id value + which can consist of a name or UUID. + + :param name_or_id: A node name or UUID that will be looked up. + + :returns: ``munch.Munch`` representing the node found or None if no + nodes are found. 
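+
+        A usage sketch (illustrative only; the node name is a hypothetical
+        placeholder)::
+
+            machine = cloud.get_machine('compute-04')
+            if machine:
+                print(machine['provision_state'])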
+        """
+        # TODO(shade) Finish porting ironic to REST/sdk
+        # try:
+        #     return self._normalize_machine(
+        #         self.manager.submit_task(
+        #             _tasks.MachineNodeGet(node_id=name_or_id)))
+        # except ironic_exceptions.ClientException:
+        #     return None
+
+    def get_machine_by_mac(self, mac):
+        """Get machine by port MAC address
+
+        :param mac: Port MAC address to query in order to return a node.
+
+        :returns: ``munch.Munch`` representing the node found or None
+            if the node is not found.
+        """
+        # try:
+        #     port = self.manager.submit_task(
+        #         _tasks.MachinePortGetByAddress(address=mac))
+        #     return self.manager.submit_task(
+        #         _tasks.MachineNodeGet(node_id=port.node_uuid))
+        # except ironic_exceptions.ClientException:
+        #     return None
+
+    def inspect_machine(self, name_or_id, wait=False, timeout=3600):
+        """Inspect a Baremetal machine
+
+        Engages the Ironic node inspection behavior in order to collect
+        metadata about the baremetal machine.
+
+        :param name_or_id: String representing machine name or UUID value in
+            order to identify the machine.
+
+        :param wait: Boolean value controlling if the method is to wait for
+            the desired state to be reached or a failure to occur.
+
+        :param timeout: Integer value, defaulting to 3600 seconds, for the
+            wait state to reach completion.
+
+        :returns: ``munch.Munch`` representing the current state of the
+            machine upon exit of the method.
+        """
+
+        return_to_available = False
+
+        machine = self.get_machine(name_or_id)
+        if not machine:
+            raise OpenStackCloudException(
+                "Machine inspection failed to find: %s." % name_or_id)
+
+        # NOTE(TheJulia): If in available state, we can do this, however
+        # we need to move the host back to available once inspection
+        # completes.
+        if "available" in machine['provision_state']:
+            return_to_available = True
+            # NOTE(TheJulia): Changing an available machine to manageable
+            # state triggers a state transition, so we need to wait until
+            # that transition has completed.
+            self.node_set_provision_state(machine['uuid'], 'manage',
+                                          wait=True, timeout=timeout)
+        elif ("manage" not in machine['provision_state'] and
+                "inspect failed" not in machine['provision_state']):
+            raise OpenStackCloudException(
+                "Machine must be in 'manage' or 'available' state to "
+                "engage inspection: Machine: %s State: %s"
+                % (machine['uuid'], machine['provision_state']))
+        with _utils.shade_exceptions("Error inspecting machine"):
+            machine = self.node_set_provision_state(machine['uuid'], 'inspect')
+            if wait:
+                for count in _utils._iterate_timeout(
+                        timeout,
+                        "Timeout waiting for node transition to "
+                        "target state of 'inspect'"):
+                    machine = self.get_machine(name_or_id)
+
+                    if "inspect failed" in machine['provision_state']:
+                        raise OpenStackCloudException(
+                            "Inspection of node %s failed, last error: %s"
+                            % (machine['uuid'], machine['last_error']))
+
+                    if "manageable" in machine['provision_state']:
+                        break
+
+            if return_to_available:
+                machine = self.node_set_provision_state(
+                    machine['uuid'], 'provide', wait=wait, timeout=timeout)
+
+            return machine
+
+    def register_machine(self, nics, wait=False, timeout=3600,
+                         lock_timeout=600, **kwargs):
+        """Register Baremetal with Ironic
+
+        Allows for the registration of Baremetal nodes with Ironic
+        and population of pertinent node information or configuration
+        to be passed to the Ironic API for the node.
+
+        This method also creates ports for a list of MAC addresses passed
+        in to be utilized for boot and potentially network configuration.
+
+        If a failure is detected creating the network ports, any ports
+        created are deleted, and the node is removed from Ironic.
+
+        :param nics:
+            An array of MAC addresses that represent the
+            network interfaces for the node to be created.
+
+            Example::
+
+              [
+                  {'mac': 'aa:bb:cc:dd:ee:01'},
+                  {'mac': 'aa:bb:cc:dd:ee:02'}
+              ]
+
+        :param wait: Boolean value, defaulting to false, to wait for the
+            node to reach the available state where the node can be
+            provisioned. It must be noted, when set to false, the
+            method will still wait for locks to clear before sending
+            the next required command.
+
+        :param timeout: Integer value, defaulting to 3600 seconds, for the
+            wait state to reach completion.
+
+        :param lock_timeout: Integer value, defaulting to 600 seconds, for
+            locks to clear.
+
+        :param kwargs: Key value pairs to be passed to the Ironic API,
+            including uuid, name, chassis_uuid, driver_info,
+            parameters.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: Returns a ``munch.Munch`` representing the new
+            baremetal node.
+        """
+        with _utils.shade_exceptions("Error registering machine with Ironic"):
+            machine = self.manager.submit_task(_tasks.MachineCreate(**kwargs))
+
+        created_nics = []
+        try:
+            for row in nics:
+                nic = self.manager.submit_task(
+                    _tasks.MachinePortCreate(address=row['mac'],
+                                             node_uuid=machine['uuid']))
+                created_nics.append(nic.uuid)
+
+        except Exception as e:
+            self.log.debug("ironic NIC registration failed", exc_info=True)
+            # TODO(mordred) Handle failures here
+            try:
+                for uuid in created_nics:
+                    try:
+                        self.manager.submit_task(
+                            _tasks.MachinePortDelete(
+                                port_id=uuid))
+                    except Exception:
+                        pass
+            finally:
+                self.manager.submit_task(
+                    _tasks.MachineDelete(node_id=machine['uuid']))
+            raise OpenStackCloudException(
+                "Error registering NICs with the baremetal service: %s"
+                % str(e))
+
+        with _utils.shade_exceptions(
+                "Error transitioning node to available state"):
+            if wait:
+                for count in _utils._iterate_timeout(
+                        timeout,
+                        "Timeout waiting for node transition to "
+                        "available state"):
+
+                    machine = self.get_machine(machine['uuid'])
+
+                    # Note(TheJulia): Per the Ironic state code, a node
+                    # that fails returns to enroll state, which means a failed
+                    # node cannot be determined at this point in time.
+                    if machine['provision_state'] in ['enroll']:
+                        self.node_set_provision_state(
+                            machine['uuid'], 'manage')
+                    elif machine['provision_state'] in ['manageable']:
+                        self.node_set_provision_state(
+                            machine['uuid'], 'provide')
+                    elif machine['last_error'] is not None:
+                        raise OpenStackCloudException(
+                            "Machine encountered a failure: %s"
+                            % machine['last_error'])
+
+                    # Note(TheJulia): Earlier versions of Ironic default to
+                    # None and later versions default to available up until
+                    # the introduction of enroll state.
+                    # Note(TheJulia): The node will transition through
+                    # cleaning if it is enabled, and we will wait for
+                    # completion.
+                    elif machine['provision_state'] in ['available', None]:
+                        break
+
+            else:
+                if machine['provision_state'] in ['enroll']:
+                    self.node_set_provision_state(machine['uuid'], 'manage')
+                    # Note(TheJulia): We need to wait for the lock to clear
+                    # before we attempt to set the machine into provide state
+                    # which allows for the transition to available.
+                    for count in _utils._iterate_timeout(
+                            lock_timeout,
+                            "Timeout waiting for reservation to clear "
+                            "before setting provide state"):
+                        machine = self.get_machine(machine['uuid'])
+                        if (machine['reservation'] is None and
+                                machine['provision_state'] != 'enroll'):
+
+                            self.node_set_provision_state(
+                                machine['uuid'], 'provide')
+                            machine = self.get_machine(machine['uuid'])
+                            break
+
+                        elif machine['provision_state'] in [
+                                'cleaning',
+                                'available']:
+                            break
+
+                        elif machine['last_error'] is not None:
+                            raise OpenStackCloudException(
+                                "Machine encountered a failure: %s"
+                                % machine['last_error'])
+
+        return machine
+
+    def unregister_machine(self, nics, uuid, wait=False, timeout=600):
+        """Unregister Baremetal from Ironic
+
+        Removes entries for Network Interfaces and baremetal nodes
+        from an Ironic API
+
+        :param nics: An array of strings that consist of MAC addresses
+            to be removed.
+        :param string uuid: The UUID of the node to be deleted.
+
+        :param wait: Boolean value, defaults to false, controlling whether
+            to block on the final step of unregistering the machine.
+
+        :param timeout: Integer value, representing seconds with a default
+            value of 600, which controls the maximum amount of
+            time to wait for the machine to be removed.
+
+        :raises: OpenStackCloudException on operation failure.
+        """
+
+        machine = self.get_machine(uuid)
+        invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
+        if machine['provision_state'] in invalid_states:
+            raise OpenStackCloudException(
+                "Error unregistering node '%s' due to current provision "
+                "state '%s'" % (uuid, machine['provision_state']))
+
+        for nic in nics:
+            with _utils.shade_exceptions(
+                    "Error removing NIC {nic} from baremetal API for node "
+                    "{uuid}".format(nic=nic, uuid=uuid)):
+                port = self.manager.submit_task(
+                    _tasks.MachinePortGetByAddress(address=nic['mac']))
+                self.manager.submit_task(
+                    _tasks.MachinePortDelete(port_id=port.uuid))
+        with _utils.shade_exceptions(
+                "Error unregistering machine {node_id} from the baremetal "
+                "API".format(node_id=uuid)):
+            self.manager.submit_task(
+                _tasks.MachineDelete(node_id=uuid))
+            if wait:
+                for count in _utils._iterate_timeout(
+                        timeout,
+                        "Timeout waiting for machine to be deleted"):
+                    if not self.get_machine(uuid):
+                        break
+
+    def patch_machine(self, name_or_id, patch):
+        """Patch Machine Information
+
+        This method allows for an interface to manipulate node entries
+        within Ironic. Specifically, it is a pass-through for the
+        ironicclient.nodes.update interface which allows the Ironic Node
+        properties to be updated.
+
+        :param name_or_id: A machine name or UUID to be updated.
+        :param patch:
+            The JSON Patch document is a list of dictionary objects
+            that comply with RFC 6902 which can be found at
+            https://tools.ietf.org/html/rfc6902.
+
+            Example patch construction::
+
+                patch=[]
+                patch.append({
+                    'op': 'remove',
+                    'path': '/instance_info'
+                })
+                patch.append({
+                    'op': 'replace',
+                    'path': '/name',
+                    'value': 'newname'
+                })
+                patch.append({
+                    'op': 'add',
+                    'path': '/driver_info/username',
+                    'value': 'administrator'
+                })
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: ``munch.Munch`` representing the newly updated node.
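+
+        A patch built as above could then be applied with (illustrative
+        only; the node name is a hypothetical placeholder)::
+
+            node = cloud.patch_machine('compute-04', patch)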
+        """
+
+        with _utils.shade_exceptions(
+            "Error updating machine via patch operation on node "
+            "{node}".format(node=name_or_id)
+        ):
+            return self._normalize_machine(
+                self.manager.submit_task(
+                    _tasks.MachinePatch(node_id=name_or_id,
+                                        patch=patch,
+                                        http_method='PATCH')))
+
+    def update_machine(self, name_or_id, chassis_uuid=None, driver=None,
+                       driver_info=None, name=None, instance_info=None,
+                       instance_uuid=None, properties=None):
+        """Update a machine with new configuration information
+
+        A user-friendly method to perform updates of a machine, in whole or
+        part.
+
+        :param string name_or_id: A machine name or UUID to be updated.
+        :param string chassis_uuid: Assign a chassis UUID to the machine.
+                                    NOTE: As of the Kilo release, this value
+                                    cannot be changed once set. If a user
+                                    attempts to change this value, then the
+                                    Ironic API, as of Kilo, will reject the
+                                    request.
+        :param string driver: The driver name for controlling the machine.
+        :param dict driver_info: The dictionary defining the configuration
+                                 that the driver will utilize to control
+                                 the machine. Permutations of this are
+                                 dependent upon the specific driver utilized.
+        :param string name: A human-readable name to represent the machine.
+        :param dict instance_info: A dictionary of configuration information
+                                   that conveys to the driver how the host
+                                   is to be configured when deployed.
+        :param string instance_uuid: A UUID value representing the instance
+                                     that the deployed machine represents.
+        :param dict properties: A dictionary defining the properties of a
+                                machine.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: ``munch.Munch`` containing a machine sub-dictionary
+                  consisting of the updated data returned from the API update
+                  operation, and a list named changes which contains all of
+                  the API paths that received updates.
+        """
+        machine = self.get_machine(name_or_id)
+        if not machine:
+            raise OpenStackCloudException(
+                "Machine update failed to find Machine: %s. " % name_or_id)
+
+        machine_config = {}
+        new_config = {}
+
+        try:
+            if chassis_uuid:
+                machine_config['chassis_uuid'] = machine['chassis_uuid']
+                new_config['chassis_uuid'] = chassis_uuid
+
+            if driver:
+                machine_config['driver'] = machine['driver']
+                new_config['driver'] = driver
+
+            if driver_info:
+                machine_config['driver_info'] = machine['driver_info']
+                new_config['driver_info'] = driver_info
+
+            if name:
+                machine_config['name'] = machine['name']
+                new_config['name'] = name
+
+            if instance_info:
+                machine_config['instance_info'] = machine['instance_info']
+                new_config['instance_info'] = instance_info
+
+            if instance_uuid:
+                machine_config['instance_uuid'] = machine['instance_uuid']
+                new_config['instance_uuid'] = instance_uuid
+
+            if properties:
+                machine_config['properties'] = machine['properties']
+                new_config['properties'] = properties
+        except KeyError as e:
+            self.log.debug(
+                "Unexpected machine response missing key %s [%s]",
+                e.args[0], name_or_id)
+            raise OpenStackCloudException(
+                "Machine update failed - machine [%s] missing key %s. "
+                "Potential API issue."
+                % (name_or_id, e.args[0]))
+
+        try:
+            patch = jsonpatch.JsonPatch.from_diff(machine_config, new_config)
+        except Exception as e:
+            raise OpenStackCloudException(
+                "Machine update failed - Error generating JSON patch object "
+                "for submission to the API. Machine: %s Error: %s"
+                % (name_or_id, str(e)))
+
+        with _utils.shade_exceptions(
+            "Machine update failed - patch operation failed on Machine "
+            "{node}".format(node=name_or_id)
+        ):
+            if not patch:
+                return dict(
+                    node=machine,
+                    changes=None
+                )
+            else:
+                machine = self.patch_machine(machine['uuid'], list(patch))
+                change_list = []
+                for change in list(patch):
+                    change_list.append(change['path'])
+                return dict(
+                    node=machine,
+                    changes=change_list
+                )
+
+    def validate_node(self, uuid):
+        with _utils.shade_exceptions():
+            ifaces = self.manager.submit_task(
+                _tasks.MachineNodeValidate(node_uuid=uuid))
+
+        if not ifaces.deploy or not ifaces.power:
+            raise OpenStackCloudException(
+                "ironic node %s failed to validate. "
+                "(deploy: %s, power: %s)" % (uuid, ifaces.deploy,
+                                             ifaces.power))
+
+    def node_set_provision_state(self,
+                                 name_or_id,
+                                 state,
+                                 configdrive=None,
+                                 wait=False,
+                                 timeout=3600):
+        """Set Node Provision State
+
+        Enables a user to provision a Machine and optionally define a
+        config drive to be utilized.
+
+        :param string name_or_id: The Name or UUID value representing the
+                                  baremetal node.
+        :param string state: The desired provision state for the
+                             baremetal node.
+        :param string configdrive: An optional URL or file or path
+                                   representing the configdrive. In the
+                                   case of a directory, the client API
+                                   will create a properly formatted
+                                   configuration drive file and post the
+                                   file contents to the API for
+                                   deployment.
+        :param boolean wait: A boolean value, defaulted to false, to control
+                             if the method will wait for the desired end
+                             state to be reached before returning.
+        :param integer timeout: Integer value, defaulting to 3600 seconds,
+                                representing the amount of time to wait for
+                                the desired end state to be reached.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: ``munch.Munch`` representing the current state of the
+                  machine upon exit of the method.
+        """
+        with _utils.shade_exceptions(
+            "Baremetal machine node failed change provision state to "
+            "{state}".format(state=state)
+        ):
+            machine = self.manager.submit_task(
+                _tasks.MachineSetProvision(node_uuid=name_or_id,
+                                           state=state,
+                                           configdrive=configdrive))
+
+            if wait:
+                for count in _utils._iterate_timeout(
+                        timeout,
+                        "Timeout waiting for node transition to "
+                        "target state of '%s'" % state):
+                    machine = self.get_machine(name_or_id)
+                    if 'failed' in machine['provision_state']:
+                        raise OpenStackCloudException(
+                            "Machine encountered a failure.")
+                    # NOTE(TheJulia): This performs matching if the requested
+                    # end state matches the state the node has reached.
+                    if state in machine['provision_state']:
+                        break
+                    # NOTE(TheJulia): This performs matching for cases where
+                    # the requested state action ends in available state.
+                    if ("available" in machine['provision_state'] and
+                            state in ["provide", "deleted"]):
+                        break
+            else:
+                machine = self.get_machine(name_or_id)
+            return machine
+
+    def set_machine_maintenance_state(
+            self,
+            name_or_id,
+            state=True,
+            reason=None):
+        """Set Baremetal Machine Maintenance State
+
+        Sets Baremetal maintenance state and maintenance reason.
+
+        :param string name_or_id: The Name or UUID value representing the
+                                  baremetal node.
+        :param boolean state: The desired state of the node. True being in
+                              maintenance, whereas False means the machine
+                              is not in maintenance mode. This value
+                              defaults to True if not explicitly set.
+        :param string reason: An optional freeform string that is supplied
+                              to the baremetal API to allow for notation
+                              as to why the node is in maintenance state.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: None
+        """
+        with _utils.shade_exceptions(
+            "Error setting machine maintenance state to {state} on node "
+            "{node}".format(state=state, node=name_or_id)
+        ):
+            if state:
+                result = self.manager.submit_task(
+                    _tasks.MachineSetMaintenance(node_id=name_or_id,
+                                                 state='true',
+                                                 maint_reason=reason))
+            else:
+                result = self.manager.submit_task(
+                    _tasks.MachineSetMaintenance(node_id=name_or_id,
+                                                 state='false'))
+            if result is not None:
+                raise OpenStackCloudException(
+                    "Failed setting machine maintenance state to %s "
+                    "on node %s. Received: %s" % (
+                        state, name_or_id, result))
+            return None
+
+    def remove_machine_from_maintenance(self, name_or_id):
+        """Remove Baremetal Machine from Maintenance State
+
+        Similarly to set_machine_maintenance_state, this method
+        removes a machine from maintenance state. Note that this
+        method simply calls set_machine_maintenance_state for the
+        requested name_or_id with the state set to False.
+
+        :param string name_or_id: The Name or UUID value representing the
+                                  baremetal node.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: None
+        """
+        self.set_machine_maintenance_state(name_or_id, False)
+
+    def _set_machine_power_state(self, name_or_id, state):
+        """Set machine power state to on or off
+
+        This private method allows a user to turn power on or off to
+        a node via the Baremetal API.
+
+        :param string name_or_id: A string representing the baremetal
+                                  node whose power state is to be changed.
+        :param string state: A value of "on", "off", or "reboot" that is
+                             passed to the baremetal API to be asserted to
+                             the machine. In the case of the "reboot" state,
+                             Ironic will return the host to the "on" state.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: None
+        """
+        with _utils.shade_exceptions(
+            "Error setting machine power state to {state} on node "
+            "{node}".format(state=state, node=name_or_id)
+        ):
+            power = self.manager.submit_task(
+                _tasks.MachineSetPower(node_id=name_or_id,
+                                       state=state))
+            if power is not None:
+                raise OpenStackCloudException(
+                    "Failed setting machine power state %s on node %s. "
+                    "Received: %s" % (state, name_or_id, power))
+            return None
+
+    def set_machine_power_on(self, name_or_id):
+        """Activate baremetal machine power
+
+        This is a method that sets the node power state to "on".
+
+        :param string name_or_id: A string representing the baremetal
+                                  node to have power turned to an "on"
+                                  state.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: None
+        """
+        self._set_machine_power_state(name_or_id, 'on')
+
+    def set_machine_power_off(self, name_or_id):
+        """De-activate baremetal machine power
+
+        This is a method that sets the node power state to "off".
+
+        :param string name_or_id: A string representing the baremetal
+                                  node to have power turned to an "off"
+                                  state.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: None
+        """
+        self._set_machine_power_state(name_or_id, 'off')
+
+    def set_machine_power_reboot(self, name_or_id):
+        """Reboot baremetal machine power
+
+        This is a method that sets the node power state to "reboot", which
+        in essence changes the machine power state to "off", and then back
+        to "on".
+
+        :param string name_or_id: A string representing the baremetal
+                                  node to be rebooted.
+
+        :raises: OpenStackCloudException on operation error.
+
+        :returns: None
+        """
+        self._set_machine_power_state(name_or_id, 'reboot')
+
+    def activate_node(self, uuid, configdrive=None,
+                      wait=False, timeout=1200):
+        self.node_set_provision_state(
+            uuid, 'active', configdrive, wait=wait, timeout=timeout)
+
+    def deactivate_node(self, uuid, wait=False,
+                        timeout=1200):
+        self.node_set_provision_state(
+            uuid, 'deleted', wait=wait, timeout=timeout)
+
+    def set_node_instance_info(self, uuid, patch):
+        with _utils.shade_exceptions():
+            return self.manager.submit_task(
+                _tasks.MachineNodeUpdate(node_id=uuid, patch=patch))
+
+    def purge_node_instance_info(self, uuid):
+        patch = []
+        patch.append({'op': 'remove', 'path': '/instance_info'})
+        with _utils.shade_exceptions():
+            return self.manager.submit_task(
+                _tasks.MachineNodeUpdate(node_id=uuid, patch=patch))
+
+    @_utils.valid_kwargs('type', 'service_type', 'description')
+    def create_service(self, name, enabled=True, **kwargs):
+        """Create a service.
+
+        :param name: Service name.
+        :param type: Service type. (type or service_type required.)
+        :param service_type: Service type. (type or service_type required.)
+        :param description: Service description (optional).
+        :param enabled: Whether the service is enabled (v3 only)
+
+        :returns: a ``munch.Munch`` containing the service description,
+                  i.e. the following attributes::
+                  - id: <service id>
+                  - name: <service name>
+                  - type: <service type>
+                  - service_type: <service type>
+                  - description: <service description>
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call.
+
+        """
+        type_ = kwargs.pop('type', None)
+        service_type = kwargs.pop('service_type', None)
+
+        # TODO(mordred) When this changes to REST, force interface=admin
+        # in the adapter call
+        if self._is_client_version('identity', 2):
+            url, key = '/OS-KSADM/services', 'OS-KSADM:service'
+            kwargs['type'] = type_ or service_type
+        else:
+            url, key = '/services', 'service'
+            kwargs['type'] = type_ or service_type
+            kwargs['enabled'] = enabled
+        kwargs['name'] = name
+
+        msg = 'Failed to create service {name}'.format(name=name)
+        data = self._identity_client.post(
+            url, json={key: kwargs}, error_message=msg)
+        service = self._get_and_munchify(key, data)
+        return _utils.normalize_keystone_services([service])[0]
+
+    @_utils.valid_kwargs('name', 'enabled', 'type', 'service_type',
+                         'description')
+    def update_service(self, name_or_id, **kwargs):
+        # NOTE(SamYaple): Service updates are only available on v3 api
+        if self._is_client_version('identity', 2):
+            raise OpenStackCloudUnavailableFeature(
+                'Unavailable Feature: Service update requires Identity v3'
+            )
+
+        # NOTE(SamYaple): Keystone v3 only accepts 'type' but shade accepts
+        #                 both 'type' and 'service_type' with a preference
+        #                 towards 'type'
+        type_ = kwargs.pop('type', None)
+        service_type = kwargs.pop('service_type', None)
+        if type_ or service_type:
+            kwargs['type'] = type_ or service_type
+
+        # Only v3 reaches this point, so use the v3 URL and key directly.
+        url, key = '/services', 'service'
+
+        service = self.get_service(name_or_id)
+        msg = 'Error in updating service {service}'.format(service=name_or_id)
+        data = self._identity_client.patch(
+            '{url}/{id}'.format(url=url, id=service['id']),
+            json={key: kwargs},
+            endpoint_filter={'interface': 'admin'}, error_message=msg)
+        service = self._get_and_munchify(key, data)
+        return _utils.normalize_keystone_services([service])[0]
+
+    def list_services(self):
+        """List all Keystone services.
+
+        :returns: a list of ``munch.Munch`` containing the services
+                  description
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call.
+        """
+        if self._is_client_version('identity', 2):
+            url, key = '/OS-KSADM/services', 'OS-KSADM:services'
+        else:
+            url, key = '/services', 'services'
+        data = self._identity_client.get(
+            url, endpoint_filter={'interface': 'admin'},
+            error_message="Failed to list services")
+        services = self._get_and_munchify(key, data)
+        return _utils.normalize_keystone_services(services)
+
+    def search_services(self, name_or_id=None, filters=None):
+        """Search Keystone services.
+
+        :param name_or_id: Name or id of the desired service.
+        :param filters: a dict containing additional filters to use. e.g.
+                        {'type': 'network'}.
+
+        :returns: a list of ``munch.Munch`` containing the services
+                  description
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call.
+        """
+        services = self.list_services()
+        return _utils._filter_list(services, name_or_id, filters)
+
+    def get_service(self, name_or_id, filters=None):
+        """Get exactly one Keystone service.
+
+        :param name_or_id: Name or id of the desired service.
+        :param filters: a dict containing additional filters to use. e.g.
+                        {'type': 'network'}
+
+        :returns: a ``munch.Munch`` containing the service description,
+                  i.e. the following attributes::
+                  - id: <service id>
+                  - name: <service name>
+                  - type: <service type>
+                  - description: <service description>
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call or if multiple matches are found.
+        """
+        return _utils._get_entity(self, 'service', name_or_id, filters)
+
+    def delete_service(self, name_or_id):
+        """Delete a Keystone service.
+
+        :param name_or_id: Service name or id.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call
+        """
+        service = self.get_service(name_or_id=name_or_id)
+        if service is None:
+            self.log.debug("Service %s not found for deleting", name_or_id)
+            return False
+
+        if self._is_client_version('identity', 2):
+            url = '/OS-KSADM/services'
+        else:
+            url = '/services'
+
+        error_msg = 'Failed to delete service {id}'.format(id=service['id'])
+        self._identity_client.delete(
+            '{url}/{id}'.format(url=url, id=service['id']),
+            endpoint_filter={'interface': 'admin'}, error_message=error_msg)
+
+        return True
+
+    @_utils.valid_kwargs('public_url', 'internal_url', 'admin_url')
+    def create_endpoint(self, service_name_or_id, url=None, interface=None,
+                        region=None, enabled=True, **kwargs):
+        """Create a Keystone endpoint.
+
+        :param service_name_or_id: Service name or id for this endpoint.
+        :param url: URL of the endpoint
+        :param interface: Interface type of the endpoint
+        :param public_url: Endpoint public URL.
+        :param internal_url: Endpoint internal URL.
+        :param admin_url: Endpoint admin URL.
+        :param region: Endpoint region.
+        :param enabled: Whether the endpoint is enabled
+
+        NOTE: Both v2 (public_url, internal_url, admin_url) and v3
+              (url, interface) calling semantics are supported. But
+              you can only use one of them at a time.
+
+        :returns: a list of ``munch.Munch`` containing the endpoint
+                  description
+
+        :raises: OpenStackCloudException if the service cannot be found or if
+            something goes wrong during the openstack API call.
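+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance and the service name, URLs and region
+        are illustrative)::
+
+            # v3-style arguments: one endpoint per call
+            cloud.create_endpoint('keystone',
+                                  url='https://identity.example.com:5000',
+                                  interface='public',
+                                  region='RegionOne')
+
+            # v2.0-style arguments: one endpoint created per URL provided
+            cloud.create_endpoint(
+                'keystone',
+                public_url='https://identity.example.com:5000',
+                admin_url='https://identity.example.com:35357',
+                region='RegionOne')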
+ """ + public_url = kwargs.pop('public_url', None) + internal_url = kwargs.pop('internal_url', None) + admin_url = kwargs.pop('admin_url', None) + + if (url or interface) and (public_url or internal_url or admin_url): + raise OpenStackCloudException( + "create_endpoint takes either url and interface OR" + " public_url, internal_url, admin_url") + + service = self.get_service(name_or_id=service_name_or_id) + if service is None: + raise OpenStackCloudException("service {service} not found".format( + service=service_name_or_id)) + + if self._is_client_version('identity', 2): + if url: + # v2.0 in use, v3-like arguments, one endpoint created + if interface != 'public': + raise OpenStackCloudException( + "Error adding endpoint for service {service}." + " On a v2 cloud the url/interface API may only be" + " used for public url. Try using the public_url," + " internal_url, admin_url parameters instead of" + " url and interface".format( + service=service_name_or_id)) + endpoint_args = {'publicurl': url} + else: + # v2.0 in use, v2.0-like arguments, one endpoint created + endpoint_args = {} + if public_url: + endpoint_args.update({'publicurl': public_url}) + if internal_url: + endpoint_args.update({'internalurl': internal_url}) + if admin_url: + endpoint_args.update({'adminurl': admin_url}) + + # keystone v2.0 requires 'region' arg even if it is None + endpoint_args.update( + {'service_id': service['id'], 'region': region}) + + data = self._identity_client.post( + '/endpoints', json={'endpoint': endpoint_args}, + endpoint_filter={'interface': 'admin'}, + error_message=("Failed to create endpoint for service" + " {service}".format(service=service['name']))) + return [self._get_and_munchify('endpoint', data)] + else: + endpoints_args = [] + if url: + # v3 in use, v3-like arguments, one endpoint created + endpoints_args.append( + {'url': url, 'interface': interface, + 'service_id': service['id'], 'enabled': enabled, + 'region': region}) + else: + # v3 in use, v2.0-like arguments, one endpoint created for each + # interface url provided + endpoint_args = {'region': region, 'enabled': enabled, + 'service_id': service['id']} + if public_url: + endpoint_args.update({'url': public_url, + 'interface': 'public'}) + endpoints_args.append(endpoint_args.copy()) + if internal_url: + endpoint_args.update({'url': internal_url, + 'interface': 'internal'}) + endpoints_args.append(endpoint_args.copy()) + if admin_url: + endpoint_args.update({'url': admin_url, + 'interface': 'admin'}) + endpoints_args.append(endpoint_args.copy()) + + endpoints = [] + error_msg = ("Failed to create endpoint for service" + " {service}".format(service=service['name'])) + for args in endpoints_args: + data = self._identity_client.post( + '/endpoints', json={'endpoint': args}, + error_message=error_msg) + endpoints.append(self._get_and_munchify('endpoint', data)) + return endpoints + + @_utils.valid_kwargs('enabled', 'service_name_or_id', 'url', 'interface', + 'region') + def update_endpoint(self, endpoint_id, **kwargs): + # NOTE(SamYaple): Endpoint updates are only available on v3 api + if self._is_client_version('identity', 2): + raise OpenStackCloudUnavailableFeature( + 'Unavailable Feature: Endpoint update' + ) + + service_name_or_id = kwargs.pop('service_name_or_id', None) + if service_name_or_id is not None: + kwargs['service_id'] = service_name_or_id + + data = self._identity_client.patch( + '/endpoints/{}'.format(endpoint_id), json={'endpoint': kwargs}, + error_message="Failed to update endpoint {}".format(endpoint_id)) + return 
self._get_and_munchify('endpoint', data)
+
+    def list_endpoints(self):
+        """List Keystone endpoints.
+
+        :returns: a list of ``munch.Munch`` containing the endpoint
+                  description
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        # Force admin interface if v2.0 is in use
+        v2 = self._is_client_version('identity', 2)
+        kwargs = {'endpoint_filter': {'interface': 'admin'}} if v2 else {}
+
+        data = self._identity_client.get(
+            '/endpoints', error_message="Failed to list endpoints", **kwargs)
+        endpoints = self._get_and_munchify('endpoints', data)
+
+        return endpoints
+
+    def search_endpoints(self, id=None, filters=None):
+        """Search Keystone endpoints.
+
+        :param id: endpoint id.
+        :param filters: a dict containing additional filters to use. e.g.
+                        {'region': 'region-a.geo-1'}
+
+        :returns: a list of ``munch.Munch`` containing the endpoint
+                  description. Each dict contains the following attributes::
+                  - id: <endpoint id>
+                  - region: <endpoint region>
+                  - public_url: <endpoint public url>
+                  - internal_url: <endpoint internal url> (optional)
+                  - admin_url: <endpoint admin url> (optional)
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        # NOTE(SamYaple): With keystone v3 we can filter directly via the
+        # keystone api, but since the return of all the endpoints even in
+        # large environments is small, we can continue to filter in shade just
+        # like the v2 api.
+        endpoints = self.list_endpoints()
+        return _utils._filter_list(endpoints, id, filters)
+
+    def get_endpoint(self, id, filters=None):
+        """Get exactly one Keystone endpoint.
+
+        :param id: endpoint id.
+        :param filters: a dict containing additional filters to use. e.g.
+                        {'region': 'region-a.geo-1'}
+
+        :returns: a ``munch.Munch`` containing the endpoint description.
+                  i.e. a ``munch.Munch`` containing the following
+                  attributes::
+                  - id: <endpoint id>
+                  - region: <endpoint region>
+                  - public_url: <endpoint public url>
+                  - internal_url: <endpoint internal url> (optional)
+                  - admin_url: <endpoint admin url> (optional)
+        """
+        return _utils._get_entity(self, 'endpoint', id, filters)
+
+    def delete_endpoint(self, id):
+        """Delete a Keystone endpoint.
+
+        :param id: Id of the endpoint to delete.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call.
+        """
+        endpoint = self.get_endpoint(id=id)
+        if endpoint is None:
+            self.log.debug("Endpoint %s not found for deleting", id)
+            return False
+
+        # Force admin interface if v2.0 is in use
+        v2 = self._is_client_version('identity', 2)
+        kwargs = {'endpoint_filter': {'interface': 'admin'}} if v2 else {}
+
+        error_msg = "Failed to delete endpoint {id}".format(id=id)
+        self._identity_client.delete('/endpoints/{id}'.format(id=id),
+                                     error_message=error_msg, **kwargs)
+
+        return True
+
+    def create_domain(self, name, description=None, enabled=True):
+        """Create a domain.
+
+        :param name: The name of the domain.
+        :param description: A description of the domain.
+        :param enabled: Is the domain enabled or not (default True).
+
+        :returns: a ``munch.Munch`` containing the domain representation.
+
+        :raise OpenStackCloudException: if the domain cannot be created.
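+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance and the domain name is illustrative)::
+
+            domain = cloud.create_domain(
+                'example-domain', description='An example domain')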
+        """
+        domain_ref = {'name': name, 'enabled': enabled}
+        if description is not None:
+            domain_ref['description'] = description
+        msg = 'Failed to create domain {name}'.format(name=name)
+        data = self._identity_client.post(
+            '/domains', json={'domain': domain_ref}, error_message=msg)
+        domain = self._get_and_munchify('domain', data)
+        return _utils.normalize_domains([domain])[0]
+
+    def update_domain(
+            self, domain_id=None, name=None, description=None,
+            enabled=None, name_or_id=None):
+        if domain_id is None:
+            if name_or_id is None:
+                raise OpenStackCloudException(
+                    "You must pass either domain_id or name_or_id value"
+                )
+            dom = self.get_domain(None, name_or_id)
+            if dom is None:
+                raise OpenStackCloudException(
+                    "Domain {0} not found for updating".format(name_or_id)
+                )
+            domain_id = dom['id']
+
+        domain_ref = {}
+        domain_ref.update({'name': name} if name else {})
+        domain_ref.update({'description': description} if description else {})
+        domain_ref.update({'enabled': enabled} if enabled is not None else {})
+
+        error_msg = "Error in updating domain {id}".format(id=domain_id)
+        data = self._identity_client.patch(
+            '/domains/{id}'.format(id=domain_id),
+            json={'domain': domain_ref}, error_message=error_msg)
+        domain = self._get_and_munchify('domain', data)
+        return _utils.normalize_domains([domain])[0]
+
+    def delete_domain(self, domain_id=None, name_or_id=None):
+        """Delete a domain.
+
+        :param domain_id: ID of the domain to delete.
+        :param name_or_id: Name or ID of the domain to delete.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call.
+        """
+        if domain_id is None:
+            if name_or_id is None:
+                raise OpenStackCloudException(
+                    "You must pass either domain_id or name_or_id value"
+                )
+            dom = self.get_domain(name_or_id=name_or_id)
+            if dom is None:
+                self.log.debug(
+                    "Domain %s not found for deleting", name_or_id)
+                return False
+            domain_id = dom['id']
+
+        # A domain must be disabled before deleting
+        self.update_domain(domain_id, enabled=False)
+        error_msg = "Failed to delete domain {id}".format(id=domain_id)
+        self._identity_client.delete('/domains/{id}'.format(id=domain_id),
+                                     error_message=error_msg)
+
+        return True
+
+    def list_domains(self, **filters):
+        """List Keystone domains.
+
+        :returns: a list of ``munch.Munch`` containing the domain
+                  description.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        data = self._identity_client.get(
+            '/domains', params=filters,
+            error_message="Failed to list domains")
+        domains = self._get_and_munchify('domains', data)
+        return _utils.normalize_domains(domains)
+
+    def search_domains(self, filters=None, name_or_id=None):
+        """Search Keystone domains.
+
+        :param name_or_id: domain name or id
+        :param dict filters: A dict containing additional filters to use.
+             Keys to search on are id, name, enabled and description.
+
+        :returns: a list of ``munch.Munch`` containing the domain
+                  description. Each ``munch.Munch`` contains the following
+                  attributes::
+                  - id: <domain id>
+                  - name: <domain name>
+                  - description: <domain description>
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
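+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance and the values are illustrative)::
+
+            # match on name or id, then narrow by additional filters
+            domains = cloud.search_domains(
+                name_or_id='example-domain', filters={'enabled': True})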
+        """
+        if filters is None:
+            filters = {}
+        if name_or_id is not None:
+            domains = self.list_domains()
+            return _utils._filter_list(domains, name_or_id, filters)
+        else:
+            return self.list_domains(**filters)
+
+    def get_domain(self, domain_id=None, name_or_id=None, filters=None):
+        """Get exactly one Keystone domain.
+
+        :param domain_id: domain id.
+        :param name_or_id: domain name or id.
+        :param dict filters: A dict containing additional filters to use.
+             Keys to search on are id, name, enabled and description.
+
+        :returns: a ``munch.Munch`` containing the domain description, or
+                  None if not found. Each ``munch.Munch`` contains the
+                  following attributes::
+                  - id: <domain id>
+                  - name: <domain name>
+                  - description: <domain description>
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        if domain_id is None:
+            # NOTE(SamYaple): search_domains() has filters and name_or_id
+            # in the wrong positional order, which prevents _get_entity from
+            # returning quickly when a domain object is passed, so we
+            # duplicate that logic here
+            if hasattr(name_or_id, 'id'):
+                return name_or_id
+            return _utils._get_entity(self, 'domain', filters, name_or_id)
+        else:
+            error_msg = 'Failed to get domain {id}'.format(id=domain_id)
+            data = self._identity_client.get(
+                '/domains/{id}'.format(id=domain_id),
+                error_message=error_msg)
+            domain = self._get_and_munchify('domain', data)
+            return _utils.normalize_domains([domain])[0]
+
+    @_utils.valid_kwargs('domain_id')
+    @_utils.cache_on_arguments()
+    def list_groups(self, **kwargs):
+        """List Keystone Groups.
+
+        :param domain_id: domain id.
+
+        :returns: A list of ``munch.Munch`` containing the group description.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        data = self._identity_client.get(
+            '/groups', params=kwargs, error_message="Failed to list groups")
+        return _utils.normalize_groups(self._get_and_munchify('groups', data))
+
+    @_utils.valid_kwargs('domain_id')
+    def search_groups(self, name_or_id=None, filters=None, **kwargs):
+        """Search Keystone groups.
+
+        :param name_or_id: Group name or id.
+        :param filters: A dict containing additional filters to use.
+        :param domain_id: domain id.
+
+        :returns: A list of ``munch.Munch`` containing the group description.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        groups = self.list_groups(**kwargs)
+        return _utils._filter_list(groups, name_or_id, filters)
+
+    @_utils.valid_kwargs('domain_id')
+    def get_group(self, name_or_id, filters=None, **kwargs):
+        """Get exactly one Keystone group.
+
+        :param name_or_id: Group name or id.
+        :param filters: A dict containing additional filters to use.
+        :param domain_id: domain id.
+
+        :returns: A ``munch.Munch`` containing the group description.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        return _utils._get_entity(self, 'group', name_or_id, filters,
+                                  **kwargs)
+
+    def create_group(self, name, description, domain=None):
+        """Create a group.
+
+        :param string name: Group name.
+        :param string description: Group description.
+        :param string domain: Domain name or ID for the group.
+
+        :returns: A ``munch.Munch`` containing the group description.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
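+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance and the names are illustrative)::
+
+            group = cloud.create_group(
+                'example-group', 'An example group', domain='default')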
+        """
+        group_ref = {'name': name}
+        if description:
+            group_ref['description'] = description
+        if domain:
+            dom = self.get_domain(domain)
+            if not dom:
+                raise OpenStackCloudException(
+                    "Creating group {group} failed: Invalid domain "
+                    "{domain}".format(group=name, domain=domain)
+                )
+            group_ref['domain_id'] = dom['id']
+
+        error_msg = "Error creating group {group}".format(group=name)
+        data = self._identity_client.post(
+            '/groups', json={'group': group_ref}, error_message=error_msg)
+        group = self._get_and_munchify('group', data)
+        self.list_groups.invalidate(self)
+        return _utils.normalize_groups([group])[0]
+
+    @_utils.valid_kwargs('domain_id')
+    def update_group(self, name_or_id, name=None, description=None,
+                     **kwargs):
+        """Update an existing group
+
+        :param string name_or_id: Name or ID of the group to update.
+        :param string name: New group name.
+        :param string description: New group description.
+        :param domain_id: domain id.
+
+        :returns: A ``munch.Munch`` containing the group description.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        self.list_groups.invalidate(self)
+        group = self.get_group(name_or_id, **kwargs)
+        if group is None:
+            raise OpenStackCloudException(
+                "Group {0} not found for updating".format(name_or_id)
+            )
+
+        group_ref = {}
+        if name:
+            group_ref['name'] = name
+        if description:
+            group_ref['description'] = description
+
+        error_msg = "Unable to update group {name}".format(name=name_or_id)
+        data = self._identity_client.patch(
+            '/groups/{id}'.format(id=group['id']),
+            json={'group': group_ref}, error_message=error_msg)
+        group = self._get_and_munchify('group', data)
+        self.list_groups.invalidate(self)
+        return _utils.normalize_groups([group])[0]
+
+    @_utils.valid_kwargs('domain_id')
+    def delete_group(self, name_or_id, **kwargs):
+        """Delete a group
+
+        :param name_or_id: ID or name of the group to delete.
+        :param domain_id: domain id.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        group = self.get_group(name_or_id, **kwargs)
+        if group is None:
+            self.log.debug(
+                "Group %s not found for deleting", name_or_id)
+            return False
+
+        error_msg = "Unable to delete group {name}".format(name=name_or_id)
+        self._identity_client.delete('/groups/{id}'.format(id=group['id']),
+                                     error_message=error_msg)
+
+        self.list_groups.invalidate(self)
+        return True
+
+    def list_roles(self):
+        """List Keystone roles.
+
+        :returns: a list of ``munch.Munch`` containing the role description.
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        v2 = self._is_client_version('identity', 2)
+        url = '/OS-KSADM/roles' if v2 else '/roles'
+        data = self._identity_client.get(
+            url, error_message="Failed to list roles")
+        return _utils.normalize_roles(self._get_and_munchify('roles', data))
+
+    def search_roles(self, name_or_id=None, filters=None):
+        """Search Keystone roles.
+
+        :param string name_or_id: role name or id.
+        :param dict filters: a dict containing additional filters to use.
+
+        :returns: a list of ``munch.Munch`` containing the role description.
+                  Each ``munch.Munch`` contains the following attributes::
+
+                  - id: <role id>
+                  - name: <role name>
+                  - description: <role description>
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        roles = self.list_roles()
+        return _utils._filter_list(roles, name_or_id, filters)
+
+    def get_role(self, name_or_id, filters=None):
+        """Get exactly one Keystone role.
+
+        :param name_or_id: role name or id.
+        :param filters: a dict containing additional filters to use.
+
+        :returns: a single ``munch.Munch`` containing the role description.
+                  Each ``munch.Munch`` contains the following attributes::
+
+                  - id: <role id>
+                  - name: <role name>
+                  - description: <role description>
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        return _utils._get_entity(self, 'role', name_or_id, filters)
+
+    def _keystone_v2_role_assignments(self, user, project=None,
+                                      role=None, **kwargs):
+        data = self._identity_client.get(
+            "/tenants/{tenant}/users/{user}/roles".format(
+                tenant=project, user=user),
+            error_message="Failed to list role assignments")
+
+        roles = self._get_and_munchify('roles', data)
+
+        ret = []
+        for tmprole in roles:
+            if role is not None and role != tmprole.id:
+                continue
+            ret.append({
+                'role': {
+                    'id': tmprole.id
+                },
+                'scope': {
+                    'project': {
+                        'id': project,
+                    }
+                },
+                'user': {
+                    'id': user,
+                }
+            })
+        return ret
+
+    def _keystone_v3_role_assignments(self, **filters):
+        # NOTE(samueldmq): different parameters have different representation
+        # patterns as query parameters in the call to the list role
+        # assignments API. The code below handles each set of patterns
+        # separately and renames the parameter names accordingly, ignoring
+        # 'effective', 'include_names' and 'include_subtree', which do not
+        # need any renaming.
+        for k in ('group', 'role', 'user'):
+            if k in filters:
+                filters[k + '.id'] = filters[k]
+                del filters[k]
+        for k in ('project', 'domain'):
+            if k in filters:
+                filters['scope.' + k + '.id'] = filters[k]
+                del filters[k]
+        if 'os_inherit_extension_inherited_to' in filters:
+            filters['scope.OS-INHERIT:inherited_to'] = (
+                filters['os_inherit_extension_inherited_to'])
+            del filters['os_inherit_extension_inherited_to']
+
+        data = self._identity_client.get(
+            '/role_assignments', params=filters,
+            error_message="Failed to list role assignments")
+        return self._get_and_munchify('role_assignments', data)
+
+    def list_role_assignments(self, filters=None):
+        """List Keystone role assignments
+
+        :param dict filters: Dict of filter conditions. Acceptable keys are:
+
+            * 'user' (string) - User ID to be used as query filter.
+            * 'group' (string) - Group ID to be used as query filter.
+            * 'project' (string) - Project ID to be used as query filter.
+            * 'domain' (string) - Domain ID to be used as query filter.
+            * 'role' (string) - Role ID to be used as query filter.
+            * 'os_inherit_extension_inherited_to' (string) - Return inherited
+              role assignments for either 'projects' or 'domains'
+            * 'effective' (boolean) - Return effective role assignments.
+            * 'include_subtree' (boolean) - Include subtree
+
+            'user' and 'group' are mutually exclusive, as are 'domain' and
+            'project'.
+
+            NOTE: For keystone v2, only user, project, and role are used.
+                  Project and user are both required in filters.
+
+        :returns: a list of ``munch.Munch`` containing the role assignment
+                  description. Contains the following attributes::
+
+                  - id: <role id>
+                  - user|group: <user or group id>
+                  - project|domain: <project or domain id>
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        # NOTE(samueldmq): although 'include_names' is a valid query parameter
+        # in the keystone v3 list role assignments API, it would have NO
+        # effect on shade due to normalization. It is not documented as an
+        # acceptable filter in the docs above per design!
+ + if not filters: + filters = {} + + # NOTE(samueldmq): the docs above say filters are *IDs*, though if + # munch.Munch objects are passed, this still works for backwards + # compatibility as keystoneclient allows either IDs or objects to be + # passed in. + # TODO(samueldmq): fix the docs above to advertise munch.Munch objects + # can be provided as parameters too + for k, v in filters.items(): + if isinstance(v, munch.Munch): + filters[k] = v['id'] + + if self._is_client_version('identity', 2): + if filters.get('project') is None or filters.get('user') is None: + raise OpenStackCloudException( + "Must provide project and user for keystone v2" + ) + assignments = self._keystone_v2_role_assignments(**filters) + else: + assignments = self._keystone_v3_role_assignments(**filters) + + return _utils.normalize_role_assignments(assignments) + + def create_flavor(self, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create a new flavor. + + :param name: Descriptive name of the flavor + :param ram: Memory in MB for the flavor + :param vcpus: Number of VCPUs for the flavor + :param disk: Size of local disk in GB + :param flavorid: ID for the flavor (optional) + :param ephemeral: Ephemeral space size in GB + :param swap: Swap space in MB + :param rxtx_factor: RX/TX factor + :param is_public: Make flavor accessible to the public + + :returns: A ``munch.Munch`` describing the new flavor. + + :raises: OpenStackCloudException on operation error. + """ + with _utils.shade_exceptions("Failed to create flavor {name}".format( + name=name)): + payload = { + 'disk': disk, + 'OS-FLV-EXT-DATA:ephemeral': ephemeral, + 'id': flavorid, + 'os-flavor-access:is_public': is_public, + 'name': name, + 'ram': ram, + 'rxtx_factor': rxtx_factor, + 'swap': swap, + 'vcpus': vcpus, + } + if flavorid == 'auto': + payload['id'] = None + data = self._compute_client.post( + '/flavors', + json=dict(flavor=payload)) + + return self._normalize_flavor( + self._get_and_munchify('flavor', data)) + + def delete_flavor(self, name_or_id): + """Delete a flavor + + :param name_or_id: ID or name of the flavor to delete. + + :returns: True if delete succeeded, False otherwise. + + :raises: OpenStackCloudException on operation error. + """ + flavor = self.get_flavor(name_or_id, get_extra=False) + if flavor is None: + self.log.debug( + "Flavor %s not found for deleting", name_or_id) + return False + + with _utils.shade_exceptions("Unable to delete flavor {name}".format( + name=name_or_id)): + self._compute_client.delete( + '/flavors/{id}'.format(id=flavor['id'])) + + return True + + def set_flavor_specs(self, flavor_id, extra_specs): + """Add extra specs to a flavor + + :param string flavor_id: ID of the flavor to update. + :param dict extra_specs: Dictionary of key-value pairs. + + :raises: OpenStackCloudException on operation error. + :raises: OpenStackCloudResourceNotFound if flavor ID is not found. + """ + try: + self._compute_client.post( + "/flavors/{id}/os-extra_specs".format(id=flavor_id), + json=dict(extra_specs=extra_specs)) + except Exception as e: + raise OpenStackCloudException( + "Unable to set flavor specs: {0}".format(str(e)) + ) + + def unset_flavor_specs(self, flavor_id, keys): + """Delete extra specs from a flavor + + :param string flavor_id: ID of the flavor to update. + :param keys: List of spec keys to delete. + + :raises: OpenStackCloudException on operation error. + :raises: OpenStackCloudResourceNotFound if flavor ID is not found. 
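+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance and the flavor ID and spec key are
+        illustrative)::
+
+            cloud.set_flavor_specs(
+                'example-flavor-id', {'hw:cpu_policy': 'dedicated'})
+            cloud.unset_flavor_specs('example-flavor-id', ['hw:cpu_policy'])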
+        """
+        for key in keys:
+            try:
+                self._compute_client.delete(
+                    "/flavors/{id}/os-extra_specs/{key}".format(
+                        id=flavor_id, key=key))
+            except Exception as e:
+                raise OpenStackCloudException(
+                    "Unable to delete flavor spec {0}: {1}".format(
+                        key, str(e)))
+
+    def _mod_flavor_access(self, action, flavor_id, project_id):
+        """Common method for adding and removing flavor access
+        """
+        with _utils.shade_exceptions("Error trying to {action} access for "
+                                     "flavor ID {flavor}".format(
+                                         action=action, flavor=flavor_id)):
+            endpoint = '/flavors/{id}/action'.format(id=flavor_id)
+            access = {'tenant': project_id}
+            access_key = '{action}TenantAccess'.format(action=action)
+
+            self._compute_client.post(endpoint, json={access_key: access})
+
+    def add_flavor_access(self, flavor_id, project_id):
+        """Grant access to a private flavor for a project/tenant.
+
+        :param string flavor_id: ID of the private flavor.
+        :param string project_id: ID of the project/tenant.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        self._mod_flavor_access('add', flavor_id, project_id)
+
+    def remove_flavor_access(self, flavor_id, project_id):
+        """Revoke access from a private flavor for a project/tenant.
+
+        :param string flavor_id: ID of the private flavor.
+        :param string project_id: ID of the project/tenant.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        self._mod_flavor_access('remove', flavor_id, project_id)
+
+    def list_flavor_access(self, flavor_id):
+        """List access from a private flavor for a project/tenant.
+
+        :param string flavor_id: ID of the private flavor.
+
+        :returns: a list of ``munch.Munch`` containing the access description
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        with _utils.shade_exceptions("Error trying to list access for "
+                                     "flavor ID {flavor}".format(
+                                         flavor=flavor_id)):
+            data = self._compute_client.get(
+                '/flavors/{id}/os-flavor-access'.format(id=flavor_id))
+            return _utils.normalize_flavor_accesses(
+                self._get_and_munchify('flavor_access', data))
+
+    def create_role(self, name):
+        """Create a Keystone role.
+
+        :param string name: The name of the role.
+
+        :returns: a ``munch.Munch`` containing the role description
+
+        :raise OpenStackCloudException: if the role cannot be created
+        """
+        v2 = self._is_client_version('identity', 2)
+        url = '/OS-KSADM/roles' if v2 else '/roles'
+        msg = 'Failed to create role {name}'.format(name=name)
+        data = self._identity_client.post(
+            url, json={'role': {'name': name}}, error_message=msg)
+        role = self._get_and_munchify('role', data)
+        return _utils.normalize_roles([role])[0]
+
+    def delete_role(self, name_or_id):
+        """Delete a Keystone role.
+
+        :param string name_or_id: Name or id of the role to delete.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: ``OpenStackCloudException`` if something goes wrong during
+            the openstack API call.
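+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance and the role name is illustrative)::
+
+            deleted = cloud.delete_role('example-role')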
+        """
+        role = self.get_role(name_or_id)
+        if role is None:
+            self.log.debug(
+                "Role %s not found for deleting", name_or_id)
+            return False
+
+        v2 = self._is_client_version('identity', 2)
+        url = '{prefix}/{id}'.format(
+            prefix='/OS-KSADM/roles' if v2 else '/roles', id=role['id'])
+        error_msg = "Unable to delete role {name}".format(name=name_or_id)
+        self._identity_client.delete(url, error_message=error_msg)
+
+        return True
+
+    def _get_grant_revoke_params(self, role, user=None, group=None,
+                                 project=None, domain=None):
+        role = self.get_role(role)
+        if role is None:
+            return {}
+        data = {'role': role.id}
+
+        # domain and group not available in keystone v2.0
+        is_keystone_v2 = self._is_client_version('identity', 2)
+
+        filters = {}
+        if not is_keystone_v2 and domain:
+            filters['domain_id'] = data['domain'] = \
+                self.get_domain(domain)['id']
+
+        if user:
+            data['user'] = self.get_user(user, filters=filters)
+
+        if project:
+            # drop domain in favor of project
+            data.pop('domain', None)
+            data['project'] = self.get_project(project, filters=filters)
+
+        if not is_keystone_v2 and group:
+            data['group'] = self.get_group(group, filters=filters)
+
+        return data
+
+    def grant_role(self, name_or_id, user=None, group=None,
+                   project=None, domain=None, wait=False, timeout=60):
+        """Grant a role to a user.
+
+        :param string name_or_id: The name or id of the role.
+        :param string user: The name or id of the user.
+        :param string group: The name or id of the group. (v3)
+        :param string project: The name or id of the project.
+        :param string domain: The id of the domain. (v3)
+        :param bool wait: Wait for role to be granted
+        :param int timeout: Timeout to wait for role to be granted
+
+        NOTE: domain is a required argument when the grant is on a project,
+            user or group specified by name. In that situation, they are all
+            considered to be in that domain. If different domains are in use
+            in the same role grant, it is required to specify those by ID.
+
+        NOTE: for wait and timeout, sometimes granting roles is not
+            instantaneous.
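+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance and the names are illustrative)::
+
+            cloud.grant_role('member', user='example-user',
+                             project='example-project', wait=True)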
+ + NOTE: project is required for keystone v2 + + :returns: True if the role is assigned, otherwise False + + :raise OpenStackCloudException: if the role cannot be granted + """ + data = self._get_grant_revoke_params(name_or_id, user, group, + project, domain) + filters = data.copy() + if not data: + raise OpenStackCloudException( + 'Role {0} not found.'.format(name_or_id)) + + if data.get('user') is not None and data.get('group') is not None: + raise OpenStackCloudException( + 'Specify either a group or a user, not both') + if data.get('user') is None and data.get('group') is None: + raise OpenStackCloudException( + 'Must specify either a user or a group') + if self._is_client_version('identity', 2) and \ + data.get('project') is None: + raise OpenStackCloudException( + 'Must specify project for keystone v2') + + if self.list_role_assignments(filters=filters): + self.log.debug('Assignment already exists') + return False + + error_msg = "Error granting access to role: {0}".format(data) + if self._is_client_version('identity', 2): + # For v2.0, only tenant/project assignment is supported + url = "/tenants/{t}/users/{u}/roles/OS-KSADM/{r}".format( + t=data['project']['id'], u=data['user']['id'], r=data['role']) + + self._identity_client.put(url, error_message=error_msg, + endpoint_filter={'interface': 'admin'}) + else: + if data.get('project') is None and data.get('domain') is None: + raise OpenStackCloudException( + 'Must specify either a domain or project') + + # For v3, figure out the assignment type and build the URL + if data.get('domain'): + url = "/domains/{}".format(data['domain']) + else: + url = "/projects/{}".format(data['project']['id']) + if data.get('group'): + url += "/groups/{}".format(data['group']['id']) + else: + url += "/users/{}".format(data['user']['id']) + url += "/roles/{}".format(data.get('role')) + + self._identity_client.put(url, error_message=error_msg) + + if wait: + for count in _utils._iterate_timeout( + timeout, + "Timeout waiting for role to be granted"): + if self.list_role_assignments(filters=filters): + break + return True + + def revoke_role(self, name_or_id, user=None, group=None, + project=None, domain=None, wait=False, timeout=60): + """Revoke a role from a user. + + :param string name_or_id: The name or id of the role. + :param string user: The name or id of the user. + :param string group: The name or id of the group. (v3) + :param string project: The name or id of the project. + :param string domain: The id of the domain. (v3) + :param bool wait: Wait for role to be revoked + :param int timeout: Timeout to wait for role to be revoked + + NOTE: for wait and timeout, sometimes revoking roles is not + instantaneous. 
+
+        NOTE: project is required for keystone v2
+
+        :returns: True if the role is revoked, otherwise False
+
+        :raise OpenStackCloudException: if the role cannot be removed
+        """
+        data = self._get_grant_revoke_params(name_or_id, user, group,
+                                             project, domain)
+        filters = data.copy()
+
+        if not data:
+            raise OpenStackCloudException(
+                'Role {0} not found.'.format(name_or_id))
+
+        if data.get('user') is not None and data.get('group') is not None:
+            raise OpenStackCloudException(
+                'Specify either a group or a user, not both')
+        if data.get('user') is None and data.get('group') is None:
+            raise OpenStackCloudException(
+                'Must specify either a user or a group')
+        if self._is_client_version('identity', 2) and \
+                data.get('project') is None:
+            raise OpenStackCloudException(
+                'Must specify project for keystone v2')
+
+        if not self.list_role_assignments(filters=filters):
+            self.log.debug('Assignment does not exist')
+            return False
+
+        error_msg = "Error revoking access to role: {0}".format(data)
+        if self._is_client_version('identity', 2):
+            # For v2.0, only tenant/project assignment is supported
+            url = "/tenants/{t}/users/{u}/roles/OS-KSADM/{r}".format(
+                t=data['project']['id'], u=data['user']['id'], r=data['role'])
+
+            self._identity_client.delete(
+                url, error_message=error_msg,
+                endpoint_filter={'interface': 'admin'})
+        else:
+            if data.get('project') is None and data.get('domain') is None:
+                raise OpenStackCloudException(
+                    'Must specify either a domain or project')
+
+            # For v3, figure out the assignment type and build the URL
+            if data.get('domain'):
+                url = "/domains/{}".format(data['domain'])
+            else:
+                url = "/projects/{}".format(data['project']['id'])
+            if data.get('group'):
+                url += "/groups/{}".format(data['group']['id'])
+            else:
+                url += "/users/{}".format(data['user']['id'])
+            url += "/roles/{}".format(data.get('role'))
+
+            self._identity_client.delete(url, error_message=error_msg)
+
+        if wait:
+            for count in _utils._iterate_timeout(
+                    timeout,
+                    "Timeout waiting for role to be revoked"):
+                if not self.list_role_assignments(filters=filters):
+                    break
+        return True
+
+    def list_hypervisors(self):
+        """List all hypervisors
+
+        :returns: A list of hypervisor ``munch.Munch``.
+        """
+
+        data = self._compute_client.get(
+            '/os-hypervisors/detail',
+            error_message="Error fetching hypervisor list")
+        return self._get_and_munchify('hypervisors', data)
+
+    def search_aggregates(self, name_or_id=None, filters=None):
+        """Search host aggregates.
+
+        :param name_or_id: aggregate name or id.
+        :param filters: a dict containing additional filters to use.
+
+        :returns: a list of dicts containing the aggregates
+
+        :raises: ``OpenStackCloudException``: if something goes wrong during
+            the openstack API call.
+        """
+        aggregates = self.list_aggregates()
+        return _utils._filter_list(aggregates, name_or_id, filters)
+
+    def list_aggregates(self):
+        """List all available host aggregates.
+
+        :returns: A list of aggregate dicts.
+
+        """
+        data = self._compute_client.get(
+            '/os-aggregates',
+            error_message="Error fetching aggregate list")
+        return self._get_and_munchify('aggregates', data)
+
+    def get_aggregate(self, name_or_id, filters=None):
+        """Get an aggregate by name or ID.
+
+        :param name_or_id: Name or ID of the aggregate.
+        :param dict filters:
+            A dictionary of meta data to use for further filtering. Elements
+            of this dictionary may, themselves, be dictionaries.
Example::
+
+                {
+                  'availability_zone': 'nova',
+                  'metadata': {
+                      'cpu_allocation_ratio': '1.0'
+                  }
+                }
+
+        :returns: An aggregate dict or None if no matching aggregate is
+                  found.
+
+        """
+        return _utils._get_entity(self, 'aggregate', name_or_id, filters)
+
+    def create_aggregate(self, name, availability_zone=None):
+        """Create a new host aggregate.
+
+        :param name: Name of the host aggregate being created
+        :param availability_zone: Availability zone to assign hosts
+
+        :returns: a dict representing the new host aggregate.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        data = self._compute_client.post(
+            '/os-aggregates',
+            json={'aggregate': {
+                'name': name,
+                'availability_zone': availability_zone
+            }},
+            error_message="Unable to create host aggregate {name}".format(
+                name=name))
+        return self._get_and_munchify('aggregate', data)
+
+    @_utils.valid_kwargs('name', 'availability_zone')
+    def update_aggregate(self, name_or_id, **kwargs):
+        """Update a host aggregate.
+
+        :param name_or_id: Name or ID of the aggregate being updated.
+        :param name: New aggregate name
+        :param availability_zone: Availability zone to assign to hosts
+
+        :returns: a dict representing the updated host aggregate.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        aggregate = self.get_aggregate(name_or_id)
+        if not aggregate:
+            raise OpenStackCloudException(
+                "Host aggregate %s not found." % name_or_id)
+
+        data = self._compute_client.put(
+            '/os-aggregates/{id}'.format(id=aggregate['id']),
+            json={'aggregate': kwargs},
+            error_message="Error updating aggregate {name}".format(
+                name=name_or_id))
+        return self._get_and_munchify('aggregate', data)
+
+    def delete_aggregate(self, name_or_id):
+        """Delete a host aggregate.
+
+        :param name_or_id: Name or ID of the host aggregate to delete.
+
+        :returns: True if delete succeeded, False otherwise.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        aggregate = self.get_aggregate(name_or_id)
+        if not aggregate:
+            self.log.debug("Aggregate %s not found for deleting", name_or_id)
+            return False
+
+        self._compute_client.delete(
+            '/os-aggregates/{id}'.format(id=aggregate['id']),
+            error_message="Error deleting aggregate {name}".format(
+                name=name_or_id))
+
+        return True
+
+    def set_aggregate_metadata(self, name_or_id, metadata):
+        """Set aggregate metadata, replacing the existing metadata.
+
+        :param name_or_id: Name of the host aggregate to update
+        :param metadata: Dict containing metadata to replace (Use
+                         {'key': None} to remove a key)
+
+        :returns: a dict representing the new host aggregate.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        aggregate = self.get_aggregate(name_or_id)
+        if not aggregate:
+            raise OpenStackCloudException(
+                "Host aggregate %s not found." % name_or_id)
+
+        err_msg = "Unable to set metadata for host aggregate {name}".format(
+            name=name_or_id)
+
+        data = self._compute_client.post(
+            '/os-aggregates/{id}/action'.format(id=aggregate['id']),
+            json={'set_metadata': {'metadata': metadata}},
+            error_message=err_msg)
+        return self._get_and_munchify('aggregate', data)
+
+    def add_host_to_aggregate(self, name_or_id, host_name):
+        """Add a host to an aggregate.
+
+        :param name_or_id: Name or ID of the host aggregate.
+        :param host_name: Host to add.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        aggregate = self.get_aggregate(name_or_id)
+        if not aggregate:
+            raise OpenStackCloudException(
+                "Host aggregate %s not found." % name_or_id)
+
+        err_msg = "Unable to add host {host} to aggregate {name}".format(
+            host=host_name, name=name_or_id)
+
+        return self._compute_client.post(
+            '/os-aggregates/{id}/action'.format(id=aggregate['id']),
+            json={'add_host': {'host': host_name}},
+            error_message=err_msg)
+
+    def remove_host_from_aggregate(self, name_or_id, host_name):
+        """Remove a host from an aggregate.
+
+        :param name_or_id: Name or ID of the host aggregate.
+        :param host_name: Host to remove.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        aggregate = self.get_aggregate(name_or_id)
+        if not aggregate:
+            raise OpenStackCloudException(
+                "Host aggregate %s not found." % name_or_id)
+
+        err_msg = "Unable to remove host {host} from aggregate {name}".format(
+            host=host_name, name=name_or_id)
+
+        return self._compute_client.post(
+            '/os-aggregates/{id}/action'.format(id=aggregate['id']),
+            json={'remove_host': {'host': host_name}},
+            error_message=err_msg)
+
+    def get_volume_type_access(self, name_or_id):
+        """Return a list of volume_type_access.
+
+        :param name_or_id: Name or ID of the volume type.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        volume_type = self.get_volume_type(name_or_id)
+        if not volume_type:
+            raise OpenStackCloudException(
+                "VolumeType not found: %s" % name_or_id)
+
+        data = self._volume_client.get(
+            '/types/{id}/os-volume-type-access'.format(id=volume_type.id),
+            error_message="Unable to get volume type access"
+                          " {name}".format(name=name_or_id))
+        return self._normalize_volume_type_accesses(
+            self._get_and_munchify('volume_type_access', data))
+
+    def add_volume_type_access(self, name_or_id, project_id):
+        """Grant access on a volume_type to a project.
+
+        :param name_or_id: ID or name of a volume_type
+        :param project_id: A project id
+
+        NOTE: the call works even if the project does not exist.
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        volume_type = self.get_volume_type(name_or_id)
+        if not volume_type:
+            raise OpenStackCloudException(
+                "VolumeType not found: %s" % name_or_id)
+        with _utils.shade_exceptions():
+            payload = {'project': project_id}
+            self._volume_client.post(
+                '/types/{id}/action'.format(id=volume_type.id),
+                json=dict(addProjectAccess=payload),
+                error_message="Unable to authorize {project} "
+                              "to use volume type {name}".format(
+                                  name=name_or_id, project=project_id))
+
+    def remove_volume_type_access(self, name_or_id, project_id):
+        """Revoke access on a volume_type to a project.
+
+        :param name_or_id: ID or name of a volume_type
+        :param project_id: A project id
+
+        :raises: OpenStackCloudException on operation error.
+        """
+        volume_type = self.get_volume_type(name_or_id)
+        if not volume_type:
+            raise OpenStackCloudException(
+                "VolumeType not found: %s" % name_or_id)
+        with _utils.shade_exceptions():
+            payload = {'project': project_id}
+            self._volume_client.post(
+                '/types/{id}/action'.format(id=volume_type.id),
+                json=dict(removeProjectAccess=payload),
+                error_message="Unable to revoke {project} "
+                              "from using volume type {name}".format(
+                                  name=name_or_id, project=project_id))
+
+    def set_compute_quotas(self, name_or_id, **kwargs):
+        """ Set a quota in a project
+
+        :param name_or_id: project name or id
+        :param kwargs: key/value pairs of quota name and quota value
+
+        :raises: OpenStackCloudException if the resource to set the
+            quota does not exist.
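+
+        Example (a minimal usage sketch; ``cloud`` is assumed to be an
+        ``OpenStackCloud`` instance, the quota keys follow the compute
+        quota-set API, and the values are illustrative)::
+
+            cloud.set_compute_quotas('example-project', cores=20, ram=51200)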
+ """ + + proj = self.get_project(name_or_id) + if not proj: + raise OpenStackCloudException("project does not exist") + + # compute_quotas = {key: val for key, val in kwargs.items() + # if key in quota.COMPUTE_QUOTAS} + # TODO(ghe): Manage volume and network quotas + # network_quotas = {key: val for key, val in kwargs.items() + # if key in quota.NETWORK_QUOTAS} + # volume_quotas = {key: val for key, val in kwargs.items() + # if key in quota.VOLUME_QUOTAS} + + kwargs['force'] = True + self._compute_client.put( + '/os-quota-sets/{project}'.format(project=proj.id), + json={'quota_set': kwargs}, + error_message="No valid quota or resource") + + def get_compute_quotas(self, name_or_id): + """ Get quota for a project + + :param name_or_id: project name or id + :raises: OpenStackCloudException if it's not a valid project + + :returns: Munch object with the quotas + """ + proj = self.get_project(name_or_id) + if not proj: + raise OpenStackCloudException("project does not exist") + data = self._compute_client.get( + '/os-quota-sets/{project}'.format(project=proj.id)) + return self._get_and_munchify('quota_set', data) + + def delete_compute_quotas(self, name_or_id): + """ Delete quota for a project + + :param name_or_id: project name or id + :raises: OpenStackCloudException if it's not a valid project or the + nova client call failed + + :returns: dict with the quotas + """ + proj = self.get_project(name_or_id) + if not proj: + raise OpenStackCloudException("project does not exist") + return self._compute_client.delete( + '/os-quota-sets/{project}'.format(project=proj.id)) + + def get_compute_usage(self, name_or_id, start=None, end=None): + """ Get usage for a specific project + + :param name_or_id: project name or id + :param start: :class:`datetime.datetime` or string. Start date in UTC + Defaults to 2010-07-06T12:00:00Z (the date the OpenStack + project was started) + :param end: :class:`datetime.datetime` or string. End date in UTC. + Defaults to now + :raises: OpenStackCloudException if it's not a valid project + + :returns: Munch object with the usage + """ + def parse_date(date): + try: + return iso8601.parse_date(date) + except iso8601.iso8601.ParseError: + # Yes. This is an exception mask. However,iso8601 is an + # implementation detail - and the error message is actually + # less informative. + raise OpenStackCloudException( + "Date given, {date}, is invalid. Please pass in a date" + " string in ISO 8601 format -" + " YYYY-MM-DDTHH:MM:SS".format( + date=date)) + + def parse_datetime_for_nova(date): + # Must strip tzinfo from the date- it breaks Nova. Also, + # Nova is expecting this in UTC. If someone passes in an + # ISO8601 date string or a datetime with timzeone data attached, + # strip the timezone data but apply offset math first so that + # the user's well formed perfectly valid date will be used + # correctly. 
+            offset = date.utcoffset()
+            if offset:
+                date = date - offset
+            return date.replace(tzinfo=None)
+
+        if not start:
+            start = parse_date('2010-07-06')
+        elif not isinstance(start, datetime.datetime):
+            start = parse_date(start)
+        if not end:
+            end = datetime.datetime.utcnow()
+        elif not isinstance(end, datetime.datetime):
+            end = parse_date(end)
+
+        start = parse_datetime_for_nova(start)
+        end = parse_datetime_for_nova(end)
+
+        proj = self.get_project(name_or_id)
+        if not proj:
+            raise OpenStackCloudException(
+                "project does not exist: {name}".format(name=name_or_id))
+
+        data = self._compute_client.get(
+            '/os-simple-tenant-usage/{project}'.format(project=proj.id),
+            params=dict(start=start.isoformat(), end=end.isoformat()),
+            error_message="Unable to get usage for project: {name}".format(
+                name=proj.id))
+        return self._normalize_compute_usage(
+            self._get_and_munchify('tenant_usage', data))
+
+    def set_volume_quotas(self, name_or_id, **kwargs):
+        """ Set a volume quota in a project
+
+        :param name_or_id: project name or id
+        :param kwargs: key/value pairs of quota name and quota value
+
+        :raises: OpenStackCloudException if the resource to set the
+            quota does not exist.
+        """
+
+        proj = self.get_project(name_or_id)
+        if not proj:
+            raise OpenStackCloudException("project does not exist")
+
+        kwargs['tenant_id'] = proj.id
+        self._volume_client.put(
+            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
+            json={'quota_set': kwargs},
+            error_message="No valid quota or resource")
+
+    def get_volume_quotas(self, name_or_id):
+        """ Get volume quotas for a project
+
+        :param name_or_id: project name or id
+        :raises: OpenStackCloudException if it's not a valid project
+
+        :returns: Munch object with the quotas
+        """
+        proj = self.get_project(name_or_id)
+        if not proj:
+            raise OpenStackCloudException("project does not exist")
+
+        data = self._volume_client.get(
+            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
+            error_message="cinder client call failed")
+        return self._get_and_munchify('quota_set', data)
+
+    def delete_volume_quotas(self, name_or_id):
+        """ Delete volume quotas for a project
+
+        :param name_or_id: project name or id
+        :raises: OpenStackCloudException if it's not a valid project or the
+            cinder client call failed
+
+        :returns: dict with the quotas
+        """
+        proj = self.get_project(name_or_id)
+        if not proj:
+            raise OpenStackCloudException("project does not exist")
+
+        return self._volume_client.delete(
+            '/os-quota-sets/{tenant_id}'.format(tenant_id=proj.id),
+            error_message="cinder client call failed")
+
+    def set_network_quotas(self, name_or_id, **kwargs):
+        """ Set a network quota in a project
+
+        :param name_or_id: project name or id
+        :param kwargs: key/value pairs of quota name and quota value
+
+        :raises: OpenStackCloudException if the resource to set the
+            quota does not exist.
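+
+        Example (a sketch; the project name and quota values are
+        hypothetical)::
+
+            cloud.set_network_quotas('demo-project', network=5, port=100)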
+ """ + + proj = self.get_project(name_or_id) + if not proj: + raise OpenStackCloudException("project does not exist") + + self._network_client.put( + '/quotas/{project_id}.json'.format(project_id=proj.id), + json={'quota': kwargs}, + error_message=("Error setting Neutron's quota for " + "project {0}".format(proj.id))) + + def get_network_quotas(self, name_or_id, details=False): + """ Get network quotas for a project + + :param name_or_id: project name or id + :param details: if set to True it will return details about usage + of quotas by given project + :raises: OpenStackCloudException if it's not a valid project + + :returns: Munch object with the quotas + """ + proj = self.get_project(name_or_id) + if not proj: + raise OpenStackCloudException("project does not exist") + url = '/quotas/{project_id}'.format(project_id=proj.id) + if details: + url = url + "/details" + url = url + ".json" + data = self._network_client.get( + url, + error_message=("Error fetching Neutron's quota for " + "project {0}".format(proj.id))) + return self._get_and_munchify('quota', data) + + def delete_network_quotas(self, name_or_id): + """ Delete network quotas for a project + + :param name_or_id: project name or id + :raises: OpenStackCloudException if it's not a valid project or the + network client call failed + + :returns: dict with the quotas + """ + proj = self.get_project(name_or_id) + if not proj: + raise OpenStackCloudException("project does not exist") + self._network_client.delete( + '/quotas/{project_id}.json'.format(project_id=proj.id), + error_message=("Error deleting Neutron's quota for " + "project {0}".format(proj.id))) + + def list_magnum_services(self): + """List all Magnum services. + :returns: a list of dicts containing the service details. + + :raises: OpenStackCloudException on operation error. + """ + with _utils.shade_exceptions("Error fetching Magnum services list"): + data = self._container_infra_client.get('/mservices') + return self._normalize_magnum_services( + self._get_and_munchify('mservices', data)) diff --git a/openstack/cloud/task_manager.py b/openstack/cloud/task_manager.py new file mode 100644 index 000000000..358e9eed2 --- /dev/null +++ b/openstack/cloud/task_manager.py @@ -0,0 +1,334 @@ +# Copyright (C) 2011-2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import concurrent.futures +import sys +import threading +import time +import types + +import keystoneauth1.exceptions +import six + +from openstack import _log +from openstack.cloud import exc +from openstack.cloud import meta + + +def _is_listlike(obj): + # NOTE(Shrews): Since the client API might decide to subclass one + # of these result types, we use isinstance() here instead of type(). + return ( + isinstance(obj, list) or + isinstance(obj, types.GeneratorType)) + + +def _is_objlike(obj): + # NOTE(Shrews): Since the client API might decide to subclass one + # of these result types, we use isinstance() here instead of type(). 
+ return ( + not isinstance(obj, bool) and + not isinstance(obj, int) and + not isinstance(obj, float) and + not isinstance(obj, six.string_types) and + not isinstance(obj, set) and + not isinstance(obj, tuple)) + + +@six.add_metaclass(abc.ABCMeta) +class BaseTask(object): + """Represent a task to be performed on an OpenStack Cloud. + + Some consumers need to inject things like rate-limiting or auditing + around each external REST interaction. Task provides an interface + to encapsulate each such interaction. Also, although shade itself + operates normally in a single-threaded direct action manner, consuming + programs may provide a multi-threaded TaskManager themselves. For that + reason, Task uses threading events to ensure appropriate wait conditions. + These should be a no-op in single-threaded applications. + + A consumer is expected to overload the main method. + + :param dict kw: Any args that are expected to be passed to something in + the main payload at execution time. + """ + + def __init__(self, **kw): + self._exception = None + self._traceback = None + self._result = None + self._response = None + self._finished = threading.Event() + self.run_async = False + self.args = kw + self.name = type(self).__name__ + + @abc.abstractmethod + def main(self, client): + """ Override this method with the actual workload to be performed """ + + def done(self, result): + self._result = result + self._finished.set() + + def exception(self, e, tb): + self._exception = e + self._traceback = tb + self._finished.set() + + def wait(self, raw=False): + self._finished.wait() + + if self._exception: + six.reraise(type(self._exception), self._exception, + self._traceback) + + return self._result + + def run(self, client): + self._client = client + try: + # Retry one time if we get a retriable connection failure + try: + # Keep time for connection retrying logging + start = time.time() + self.done(self.main(client)) + except keystoneauth1.exceptions.RetriableConnectionFailure as e: + end = time.time() + dt = end - start + if client.region_name: + client.log.debug(str(e)) + client.log.debug( + "Connection failure on %(cloud)s:%(region)s" + " for %(name)s after %(secs)s seconds, retrying", + {'cloud': client.name, + 'region': client.region_name, + 'secs': dt, + 'name': self.name}) + else: + client.log.debug( + "Connection failure on %(cloud)s for %(name)s after" + " %(secs)s seconds, retrying", + {'cloud': client.name, 'name': self.name, 'secs': dt}) + self.done(self.main(client)) + except Exception: + raise + except Exception as e: + self.exception(e, sys.exc_info()[2]) + + +class Task(BaseTask): + """ Shade specific additions to the BaseTask Interface. """ + + def wait(self, raw=False): + super(Task, self).wait() + + if raw: + # Do NOT convert the result. 
+ return self._result + + if _is_listlike(self._result): + return meta.obj_list_to_munch(self._result) + elif _is_objlike(self._result): + return meta.obj_to_munch(self._result) + else: + return self._result + + +class RequestTask(BaseTask): + """ Extensions to the Shade Tasks to handle raw requests """ + + # It's totally legit for calls to not return things + result_key = None + + # keystoneauth1 throws keystoneauth1.exceptions.http.HttpError on !200 + def done(self, result): + self._response = result + + try: + result_json = self._response.json() + except ValueError as e: + result_json = self._response.text + self._client.log.debug( + 'Could not decode json in response: %(e)s', {'e': str(e)}) + self._client.log.debug(result_json) + + if self.result_key: + self._result = result_json[self.result_key] + else: + self._result = result_json + + self._request_id = self._response.headers.get('x-openstack-request-id') + self._finished.set() + + def wait(self, raw=False): + super(RequestTask, self).wait() + + if raw: + # Do NOT convert the result. + return self._result + + if _is_listlike(self._result): + return meta.obj_list_to_munch( + self._result, request_id=self._request_id) + elif _is_objlike(self._result): + return meta.obj_to_munch(self._result, request_id=self._request_id) + return self._result + + +def _result_filter_cb(result): + return result + + +def generate_task_class(method, name, result_filter_cb): + if name is None: + if callable(method): + name = method.__name__ + else: + name = method + + class RunTask(Task): + def __init__(self, **kw): + super(RunTask, self).__init__(**kw) + self.name = name + self._method = method + + def wait(self, raw=False): + super(RunTask, self).wait() + + if raw: + # Do NOT convert the result. + return self._result + return result_filter_cb(self._result) + + def main(self, client): + if callable(self._method): + return method(**self.args) + else: + meth = getattr(client, self._method) + return meth(**self.args) + return RunTask + + +class TaskManager(object): + log = _log.setup_logging('openstack.cloud.task_manager') + + def __init__( + self, client, name, result_filter_cb=None, workers=5, **kwargs): + self.name = name + self._client = client + self._executor = concurrent.futures.ThreadPoolExecutor( + max_workers=workers) + if not result_filter_cb: + self._result_filter_cb = _result_filter_cb + else: + self._result_filter_cb = result_filter_cb + + def set_client(self, client): + self._client = client + + def stop(self): + """ This is a direct action passthrough TaskManager """ + self._executor.shutdown(wait=True) + + def run(self): + """ This is a direct action passthrough TaskManager """ + pass + + def submit_task(self, task, raw=False): + """Submit and execute the given task. + + :param task: The task to execute. + :param bool raw: If True, return the raw result as received from the + underlying client call. 
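+
+        Example (a sketch; ``ServerListTask`` stands in for any Task
+        subclass whose ``main`` performs the actual call)::
+
+            manager = TaskManager(client=cloud, name='default')
+            servers = manager.submit_task(ServerListTask())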
+ """ + return self.run_task(task=task, raw=raw) + + def _run_task_async(self, task, raw=False): + self.log.debug( + "Manager %s submitting task %s", self.name, task.name) + return self._executor.submit(self._run_task, task, raw=raw) + + def run_task(self, task, raw=False): + if hasattr(task, 'run_async') and task.run_async: + return self._run_task_async(task, raw=raw) + else: + return self._run_task(task, raw=raw) + + def _run_task(self, task, raw=False): + self.log.debug( + "Manager %s running task %s", self.name, task.name) + start = time.time() + task.run(self._client) + end = time.time() + dt = end - start + self.log.debug( + "Manager %s ran task %s in %ss", self.name, task.name, dt) + + self.post_run_task(dt, task) + + return task.wait(raw) + + def post_run_task(self, elasped_time, task): + pass + + # Backwards compatibility + submitTask = submit_task + + def submit_function( + self, method, name=None, result_filter_cb=None, **kwargs): + """ Allows submitting an arbitrary method for work. + + :param method: Method to run in the TaskManager. Can be either the + name of a method to find on self.client, or a callable. + """ + if not result_filter_cb: + result_filter_cb = self._result_filter_cb + + task_class = generate_task_class(method, name, result_filter_cb) + + return self._executor.submit_task(task_class(**kwargs)) + + +def wait_for_futures(futures, raise_on_error=True, log=None): + '''Collect results or failures from a list of running future tasks.''' + + results = [] + retries = [] + + # Check on each result as its thread finishes + for completed in concurrent.futures.as_completed(futures): + try: + result = completed.result() + # We have to do this here because munch_response doesn't + # get called on async job results + exc.raise_from_response(result) + results.append(result) + except (keystoneauth1.exceptions.RetriableConnectionFailure, + exc.OpenStackCloudException) as e: + if log: + log.debug( + "Exception processing async task: {e}".format( + e=str(e)), + exc_info=True) + # If we get an exception, put the result into a list so we + # can try again + if raise_on_error: + raise + else: + retries.append(result) + return results, retries diff --git a/openstack/cloud/tests/__init__.py b/openstack/cloud/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openstack/config/__init__.py b/openstack/config/__init__.py new file mode 100644 index 000000000..5fe8fcede --- /dev/null +++ b/openstack/config/__init__.py @@ -0,0 +1,90 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys + +from openstack.config.loader import OpenStackConfig # noqa + +_config = None + + +def get_config( + service_key=None, options=None, + app_name=None, app_version=None, + **kwargs): + load_yaml_config = kwargs.pop('load_yaml_config', True) + global _config + if not _config: + _config = OpenStackConfig( + load_yaml_config=load_yaml_config, + app_name=app_name, app_version=app_version) + if options: + _config.register_argparse_arguments(options, sys.argv, service_key) + parsed_options = options.parse_known_args(sys.argv) + else: + parsed_options = None + + return _config.get_one_cloud(options=parsed_options, **kwargs) + + +def make_rest_client( + service_key, options=None, + app_name=None, app_version=None, + **kwargs): + """Simple wrapper function. It has almost no features. + + This will get you a raw requests Session Adapter that is mounted + on the given service from the keystone service catalog. If you leave + off cloud and region_name, it will assume that you've got env vars + set, but if you give them, it'll use clouds.yaml as you'd expect. + + This function is deliberately simple. It has no flexibility. If you + want flexibility, you can make a cloud config object and call + get_session_client on it. This function is to make it easy to poke + at OpenStack REST APIs with a properly configured keystone session. + """ + cloud = get_config( + service_key=service_key, options=options, + app_name=app_name, app_version=app_version, + **kwargs) + return cloud.get_session_client(service_key) +# Backwards compat - simple_client was a terrible name +simple_client = make_rest_client +# Backwards compat - session_client was a terrible name +session_client = make_rest_client + + +def make_connection(options=None, **kwargs): + """Simple wrapper for getting an OpenStack SDK Connection. + + For completeness, provide a mechanism that matches make_client and + make_rest_client. The heavy lifting here is done in openstacksdk. + + :rtype: :class:`~openstack.connection.Connection` + """ + from openstack import connection + cloud = get_config(options=options, **kwargs) + return connection.from_config(cloud_config=cloud, options=options) + + +def make_cloud(options=None, **kwargs): + """Simple wrapper for getting an OpenStackCloud object + + A mechanism that matches make_connection and make_rest_client. + + :rtype: :class:`~openstack.OpenStackCloud` + """ + import openstack.cloud + cloud = get_config(options=options, **kwargs) + return openstack.OpenStackCloud(cloud_config=cloud, **kwargs) diff --git a/openstack/config/cloud_config.py b/openstack/config/cloud_config.py new file mode 100644 index 000000000..404769f69 --- /dev/null +++ b/openstack/config/cloud_config.py @@ -0,0 +1,558 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
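+
+# Usage sketch (assumes a CloudConfig produced by openstack.config, for
+# example via get_config(cloud='mycloud'), where 'mycloud' is a
+# hypothetical clouds.yaml entry):
+#
+#     session = cloud_config.get_session()
+#     neutron = cloud_config.get_session_client('network')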
+
+import importlib
+import math
+import warnings
+
+from keystoneauth1 import adapter
+import keystoneauth1.exceptions.catalog
+from keystoneauth1 import session
+import requestsexceptions
+
+import openstack
+from openstack import _log
+from openstack.config import constructors
+from openstack.config import exceptions
+
+
+def _get_client(service_key):
+    class_mapping = constructors.get_constructor_mapping()
+    if service_key not in class_mapping:
+        raise exceptions.OpenStackConfigException(
+            "Service {service_key} is unknown. Please pass in a client"
+            " constructor or submit a patch to os-client-config".format(
+                service_key=service_key))
+    mod_name, ctr_name = class_mapping[service_key].rsplit('.', 1)
+    lib_name = mod_name.split('.')[0]
+    try:
+        mod = importlib.import_module(mod_name)
+    except ImportError:
+        raise exceptions.OpenStackConfigException(
+            "Client for '{service_key}' was requested, but"
+            " {mod_name} was unable to be imported. Either import"
+            " the module yourself and pass the constructor in as an argument,"
+            " or perhaps you do not have python-{lib_name} installed.".format(
+                service_key=service_key,
+                mod_name=mod_name,
+                lib_name=lib_name))
+    try:
+        ctr = getattr(mod, ctr_name)
+    except AttributeError:
+        raise exceptions.OpenStackConfigException(
+            "Client for '{service_key}' was requested, but although"
+            " {mod_name} imported fine, the constructor at {fullname}"
+            " was not found. Please check your installation, we have no"
+            " clue what is wrong with your computer.".format(
+                service_key=service_key,
+                mod_name=mod_name,
+                fullname=class_mapping[service_key]))
+    return ctr
+
+
+def _make_key(key, service_type):
+    if not service_type:
+        return key
+    else:
+        service_type = service_type.lower().replace('-', '_')
+        return "_".join([service_type, key])
+
+
+class CloudConfig(object):
+    def __init__(self, name, region, config,
+                 force_ipv4=False, auth_plugin=None,
+                 openstack_config=None, session_constructor=None,
+                 app_name=None, app_version=None):
+        self.name = name
+        self.region = region
+        self.config = config
+        self.log = _log.setup_logging(__name__)
+        self._force_ipv4 = force_ipv4
+        self._auth = auth_plugin
+        self._openstack_config = openstack_config
+        self._keystone_session = None
+        self._session_constructor = session_constructor or session.Session
+        self._app_name = app_name
+        self._app_version = app_version
+
+    def __getattr__(self, key):
+        """Return arbitrary attributes."""
+
+        if key.startswith('os_'):
+            key = key[3:]
+
+        if key in [attr.replace('-', '_') for attr in self.config]:
+            return self.config[key]
+        else:
+            return None
+
+    def __iter__(self):
+        return self.config.__iter__()
+
+    def __eq__(self, other):
+        return (self.name == other.name and self.region == other.region
+                and self.config == other.config)
+
+    def __ne__(self, other):
+        return not self == other
+
+    def set_session_constructor(self, session_constructor):
+        """Sets the Session constructor."""
+        self._session_constructor = session_constructor
+
+    def get_requests_verify_args(self):
+        """Return the verify and cert values for the requests library."""
+        if self.config['verify'] and self.config['cacert']:
+            verify = self.config['cacert']
+        else:
+            verify = self.config['verify']
+            if self.config['cacert']:
+                warnings.warn(
+                    "You are specifying a cacert for the cloud {0} but are"
+                    " also asking to ignore host verification. 
The host SSL cert " + "will not be verified.".format(self.name)) + + cert = self.config.get('cert', None) + if cert: + if self.config['key']: + cert = (cert, self.config['key']) + return (verify, cert) + + def get_services(self): + """Return a list of service types we know something about.""" + services = [] + for key, val in self.config.items(): + if (key.endswith('api_version') + or key.endswith('service_type') + or key.endswith('service_name')): + services.append("_".join(key.split('_')[:-2])) + return list(set(services)) + + def get_auth_args(self): + return self.config['auth'] + + def get_interface(self, service_type=None): + key = _make_key('interface', service_type) + interface = self.config.get('interface') + return self.config.get(key, interface) + + def get_region_name(self, service_type=None): + if not service_type: + return self.region + key = _make_key('region_name', service_type) + return self.config.get(key, self.region) + + def get_api_version(self, service_type): + key = _make_key('api_version', service_type) + return self.config.get(key, None) + + def get_service_type(self, service_type): + key = _make_key('service_type', service_type) + # Cinder did an evil thing where they defined a second service + # type in the catalog. Of course, that's insane, so let's hide this + # atrocity from the as-yet-unsullied eyes of our users. + # Of course, if the user requests a volumev2, that structure should + # still work. + # What's even more amazing is that they did it AGAIN with cinder v3 + # And then I learned that mistral copied it. + if service_type == 'volume': + if self.get_api_version(service_type).startswith('2'): + service_type = 'volumev2' + elif self.get_api_version(service_type).startswith('3'): + service_type = 'volumev3' + elif service_type == 'workflow': + if self.get_api_version(service_type).startswith('2'): + service_type = 'workflowv2' + return self.config.get(key, service_type) + + def get_service_name(self, service_type): + key = _make_key('service_name', service_type) + return self.config.get(key, None) + + def get_endpoint(self, service_type): + key = _make_key('endpoint_override', service_type) + old_key = _make_key('endpoint', service_type) + return self.config.get(key, self.config.get(old_key, None)) + + @property + def prefer_ipv6(self): + return not self._force_ipv4 + + @property + def force_ipv4(self): + return self._force_ipv4 + + def get_auth(self): + """Return a keystoneauth plugin from the auth credentials.""" + return self._auth + + def get_session(self): + """Return a keystoneauth session based on the auth credentials.""" + if self._keystone_session is None: + if not self._auth: + raise exceptions.OpenStackConfigException( + "Problem with auth parameters") + (verify, cert) = self.get_requests_verify_args() + # Turn off urllib3 warnings about insecure certs if we have + # explicitly configured requests to tell it we do not want + # cert verification + if not verify: + self.log.debug( + "Turning off SSL warnings for {cloud}:{region}" + " since verify=False".format( + cloud=self.name, region=self.region)) + requestsexceptions.squelch_warnings(insecure_requests=not verify) + self._keystone_session = self._session_constructor( + auth=self._auth, + verify=verify, + cert=cert, + timeout=self.config['api_timeout']) + if hasattr(self._keystone_session, 'additional_user_agent'): + self._keystone_session.additional_user_agent.append( + ('openstacksdk', openstack.__version__)) + # Using old keystoneauth with new os-client-config fails if + # we pass in app_name 
and app_version. Those are not essential, + # nor a reason to bump our minimum, so just test for the session + # having the attribute post creation and set them then. + if hasattr(self._keystone_session, 'app_name'): + self._keystone_session.app_name = self._app_name + if hasattr(self._keystone_session, 'app_version'): + self._keystone_session.app_version = self._app_version + return self._keystone_session + + def get_service_catalog(self): + """Helper method to grab the service catalog.""" + return self._auth.get_access(self.get_session()).service_catalog + + def get_session_client(self, service_key): + """Return a prepped requests adapter for a given service. + + This is useful for making direct requests calls against a + 'mounted' endpoint. That is, if you do: + + client = get_session_client('compute') + + then you can do: + + client.get('/flavors') + + and it will work like you think. + """ + + return adapter.Adapter( + session=self.get_session(), + service_type=self.get_service_type(service_key), + service_name=self.get_service_name(service_key), + interface=self.get_interface(service_key), + region_name=self.region) + + def _get_highest_endpoint(self, service_types, kwargs): + session = self.get_session() + for service_type in service_types: + kwargs['service_type'] = service_type + try: + # Return the highest version we find that matches + # the request + return session.get_endpoint(**kwargs) + except keystoneauth1.exceptions.catalog.EndpointNotFound: + pass + + def get_session_endpoint( + self, service_key, min_version=None, max_version=None): + """Return the endpoint from config or the catalog. + + If a configuration lists an explicit endpoint for a service, + return that. Otherwise, fetch the service catalog from the + keystone session and return the appropriate endpoint. + + :param service_key: Generic key for service, such as 'compute' or + 'network' + + """ + + override_endpoint = self.get_endpoint(service_key) + if override_endpoint: + return override_endpoint + endpoint = None + kwargs = { + 'service_name': self.get_service_name(service_key), + 'region_name': self.region + } + kwargs['interface'] = self.get_interface(service_key) + if service_key == 'volume' and not self.get_api_version('volume'): + # If we don't have a configured cinder version, we can't know + # to request a different service_type + min_version = float(min_version or 1) + max_version = float(max_version or 3) + min_major = math.trunc(float(min_version)) + max_major = math.trunc(float(max_version)) + versions = range(int(max_major) + 1, int(min_major), -1) + service_types = [] + for version in versions: + if version == 1: + service_types.append('volume') + else: + service_types.append('volumev{v}'.format(v=version)) + else: + service_types = [self.get_service_type(service_key)] + endpoint = self._get_highest_endpoint(service_types, kwargs) + if not endpoint: + self.log.warning( + "Keystone catalog entry not found (" + "service_type=%s,service_name=%s" + "interface=%s,region_name=%s)", + service_key, + kwargs['service_name'], + kwargs['interface'], + kwargs['region_name']) + return endpoint + + def get_legacy_client( + self, service_key, client_class=None, interface_key=None, + pass_version_arg=True, version=None, min_version=None, + max_version=None, **kwargs): + """Return a legacy OpenStack client object for the given config. 
+
+        Most of the OpenStack python-*client libraries have the same
+        interface for their client constructors, but there are several
+        parameters one wants to pass given a :class:`CloudConfig` object.
+
+        In the future, OpenStack API consumption should be done through
+        the OpenStack SDK, but that's not ready yet. This is for getting
+        Client objects from python-*client only.
+
+        :param service_key: Generic key for service, such as 'compute' or
+                            'network'
+        :param client_class: Class of the client to be instantiated. This
+                             should be the unversioned version if there
+                             is one, such as novaclient.client.Client, or
+                             the versioned one, such as
+                             neutronclient.v2_0.client.Client, if there isn't.
+        :param interface_key: (optional) Some clients, such as glanceclient
+                              only accept the parameter 'interface' instead
+                              of 'endpoint_type' - this is a get-out-of-jail
+                              parameter for those until they can be aligned.
+                              os-client-config understands this to be the
+                              case if service_key is image, so this is really
+                              only for use with other unknown broken clients.
+        :param pass_version_arg: (optional) If a versioned Client constructor
+                                 was passed to client_class, set this to
+                                 False, which will tell get_client to not
+                                 pass a version parameter. os-client-config
+                                 already understands that this is the
+                                 case for network, so it can be omitted in
+                                 that case.
+        :param version: (optional) Version string to override the configured
+                        version string.
+        :param min_version: (optional) Minimum version acceptable.
+        :param max_version: (optional) Maximum version acceptable.
+        :param kwargs: (optional) keyword args are passed through to the
+                       Client constructor, so this is in case anything
+                       additional needs to be passed in.
+        """
+        if not client_class:
+            client_class = _get_client(service_key)
+
+        interface = self.get_interface(service_key)
+        # trigger exception on lack of service
+        endpoint = self.get_session_endpoint(
+            service_key, min_version=min_version, max_version=max_version)
+        endpoint_override = self.get_endpoint(service_key)
+
+        if service_key == 'object-store':
+            constructor_kwargs = dict(
+                session=self.get_session(),
+                os_options=dict(
+                    service_type=self.get_service_type(service_key),
+                    object_storage_url=endpoint_override,
+                    region_name=self.region))
+        else:
+            constructor_kwargs = dict(
+                session=self.get_session(),
+                service_name=self.get_service_name(service_key),
+                service_type=self.get_service_type(service_key),
+                endpoint_override=endpoint_override,
+                region_name=self.region)
+
+        if service_key == 'image':
+            # os-client-config does not depend on glanceclient, but if
+            # the user passed in glanceclient.client.Client, which they
+            # would need to do if they were requesting 'image' - then
+            # they necessarily have glanceclient installed
+            from glanceclient.common import utils as glance_utils
+            endpoint, detected_version = glance_utils.strip_version(endpoint)
+            # If the user has passed in a version, that's explicit, use it
+            if not version:
+                version = detected_version
+        # If the user has passed in or configured an override, use it.
+        # Otherwise, ALWAYS pass in an endpoint_override because
+        # we've already done version stripping, so we don't want version
+        # reconstruction to happen twice
+        if not endpoint_override:
+            constructor_kwargs['endpoint_override'] = endpoint
+        constructor_kwargs.update(kwargs)
+        if pass_version_arg and service_key != 'object-store':
+            if not version:
+                version = self.get_api_version(service_key)
+            if not version and service_key == 'volume':
+                from cinderclient import client as cinder_client
+                version = cinder_client.get_volume_api_from_url(endpoint)
+            # Temporary workaround while we wait for python-openstackclient
+            # to be able to handle 2.0 which is what neutronclient expects
+            if service_key == 'network' and version == '2':
+                version = '2.0'
+            if service_key == 'identity':
+                # Workaround for bug#1513839
+                if 'endpoint' not in constructor_kwargs:
+                    endpoint = self.get_session_endpoint('identity')
+                    constructor_kwargs['endpoint'] = endpoint
+            if service_key == 'network':
+                constructor_kwargs['api_version'] = version
+            elif service_key == 'baremetal':
+                if version != '1':
+                    # Set Ironic Microversion
+                    constructor_kwargs['os_ironic_api_version'] = version
+                # Version arg is the major version, not the full microstring
+                constructor_kwargs['version'] = version[0]
+            else:
+                constructor_kwargs['version'] = version
+            if min_version and min_version > float(version):
+                raise exceptions.OpenStackConfigVersionException(
+                    "Minimum version {min_version} requested but {version}"
+                    " found".format(min_version=min_version, version=version),
+                    version=version)
+            if max_version and max_version < float(version):
+                raise exceptions.OpenStackConfigVersionException(
+                    "Maximum version {max_version} requested but {version}"
+                    " found".format(max_version=max_version, version=version),
+                    version=version)
+        if service_key == 'database':
+            # TODO(mordred) Remove when https://review.openstack.org/314032
+            # has landed and released. We're passing in a Session, but the
+            # trove Client object has username and password as required
+            # args
+            constructor_kwargs['username'] = None
+            constructor_kwargs['password'] = None
+
+        if not interface_key:
+            if service_key in ('image', 'key-manager'):
+                interface_key = 'interface'
+            elif (service_key == 'identity'
+                    and version and version.startswith('3')):
+                interface_key = 'interface'
+            else:
+                interface_key = 'endpoint_type'
+        if service_key == 'object-store':
+            constructor_kwargs['os_options'][interface_key] = interface
+        else:
+            constructor_kwargs[interface_key] = interface
+
+        return client_class(**constructor_kwargs)
+
+    def get_cache_expiration_time(self):
+        if self._openstack_config:
+            return self._openstack_config.get_cache_expiration_time()
+
+    def get_cache_path(self):
+        if self._openstack_config:
+            return self._openstack_config.get_cache_path()
+
+    def get_cache_class(self):
+        if self._openstack_config:
+            return self._openstack_config.get_cache_class()
+
+    def get_cache_arguments(self):
+        if self._openstack_config:
+            return self._openstack_config.get_cache_arguments()
+
+    def get_cache_expiration(self):
+        if self._openstack_config:
+            return self._openstack_config.get_cache_expiration()
+
+    def get_cache_resource_expiration(self, resource, default=None):
+        """Get expiration time for a resource
+
+        :param resource: Name of the resource type
+        :param default: Default value to return if not found (optional,
+                        defaults to None)
+
+        :returns: Expiration time for the resource type as float or default
+        """
+        if self._openstack_config:
+            expiration = self._openstack_config.get_cache_expiration()
+            if resource not in expiration:
+                return default
+            return float(expiration[resource])
+
+    def requires_floating_ip(self):
+        """Return whether or not this cloud requires floating ips.
+
+        :returns: True or False if known, None if discovery is needed.
+            If requires_floating_ip is not configured but the cloud is
+            known to not provide floating ips, will return False.
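+
+        Example: setting ``floating_ip_source: None`` for a cloud in
+        ``clouds.yaml`` makes this return False (a sketch; 'mycloud' is
+        hypothetical)::
+
+            clouds:
+              mycloud:
+                floating_ip_source: None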
+ """ + if self.config['floating_ip_source'] == "None": + return False + return self.config.get('requires_floating_ip') + + def get_external_networks(self): + """Get list of network names for external networks.""" + return [ + net['name'] for net in self.config['networks'] + if net['routes_externally']] + + def get_external_ipv4_networks(self): + """Get list of network names for external IPv4 networks.""" + return [ + net['name'] for net in self.config['networks'] + if net['routes_ipv4_externally']] + + def get_external_ipv6_networks(self): + """Get list of network names for external IPv6 networks.""" + return [ + net['name'] for net in self.config['networks'] + if net['routes_ipv6_externally']] + + def get_internal_networks(self): + """Get list of network names for internal networks.""" + return [ + net['name'] for net in self.config['networks'] + if not net['routes_externally']] + + def get_internal_ipv4_networks(self): + """Get list of network names for internal IPv4 networks.""" + return [ + net['name'] for net in self.config['networks'] + if not net['routes_ipv4_externally']] + + def get_internal_ipv6_networks(self): + """Get list of network names for internal IPv6 networks.""" + return [ + net['name'] for net in self.config['networks'] + if not net['routes_ipv6_externally']] + + def get_default_network(self): + """Get network used for default interactions.""" + for net in self.config['networks']: + if net['default_interface']: + return net['name'] + return None + + def get_nat_destination(self): + """Get network used for NAT destination.""" + for net in self.config['networks']: + if net['nat_destination']: + return net['name'] + return None diff --git a/openstack/config/constructors.json b/openstack/config/constructors.json new file mode 100644 index 000000000..9acb7cfb9 --- /dev/null +++ b/openstack/config/constructors.json @@ -0,0 +1,16 @@ +{ + "application-catalog": "muranoclient.client.Client", + "baremetal": "ironicclient.client.Client", + "compute": "novaclient.client.Client", + "container-infra": "magnumclient.client.Client", + "database": "troveclient.client.Client", + "dns": "designateclient.client.Client", + "identity": "keystoneclient.client.Client", + "image": "glanceclient.Client", + "key-manager": "barbicanclient.client.Client", + "metering": "ceilometerclient.client.Client", + "network": "neutronclient.neutron.client.Client", + "object-store": "swiftclient.client.Connection", + "orchestration": "heatclient.client.Client", + "volume": "cinderclient.client.Client" +} diff --git a/openstack/config/constructors.py b/openstack/config/constructors.py new file mode 100644 index 000000000..579bb2d5e --- /dev/null +++ b/openstack/config/constructors.py @@ -0,0 +1,36 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import os +import threading + +_json_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'constructors.json') +_class_mapping = None +_class_mapping_lock = threading.Lock() + + +def get_constructor_mapping(): + global _class_mapping + if _class_mapping is not None: + return _class_mapping.copy() + with _class_mapping_lock: + if _class_mapping is not None: + return _class_mapping.copy() + tmp_class_mapping = {} + with open(_json_path, 'r') as json_file: + tmp_class_mapping.update(json.load(json_file)) + _class_mapping = tmp_class_mapping + return tmp_class_mapping.copy() diff --git a/openstack/config/defaults.json b/openstack/config/defaults.json new file mode 100644 index 000000000..2a195c426 --- /dev/null +++ b/openstack/config/defaults.json @@ -0,0 +1,27 @@ +{ + "application_catalog_api_version": "1", + "auth_type": "password", + "baremetal_api_version": "1", + "container_api_version": "1", + "container_infra_api_version": "1", + "compute_api_version": "2", + "database_api_version": "1.0", + "disable_vendor_agent": {}, + "dns_api_version": "2", + "interface": "public", + "floating_ip_source": "neutron", + "identity_api_version": "2.0", + "image_api_use_tasks": false, + "image_api_version": "2", + "image_format": "qcow2", + "key_manager_api_version": "v1", + "message": "", + "metering_api_version": "2", + "network_api_version": "2", + "object_store_api_version": "1", + "orchestration_api_version": "1", + "secgroup_source": "neutron", + "status": "active", + "volume_api_version": "2", + "workflow_api_version": "2" +} diff --git a/openstack/config/defaults.py b/openstack/config/defaults.py new file mode 100644 index 000000000..1231cce92 --- /dev/null +++ b/openstack/config/defaults.py @@ -0,0 +1,52 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import os +import threading + +_json_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'defaults.json') +_defaults = None +_defaults_lock = threading.Lock() + + +def get_defaults(): + global _defaults + if _defaults is not None: + return _defaults.copy() + with _defaults_lock: + if _defaults is not None: + # Did someone else just finish filling it? + return _defaults.copy() + # Python language specific defaults + # These are defaults related to use of python libraries, they are + # not qualities of a cloud. + # + # NOTE(harlowja): update a in-memory dict, before updating + # the global one so that other callers of get_defaults do not + # see the partially filled one. 
+        tmp_defaults = dict(
+            api_timeout=None,
+            verify=True,
+            cacert=None,
+            cert=None,
+            key=None,
+        )
+        with open(_json_path, 'r') as json_file:
+            updates = json.load(json_file)
+            if updates is not None:
+                tmp_defaults.update(updates)
+        _defaults = tmp_defaults
+        return tmp_defaults.copy()
diff --git a/openstack/config/exceptions.py b/openstack/config/exceptions.py
new file mode 100644
index 000000000..556dd49bc
--- /dev/null
+++ b/openstack/config/exceptions.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class OpenStackConfigException(Exception):
+    """Something went wrong with parsing your OpenStack Config."""
+
+
+class OpenStackConfigVersionException(OpenStackConfigException):
+    """A version was requested that is different than what was found."""
+
+    def __init__(self, message=None, version=None):
+        super(OpenStackConfigVersionException, self).__init__(message)
+        self.version = version
diff --git a/openstack/config/loader.py b/openstack/config/loader.py
new file mode 100644
index 000000000..9049ebf74
--- /dev/null
+++ b/openstack/config/loader.py
@@ -0,0 +1,1241 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
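+
+# Usage sketch (assumes a clouds.yaml that defines a cloud named
+# 'mycloud'; both names here are hypothetical):
+#
+#     config = OpenStackConfig()
+#     cloud_config = config.get_one_cloud('mycloud', region_name='RegionOne')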
+ + +# alias because we already had an option named argparse +import argparse as argparse_mod +import collections +import copy +import json +import os +import re +import sys +import warnings + +import appdirs +from keystoneauth1 import adapter +from keystoneauth1 import loading +import yaml + +from openstack import _log +from openstack.config import cloud_config +from openstack.config import defaults +from openstack.config import exceptions +from openstack.config import vendors + +APPDIRS = appdirs.AppDirs('openstack', 'OpenStack', multipath='/etc') +CONFIG_HOME = APPDIRS.user_config_dir +CACHE_PATH = APPDIRS.user_cache_dir + +UNIX_CONFIG_HOME = os.path.join( + os.path.expanduser(os.path.join('~', '.config')), 'openstack') +UNIX_SITE_CONFIG_HOME = '/etc/openstack' + +SITE_CONFIG_HOME = APPDIRS.site_config_dir + +CONFIG_SEARCH_PATH = [ + os.getcwd(), + CONFIG_HOME, UNIX_CONFIG_HOME, + SITE_CONFIG_HOME, UNIX_SITE_CONFIG_HOME +] +YAML_SUFFIXES = ('.yaml', '.yml') +JSON_SUFFIXES = ('.json',) +CONFIG_FILES = [ + os.path.join(d, 'clouds' + s) + for d in CONFIG_SEARCH_PATH + for s in YAML_SUFFIXES + JSON_SUFFIXES +] +SECURE_FILES = [ + os.path.join(d, 'secure' + s) + for d in CONFIG_SEARCH_PATH + for s in YAML_SUFFIXES + JSON_SUFFIXES +] +VENDOR_FILES = [ + os.path.join(d, 'clouds-public' + s) + for d in CONFIG_SEARCH_PATH + for s in YAML_SUFFIXES + JSON_SUFFIXES +] + +BOOL_KEYS = ('insecure', 'cache') + + +# NOTE(dtroyer): This turns out to be not the best idea so let's move +# overriding defaults to a kwarg to OpenStackConfig.__init__() +# Remove this sometime in June 2015 once OSC is comfortably +# changed-over and global-defaults is updated. +def set_default(key, value): + warnings.warn( + "Use of set_default() is deprecated. Defaults should be set with the " + "`override_defaults` parameter of OpenStackConfig." 
+ ) + defaults.get_defaults() # make sure the dict is initialized + defaults._defaults[key] = value + + +def get_boolean(value): + if value is None: + return False + if type(value) is bool: + return value + if value.lower() == 'true': + return True + return False + + +def _get_os_environ(envvar_prefix=None): + ret = defaults.get_defaults() + if not envvar_prefix: + # This makes the or below be OS_ or OS_ which is a no-op + envvar_prefix = 'OS_' + environkeys = [k for k in os.environ.keys() + if (k.startswith('OS_') or k.startswith(envvar_prefix)) + and not k.startswith('OS_TEST') # infra CI var + and not k.startswith('OS_STD') # infra CI var + ] + for k in environkeys: + newkey = k.split('_', 1)[-1].lower() + ret[newkey] = os.environ[k] + # If the only environ keys are selectors or behavior modification, don't + # return anything + selectors = set([ + 'OS_CLOUD', 'OS_REGION_NAME', + 'OS_CLIENT_CONFIG_FILE', 'OS_CLIENT_SECURE_FILE', 'OS_CLOUD_NAME']) + if set(environkeys) - selectors: + return ret + return None + + +def _merge_clouds(old_dict, new_dict): + """Like dict.update, except handling nested dicts.""" + ret = old_dict.copy() + for (k, v) in new_dict.items(): + if isinstance(v, dict): + if k in ret: + ret[k] = _merge_clouds(ret[k], v) + else: + ret[k] = v.copy() + else: + ret[k] = v + return ret + + +def _auth_update(old_dict, new_dict_source): + """Like dict.update, except handling the nested dict called auth.""" + new_dict = copy.deepcopy(new_dict_source) + for (k, v) in new_dict.items(): + if k == 'auth': + if k in old_dict: + old_dict[k].update(v) + else: + old_dict[k] = v.copy() + else: + old_dict[k] = v + return old_dict + + +def _fix_argv(argv): + # Transform any _ characters in arg names to - so that we don't + # have to throw billions of compat argparse arguments around all + # over the place. + processed = collections.defaultdict(list) + for index in range(0, len(argv)): + # If the value starts with '--' and has '-' or '_' in it, then + # it's worth looking at it + if re.match('^--.*(_|-)+.*', argv[index]): + split_args = argv[index].split('=') + orig = split_args[0] + new = orig.replace('_', '-') + if orig != new: + split_args[0] = new + argv[index] = "=".join(split_args) + # Save both for later so we can throw an error about dupes + processed[new].append(orig) + overlap = [] + for new, old in processed.items(): + if len(old) > 1: + overlap.extend(old) + if overlap: + raise exceptions.OpenStackConfigException( + "The following options were given: '{options}' which contain" + " duplicates except that one has _ and one has -. There is" + " no sane way for us to know what you're doing. 
Remove the" + " duplicate option and try again".format( + options=','.join(overlap))) + + +class OpenStackConfig(object): + + def __init__(self, config_files=None, vendor_files=None, + override_defaults=None, force_ipv4=None, + envvar_prefix=None, secure_files=None, + pw_func=None, session_constructor=None, + app_name=None, app_version=None, + load_yaml_config=True): + self.log = _log.setup_logging(__name__) + self._session_constructor = session_constructor + self._app_name = app_name + self._app_version = app_version + + if load_yaml_config: + self._config_files = config_files or CONFIG_FILES + self._secure_files = secure_files or SECURE_FILES + self._vendor_files = vendor_files or VENDOR_FILES + else: + self._config_files = [] + self._secure_files = [] + self._vendor_files = [] + + config_file_override = os.environ.get('OS_CLIENT_CONFIG_FILE') + if config_file_override: + self._config_files.insert(0, config_file_override) + + secure_file_override = os.environ.get('OS_CLIENT_SECURE_FILE') + if secure_file_override: + self._secure_files.insert(0, secure_file_override) + + self.defaults = defaults.get_defaults() + if override_defaults: + self.defaults.update(override_defaults) + + # First, use a config file if it exists where expected + self.config_filename, self.cloud_config = self._load_config_file() + _, secure_config = self._load_secure_file() + if secure_config: + self.cloud_config = _merge_clouds( + self.cloud_config, secure_config) + + if not self.cloud_config: + self.cloud_config = {'clouds': {}} + if 'clouds' not in self.cloud_config: + self.cloud_config['clouds'] = {} + + # Grab ipv6 preference settings from env + client_config = self.cloud_config.get('client', {}) + + if force_ipv4 is not None: + # If it's passed in to the constructor, honor it. + self.force_ipv4 = force_ipv4 + else: + # Get the backwards compat value + prefer_ipv6 = get_boolean( + os.environ.get( + 'OS_PREFER_IPV6', client_config.get( + 'prefer_ipv6', client_config.get( + 'prefer-ipv6', True)))) + force_ipv4 = get_boolean( + os.environ.get( + 'OS_FORCE_IPV4', client_config.get( + 'force_ipv4', client_config.get( + 'broken-ipv6', False)))) + + self.force_ipv4 = force_ipv4 + if not prefer_ipv6: + # this will only be false if someone set it explicitly + # honor their wishes + self.force_ipv4 = True + + # Next, process environment variables and add them to the mix + self.envvar_key = os.environ.get('OS_CLOUD_NAME', 'envvars') + if self.envvar_key in self.cloud_config['clouds']: + raise exceptions.OpenStackConfigException( + '"{0}" defines a cloud named "{1}", but' + ' OS_CLOUD_NAME is also set to "{1}". 
Please rename'
+                ' either your environment based cloud, or one of your'
+                ' file-based clouds.'.format(self.config_filename,
+                                             self.envvar_key))
+
+        self.default_cloud = os.environ.get('OS_CLOUD')
+
+        envvars = _get_os_environ(envvar_prefix=envvar_prefix)
+        if envvars:
+            self.cloud_config['clouds'][self.envvar_key] = envvars
+            if not self.default_cloud:
+                self.default_cloud = self.envvar_key
+
+        # Finally, fall through and make a cloud that starts with defaults
+        # because we need somewhere to put arguments, and there are neither
+        # config files or env vars
+        if not self.cloud_config['clouds']:
+            self.cloud_config = dict(
+                clouds=dict(defaults=dict(self.defaults)))
+            self.default_cloud = 'defaults'
+
+        self._cache_expiration_time = 0
+        self._cache_path = CACHE_PATH
+        self._cache_class = 'dogpile.cache.null'
+        self._cache_arguments = {}
+        self._cache_expiration = {}
+        if 'cache' in self.cloud_config:
+            cache_settings = self._normalize_keys(self.cloud_config['cache'])
+
+            # expiration_time used to be 'max_age' but the dogpile setting
+            # is expiration_time. Support max_age for backwards compat.
+            self._cache_expiration_time = cache_settings.get(
+                'expiration_time', cache_settings.get(
+                    'max_age', self._cache_expiration_time))
+
+            # If cache class is given, use that. If not, but if cache time
+            # is given, default to memory. Otherwise, default to nothing.
+            if self._cache_expiration_time:
+                self._cache_class = 'dogpile.cache.memory'
+            self._cache_class = self.cloud_config['cache'].get(
+                'class', self._cache_class)
+
+            self._cache_path = os.path.expanduser(
+                cache_settings.get('path', self._cache_path))
+            self._cache_arguments = cache_settings.get(
+                'arguments', self._cache_arguments)
+            self._cache_expiration = cache_settings.get(
+                'expiration', self._cache_expiration)
+
+        # Flag location to hold the peeked value of an argparse timeout value
+        self._argv_timeout = False
+
+        # Save the password callback
+        # password = self._pw_callback(prompt="Password: ")
+        self._pw_callback = pw_func
+
+    def get_extra_config(self, key, defaults=None):
+        """Fetch an arbitrary extra chunk of config, laying in defaults.
+ + :param string key: name of the config section to fetch + :param dict defaults: (optional) default values to merge under the + found config + """ + if not defaults: + defaults = {} + return _merge_clouds( + self._normalize_keys(defaults), + self._normalize_keys(self.cloud_config.get(key, {}))) + + def _load_config_file(self): + return self._load_yaml_json_file(self._config_files) + + def _load_secure_file(self): + return self._load_yaml_json_file(self._secure_files) + + def _load_vendor_file(self): + return self._load_yaml_json_file(self._vendor_files) + + def _load_yaml_json_file(self, filelist): + for path in filelist: + if os.path.exists(path): + with open(path, 'r') as f: + if path.endswith('json'): + return path, json.load(f) + else: + return path, yaml.safe_load(f) + return (None, {}) + + def _normalize_keys(self, config): + new_config = {} + for key, value in config.items(): + key = key.replace('-', '_') + if isinstance(value, dict): + new_config[key] = self._normalize_keys(value) + elif isinstance(value, bool): + new_config[key] = value + elif isinstance(value, int) and key != 'verbose_level': + new_config[key] = str(value) + elif isinstance(value, float): + new_config[key] = str(value) + else: + new_config[key] = value + return new_config + + def get_cache_expiration_time(self): + return int(self._cache_expiration_time) + + def get_cache_interval(self): + return self.get_cache_expiration_time() + + def get_cache_max_age(self): + return self.get_cache_expiration_time() + + def get_cache_path(self): + return self._cache_path + + def get_cache_class(self): + return self._cache_class + + def get_cache_arguments(self): + return copy.deepcopy(self._cache_arguments) + + def get_cache_expiration(self): + return copy.deepcopy(self._cache_expiration) + + def _expand_region_name(self, region_name): + return {'name': region_name, 'values': {}} + + def _expand_regions(self, regions): + ret = [] + for region in regions: + if isinstance(region, dict): + ret.append(copy.deepcopy(region)) + else: + ret.append(self._expand_region_name(region)) + return ret + + def _get_regions(self, cloud): + if cloud not in self.cloud_config['clouds']: + return [self._expand_region_name('')] + regions = self._get_known_regions(cloud) + if not regions: + # We don't know of any regions use a workable default. + regions = [self._expand_region_name('')] + return regions + + def _get_known_regions(self, cloud): + config = self._normalize_keys(self.cloud_config['clouds'][cloud]) + if 'regions' in config: + return self._expand_regions(config['regions']) + elif 'region_name' in config: + if isinstance(config['region_name'], list): + regions = config['region_name'] + else: + regions = config['region_name'].split(',') + if len(regions) > 1: + warnings.warn( + "Comma separated lists in region_name are deprecated." + " Please use a yaml list in the regions" + " parameter in {0} instead.".format(self.config_filename)) + return self._expand_regions(regions) + else: + # crappit. we don't have a region defined. 
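+            # Fall back to expanding the vendor profile for this cloud;
+            # the profile itself may define regions.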
+
+    def _get_known_regions(self, cloud):
+        config = self._normalize_keys(self.cloud_config['clouds'][cloud])
+        if 'regions' in config:
+            return self._expand_regions(config['regions'])
+        elif 'region_name' in config:
+            if isinstance(config['region_name'], list):
+                regions = config['region_name']
+            else:
+                regions = config['region_name'].split(',')
+            if len(regions) > 1:
+                warnings.warn(
+                    "Comma-separated lists in region_name are deprecated."
+                    " Please use a yaml list in the regions"
+                    " parameter in {0} instead.".format(self.config_filename))
+            return self._expand_regions(regions)
+        else:
+            # We don't have a region defined in the config. Fall back to
+            # any regions the expanded vendor profile defines.
+            new_cloud = dict()
+            our_cloud = self.cloud_config['clouds'].get(cloud, dict())
+            self._expand_vendor_profile(cloud, new_cloud, our_cloud)
+            if 'regions' in new_cloud and new_cloud['regions']:
+                return self._expand_regions(new_cloud['regions'])
+            elif 'region_name' in new_cloud and new_cloud['region_name']:
+                return [self._expand_region_name(new_cloud['region_name'])]
+
+    def _get_region(self, cloud=None, region_name=''):
+        if region_name is None:
+            region_name = ''
+        if not cloud:
+            return self._expand_region_name(region_name)
+
+        regions = self._get_known_regions(cloud)
+        if not regions:
+            return self._expand_region_name(region_name)
+
+        if not region_name:
+            return regions[0]
+
+        for region in regions:
+            if region['name'] == region_name:
+                return region
+
+        raise exceptions.OpenStackConfigException(
+            'Region {region_name} is not a valid region name for cloud'
+            ' {cloud}. Valid choices are {region_list}. Please note that'
+            ' region names are case sensitive.'.format(
+                region_name=region_name,
+                region_list=','.join([r['name'] for r in regions]),
+                cloud=cloud))
+
+    def get_cloud_names(self):
+        return self.cloud_config['clouds'].keys()
+
+    def _get_base_cloud_config(self, name):
+        cloud = dict()
+
+        # Only validate cloud name if one was given
+        if name and name not in self.cloud_config['clouds']:
+            raise exceptions.OpenStackConfigException(
+                "Cloud {name} was not found.".format(
+                    name=name))
+
+        our_cloud = self.cloud_config['clouds'].get(name, dict())
+
+        # Get the defaults
+        cloud.update(self.defaults)
+        self._expand_vendor_profile(name, cloud, our_cloud)
+
+        if 'auth' not in cloud:
+            cloud['auth'] = dict()
+
+        _auth_update(cloud, our_cloud)
+        if 'cloud' in cloud:
+            del cloud['cloud']
+
+        return cloud
+
+    def _expand_vendor_profile(self, name, cloud, our_cloud):
+        # Expand a profile if it exists. 'cloud' is an old confusing name
+        # for this.
+        profile_name = our_cloud.get('profile', our_cloud.get('cloud', None))
+        if profile_name and profile_name != self.envvar_key:
+            if 'cloud' in our_cloud:
+                warnings.warn(
+                    "{0} uses the keyword 'cloud' to reference a known "
+                    "vendor profile. This has been deprecated in favor of the "
+                    "'profile' keyword.".format(self.config_filename))
+            vendor_filename, vendor_file = self._load_vendor_file()
+            if vendor_file and profile_name in vendor_file['public-clouds']:
+                _auth_update(cloud, vendor_file['public-clouds'][profile_name])
+            else:
+                profile_data = vendors.get_profile(profile_name)
+                if profile_data:
+                    status = profile_data.pop('status', 'active')
+                    message = profile_data.pop('message', '')
+                    if status == 'deprecated':
+                        warnings.warn(
+                            "{profile_name} is deprecated: {message}".format(
+                                profile_name=profile_name, message=message))
+                    elif status == 'shutdown':
+                        raise exceptions.OpenStackConfigException(
+                            "{profile_name} references a cloud that no longer"
+                            " exists: {message}".format(
+                                profile_name=profile_name, message=message))
+                    _auth_update(cloud, profile_data)
+                else:
+                    # Can't find the requested vendor config, go about business
+                    warnings.warn("Couldn't find the vendor profile '{0}' for"
+                                  " the cloud '{1}'".format(profile_name,
+                                                            name))
+
+    def _project_scoped(self, cloud):
+        return ('project_id' in cloud or 'project_name' in cloud
+                or 'project_id' in cloud['auth']
+                or 'project_name' in cloud['auth'])
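To make the profile expansion above concrete, a sketch of a clouds.yaml entry that would exercise ``_expand_vendor_profile`` (hypothetical project name; the ``vexxhost`` profile ships in this change):

.. code-block:: python

    import yaml

    clouds_yaml = yaml.safe_load("""
    clouds:
      my-vexxhost:
        profile: vexxhost
        auth:
          project_name: demo
    """)
    # The deprecated spelling 'cloud: vexxhost' still works, but emits
    # the deprecation warning shown above.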
+
+    def _validate_networks(self, networks, key):
+        value = None
+        for net in networks:
+            if value and net[key]:
+                raise exceptions.OpenStackConfigException(
+                    "Duplicate network entries for {key}: {net1} and {net2}."
+                    " Only one network can be flagged with {key}".format(
+                        key=key,
+                        net1=value['name'],
+                        net2=net['name']))
+            if not value and net[key]:
+                value = net
+
+    def _fix_backwards_networks(self, cloud):
+        # Leave the external_network and internal_network keys in the
+        # dict because consuming code might be expecting them.
+        networks = []
+        # Normalize existing network entries
+        for net in cloud.get('networks', []):
+            name = net.get('name')
+            if not name:
+                raise exceptions.OpenStackConfigException(
+                    'Entry in network list is missing required field "name".')
+            network = dict(
+                name=name,
+                routes_externally=get_boolean(net.get('routes_externally')),
+                nat_destination=get_boolean(net.get('nat_destination')),
+                default_interface=get_boolean(net.get('default_interface')),
+            )
+            # routes_ipv4_externally defaults to the value of routes_externally
+            network['routes_ipv4_externally'] = get_boolean(
+                net.get(
+                    'routes_ipv4_externally', network['routes_externally']))
+            # routes_ipv6_externally defaults to the value of routes_externally
+            network['routes_ipv6_externally'] = get_boolean(
+                net.get(
+                    'routes_ipv6_externally', network['routes_externally']))
+            networks.append(network)
+
+        for key in ('external_network', 'internal_network'):
+            external = key.startswith('external')
+            if key in cloud and 'networks' in cloud:
+                raise exceptions.OpenStackConfigException(
+                    "Both {key} and networks were specified in the config."
+                    " Please remove {key} from the config and use the network"
+                    " list to configure network behavior.".format(key=key))
+            if key in cloud:
+                warnings.warn(
+                    "{key} is deprecated. Please replace with an entry in"
+                    " a dict inside of the networks list with name: {name}"
+                    " and routes_externally: {external}".format(
+                        key=key, name=cloud[key], external=external))
+                networks.append(dict(
+                    name=cloud[key],
+                    routes_externally=external,
+                    nat_destination=not external,
+                    default_interface=external))
+
+        # Validate that we don't have duplicates
+        self._validate_networks(networks, 'nat_destination')
+        self._validate_networks(networks, 'default_interface')
+
+        cloud['networks'] = networks
+        return cloud
+
+    def _handle_domain_id(self, cloud):
+        # Allow people to just specify domain once if it's the same
+        mappings = {
+            'domain_id': ('user_domain_id', 'project_domain_id'),
+            'domain_name': ('user_domain_name', 'project_domain_name'),
+        }
+        for target_key, possible_values in mappings.items():
+            if not self._project_scoped(cloud):
+                if target_key in cloud and target_key not in cloud['auth']:
+                    cloud['auth'][target_key] = cloud.pop(target_key)
+                continue
+            for key in possible_values:
+                if target_key in cloud['auth'] and key not in cloud['auth']:
+                    cloud['auth'][key] = cloud['auth'][target_key]
+            cloud.pop(target_key, None)
+            cloud['auth'].pop(target_key, None)
+        return cloud
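A sketch of the shorthand that ``_handle_domain_id`` above expands when the cloud is project-scoped (hypothetical values):

.. code-block:: python

    cloud = {'auth': {'domain_id': 'default',
                      'project_name': 'demo'}}
    # After _handle_domain_id, both specific keys are filled in and the
    # shorthand is removed:
    # {'auth': {'user_domain_id': 'default',
    #           'project_domain_id': 'default',
    #           'project_name': 'demo'}}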
+
+    def _fix_backwards_project(self, cloud):
+        # Do the lists backwards so that project_name is the ultimate winner
+        # Also handle moving domain names into auth so that domain mapping
+        # is easier
+        mappings = {
+            'domain_id': ('domain_id', 'domain-id'),
+            'domain_name': ('domain_name', 'domain-name'),
+            'user_domain_id': ('user_domain_id', 'user-domain-id'),
+            'user_domain_name': ('user_domain_name', 'user-domain-name'),
+            'project_domain_id': ('project_domain_id', 'project-domain-id'),
+            'project_domain_name': (
+                'project_domain_name', 'project-domain-name'),
+            'token': ('auth-token', 'auth_token', 'token'),
+        }
+        if cloud.get('auth_type', None) == 'v2password':
+            # If v2password is explicitly requested, this is to deal with old
+            # clouds. That's fine - we need to map settings in the opposite
+            # direction
+            mappings['tenant_id'] = (
+                'project_id', 'project-id', 'tenant_id', 'tenant-id')
+            mappings['tenant_name'] = (
+                'project_name', 'project-name', 'tenant_name', 'tenant-name')
+        else:
+            mappings['project_id'] = (
+                'tenant_id', 'tenant-id', 'project_id', 'project-id')
+            mappings['project_name'] = (
+                'tenant_name', 'tenant-name', 'project_name', 'project-name')
+        for target_key, possible_values in mappings.items():
+            target = None
+            for key in possible_values:
+                if key in cloud:
+                    target = str(cloud[key])
+                    del cloud[key]
+                if key in cloud['auth']:
+                    target = str(cloud['auth'][key])
+                    del cloud['auth'][key]
+            if target:
+                cloud['auth'][target_key] = target
+        return cloud
+
+    def _fix_backwards_auth_plugin(self, cloud):
+        # Do the lists backwards so that auth_type is the ultimate winner
+        mappings = {
+            'auth_type': ('auth_plugin', 'auth_type'),
+        }
+        for target_key, possible_values in mappings.items():
+            target = None
+            for key in possible_values:
+                if key in cloud:
+                    target = cloud[key]
+                    del cloud[key]
+            cloud[target_key] = target
+        # Because we force alignment to v3 nouns, we want to force
+        # use of the auth plugin that can do auto-selection and dealing
+        # with that based on auth parameters. v2password is basically
+        # completely broken
+        return cloud
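Since ``register_argparse_arguments`` below is the main integration point for command-line tools, a usage sketch may help (hypothetical program; the module path follows the import used elsewhere in this change):

.. code-block:: python

    import argparse
    import sys

    import openstack.config

    parser = argparse.ArgumentParser()
    config = openstack.config.OpenStackConfig()
    config.register_argparse_arguments(
        parser, sys.argv[1:], service_keys=['compute'])
    options = parser.parse_args(sys.argv[1:])
    cloud = config.get_one_cloud(argparse=options)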
+
+    def register_argparse_arguments(self, parser, argv, service_keys=None):
+        """Register all of the common argparse options needed.
+
+        Given an argparse parser, register the keystoneauth Session arguments,
+        the keystoneauth Auth Plugin Options and os-cloud. Also, peek in the
+        argv to see if all of the auth plugin options should be registered
+        or merely the ones already configured.
+
+        :param argparse.ArgumentParser parser:
+            parser to attach argparse options to
+        :param argv: the arguments provided to the application
+        :param string service_keys: Service or list of services this argparse
+                                    should be specialized for, if known.
+                                    The first item in the list will be used
+                                    as the default value for service_type
+                                    (optional)
+
+        :raises exceptions.OpenStackConfigException if an invalid auth-type
+                is requested
+        """
+
+        if service_keys is None:
+            service_keys = []
+
+        # Fix argv in place - mapping any keys with embedded _ in them to -
+        _fix_argv(argv)
+
+        local_parser = argparse_mod.ArgumentParser(add_help=False)
+
+        for p in (parser, local_parser):
+            p.add_argument(
+                '--os-cloud',
+                metavar='<name>',
+                default=os.environ.get('OS_CLOUD', None),
+                help='Named cloud to connect to')
+
+        # we need to peek to see if timeout was actually passed, since
+        # the keystoneauth declaration of it has a default, which means
+        # we have no clue if the value we get is from the ksa default
+        # or from the user passing it explicitly. We'll stash it for later
+        local_parser.add_argument('--timeout', metavar='<timeout>')
+
+        # We need get_one_cloud to be able to peek at whether a token
+        # was passed so that we can swap the default from password to
+        # token if it was. And we need to also peek for --os-auth-token
+        # for novaclient backwards compat
+        local_parser.add_argument('--os-token')
+        local_parser.add_argument('--os-auth-token')
+
+        # Peek into the future and see if we have an auth-type set in
+        # config AND a cloud set, so that we know which command line
+        # arguments to register and show to the user (the user may want
+        # to say something like:
+        #   openstack --os-cloud=foo --os-oidctoken=bar
+        # although I think that user is the cause of my personal pain
+        options, _args = local_parser.parse_known_args(argv)
+        if options.timeout:
+            self._argv_timeout = True
+
+        # validate = False because we're not _actually_ loading here;
+        # we're only peeking, so it's the wrong time to assert that
+        # the rest of the arguments given are invalid for the plugin
+        # chosen (for instance, --help may be requested, so that the
+        # user can see what options they may want to give)
+        cloud = self.get_one_cloud(argparse=options, validate=False)
+        default_auth_type = cloud.config['auth_type']
+
+        try:
+            loading.register_auth_argparse_arguments(
+                parser, argv, default=default_auth_type)
+        except Exception:
+            # Hiding the keystoneauth exception because we're not actually
+            # loading the auth plugin at this point, so the error message
+            # from it doesn't actually make sense to os-client-config users
+            options, _args = parser.parse_known_args(argv)
+            plugin_names = loading.get_available_plugin_names()
+            raise exceptions.OpenStackConfigException(
+                "An invalid auth-type was specified: {auth_type}."
+                " Valid choices are: {plugin_names}.".format(
+                    auth_type=options.os_auth_type,
+                    plugin_names=",".join(plugin_names)))
+
+        if service_keys:
+            primary_service = service_keys[0]
+        else:
+            primary_service = None
+        loading.register_session_argparse_arguments(parser)
+        adapter.register_adapter_argparse_arguments(
+            parser, service_type=primary_service)
+        for service_key in service_keys:
+            # legacy clients have un-prefixed api-version options
+            parser.add_argument(
+                '--{service_key}-api-version'.format(
+                    service_key=service_key.replace('_', '-')),
+                help=argparse_mod.SUPPRESS)
+            adapter.register_service_adapter_argparse_arguments(
+                parser, service_type=service_key)
+
+        # Backwards compat options for legacy clients
+        parser.add_argument('--http-timeout', help=argparse_mod.SUPPRESS)
+        parser.add_argument('--os-endpoint-type', help=argparse_mod.SUPPRESS)
+        parser.add_argument('--endpoint-type', help=argparse_mod.SUPPRESS)
+
+    def _fix_backwards_interface(self, cloud):
+        new_cloud = {}
+        for key in cloud.keys():
+            if key.endswith('endpoint_type'):
+                target_key = key.replace('endpoint_type', 'interface')
+            else:
+                target_key = key
+            new_cloud[target_key] = cloud[key]
+        return new_cloud
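A tiny sketch of the key rewrite ``_fix_backwards_interface`` above performs (hypothetical keys):

.. code-block:: python

    cloud = {'endpoint_type': 'internal',
             'image_endpoint_type': 'public'}
    # _fix_backwards_interface(cloud) ->
    # {'interface': 'internal', 'image_interface': 'public'}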
+
+    def _fix_backwards_api_timeout(self, cloud):
+        new_cloud = {}
+        # requests can only have one timeout, which means that in a single
+        # cloud there is no point in different timeout values. However,
+        # for some reason many of the legacy clients decided to shove their
+        # service name into the arg name for reasons surpassing sanity. If
+        # we find any values that are not api_timeout, overwrite api_timeout
+        # with the value
+        service_timeout = None
+        for key in cloud.keys():
+            if key.endswith('timeout') and not (
+                    key == 'timeout' or key == 'api_timeout'):
+                service_timeout = cloud[key]
+            else:
+                new_cloud[key] = cloud[key]
+        if service_timeout is not None:
+            new_cloud['api_timeout'] = service_timeout
+        # The common argparse arg from keystoneauth is called timeout, but
+        # os-client-config expects it to be called api_timeout
+        if self._argv_timeout:
+            if 'timeout' in new_cloud and new_cloud['timeout']:
+                new_cloud['api_timeout'] = new_cloud.pop('timeout')
+        return new_cloud
+
+    def get_all_clouds(self):
+
+        clouds = []
+
+        for cloud in self.get_cloud_names():
+            for region in self._get_regions(cloud):
+                if region:
+                    clouds.append(self.get_one_cloud(
+                        cloud, region_name=region['name']))
+        return clouds
+
+    def _fix_args(self, args=None, argparse=None):
+        """Massage the passed-in options.
+
+        Replace - with _ and strip os_ prefixes.
+
+        Convert an argparse Namespace object to a dict, removing values
+        that are either None or ''.
+        """
+        if not args:
+            args = {}
+
+        if argparse:
+            # Convert the passed-in Namespace
+            o_dict = vars(argparse)
+            parsed_args = dict()
+            for k in o_dict:
+                if o_dict[k] is not None and o_dict[k] != '':
+                    parsed_args[k] = o_dict[k]
+            args.update(parsed_args)
+
+        os_args = dict()
+        new_args = dict()
+        for (key, val) in iter(args.items()):
+            if isinstance(val, dict):
+                # dive into the auth dict
+                new_args[key] = self._fix_args(val)
+                continue
+
+            key = key.replace('-', '_')
+            if key.startswith('os_'):
+                os_args[key[3:]] = val
+            else:
+                new_args[key] = val
+        new_args.update(os_args)
+        return new_args
+
+    def _find_winning_auth_value(self, opt, config):
+        opt_name = opt.name.replace('-', '_')
+        if opt_name in config:
+            return config[opt_name]
+        else:
+            deprecated = getattr(opt, 'deprecated', getattr(
+                opt, 'deprecated_opts', []))
+            for d_opt in deprecated:
+                d_opt_name = d_opt.name.replace('-', '_')
+                if d_opt_name in config:
+                    return config[d_opt_name]
+
+    def auth_config_hook(self, config):
+        """Allow examination of config values before loading auth plugin
+
+        OpenStackClient will override this to perform additional checks
+        on auth_type.
+        """
+        return config
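To illustrate the precedence implemented by ``_find_winning_auth_value`` above, a small sketch (hypothetical option and values; ``opt`` stands in for a keystoneauth plugin option object):

.. code-block:: python

    # An option whose current name is 'username' with a deprecated
    # alias 'user_name': the current spelling wins when both appear.
    config = {'username': 'demo', 'user_name': 'old-demo'}
    # _find_winning_auth_value(opt, config) -> 'demo'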
+
+    def _get_auth_loader(self, config):
+        # Re-use the admin_token plugin for the "None" plugin
+        # since it does not look up endpoints or tokens but rather
+        # does a passthrough. This is useful for things like Ironic
+        # that have a keystoneless operational mode, but means we're
+        # still dealing with a keystoneauth Session object, so all the
+        # _other_ things (SSL arg handling, timeout) all work consistently
+        if config['auth_type'] in (None, "None", ''):
+            config['auth_type'] = 'admin_token'
+            # Set to notused rather than None because validate_auth will
+            # strip the value if it's actually python None
+            config['auth']['token'] = 'notused'
+        elif config['auth_type'] == 'token_endpoint':
+            # Humans have been trained to use a thing called token_endpoint.
+            # That it does not exist in keystoneauth is irrelevant - its not
+            # doing what they want causes them sorrow.
+            config['auth_type'] = 'admin_token'
+        return loading.get_plugin_loader(config['auth_type'])
+
+    def _validate_auth(self, config, loader):
+        # May throw a keystoneauth1.exceptions.NoMatchingPlugin
+
+        plugin_options = loader.get_options()
+
+        for p_opt in plugin_options:
+            # if it's in config.auth, win, kill it from config dict
+            # if it's in config and not in config.auth, move it
+            # deprecated loses to current
+            # provided beats default, deprecated or not
+            winning_value = self._find_winning_auth_value(
+                p_opt,
+                config['auth'],
+            )
+            if not winning_value:
+                winning_value = self._find_winning_auth_value(
+                    p_opt,
+                    config,
+                )
+
+            config = self._clean_up_after_ourselves(
+                config,
+                p_opt,
+                winning_value,
+            )
+
+            if winning_value:
+                # Prefer the plugin configuration dest value if the value's
+                # key is marked as deprecated.
+                if p_opt.dest is None:
+                    good_name = p_opt.name.replace('-', '_')
+                    config['auth'][good_name] = winning_value
+                else:
+                    config['auth'][p_opt.dest] = winning_value
+
+            # See if this needs a prompting
+            config = self.option_prompt(config, p_opt)
+
+        return config
+
+    def _validate_auth_correctly(self, config, loader):
+        # May throw a keystoneauth1.exceptions.NoMatchingPlugin
+
+        plugin_options = loader.get_options()
+
+        for p_opt in plugin_options:
+            # if it's in config, win, move it and kill it from config dict
+            # if it's in config.auth but not in config it's good
+            # deprecated loses to current
+            # provided beats default, deprecated or not
+            winning_value = self._find_winning_auth_value(
+                p_opt,
+                config,
+            )
+            if not winning_value:
+                winning_value = self._find_winning_auth_value(
+                    p_opt,
+                    config['auth'],
+                )
+
+            config = self._clean_up_after_ourselves(
+                config,
+                p_opt,
+                winning_value,
+            )
+
+            # See if this needs a prompting
+            config = self.option_prompt(config, p_opt)
+
+        return config
+
+    def option_prompt(self, config, p_opt):
+        """Prompt user for option that requires a value"""
+        if (
+                p_opt.prompt is not None and
+                p_opt.dest not in config['auth'] and
+                self._pw_callback is not None
+        ):
+            config['auth'][p_opt.dest] = self._pw_callback(p_opt.prompt)
+        return config
+
+    def _clean_up_after_ourselves(self, config, p_opt, winning_value):
+
+        # Clean up after ourselves
+        for opt in [p_opt.name] + [o.name for o in p_opt.deprecated]:
+            opt = opt.replace('-', '_')
+            config.pop(opt, None)
+            config['auth'].pop(opt, None)
+
+        if winning_value:
+            # Prefer the plugin configuration dest value if the value's key
+            # is marked as deprecated.
+            if p_opt.dest is None:
+                config['auth'][p_opt.name.replace('-', '_')] = (
+                    winning_value)
+            else:
+                config['auth'][p_opt.dest] = winning_value
+        return config
+
+    def magic_fixes(self, config):
+        """Perform the set of magic argument fixups"""
+
+        # Infer token plugin if a token was given
+        if (('auth' in config and 'token' in config['auth']) or
+                ('auth_token' in config and config['auth_token']) or
+                ('token' in config and config['token'])):
+            config.setdefault('token', config.pop('auth_token', None))
+
+        # These backwards compat values are only set via argparse.
If it's + # there, it's because it was passed in explicitly, and should win + config = self._fix_backwards_api_timeout(config) + if 'endpoint_type' in config: + config['interface'] = config.pop('endpoint_type') + + config = self._fix_backwards_auth_plugin(config) + config = self._fix_backwards_project(config) + config = self._fix_backwards_interface(config) + config = self._fix_backwards_networks(config) + config = self._handle_domain_id(config) + + for key in BOOL_KEYS: + if key in config: + if type(config[key]) is not bool: + config[key] = get_boolean(config[key]) + + # TODO(mordred): Special casing auth_url here. We should + # come back to this betterer later so that it's + # more generalized + if 'auth' in config and 'auth_url' in config['auth']: + config['auth']['auth_url'] = config['auth']['auth_url'].format( + **config) + + return config + + def get_one_cloud(self, cloud=None, validate=True, + argparse=None, **kwargs): + """Retrieve a single cloud configuration and merge additional options + + :param string cloud: + The name of the configuration to load from clouds.yaml + :param boolean validate: + Validate the config. Setting this to False causes no auth plugin + to be created. It's really only useful for testing. + :param Namespace argparse: + An argparse Namespace object; allows direct passing in of + argparse options to be added to the cloud config. Values + of None and '' will be removed. + :param region_name: Name of the region of the cloud. + :param kwargs: Additional configuration options + + :raises: keystoneauth1.exceptions.MissingRequiredOptions + on missing required auth parameters + """ + + args = self._fix_args(kwargs, argparse=argparse) + + if cloud is None: + if 'cloud' in args: + cloud = args['cloud'] + else: + cloud = self.default_cloud + + config = self._get_base_cloud_config(cloud) + + # Get region specific settings + if 'region_name' not in args: + args['region_name'] = '' + region = self._get_region(cloud=cloud, region_name=args['region_name']) + args['region_name'] = region['name'] + region_args = copy.deepcopy(region['values']) + + # Regions is a list that we can use to create a list of cloud/region + # objects. 
It does not belong in the single-cloud dict + config.pop('regions', None) + + # Can't just do update, because None values take over + for arg_list in region_args, args: + for (key, val) in iter(arg_list.items()): + if val is not None: + if key == 'auth' and config[key] is not None: + config[key] = _auth_update(config[key], val) + else: + config[key] = val + + config = self.magic_fixes(config) + config = self._normalize_keys(config) + + # NOTE(dtroyer): OSC needs a hook into the auth args before the + # plugin is loaded in order to maintain backward- + # compatible behaviour + config = self.auth_config_hook(config) + + if validate: + loader = self._get_auth_loader(config) + config = self._validate_auth(config, loader) + auth_plugin = loader.load_from_options(**config['auth']) + else: + auth_plugin = None + + # If any of the defaults reference other values, we need to expand + for (key, value) in config.items(): + if hasattr(value, 'format'): + config[key] = value.format(**config) + + force_ipv4 = config.pop('force_ipv4', self.force_ipv4) + prefer_ipv6 = config.pop('prefer_ipv6', True) + if not prefer_ipv6: + force_ipv4 = True + + if cloud is None: + cloud_name = '' + else: + cloud_name = str(cloud) + return cloud_config.CloudConfig( + name=cloud_name, + region=config['region_name'], + config=config, + force_ipv4=force_ipv4, + auth_plugin=auth_plugin, + openstack_config=self, + session_constructor=self._session_constructor, + app_name=self._app_name, + app_version=self._app_version, + ) + + def get_one_cloud_osc( + self, + cloud=None, + validate=True, + argparse=None, + **kwargs + ): + """Retrieve a single cloud configuration and merge additional options + + :param string cloud: + The name of the configuration to load from clouds.yaml + :param boolean validate: + Validate the config. Setting this to False causes no auth plugin + to be created. It's really only useful for testing. + :param Namespace argparse: + An argparse Namespace object; allows direct passing in of + argparse options to be added to the cloud config. Values + of None and '' will be removed. + :param region_name: Name of the region of the cloud. + :param kwargs: Additional configuration options + + :raises: keystoneauth1.exceptions.MissingRequiredOptions + on missing required auth parameters + """ + + args = self._fix_args(kwargs, argparse=argparse) + + if cloud is None: + if 'cloud' in args: + cloud = args['cloud'] + else: + cloud = self.default_cloud + + config = self._get_base_cloud_config(cloud) + + # Get region specific settings + if 'region_name' not in args: + args['region_name'] = '' + region = self._get_region(cloud=cloud, region_name=args['region_name']) + args['region_name'] = region['name'] + region_args = copy.deepcopy(region['values']) + + # Regions is a list that we can use to create a list of cloud/region + # objects. 
It does not belong in the single-cloud dict
+        config.pop('regions', None)
+
+        # Can't just do update, because None values take over
+        for arg_list in region_args, args:
+            for (key, val) in iter(arg_list.items()):
+                if val is not None:
+                    if key == 'auth' and config[key] is not None:
+                        config[key] = _auth_update(config[key], val)
+                    else:
+                        config[key] = val
+
+        config = self.magic_fixes(config)
+
+        # NOTE(dtroyer): OSC needs a hook into the auth args before the
+        #                plugin is loaded in order to maintain backward-
+        #                compatible behaviour
+        config = self.auth_config_hook(config)
+
+        if validate:
+            loader = self._get_auth_loader(config)
+            config = self._validate_auth_correctly(config, loader)
+            auth_plugin = loader.load_from_options(**config['auth'])
+        else:
+            auth_plugin = None
+
+        # If any of the defaults reference other values, we need to expand
+        for (key, value) in config.items():
+            if hasattr(value, 'format'):
+                config[key] = value.format(**config)
+
+        force_ipv4 = config.pop('force_ipv4', self.force_ipv4)
+        prefer_ipv6 = config.pop('prefer_ipv6', True)
+        if not prefer_ipv6:
+            force_ipv4 = True
+
+        if cloud is None:
+            cloud_name = ''
+        else:
+            cloud_name = str(cloud)
+        return cloud_config.CloudConfig(
+            name=cloud_name,
+            region=config['region_name'],
+            config=self._normalize_keys(config),
+            force_ipv4=force_ipv4,
+            auth_plugin=auth_plugin,
+            openstack_config=self,
+        )
+
+    @staticmethod
+    def set_one_cloud(config_file, cloud, set_config=None):
+        """Set a single cloud configuration.
+
+        :param string config_file:
+            The path to the config file to edit. If this file does not exist
+            it will be created.
+        :param string cloud:
+            The name of the configuration to save to clouds.yaml
+        :param dict set_config: Configuration options to be set
+        """
+
+        set_config = set_config or {}
+        cur_config = {}
+        try:
+            with open(config_file) as fh:
+                cur_config = yaml.safe_load(fh)
+        except IOError as e:
+            # Ignore ENOENT (no such file); re-raise anything else
+            if e.errno != 2:
+                raise
+
+        clouds_config = cur_config.get('clouds', {})
+        cloud_config = _auth_update(clouds_config.get(cloud, {}), set_config)
+        clouds_config[cloud] = cloud_config
+        cur_config['clouds'] = clouds_config
+
+        with open(config_file, 'w') as fh:
+            yaml.safe_dump(cur_config, fh, default_flow_style=False)
+
+
+if __name__ == '__main__':
+    config = OpenStackConfig().get_all_clouds()
+    for cloud in config:
+        print_cloud = False
+        if len(sys.argv) == 1:
+            print_cloud = True
+        elif len(sys.argv) == 3 and (
+                sys.argv[1] == cloud.name and sys.argv[2] == cloud.region):
+            print_cloud = True
+        elif len(sys.argv) == 2 and (
+                sys.argv[1] == cloud.name):
+            print_cloud = True
+
+        if print_cloud:
+            print(cloud.name, cloud.region, cloud.config)
diff --git a/openstack/config/schema.json b/openstack/config/schema.json
new file mode 100644
index 000000000..8110d58e9
--- /dev/null
+++ b/openstack/config/schema.json
@@ -0,0 +1,121 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "id": "https://git.openstack.org/cgit/openstack/cloud-data/plain/schema.json#",
+    "type": "object",
+    "properties": {
+        "auth_type": {
+            "name": "Auth Type",
+            "description": "Name of authentication plugin to be used",
+            "default": "password",
+            "type": "string"
+        },
+        "disable_vendor_agent": {
+            "name": "Disable Vendor Agent Properties",
+            "description": "Image properties required to disable vendor agent",
+            "type": "object",
+            "properties": {}
+        },
+        "floating_ip_source": {
+            "name": "Floating IP Source",
+            "description": "Which service provides Floating IPs",
+            "enum": [ "neutron", "nova", "None" ],
"None" ], + "default": "neutron" + }, + "image_api_use_tasks": { + "name": "Image Task API", + "description": "Does the cloud require the Image Task API", + "default": false, + "type": "boolean" + }, + "image_format": { + "name": "Image Format", + "description": "Format for uploaded Images", + "default": "qcow2", + "type": "string" + }, + "interface": { + "name": "API Interface", + "description": "Which API Interface should connections hit", + "default": "public", + "enum": [ "public", "internal", "admin" ] + }, + "secgroup_source": { + "name": "Security Group Source", + "description": "Which service provides security groups", + "default": "neutron", + "enum": [ "neutron", "nova", "None" ] + }, + "baremetal_api_version": { + "name": "Baremetal API Service Type", + "description": "Baremetal API Service Type", + "default": "1", + "type": "string" + }, + "compute_api_version": { + "name": "Compute API Version", + "description": "Compute API Version", + "default": "2", + "type": "string" + }, + "database_api_version": { + "name": "Database API Version", + "description": "Database API Version", + "default": "1.0", + "type": "string" + }, + "dns_api_version": { + "name": "DNS API Version", + "description": "DNS API Version", + "default": "2", + "type": "string" + }, + "identity_api_version": { + "name": "Identity API Version", + "description": "Identity API Version", + "default": "2", + "type": "string" + }, + "image_api_version": { + "name": "Image API Version", + "description": "Image API Version", + "default": "1", + "type": "string" + }, + "network_api_version": { + "name": "Network API Version", + "description": "Network API Version", + "default": "2", + "type": "string" + }, + "object_store_api_version": { + "name": "Object Storage API Version", + "description": "Object Storage API Version", + "default": "1", + "type": "string" + }, + "volume_api_version": { + "name": "Volume API Version", + "description": "Volume API Version", + "default": "2", + "type": "string" + } + }, + "required": [ + "auth_type", + "baremetal_api_version", + "compute_api_version", + "database_api_version", + "disable_vendor_agent", + "dns_api_version", + "floating_ip_source", + "identity_api_version", + "image_api_use_tasks", + "image_api_version", + "image_format", + "interface", + "network_api_version", + "object_store_api_version", + "secgroup_source", + "volume_api_version" + ] +} diff --git a/openstack/config/vendor-schema.json b/openstack/config/vendor-schema.json new file mode 100644 index 000000000..8193a19ba --- /dev/null +++ b/openstack/config/vendor-schema.json @@ -0,0 +1,223 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "https://git.openstack.org/cgit/openstack/cloud-data/plain/vendor-schema.json#", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "profile": { + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "auth_url": { + "name": "Auth URL", + "description": "URL of the primary Keystone endpoint", + "type": "string" + } + } + }, + "auth_type": { + "name": "Auth Type", + "description": "Name of authentication plugin to be used", + "default": "password", + "type": "string" + }, + "disable_vendor_agent": { + "name": "Disable Vendor Agent Properties", + "description": "Image properties required to disable vendor agent", + "type": "object", + "properties": {} + }, + "floating_ip_source": { + "name": "Floating IP Source", + "description": "Which service provides Floating IPs", + "enum": [ "neutron", "nova", "None" ], 
+ "default": "neutron" + }, + "image_api_use_tasks": { + "name": "Image Task API", + "description": "Does the cloud require the Image Task API", + "default": false, + "type": "boolean" + }, + "image_format": { + "name": "Image Format", + "description": "Format for uploaded Images", + "default": "qcow2", + "type": "string" + }, + "interface": { + "name": "API Interface", + "description": "Which API Interface should connections hit", + "default": "public", + "enum": [ "public", "internal", "admin" ] + }, + "message": { + "name": "Status message", + "description": "Optional message with information related to status", + "type": "string" + }, + "requires_floating_ip": { + "name": "Requires Floating IP", + "description": "Whether the cloud requires a floating IP to route traffic off of the cloud", + "default": null, + "type": ["boolean", "null"] + }, + "secgroup_source": { + "name": "Security Group Source", + "description": "Which service provides security groups", + "enum": [ "neutron", "nova", "None" ], + "default": "neutron" + }, + "status": { + "name": "Vendor status", + "description": "Status of the vendor's cloud", + "enum": [ "active", "deprecated", "shutdown"], + "default": "active" + }, + "compute_service_name": { + "name": "Compute API Service Name", + "description": "Compute API Service Name", + "type": "string" + }, + "database_service_name": { + "name": "Database API Service Name", + "description": "Database API Service Name", + "type": "string" + }, + "dns_service_name": { + "name": "DNS API Service Name", + "description": "DNS API Service Name", + "type": "string" + }, + "identity_service_name": { + "name": "Identity API Service Name", + "description": "Identity API Service Name", + "type": "string" + }, + "image_service_name": { + "name": "Image API Service Name", + "description": "Image API Service Name", + "type": "string" + }, + "volume_service_name": { + "name": "Volume API Service Name", + "description": "Volume API Service Name", + "type": "string" + }, + "network_service_name": { + "name": "Network API Service Name", + "description": "Network API Service Name", + "type": "string" + }, + "object_service_name": { + "name": "Object Storage API Service Name", + "description": "Object Storage API Service Name", + "type": "string" + }, + "baremetal_service_name": { + "name": "Baremetal API Service Name", + "description": "Baremetal API Service Name", + "type": "string" + }, + "compute_service_type": { + "name": "Compute API Service Type", + "description": "Compute API Service Type", + "type": "string" + }, + "database_service_type": { + "name": "Database API Service Type", + "description": "Database API Service Type", + "type": "string" + }, + "dns_service_type": { + "name": "DNS API Service Type", + "description": "DNS API Service Type", + "type": "string" + }, + "identity_service_type": { + "name": "Identity API Service Type", + "description": "Identity API Service Type", + "type": "string" + }, + "image_service_type": { + "name": "Image API Service Type", + "description": "Image API Service Type", + "type": "string" + }, + "volume_service_type": { + "name": "Volume API Service Type", + "description": "Volume API Service Type", + "type": "string" + }, + "network_service_type": { + "name": "Network API Service Type", + "description": "Network API Service Type", + "type": "string" + }, + "object_service_type": { + "name": "Object Storage API Service Type", + "description": "Object Storage API Service Type", + "type": "string" + }, + "baremetal_service_type": { + "name": 
"Baremetal API Service Type", + "description": "Baremetal API Service Type", + "type": "string" + }, + "compute_api_version": { + "name": "Compute API Version", + "description": "Compute API Version", + "type": "string" + }, + "database_api_version": { + "name": "Database API Version", + "description": "Database API Version", + "type": "string" + }, + "dns_api_version": { + "name": "DNS API Version", + "description": "DNS API Version", + "type": "string" + }, + "identity_api_version": { + "name": "Identity API Version", + "description": "Identity API Version", + "type": "string" + }, + "image_api_version": { + "name": "Image API Version", + "description": "Image API Version", + "type": "string" + }, + "volume_api_version": { + "name": "Volume API Version", + "description": "Volume API Version", + "type": "string" + }, + "network_api_version": { + "name": "Network API Version", + "description": "Network API Version", + "type": "string" + }, + "object_api_version": { + "name": "Object Storage API Version", + "description": "Object Storage API Version", + "type": "string" + }, + "baremetal_api_version": { + "name": "Baremetal API Version", + "description": "Baremetal API Version", + "type": "string" + } + } + } + }, + "required": [ + "name", + "profile" + ] +} diff --git a/openstack/config/vendors/__init__.py b/openstack/config/vendors/__init__.py new file mode 100644 index 000000000..3e1d20a5a --- /dev/null +++ b/openstack/config/vendors/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import glob +import json +import os + +import yaml + +_vendors_path = os.path.dirname(os.path.realpath(__file__)) +_vendor_defaults = None + + +def get_profile(profile_name): + global _vendor_defaults + if _vendor_defaults is None: + _vendor_defaults = {} + for vendor in glob.glob(os.path.join(_vendors_path, '*.yaml')): + with open(vendor, 'r') as f: + vendor_data = yaml.safe_load(f) + _vendor_defaults[vendor_data['name']] = vendor_data['profile'] + for vendor in glob.glob(os.path.join(_vendors_path, '*.json')): + with open(vendor, 'r') as f: + vendor_data = json.load(f) + _vendor_defaults[vendor_data['name']] = vendor_data['profile'] + return _vendor_defaults.get(profile_name) diff --git a/openstack/config/vendors/auro.json b/openstack/config/vendors/auro.json new file mode 100644 index 000000000..410a8e19c --- /dev/null +++ b/openstack/config/vendors/auro.json @@ -0,0 +1,11 @@ +{ + "name": "auro", + "profile": { + "auth": { + "auth_url": "https://api.van1.auro.io:5000/v2.0" + }, + "identity_api_version": "2", + "region_name": "van1", + "requires_floating_ip": true + } +} diff --git a/openstack/config/vendors/bluebox.json b/openstack/config/vendors/bluebox.json new file mode 100644 index 000000000..647c8429f --- /dev/null +++ b/openstack/config/vendors/bluebox.json @@ -0,0 +1,7 @@ +{ + "name": "bluebox", + "profile": { + "volume_api_version": "1", + "region_name": "RegionOne" + } +} diff --git a/openstack/config/vendors/catalyst.json b/openstack/config/vendors/catalyst.json new file mode 100644 index 000000000..3ad75075b --- /dev/null +++ b/openstack/config/vendors/catalyst.json @@ -0,0 +1,15 @@ +{ + "name": "catalyst", + "profile": { + "auth": { + "auth_url": "https://api.cloud.catalyst.net.nz:5000/v2.0" + }, + "regions": [ + "nz-por-1", + "nz_wlg_2" + ], + "image_api_version": "1", + "volume_api_version": "1", + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/citycloud.json b/openstack/config/vendors/citycloud.json new file mode 100644 index 000000000..c9ac335c8 --- /dev/null +++ b/openstack/config/vendors/citycloud.json @@ -0,0 +1,19 @@ +{ + "name": "citycloud", + "profile": { + "auth": { + "auth_url": "https://identity1.citycloud.com:5000/v3/" + }, + "regions": [ + "Buf1", + "La1", + "Fra1", + "Lon1", + "Sto2", + "Kna1" + ], + "requires_floating_ip": true, + "volume_api_version": "1", + "identity_api_version": "3" + } +} diff --git a/openstack/config/vendors/conoha.json b/openstack/config/vendors/conoha.json new file mode 100644 index 000000000..5636f0955 --- /dev/null +++ b/openstack/config/vendors/conoha.json @@ -0,0 +1,14 @@ +{ + "name": "conoha", + "profile": { + "auth": { + "auth_url": "https://identity.{region_name}.conoha.io" + }, + "regions": [ + "sin1", + "sjc1", + "tyo1" + ], + "identity_api_version": "2" + } +} diff --git a/openstack/config/vendors/datacentred.json b/openstack/config/vendors/datacentred.json new file mode 100644 index 000000000..e67d3da72 --- /dev/null +++ b/openstack/config/vendors/datacentred.json @@ -0,0 +1,11 @@ +{ + "name": "datacentred", + "profile": { + "auth": { + "auth_url": "https://compute.datacentred.io:5000" + }, + "region-name": "sal01", + "identity_api_version": "3", + "image_api_version": "2" + } +} diff --git a/openstack/config/vendors/dreamcompute.json b/openstack/config/vendors/dreamcompute.json new file mode 100644 index 000000000..8244cf77c --- /dev/null +++ b/openstack/config/vendors/dreamcompute.json @@ -0,0 +1,11 @@ +{ + "name": "dreamcompute", + "profile": { + "auth": { + "auth_url": 
"https://iad2.dream.io:5000" + }, + "identity_api_version": "3", + "region_name": "RegionOne", + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/dreamhost.json b/openstack/config/vendors/dreamhost.json new file mode 100644 index 000000000..ea2ebac1e --- /dev/null +++ b/openstack/config/vendors/dreamhost.json @@ -0,0 +1,13 @@ +{ + "name": "dreamhost", + "profile": { + "status": "deprecated", + "message": "The dreamhost profile is deprecated. Please use the dreamcompute profile instead", + "auth": { + "auth_url": "https://keystone.dream.io" + }, + "identity_api_version": "3", + "region_name": "RegionOne", + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/elastx.json b/openstack/config/vendors/elastx.json new file mode 100644 index 000000000..1e7248213 --- /dev/null +++ b/openstack/config/vendors/elastx.json @@ -0,0 +1,10 @@ +{ + "name": "elastx", + "profile": { + "auth": { + "auth_url": "https://ops.elastx.net:5000" + }, + "identity_api_version": "3", + "region_name": "regionOne" + } +} diff --git a/openstack/config/vendors/entercloudsuite.json b/openstack/config/vendors/entercloudsuite.json new file mode 100644 index 000000000..c58c478f0 --- /dev/null +++ b/openstack/config/vendors/entercloudsuite.json @@ -0,0 +1,16 @@ +{ + "name": "entercloudsuite", + "profile": { + "auth": { + "auth_url": "https://api.entercloudsuite.com/" + }, + "identity_api_version": "3", + "image_api_version": "1", + "volume_api_version": "1", + "regions": [ + "it-mil1", + "nl-ams1", + "de-fra1" + ] + } +} diff --git a/openstack/config/vendors/fuga.json b/openstack/config/vendors/fuga.json new file mode 100644 index 000000000..388500b1b --- /dev/null +++ b/openstack/config/vendors/fuga.json @@ -0,0 +1,15 @@ +{ + "name": "fuga", + "profile": { + "auth": { + "auth_url": "https://identity.api.fuga.io:5000", + "user_domain_name": "Default", + "project_domain_name": "Default" + }, + "regions": [ + "cystack" + ], + "identity_api_version": "3", + "volume_api_version": "3" + } +} diff --git a/openstack/config/vendors/ibmcloud.json b/openstack/config/vendors/ibmcloud.json new file mode 100644 index 000000000..90962c60e --- /dev/null +++ b/openstack/config/vendors/ibmcloud.json @@ -0,0 +1,13 @@ +{ + "name": "ibmcloud", + "profile": { + "auth": { + "auth_url": "https://identity.open.softlayer.com" + }, + "volume_api_version": "2", + "identity_api_version": "3", + "regions": [ + "london" + ] + } +} diff --git a/openstack/config/vendors/internap.json b/openstack/config/vendors/internap.json new file mode 100644 index 000000000..b67fc06d4 --- /dev/null +++ b/openstack/config/vendors/internap.json @@ -0,0 +1,17 @@ +{ + "name": "internap", + "profile": { + "auth": { + "auth_url": "https://identity.api.cloud.iweb.com" + }, + "regions": [ + "ams01", + "da01", + "nyj01", + "sin01", + "sjc01" + ], + "identity_api_version": "3", + "floating_ip_source": "None" + } +} diff --git a/openstack/config/vendors/otc.json b/openstack/config/vendors/otc.json new file mode 100644 index 000000000..b0c1b116f --- /dev/null +++ b/openstack/config/vendors/otc.json @@ -0,0 +1,13 @@ +{ + "name": "otc", + "profile": { + "auth": { + "auth_url": "https://iam.%(region_name)s.otc.t-systems.com/v3" + }, + "regions": [ + "eu-de" + ], + "identity_api_version": "3", + "image_format": "vhd" + } +} diff --git a/openstack/config/vendors/ovh.json b/openstack/config/vendors/ovh.json new file mode 100644 index 000000000..f17dc2b68 --- /dev/null +++ b/openstack/config/vendors/ovh.json @@ -0,0 +1,15 @@ +{ + "name": "ovh", + "profile": { 
+ "auth": { + "auth_url": "https://auth.cloud.ovh.net/" + }, + "regions": [ + "BHS1", + "GRA1", + "SBG1" + ], + "identity_api_version": "3", + "floating_ip_source": "None" + } +} diff --git a/openstack/config/vendors/rackspace.json b/openstack/config/vendors/rackspace.json new file mode 100644 index 000000000..6a4590f67 --- /dev/null +++ b/openstack/config/vendors/rackspace.json @@ -0,0 +1,29 @@ +{ + "name": "rackspace", + "profile": { + "auth": { + "auth_url": "https://identity.api.rackspacecloud.com/v2.0/" + }, + "regions": [ + "DFW", + "HKG", + "IAD", + "ORD", + "SYD", + "LON" + ], + "database_service_type": "rax:database", + "compute_service_name": "cloudServersOpenStack", + "image_api_use_tasks": true, + "image_format": "vhd", + "floating_ip_source": "None", + "secgroup_source": "None", + "requires_floating_ip": false, + "volume_api_version": "1", + "disable_vendor_agent": { + "vm_mode": "hvm", + "xenapi_use_agent": false + }, + "has_network": false + } +} diff --git a/openstack/config/vendors/switchengines.json b/openstack/config/vendors/switchengines.json new file mode 100644 index 000000000..46f632515 --- /dev/null +++ b/openstack/config/vendors/switchengines.json @@ -0,0 +1,15 @@ +{ + "name": "switchengines", + "profile": { + "auth": { + "auth_url": "https://keystone.cloud.switch.ch:5000/v2.0" + }, + "regions": [ + "LS", + "ZH" + ], + "volume_api_version": "1", + "image_api_use_tasks": true, + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/ultimum.json b/openstack/config/vendors/ultimum.json new file mode 100644 index 000000000..4bfd088cd --- /dev/null +++ b/openstack/config/vendors/ultimum.json @@ -0,0 +1,11 @@ +{ + "name": "ultimum", + "profile": { + "auth": { + "auth_url": "https://console.ultimum-cloud.com:5000/" + }, + "identity_api_version": "3", + "volume_api_version": "1", + "region-name": "RegionOne" + } +} diff --git a/openstack/config/vendors/unitedstack.json b/openstack/config/vendors/unitedstack.json new file mode 100644 index 000000000..ac8be117f --- /dev/null +++ b/openstack/config/vendors/unitedstack.json @@ -0,0 +1,16 @@ +{ + "name": "unitedstack", + "profile": { + "auth": { + "auth_url": "https://identity.api.ustack.com/v3" + }, + "regions": [ + "bj1", + "gd1" + ], + "volume_api_version": "1", + "identity_api_version": "3", + "image_format": "raw", + "floating_ip_source": "None" + } +} diff --git a/openstack/config/vendors/vexxhost.json b/openstack/config/vendors/vexxhost.json new file mode 100644 index 000000000..2227fff4f --- /dev/null +++ b/openstack/config/vendors/vexxhost.json @@ -0,0 +1,15 @@ +{ + "name": "vexxhost", + "profile": { + "auth": { + "auth_url": "https://auth.vexxhost.net" + }, + "regions": [ + "ca-ymq-1" + ], + "dns_api_version": "1", + "identity_api_version": "3", + "floating_ip_source": "None", + "requires_floating_ip": false + } +} diff --git a/openstack/config/vendors/zetta.json b/openstack/config/vendors/zetta.json new file mode 100644 index 000000000..44e9711ff --- /dev/null +++ b/openstack/config/vendors/zetta.json @@ -0,0 +1,13 @@ +{ + "name": "zetta", + "profile": { + "auth": { + "auth_url": "https://identity.api.zetta.io/v3" + }, + "regions": [ + "no-osl1" + ], + "identity_api_version": "3", + "dns_api_version": "2" + } +} diff --git a/openstack/connection.py b/openstack/connection.py index 6f42d35d4..d83b77daa 100644 --- a/openstack/connection.py +++ b/openstack/connection.py @@ -61,7 +61,7 @@ import logging import sys from keystoneauth1.loading import base as ksa_loader -import os_client_config +import 
openstack.config from openstack import exceptions from openstack import profile as _profile @@ -79,10 +79,10 @@ def from_config(cloud_name=None, cloud_config=None, options=None): :param str cloud_name: Use the `cloud_name` configuration details when creating the Connection instance. :param cloud_config: An instance of - `os_client_config.config.OpenStackConfig` + `openstack.config.loader.OpenStackConfig` as returned from the os-client-config library. If no `config` is provided, - `os_client_config.OpenStackConfig` will be called, + `openstack.config.OpenStackConfig` will be called, and the provided `cloud_name` will be used in determining which cloud's configuration details will be used in creation of the @@ -90,7 +90,7 @@ def from_config(cloud_name=None, cloud_config=None, options=None): :param options: A namespace object; allows direct passing in of options to be added to the cloud config. This does not have to be an instance of argparse.Namespace, despite the naming of the - the `os_client_config.config.OpenStackConfig.get_one_cloud` + the `openstack.config.loader.OpenStackConfig.get_one_cloud` argument to which it is passed. :rtype: :class:`~openstack.connection.Connection` @@ -105,7 +105,7 @@ def from_config(cloud_name=None, cloud_config=None, options=None): # this stuff needs to be fixed where we keep version and path separated. defaults['network_api_version'] = 'v2.0' if cloud_config is None: - occ = os_client_config.OpenStackConfig(override_defaults=defaults) + occ = openstack.config.OpenStackConfig(override_defaults=defaults) cloud_config = occ.get_one_cloud(cloud=cloud_name, argparse=options) if cloud_config.debug: diff --git a/openstack/image/v2/_proxy.py b/openstack/image/v2/_proxy.py index 14907dbd5..9de368a7c 100644 --- a/openstack/image/v2/_proxy.py +++ b/openstack/image/v2/_proxy.py @@ -66,8 +66,7 @@ class Proxy(proxy2.BaseProxy): This will download an image to memory when ``stream=False``, or allow streaming downloads using an iterator when ``stream=True``. For examples of working with streamed responses, see - :ref:`download_image-stream-true` and the Requests documentation - :ref:`body-content-workflow`. + :ref:`download_image-stream-true`. :param image: The value can be either the ID of an image or a :class:`~openstack.image.v2.image.Image` instance. diff --git a/openstack/tests/ansible/README.txt b/openstack/tests/ansible/README.txt new file mode 100644 index 000000000..3931b4af9 --- /dev/null +++ b/openstack/tests/ansible/README.txt @@ -0,0 +1,26 @@ +This directory contains a testing infrastructure for the Ansible +OpenStack modules. You will need a clouds.yaml file in order to run +the tests. You must provide a value for the `cloud` variable for each +run (using the -e option) as a default is not currently provided. + +If you want to run these tests against devstack, it is easiest to use +the tox target. This assumes you have a devstack-admin cloud defined +in your clouds.yaml file that points to devstack. Some examples of +using tox: + + tox -e ansible + + tox -e ansible keypair security_group + +If you want to run these tests directly, or against different clouds, +then you'll need to use the ansible-playbook command that comes with +the Ansible distribution and feed it the run.yml playbook. 
Some examples: + + # Run all module tests against a provider + ansible-playbook run.yml -e "cloud=hp" + + # Run only the keypair and security_group tests + ansible-playbook run.yml -e "cloud=hp" --tags "keypair,security_group" + + # Run all tests except security_group + ansible-playbook run.yml -e "cloud=hp" --skip-tags "security_group" diff --git a/openstack/tests/ansible/hooks/post_test_hook.sh b/openstack/tests/ansible/hooks/post_test_hook.sh new file mode 100755 index 000000000..6b511719a --- /dev/null +++ b/openstack/tests/ansible/hooks/post_test_hook.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# TODO(shade) Rework for Zuul v3 + +export OPENSTACKSDK_DIR="$BASE/new/python-openstacksdk" + +cd $OPENSTACKSDK_DIR +sudo chown -R jenkins:stack $OPENSTACKSDK_DIR + +echo "Running shade Ansible test suite" + +if [ ${OPENSTACKSDK_ANSIBLE_DEV:-0} -eq 1 ] +then + # Use the upstream development version of Ansible + set +e + sudo -E -H -u jenkins tox -eansible -- -d + EXIT_CODE=$? + set -e +else + # Use the release version of Ansible + set +e + sudo -E -H -u jenkins tox -eansible + EXIT_CODE=$? + set -e +fi + + +exit $EXIT_CODE diff --git a/openstack/tests/ansible/roles/auth/tasks/main.yml b/openstack/tests/ansible/roles/auth/tasks/main.yml new file mode 100644 index 000000000..ca894e50a --- /dev/null +++ b/openstack/tests/ansible/roles/auth/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Authenticate to the cloud + os_auth: + cloud={{ cloud }} + +- debug: var=service_catalog diff --git a/openstack/tests/ansible/roles/client_config/tasks/main.yml b/openstack/tests/ansible/roles/client_config/tasks/main.yml new file mode 100644 index 000000000..1506f6d69 --- /dev/null +++ b/openstack/tests/ansible/roles/client_config/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: List all profiles + os_client_config: + register: list + +# WARNING: This will output sensitive authentication information!!!! 
+- debug: var=list diff --git a/openstack/tests/ansible/roles/group/tasks/main.yml b/openstack/tests/ansible/roles/group/tasks/main.yml new file mode 100644 index 000000000..535ed4318 --- /dev/null +++ b/openstack/tests/ansible/roles/group/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Create group + os_group: + cloud: "{{ cloud }}" + state: present + name: "{{ group_name }}" + +- name: Update group + os_group: + cloud: "{{ cloud }}" + state: present + name: "{{ group_name }}" + description: "updated description" + +- name: Delete group + os_group: + cloud: "{{ cloud }}" + state: absent + name: "{{ group_name }}" diff --git a/openstack/tests/ansible/roles/group/vars/main.yml b/openstack/tests/ansible/roles/group/vars/main.yml new file mode 100644 index 000000000..361c01190 --- /dev/null +++ b/openstack/tests/ansible/roles/group/vars/main.yml @@ -0,0 +1 @@ +group_name: ansible_group diff --git a/openstack/tests/ansible/roles/image/tasks/main.yml b/openstack/tests/ansible/roles/image/tasks/main.yml new file mode 100644 index 000000000..587e887b8 --- /dev/null +++ b/openstack/tests/ansible/roles/image/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- name: Create a test image file + shell: mktemp + register: tmp_file + +- name: Fill test image file to 1MB + shell: truncate -s 1048576 {{ tmp_file.stdout }} + +- name: Create raw image (defaults) + os_image: + cloud: "{{ cloud }}" + state: present + name: "{{ image_name }}" + filename: "{{ tmp_file.stdout }}" + disk_format: raw + register: image + +- debug: var=image + +- name: Delete raw image (defaults) + os_image: + cloud: "{{ cloud }}" + state: absent + name: "{{ image_name }}" + +- name: Create raw image (complex) + os_image: + cloud: "{{ cloud }}" + state: present + name: "{{ image_name }}" + filename: "{{ tmp_file.stdout }}" + disk_format: raw + is_public: True + min_disk: 10 + min_ram: 1024 + kernel: cirros-vmlinuz + ramdisk: cirros-initrd + properties: + cpu_arch: x86_64 + distro: ubuntu + register: image + +- debug: var=image + +- name: Delete raw image (complex) + os_image: + cloud: "{{ cloud }}" + state: absent + name: "{{ image_name }}" + +- name: Delete test image file + file: + name: "{{ tmp_file.stdout }}" + state: absent diff --git a/openstack/tests/ansible/roles/image/vars/main.yml b/openstack/tests/ansible/roles/image/vars/main.yml new file mode 100644 index 000000000..13efe7144 --- /dev/null +++ b/openstack/tests/ansible/roles/image/vars/main.yml @@ -0,0 +1 @@ +image_name: ansible_image diff --git a/openstack/tests/ansible/roles/keypair/tasks/main.yml b/openstack/tests/ansible/roles/keypair/tasks/main.yml new file mode 100644 index 000000000..53a856e2f --- /dev/null +++ b/openstack/tests/ansible/roles/keypair/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- name: Create keypair (non-existing) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: present + +- name: Delete keypair (non-existing) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: absent + +- name: Generate test key file + user: + name: "{{ ansible_env.USER }}" + generate_ssh_key: yes + ssh_key_file: .ssh/shade_id_rsa + +- name: Create keypair (file) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: present + public_key_file: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa.pub" + +- name: Delete keypair (file) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: absent + +- name: Create keypair (key) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: present + public_key: 
"{{ lookup('file', '~/.ssh/shade_id_rsa.pub') }}" + +- name: Delete keypair (key) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: absent + +- name: Delete test key pub file + file: + name: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa.pub" + state: absent + +- name: Delete test key pvt file + file: + name: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa" + state: absent diff --git a/openstack/tests/ansible/roles/keypair/vars/main.yml b/openstack/tests/ansible/roles/keypair/vars/main.yml new file mode 100644 index 000000000..3956b56a2 --- /dev/null +++ b/openstack/tests/ansible/roles/keypair/vars/main.yml @@ -0,0 +1 @@ +keypair_name: shade_keypair diff --git a/openstack/tests/ansible/roles/keystone_domain/tasks/main.yml b/openstack/tests/ansible/roles/keystone_domain/tasks/main.yml new file mode 100644 index 000000000..d1ca1273b --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_domain/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Create keystone domain + os_keystone_domain: + cloud: "{{ cloud }}" + state: present + name: "{{ domain_name }}" + description: "test description" + +- name: Update keystone domain + os_keystone_domain: + cloud: "{{ cloud }}" + name: "{{ domain_name }}" + description: "updated description" + +- name: Delete keystone domain + os_keystone_domain: + cloud: "{{ cloud }}" + state: absent + name: "{{ domain_name }}" diff --git a/openstack/tests/ansible/roles/keystone_domain/vars/main.yml b/openstack/tests/ansible/roles/keystone_domain/vars/main.yml new file mode 100644 index 000000000..049e7c378 --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_domain/vars/main.yml @@ -0,0 +1 @@ +domain_name: ansible_domain diff --git a/openstack/tests/ansible/roles/keystone_role/tasks/main.yml b/openstack/tests/ansible/roles/keystone_role/tasks/main.yml new file mode 100644 index 000000000..110b4386b --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_role/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Create keystone role + os_keystone_role: + cloud: "{{ cloud }}" + state: present + name: "{{ role_name }}" + +- name: Delete keystone role + os_keystone_role: + cloud: "{{ cloud }}" + state: absent + name: "{{ role_name }}" diff --git a/openstack/tests/ansible/roles/keystone_role/vars/main.yml b/openstack/tests/ansible/roles/keystone_role/vars/main.yml new file mode 100644 index 000000000..d1ebe5d1c --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_role/vars/main.yml @@ -0,0 +1 @@ +role_name: ansible_keystone_role diff --git a/openstack/tests/ansible/roles/network/tasks/main.yml b/openstack/tests/ansible/roles/network/tasks/main.yml new file mode 100644 index 000000000..8a85c25cc --- /dev/null +++ b/openstack/tests/ansible/roles/network/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Create network + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: present + shared: "{{ network_shared }}" + external: "{{ network_external }}" + +- name: Delete network + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: absent diff --git a/openstack/tests/ansible/roles/network/vars/main.yml b/openstack/tests/ansible/roles/network/vars/main.yml new file mode 100644 index 000000000..d5435ecb1 --- /dev/null +++ b/openstack/tests/ansible/roles/network/vars/main.yml @@ -0,0 +1,3 @@ +network_name: shade_network +network_shared: false +network_external: false diff --git a/openstack/tests/ansible/roles/nova_flavor/tasks/main.yml b/openstack/tests/ansible/roles/nova_flavor/tasks/main.yml new file mode 100644 index 
000000000..c034bfc70 --- /dev/null +++ b/openstack/tests/ansible/roles/nova_flavor/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Create public flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: present + name: ansible_public_flavor + is_public: True + ram: 1024 + vcpus: 1 + disk: 10 + ephemeral: 10 + swap: 1 + flavorid: 12345 + +- name: Delete public flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: absent + name: ansible_public_flavor + +- name: Create private flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: present + name: ansible_private_flavor + is_public: False + ram: 1024 + vcpus: 1 + disk: 10 + ephemeral: 10 + swap: 1 + flavorid: 12345 + +- name: Delete private flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: absent + name: ansible_private_flavor + +- name: Create flavor (defaults) + os_nova_flavor: + cloud: "{{ cloud }}" + state: present + name: ansible_defaults_flavor + ram: 1024 + vcpus: 1 + disk: 10 + +- name: Delete flavor (defaults) + os_nova_flavor: + cloud: "{{ cloud }}" + state: absent + name: ansible_defaults_flavor diff --git a/openstack/tests/ansible/roles/object/tasks/main.yml b/openstack/tests/ansible/roles/object/tasks/main.yml new file mode 100644 index 000000000..ae54b6ba2 --- /dev/null +++ b/openstack/tests/ansible/roles/object/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Create a test object file + shell: mktemp + register: tmp_file + +- name: Create container + os_object: + cloud: "{{ cloud }}" + state: present + container: ansible_container + container_access: private + +- name: Put object + os_object: + cloud: "{{ cloud }}" + state: present + name: ansible_object + filename: "{{ tmp_file.stdout }}" + container: ansible_container + +- name: Delete object + os_object: + cloud: "{{ cloud }}" + state: absent + name: ansible_object + container: ansible_container + +- name: Delete container + os_object: + cloud: "{{ cloud }}" + state: absent + container: ansible_container + +- name: Delete test object file + file: + name: "{{ tmp_file.stdout }}" + state: absent diff --git a/openstack/tests/ansible/roles/port/tasks/main.yml b/openstack/tests/ansible/roles/port/tasks/main.yml new file mode 100644 index 000000000..05ce1e20f --- /dev/null +++ b/openstack/tests/ansible/roles/port/tasks/main.yml @@ -0,0 +1,101 @@ +--- +- name: Create network + os_network: + cloud: "{{ cloud }}" + state: present + name: "{{ network_name }}" + external: True + +- name: Create subnet + os_subnet: + cloud: "{{ cloud }}" + state: present + name: "{{ subnet_name }}" + network_name: "{{ network_name }}" + cidr: 10.5.5.0/24 + +- name: Create port (no security group) + os_port: + cloud: "{{ cloud }}" + state: present + name: "{{ port_name }}" + network: "{{ network_name }}" + no_security_groups: True + fixed_ips: + - ip_address: 10.5.5.69 + register: port + +- debug: var=port + +- name: Delete port (no security group) + os_port: + cloud: "{{ cloud }}" + state: absent + name: "{{ port_name }}" + +- name: Create security group + os_security_group: + cloud: "{{ cloud }}" + state: present + name: "{{ secgroup_name }}" + description: Test group + +- name: Create port (with security group) + os_port: + cloud: "{{ cloud }}" + state: present + name: "{{ port_name }}" + network: "{{ network_name }}" + fixed_ips: + - ip_address: 10.5.5.69 + security_groups: + - "{{ secgroup_name }}" + register: port + +- debug: var=port + +- name: Delete port (with security group) + os_port: + cloud: "{{ cloud }}" + state: absent + name: "{{ port_name }}" + +- name: Create port (with 
allowed_address_pairs and extra_dhcp_opts) + os_port: + cloud: "{{ cloud }}" + state: present + name: "{{ port_name }}" + network: "{{ network_name }}" + no_security_groups: True + allowed_address_pairs: + - ip_address: 10.6.7.0/24 + extra_dhcp_opts: + - opt_name: "bootfile-name" + opt_value: "testfile.1" + register: port + +- debug: var=port + +- name: Delete port (with allowed_address_pairs and extra_dhcp_opts) + os_port: + cloud: "{{ cloud }}" + state: absent + name: "{{ port_name }}" + +- name: Delete security group + os_security_group: + cloud: "{{ cloud }}" + state: absent + name: "{{ secgroup_name }}" + +- name: Delete subnet + os_subnet: + cloud: "{{ cloud }}" + state: absent + name: "{{ subnet_name }}" + +- name: Delete network + os_network: + cloud: "{{ cloud }}" + state: absent + name: "{{ network_name }}" diff --git a/openstack/tests/ansible/roles/port/vars/main.yml b/openstack/tests/ansible/roles/port/vars/main.yml new file mode 100644 index 000000000..a81f6a2ea --- /dev/null +++ b/openstack/tests/ansible/roles/port/vars/main.yml @@ -0,0 +1,4 @@ +network_name: ansible_port_network +subnet_name: ansible_port_subnet +port_name: ansible_port +secgroup_name: ansible_port_secgroup diff --git a/openstack/tests/ansible/roles/router/tasks/main.yml b/openstack/tests/ansible/roles/router/tasks/main.yml new file mode 100644 index 000000000..9987f4c9b --- /dev/null +++ b/openstack/tests/ansible/roles/router/tasks/main.yml @@ -0,0 +1,76 @@ +--- +- name: Create external network + os_network: + cloud: "{{ cloud }}" + state: present + name: "{{ external_network_name }}" + external: true + +- name: Create internal network + os_network: + cloud: "{{ cloud }}" + state: present + name: "{{ network_name }}" + external: false + +- name: Create subnet1 + os_subnet: + cloud: "{{ cloud }}" + state: present + network_name: "{{ external_network_name }}" + name: shade_subnet1 + cidr: 10.6.6.0/24 + +- name: Create subnet2 + os_subnet: + cloud: "{{ cloud }}" + state: present + network_name: "{{ network_name }}" + name: shade_subnet2 + cidr: 10.7.7.0/24 + +- name: Create router + os_router: + cloud: "{{ cloud }}" + state: present + name: "{{ router_name }}" + network: "{{ external_network_name }}" + +- name: Update router + os_router: + cloud: "{{ cloud }}" + state: present + name: "{{ router_name }}" + network: "{{ external_network_name }}" + interfaces: + - shade_subnet2 + +- name: Delete router + os_router: + cloud: "{{ cloud }}" + state: absent + name: "{{ router_name }}" + +- name: Delete subnet1 + os_subnet: + cloud: "{{ cloud }}" + state: absent + name: shade_subnet1 + +- name: Delete subnet2 + os_subnet: + cloud: "{{ cloud }}" + state: absent + name: shade_subnet2 + +- name: Delete internal network + os_network: + cloud: "{{ cloud }}" + state: absent + name: "{{ network_name }}" + +- name: Delete external network + os_network: + cloud: "{{ cloud }}" + state: absent + name: "{{ external_network_name }}" diff --git a/openstack/tests/ansible/roles/router/vars/main.yml b/openstack/tests/ansible/roles/router/vars/main.yml new file mode 100644 index 000000000..df5cbeb55 --- /dev/null +++ b/openstack/tests/ansible/roles/router/vars/main.yml @@ -0,0 +1,2 @@ +external_network_name: ansible_external_net +router_name: ansible_router diff --git a/openstack/tests/ansible/roles/security_group/tasks/main.yml b/openstack/tests/ansible/roles/security_group/tasks/main.yml new file mode 100644 index 000000000..ddc7e50cd --- /dev/null +++ b/openstack/tests/ansible/roles/security_group/tasks/main.yml @@ -0,0 +1,123 @@ 
+--- +- name: Create security group + os_security_group: + cloud: "{{ cloud }}" + name: "{{ secgroup_name }}" + state: present + description: Created from Ansible playbook + +- name: Create empty ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: icmp + remote_ip_prefix: 0.0.0.0/0 + +- name: Create -1 ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: icmp + port_range_min: -1 + port_range_max: -1 + remote_ip_prefix: 0.0.0.0/0 + +- name: Create empty TCP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: tcp + remote_ip_prefix: 0.0.0.0/0 + +- name: Create empty UDP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: udp + remote_ip_prefix: 0.0.0.0/0 + +- name: Create HTTP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 + +- name: Create egress rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: tcp + port_range_min: 30000 + port_range_max: 30001 + remote_ip_prefix: 0.0.0.0/0 + direction: egress + +- name: Delete empty ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: icmp + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete -1 ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: icmp + port_range_min: -1 + port_range_max: -1 + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete empty TCP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: tcp + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete empty UDP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: udp + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete HTTP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete egress rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: tcp + port_range_min: 30000 + port_range_max: 30001 + remote_ip_prefix: 0.0.0.0/0 + direction: egress + +- name: Delete security group + os_security_group: + cloud: "{{ cloud }}" + name: "{{ secgroup_name }}" + state: absent diff --git a/openstack/tests/ansible/roles/security_group/vars/main.yml b/openstack/tests/ansible/roles/security_group/vars/main.yml new file mode 100644 index 000000000..00310dd10 --- /dev/null +++ b/openstack/tests/ansible/roles/security_group/vars/main.yml @@ -0,0 +1 @@ +secgroup_name: shade_secgroup diff --git a/openstack/tests/ansible/roles/server/tasks/main.yml b/openstack/tests/ansible/roles/server/tasks/main.yml new file mode 100644 index 000000000..f25bc2ef6 --- /dev/null +++ b/openstack/tests/ansible/roles/server/tasks/main.yml @@ -0,0 +1,92 @@ +--- +- name: Create server with meta as CSV + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + network: "{{ 
server_network }}" + auto_floating_ip: false + meta: "key1=value1,key2=value2" + wait: true + register: server + +- debug: var=server + +- name: Delete server with meta as CSV + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true + +- name: Create server with meta as dict + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + auto_floating_ip: false + network: "{{ server_network }}" + meta: + key1: value1 + key2: value2 + wait: true + register: server + +- debug: var=server + +- name: Delete server with meta as dict + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true + +- name: Create server (FIP from pool/network) + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + network: "{{ server_network }}" + floating_ip_pools: + - public + wait: true + register: server + +- debug: var=server + +- name: Delete server (FIP from pool/network) + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true + +- name: Create server from volume + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + network: "{{ server_network }}" + auto_floating_ip: false + boot_from_volume: true + volume_size: 5 + terminate_volume: true + wait: true + register: server + +- debug: var=server + +- name: Delete server with volume + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true diff --git a/openstack/tests/ansible/roles/server/vars/main.yaml b/openstack/tests/ansible/roles/server/vars/main.yaml new file mode 100644 index 000000000..3db7edf8a --- /dev/null +++ b/openstack/tests/ansible/roles/server/vars/main.yaml @@ -0,0 +1,3 @@ +server_network: private +server_name: ansible_server +flavor: m1.tiny diff --git a/openstack/tests/ansible/roles/subnet/tasks/main.yml b/openstack/tests/ansible/roles/subnet/tasks/main.yml new file mode 100644 index 000000000..8d70cd2b5 --- /dev/null +++ b/openstack/tests/ansible/roles/subnet/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Create network {{ network_name }} + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: present + +- name: Create subnet {{ subnet_name }} on network {{ network_name }} + os_subnet: + cloud: "{{ cloud }}" + network_name: "{{ network_name }}" + name: "{{ subnet_name }}" + state: present + enable_dhcp: false + dns_nameservers: + - 8.8.8.7 + - 8.8.8.8 + cidr: 192.168.0.0/24 + gateway_ip: 192.168.0.1 + allocation_pool_start: 192.168.0.2 + allocation_pool_end: 192.168.0.254 + +- name: Update subnet + os_subnet: + cloud: "{{ cloud }}" + network_name: "{{ network_name }}" + name: "{{ subnet_name }}" + state: present + dns_nameservers: + - 8.8.8.7 + cidr: 192.168.0.0/24 + +- name: Delete subnet {{ subnet_name }} + os_subnet: + cloud: "{{ cloud }}" + name: "{{ subnet_name }}" + state: absent + +- name: Delete network {{ network_name }} + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: absent diff --git a/openstack/tests/ansible/roles/subnet/vars/main.yml b/openstack/tests/ansible/roles/subnet/vars/main.yml new file mode 100644 index 000000000..b9df9212a --- /dev/null +++ b/openstack/tests/ansible/roles/subnet/vars/main.yml @@ -0,0 +1 @@ +subnet_name: shade_subnet diff --git a/openstack/tests/ansible/roles/user/tasks/main.yml 
b/openstack/tests/ansible/roles/user/tasks/main.yml new file mode 100644 index 000000000..6585ca582 --- /dev/null +++ b/openstack/tests/ansible/roles/user/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: Create user + os_user: + cloud: "{{ cloud }}" + state: present + name: ansible_user + password: secret + email: ansible.user@nowhere.net + domain: default + default_project: demo + register: user + +- debug: var=user + +- name: Update user + os_user: + cloud: "{{ cloud }}" + state: present + name: ansible_user + password: secret + email: updated.ansible.user@nowhere.net + register: updateduser + +- debug: var=updateduser + +- name: Delete user + os_user: + cloud: "{{ cloud }}" + state: absent + name: ansible_user diff --git a/openstack/tests/ansible/roles/user_group/tasks/main.yml b/openstack/tests/ansible/roles/user_group/tasks/main.yml new file mode 100644 index 000000000..a0074e2dc --- /dev/null +++ b/openstack/tests/ansible/roles/user_group/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: Create user + os_user: + cloud: "{{ cloud }}" + state: present + name: ansible_user + password: secret + email: ansible.user@nowhere.net + domain: default + default_project: demo + register: user + +- name: Assign user to nonadmins group + os_user_group: + cloud: "{{ cloud }}" + state: present + user: ansible_user + group: nonadmins + +- name: Remove user from nonadmins group + os_user_group: + cloud: "{{ cloud }}" + state: absent + user: ansible_user + group: nonadmins + +- name: Delete user + os_user: + cloud: "{{ cloud }}" + state: absent + name: ansible_user diff --git a/openstack/tests/ansible/roles/volume/tasks/main.yml b/openstack/tests/ansible/roles/volume/tasks/main.yml new file mode 100644 index 000000000..1479a0030 --- /dev/null +++ b/openstack/tests/ansible/roles/volume/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Create volume + os_volume: + cloud: "{{ cloud }}" + state: present + size: 1 + display_name: ansible_volume + display_description: Test volume + register: vol + +- debug: var=vol + +- name: Delete volume + os_volume: + cloud: "{{ cloud }}" + state: absent + display_name: ansible_volume diff --git a/openstack/tests/ansible/run.yml b/openstack/tests/ansible/run.yml new file mode 100644 index 000000000..27ad8af0f --- /dev/null +++ b/openstack/tests/ansible/run.yml @@ -0,0 +1,24 @@ +--- +- hosts: localhost + connection: local + gather_facts: true + + roles: + - { role: auth, tags: auth } + - { role: client_config, tags: client_config } + - { role: group, tags: group } + - { role: image, tags: image } + - { role: keypair, tags: keypair } + - { role: keystone_domain, tags: keystone_domain } + - { role: keystone_role, tags: keystone_role } + - { role: network, tags: network } + - { role: nova_flavor, tags: nova_flavor } + - { role: object, tags: object } + - { role: port, tags: port } + - { role: router, tags: router } + - { role: security_group, tags: security_group } + - { role: server, tags: server } + - { role: subnet, tags: subnet } + - { role: user, tags: user } + - { role: user_group, tags: user_group } + - { role: volume, tags: volume } diff --git a/openstack/tests/base.py b/openstack/tests/base.py new file mode 100644 index 000000000..81e6450bc --- /dev/null +++ b/openstack/tests/base.py @@ -0,0 +1,110 @@ +# Copyright 2010-2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import fixtures +import logging +import munch +import pprint +from six import StringIO +import testtools +import testtools.content + +_TRUE_VALUES = ('true', '1', 'yes') + + +class TestCase(testtools.TestCase): + + """Test case base class for all tests.""" + + # A way to adjust slow test classes + TIMEOUT_SCALING_FACTOR = 1.0 + + def setUp(self): + """Run before each test method to initialize test environment.""" + + super(TestCase, self).setUp() + test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0)) + try: + test_timeout = int(test_timeout * self.TIMEOUT_SCALING_FACTOR) + except ValueError: + # If timeout value is invalid do not set a timeout. + test_timeout = 0 + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + + self.useFixture(fixtures.NestedTempfile()) + self.useFixture(fixtures.TempHomeDir()) + + if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + + self._log_stream = StringIO() + if os.environ.get('OS_ALWAYS_LOG') in _TRUE_VALUES: + self.addCleanup(self.printLogs) + else: + self.addOnException(self.attachLogs) + + handler = logging.StreamHandler(self._log_stream) + formatter = logging.Formatter('%(asctime)s %(name)-32s %(message)s') + handler.setFormatter(formatter) + + logger = logging.getLogger('shade') + logger.setLevel(logging.DEBUG) + logger.addHandler(handler) + + # Enable HTTP level tracing + logger = logging.getLogger('keystoneauth') + logger.setLevel(logging.DEBUG) + logger.addHandler(handler) + logger.propagate = False + + def assertEqual(self, first, second, *args, **kwargs): + '''Munch aware wrapper''' + if isinstance(first, munch.Munch): + first = first.toDict() + if isinstance(second, munch.Munch): + second = second.toDict() + return super(TestCase, self).assertEqual( + first, second, *args, **kwargs) + + def printLogs(self, *args): + self._log_stream.seek(0) + print(self._log_stream.read()) + + def attachLogs(self, *args): + def reader(): + self._log_stream.seek(0) + while True: + x = self._log_stream.read(4096) + if not x: + break + yield x.encode('utf8') + content = testtools.content.content_from_reader( + reader, + testtools.content_type.UTF8_TEXT, + False) + self.addDetail('logging', content) + + def add_info_on_exception(self, name, text): + def add_content(unused): + self.addDetail(name, testtools.content.text_content( + pprint.pformat(text))) + self.addOnException(add_content) diff --git a/openstack/tests/fakes.py b/openstack/tests/fakes.py new file mode 100644 index 000000000..cc35723f5 --- /dev/null +++ b/openstack/tests/fakes.py @@ -0,0 +1,467 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+fakes
+----------------------------------
+
+Fakes used for testing
+"""
+
+import datetime
+import json
+import uuid
+
+from openstack.cloud._heat import template_format
+from openstack.cloud import meta
+
+PROJECT_ID = '1c36b64c840a42cd9e9b931a369337f0'
+FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8dddd'
+CHOCOLATE_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8ddde'
+STRAWBERRY_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8dddf'
+COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1'
+ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format(
+    p=PROJECT_ID)
+NO_MD5 = '93b885adfe0da089cdf634904fd59f71'
+NO_SHA256 = '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d'
+FAKE_PUBLIC_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" # flake8: noqa
+
+
+def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24):
+    return {
+        u'OS-FLV-DISABLED:disabled': False,
+        u'OS-FLV-EXT-DATA:ephemeral': 0,
+        u'disk': disk,
+        u'id': flavor_id,
+        u'links': [{
+            u'href': u'{endpoint}/flavors/{id}'.format(
+                endpoint=COMPUTE_ENDPOINT, id=flavor_id),
+            u'rel': u'self'
+        }, {
+            u'href': u'{endpoint}/flavors/{id}'.format(
+                endpoint=COMPUTE_ENDPOINT, id=flavor_id),
+            u'rel': u'bookmark'
+        }],
+        u'name': name,
+        u'os-flavor-access:is_public': True,
+        u'ram': ram,
+        u'rxtx_factor': 1.0,
+        u'swap': u'',
+        u'vcpus': vcpus
+    }
+FAKE_FLAVOR = make_fake_flavor(FLAVOR_ID, 'vanilla')
+FAKE_CHOCOLATE_FLAVOR = make_fake_flavor(
+    CHOCOLATE_FLAVOR_ID, 'chocolate', ram=200)
+FAKE_STRAWBERRY_FLAVOR = make_fake_flavor(
+    STRAWBERRY_FLAVOR_ID, 'strawberry', ram=300)
+FAKE_FLAVOR_LIST = [FAKE_FLAVOR, FAKE_CHOCOLATE_FLAVOR, FAKE_STRAWBERRY_FLAVOR]
+FAKE_TEMPLATE = '''heat_template_version: 2014-10-16
+
+parameters:
+  length:
+    type: number
+    default: 10
+
+resources:
+  my_rand:
+    type: OS::Heat::RandomString
+    properties:
+      length: {get_param: length}
+outputs:
+  rand:
+    value:
+      get_attr: [my_rand, value]
+'''
+FAKE_TEMPLATE_CONTENT = template_format.parse(FAKE_TEMPLATE)
+
+
+def make_fake_server(
+        server_id, name, status='ACTIVE', admin_pass=None,
+        addresses=None, image=None, flavor=None):
+    if addresses is None:
+        if status == 'ACTIVE':
+            addresses = {
+                "private": [
+                    {
+                        "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d",
+                        "version": 6,
+                        "addr": "fddb:b018:307:0:f816:3eff:fedf:b08d",
+                        "OS-EXT-IPS:type": "fixed"},
+                    {
+                        "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d",
+                        "version": 4,
+                        "addr": "10.1.0.9",
+                        "OS-EXT-IPS:type": "fixed"},
+                    {
+                        "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d",
+                        "version": 4,
+                        "addr": "172.24.5.5",
+                        "OS-EXT-IPS:type": "floating"}]}
+        else:
+            addresses = {}
+    if image is None:
+        image = {"id": "217f3ab1-03e0-4450-bf27-63d52b421e9e",
+                 "links": []}
+    if flavor is None:
+        flavor = {"id":
"64", + "links": []} + + server = { + "OS-EXT-STS:task_state": None, + "addresses": addresses, + "links": [], + "image": image, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2017-03-23T23:57:38.000000", + "flavor": flavor, + "id": server_id, + "security_groups": [{"name": "default"}], + "user_id": "9c119f4beaaa438792ce89387362b3ad", + "OS-DCF:diskConfig": "MANUAL", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "OS-EXT-AZ:availability_zone": "nova", + "metadata": {}, + "status": status, + "updated": "2017-03-23T23:57:39Z", + "hostId": "89d165f04384e3ffa4b6536669eb49104d30d6ca832bba2684605dbc", + "OS-SRV-USG:terminated_at": None, + "key_name": None, + "name": name, + "created": "2017-03-23T23:57:12Z", + "tenant_id": PROJECT_ID, + "os-extended-volumes:volumes_attached": [], + "config_drive": "True"} + if admin_pass: + server['adminPass'] = admin_pass + return json.loads(json.dumps(server)) + + +def make_fake_keypair(name): + # Note: this is literally taken from: + # https://developer.openstack.org/api-ref/compute/ + return { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": name, + "type": "ssh", + "public_key": FAKE_PUBLIC_KEY, + "created_at": datetime.datetime.now().isoformat(), + } + + +def make_fake_stack(id, name, description=None, status='CREATE_COMPLETE'): + return { + 'creation_time': '2017-03-23T23:57:12Z', + 'deletion_time': '2017-03-23T23:57:12Z', + 'description': description, + 'id': id, + 'links': [], + 'parent': None, + 'stack_name': name, + 'stack_owner': None, + 'stack_status': status, + 'stack_user_project_id': PROJECT_ID, + 'tags': None, + 'updated_time': '2017-03-23T23:57:12Z', + } + + +def make_fake_stack_event( + id, name, status='CREATE_COMPLETED', resource_name='id'): + event_id = uuid.uuid4().hex + self_url = "{endpoint}/stacks/{name}/{id}/resources/{name}/events/{event}" + resource_url = "{endpoint}/stacks/{name}/{id}/resources/{name}" + return { + "resource_name": id if resource_name == 'id' else name, + "event_time": "2017-03-26T19:38:18", + "links": [ + { + "href": self_url.format( + endpoint=ORCHESTRATION_ENDPOINT, + name=name, id=id, event=event_id), + "rel": "self" + }, { + "href": resource_url.format( + endpoint=ORCHESTRATION_ENDPOINT, + name=name, id=id), + "rel": "resource" + }, { + "href": "{endpoint}/stacks/{name}/{id}".format( + endpoint=ORCHESTRATION_ENDPOINT, + name=name, id=id), + "rel": "stack" + }], + "logical_resource_id": name, + "resource_status": status, + "resource_status_reason": "", + "physical_resource_id": id, + "id": event_id, + } + + +def make_fake_image( + image_id=None, md5=NO_MD5, sha256=NO_SHA256, status='active'): + return { + u'image_state': u'available', + u'container_format': u'bare', + u'min_ram': 0, + u'ramdisk_id': None, + u'updated_at': u'2016-02-10T05:05:02Z', + u'file': '/v2/images/' + image_id + '/file', + u'size': 3402170368, + u'image_type': u'snapshot', + u'disk_format': u'qcow2', + u'id': image_id, + u'schema': u'/v2/schemas/image', + u'status': status, + u'tags': [], + u'visibility': u'private', + u'locations': [{ + u'url': u'http://127.0.0.1/images/' + image_id, + u'metadata': {}}], + u'min_disk': 40, + u'virtual_size': None, + u'name': u'fake_image', + u'checksum': u'ee36e35a297980dee1b514de9803ec6d', + u'created_at': u'2016-02-10T05:03:11Z', + u'owner_specified.openstack.md5': NO_MD5, + u'owner_specified.openstack.sha256': NO_SHA256, + u'owner_specified.openstack.object': 'images/fake_image', + u'protected': False} + + +def 
make_fake_machine(machine_name, machine_id=None):
+    if not machine_id:
+        machine_id = uuid.uuid4().hex
+    return meta.obj_to_munch(FakeMachine(
+        id=machine_id,
+        name=machine_name))
+
+
+class FakeFloatingIP(object):
+    def __init__(self, id, pool, ip, fixed_ip, instance_id):
+        self.id = id
+        self.pool = pool
+        self.ip = ip
+        self.fixed_ip = fixed_ip
+        self.instance_id = instance_id
+
+
+def make_fake_server_group(id, name, policies):
+    return json.loads(json.dumps({
+        'id': id,
+        'name': name,
+        'policies': policies,
+        'members': [],
+        'metadata': {},
+    }))
+
+
+def make_fake_hypervisor(id, name):
+    return json.loads(json.dumps({
+        'id': id,
+        'hypervisor_hostname': name,
+        'state': 'up',
+        'status': 'enabled',
+        "cpu_info": {
+            "arch": "x86_64",
+            "model": "Nehalem",
+            "vendor": "Intel",
+            "features": [
+                "pge",
+                "clflush"
+            ],
+            "topology": {
+                "cores": 1,
+                "threads": 1,
+                "sockets": 4
+            }
+        },
+        "current_workload": 0,
+        "disk_available_least": 0,
+        "host_ip": "1.1.1.1",
+        "free_disk_gb": 1028,
+        "free_ram_mb": 7680,
+        "hypervisor_type": "fake",
+        "hypervisor_version": 1000,
+        "local_gb": 1028,
+        "local_gb_used": 0,
+        "memory_mb": 8192,
+        "memory_mb_used": 512,
+        "running_vms": 0,
+        "service": {
+            "host": "host1",
+            "id": 7,
+            "disabled_reason": None
+        },
+        "vcpus": 1,
+        "vcpus_used": 0
+    }))
+
+
+class FakeVolume(object):
+    def __init__(
+            self, id, status, name, attachments=[],
+            size=75):
+        self.id = id
+        self.status = status
+        self.name = name
+        self.attachments = attachments
+        self.size = size
+        self.snapshot_id = 'id:snapshot'
+        self.description = 'description'
+        self.volume_type = 'type:volume'
+        self.availability_zone = 'az1'
+        self.created_at = '1900-01-01 12:34:56'
+        self.source_volid = '12345'
+        self.metadata = {}
+
+
+class FakeVolumeSnapshot(object):
+    def __init__(
+            self, id, status, name, description, size=75):
+        self.id = id
+        self.status = status
+        self.name = name
+        self.description = description
+        self.size = size
+        self.created_at = '1900-01-01 12:34:56'
+        self.volume_id = '12345'
+        self.metadata = {}
+
+
+class FakeMachine(object):
+    def __init__(self, id, name=None, driver=None, driver_info=None,
+                 chassis_uuid=None, instance_info=None, instance_uuid=None,
+                 properties=None):
+        self.uuid = id
+        self.name = name
+        self.driver = driver
+        self.driver_info = driver_info
+        self.chassis_uuid = chassis_uuid
+        self.instance_info = instance_info
+        self.instance_uuid = instance_uuid
+        self.properties = properties
+
+
+class FakeMachinePort(object):
+    def __init__(self, id, address, node_id):
+        self.uuid = id
+        self.address = address
+        self.node_uuid = node_id
+
+
+def make_fake_neutron_security_group(
+        id, name, description, rules, project_id=None):
+    if not rules:
+        rules = []
+    if not project_id:
+        project_id = PROJECT_ID
+    return json.loads(json.dumps({
+        'id': id,
+        'name': name,
+        'description': description,
+        'project_id': project_id,
+        'tenant_id': project_id,
+        'security_group_rules': rules,
+    }))
+
+
+def make_fake_nova_security_group_rule(
+        id, from_port, to_port, ip_protocol, cidr):
+    return json.loads(json.dumps({
+        'id': id,
+        'from_port': int(from_port),
+        'to_port': int(to_port),
+        'ip_protocol': ip_protocol,
+        'ip_range': {
+            'cidr': cidr
+        }
+    }))
+
+
+def make_fake_nova_security_group(id, name, description, rules):
+    if not rules:
+        rules = []
+    return json.loads(json.dumps({
+        'id': id,
+        'name': name,
+        'description': description,
+        'tenant_id': PROJECT_ID,
+        'rules': rules,
+    }))
+
+
+class FakeNovaSecgroupRule(object):
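+    """Attribute-style fake of a novaclient security group rule.
+
+    A minimal stand-in: it assumes only the attributes that the unit tests
+    actually read.
+    """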
+ def __init__(self, id, from_port=None, to_port=None, ip_protocol=None, + cidr=None, parent_group_id=None): + self.id = id + self.from_port = from_port + self.to_port = to_port + self.ip_protocol = ip_protocol + if cidr: + self.ip_range = {'cidr': cidr} + self.parent_group_id = parent_group_id + + +class FakeHypervisor(object): + def __init__(self, id, hostname): + self.id = id + self.hypervisor_hostname = hostname + + +class FakeZone(object): + def __init__(self, id, name, type_, email, description, + ttl, masters): + self.id = id + self.name = name + self.type_ = type_ + self.email = email + self.description = description + self.ttl = ttl + self.masters = masters + + +class FakeRecordset(object): + def __init__(self, zone, id, name, type_, description, + ttl, records): + self.zone = zone + self.id = id + self.name = name + self.type_ = type_ + self.description = description + self.ttl = ttl + self.records = records + + +def make_fake_aggregate(id, name, availability_zone='nova', + metadata=None, hosts=None): + if not metadata: + metadata = {} + if not hosts: + hosts = [] + return json.loads(json.dumps({ + "availability_zone": availability_zone, + "created_at": datetime.datetime.now().isoformat(), + "deleted": False, + "deleted_at": None, + "hosts": hosts, + "id": int(id), + "metadata": { + "availability_zone": availability_zone, + }, + "name": name, + "updated_at": None, + })) diff --git a/openstack/tests/functional/base.py b/openstack/tests/functional/base.py index 9eb4eba4d..eb968f35d 100644 --- a/openstack/tests/functional/base.py +++ b/openstack/tests/functional/base.py @@ -11,7 +11,7 @@ # under the License. import os -import os_client_config +import openstack.config import time import unittest @@ -39,7 +39,7 @@ def _get_resource_value(resource_key, default): return default opts = Opts(cloud_name=TEST_CLOUD) -occ = os_client_config.OpenStackConfig() +occ = openstack.config.OpenStackConfig() cloud = occ.get_one_cloud(opts.cloud, argparse=opts) IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.3.5-x86_64-disk') diff --git a/openstack/tests/functional/cloud/__init__.py b/openstack/tests/functional/cloud/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openstack/tests/functional/cloud/base.py b/openstack/tests/functional/cloud/base.py new file mode 100644 index 000000000..855d5557a --- /dev/null +++ b/openstack/tests/functional/cloud/base.py @@ -0,0 +1,93 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
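+
+# Note: as the defaults below show, these tests target a clouds.yaml with
+# 'devstack' and 'devstack-admin' entries; the cloud names can be overridden
+# via the OPENSTACKSDK_DEMO_CLOUD and OPENSTACKSDK_OPERATOR_CLOUD
+# environment variables.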
+ +# TODO(shade) Merge this with openstack.tests.functional.base + +import os + +import openstack.config as occ + +import openstack.cloud +from openstack.tests import base + + +class BaseFunctionalTestCase(base.TestCase): + def setUp(self): + super(BaseFunctionalTestCase, self).setUp() + + self._demo_name = os.environ.get('OPENSTACKSDK_DEMO_CLOUD', 'devstack') + self._op_name = os.environ.get( + 'OPENSTACKSDK_OPERATOR_CLOUD', 'devstack-admin') + + self.config = occ.OpenStackConfig() + self._set_user_cloud() + self._set_operator_cloud() + + self.identity_version = \ + self.operator_cloud.cloud_config.get_api_version('identity') + + def _set_user_cloud(self, **kwargs): + user_config = self.config.get_one_cloud( + cloud=self._demo_name, **kwargs) + self.user_cloud = openstack.OpenStackCloud( + cloud_config=user_config, + log_inner_exceptions=True) + + def _set_operator_cloud(self, **kwargs): + operator_config = self.config.get_one_cloud( + cloud=self._op_name, **kwargs) + self.operator_cloud = openstack.OperatorCloud( + cloud_config=operator_config, + log_inner_exceptions=True) + + def pick_image(self): + images = self.user_cloud.list_images() + self.add_info_on_exception('images', images) + + image_name = os.environ.get('OPENSTACKSDK_IMAGE') + if image_name: + for image in images: + if image.name == image_name: + return image + self.assertFalse( + "Cloud does not have {image}".format(image=image_name)) + + for image in images: + if image.name.startswith('cirros') and image.name.endswith('-uec'): + return image + for image in images: + if (image.name.startswith('cirros') + and image.disk_format == 'qcow2'): + return image + for image in images: + if image.name.lower().startswith('ubuntu'): + return image + for image in images: + if image.name.lower().startswith('centos'): + return image + self.assertFalse('no sensible image available') + + +class KeystoneBaseFunctionalTestCase(BaseFunctionalTestCase): + + def setUp(self): + super(KeystoneBaseFunctionalTestCase, self).setUp() + + use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False) + if use_keystone_v2: + # keystone v2 has special behavior for the admin + # interface and some of the operations, so make a new cloud + # object with interface set to admin. + # We only do it for keystone tests on v2 because otherwise + # the admin interface is not a thing that wants to actually + # be used + self._set_operator_cloud(interface='admin') diff --git a/openstack/tests/functional/cloud/hooks/post_test_hook.sh b/openstack/tests/functional/cloud/hooks/post_test_hook.sh new file mode 100755 index 000000000..8092a6114 --- /dev/null +++ b/openstack/tests/functional/cloud/hooks/post_test_hook.sh @@ -0,0 +1,54 @@ +#!/bin/bash -x + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# TODO(shade) Rework for zuul v3 + +export OPENSTACKSDK_DIR="$BASE/new/shade" + +cd $OPENSTACKSDK_DIR +sudo chown -R jenkins:stack $OPENSTACKSDK_DIR + +CLOUDS_YAML=/etc/openstack/clouds.yaml + +if [ ! 
-e ${CLOUDS_YAML} ] +then + # stable/liberty had clouds.yaml in the home/base directory + sudo mkdir -p /etc/openstack + sudo cp $BASE/new/.config/openstack/clouds.yaml ${CLOUDS_YAML} + sudo chown -R jenkins:stack /etc/openstack +fi + +# Devstack runs both keystone v2 and v3. An environment variable is set +# within the shade keystone v2 job that tells us which version we should +# test against. +if [ ${OPENSTACKSDK_USE_KEYSTONE_V2:-0} -eq 1 ] +then + sudo sed -ie "s/identity_api_version: '3'/identity_api_version: '2.0'/g" $CLOUDS_YAML + sudo sed -ie '/^.*domain_id.*$/d' $CLOUDS_YAML +fi + +if [ "x$1" = "xtips" ] ; then + tox_env=functional-tips +else + tox_env=functional +fi +echo "Running shade functional test suite" +set +e +sudo -E -H -u jenkins tox -e$tox_env +EXIT_CODE=$? +sudo stestr last --subunit > $WORKSPACE/tempest.subunit +.tox/$tox_env/bin/pbr freeze +set -e + +exit $EXIT_CODE diff --git a/openstack/tests/functional/cloud/test_aggregate.py b/openstack/tests/functional/cloud/test_aggregate.py new file mode 100644 index 000000000..8949c12db --- /dev/null +++ b/openstack/tests/functional/cloud/test_aggregate.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_aggregate +---------------------------------- + +Functional tests for `shade` aggregate resource. +""" + +from openstack.tests.functional import base + + +class TestAggregate(base.BaseFunctionalTestCase): + + def test_aggregates(self): + aggregate_name = self.getUniqueString() + availability_zone = self.getUniqueString() + self.addCleanup(self.cleanup, aggregate_name) + aggregate = self.operator_cloud.create_aggregate(aggregate_name) + + aggregate_ids = [v['id'] + for v in self.operator_cloud.list_aggregates()] + self.assertIn(aggregate['id'], aggregate_ids) + + aggregate = self.operator_cloud.update_aggregate( + aggregate_name, + availability_zone=availability_zone + ) + self.assertEqual(availability_zone, aggregate['availability_zone']) + + aggregate = self.operator_cloud.set_aggregate_metadata( + aggregate_name, + {'key': 'value'} + ) + self.assertIn('key', aggregate['metadata']) + + aggregate = self.operator_cloud.set_aggregate_metadata( + aggregate_name, + {'key': None} + ) + self.assertNotIn('key', aggregate['metadata']) + + self.operator_cloud.delete_aggregate(aggregate_name) + + def cleanup(self, aggregate_name): + aggregate = self.operator_cloud.get_aggregate(aggregate_name) + if aggregate: + self.operator_cloud.delete_aggregate(aggregate_name) diff --git a/openstack/tests/functional/cloud/test_cluster_templates.py b/openstack/tests/functional/cloud/test_cluster_templates.py new file mode 100644 index 000000000..f2142494c --- /dev/null +++ b/openstack/tests/functional/cloud/test_cluster_templates.py @@ -0,0 +1,113 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_cluster_templates
+----------------------------------
+
+Functional tests for `shade` cluster_template methods.
+"""
+
+from testtools import content
+
+from openstack.tests.functional import base
+
+import os
+import subprocess
+
+
+class TestClusterTemplate(base.BaseFunctionalTestCase):
+
+    def setUp(self):
+        super(TestClusterTemplate, self).setUp()
+        if not self.user_cloud.has_service('container-infra'):
+            self.skipTest('Container service not supported by cloud')
+        self.ct = None
+
+    def test_cluster_templates(self):
+        '''Test cluster_templates functionality'''
+        name = 'fake-cluster_template'
+        server_type = 'vm'
+        public = False
+        image_id = 'fedora-atomic-f23-dib'
+        tls_disabled = False
+        registry_enabled = False
+        coe = 'kubernetes'
+        keypair_id = 'testkey'
+
+        self.addDetail('cluster_template', content.text_content(name))
+        self.addCleanup(self.cleanup, name)
+
+        # generate a keypair to add to nova
+        ssh_directory = '/tmp/.ssh'
+        if not os.path.isdir(ssh_directory):
+            os.mkdir(ssh_directory)
+        subprocess.call(
+            ['ssh-keygen', '-t', 'rsa', '-N', '', '-f',
+             '%s/id_rsa_shade' % ssh_directory])
+
+        # add keypair to nova (the public key file matches the -f path above)
+        with open('%s/id_rsa_shade.pub' % ssh_directory) as f:
+            key_content = f.read()
+            self.user_cloud.create_keypair('testkey', key_content)
+
+        # Test we can create a cluster_template and we get it returned
+        self.ct = self.user_cloud.create_cluster_template(
+            name=name, image_id=image_id,
+            keypair_id=keypair_id, coe=coe)
+        self.assertEqual(self.ct['name'], name)
+        self.assertEqual(self.ct['image_id'], image_id)
+        self.assertEqual(self.ct['keypair_id'], keypair_id)
+        self.assertEqual(self.ct['coe'], coe)
+        self.assertEqual(self.ct['registry_enabled'], registry_enabled)
+        self.assertEqual(self.ct['tls_disabled'], tls_disabled)
+        self.assertEqual(self.ct['public'], public)
+        self.assertEqual(self.ct['server_type'], server_type)
+
+        # Test that we can list cluster_templates
+        cluster_templates = self.user_cloud.list_cluster_templates()
+        self.assertIsNotNone(cluster_templates)
+
+        # Test we get the same cluster_template with the
+        # get_cluster_template method
+        cluster_template_get = self.user_cloud.get_cluster_template(
+            self.ct['uuid'])
+        self.assertEqual(cluster_template_get['uuid'], self.ct['uuid'])
+
+        # Test the get method also works by name
+        cluster_template_get = self.user_cloud.get_cluster_template(name)
+        self.assertEqual(cluster_template_get['name'], self.ct['name'])
+
+        # Test we can update a field on the cluster_template and only that
+        # field is updated
+        cluster_template_update = self.user_cloud.update_cluster_template(
+            self.ct['uuid'], 'replace', tls_disabled=True)
+        self.assertEqual(
+            cluster_template_update['uuid'], self.ct['uuid'])
+        self.assertTrue(cluster_template_update['tls_disabled'])
+
+        # Test we can delete and get True returned
+        cluster_template_delete = self.user_cloud.delete_cluster_template(
+            self.ct['uuid'])
+        self.assertTrue(cluster_template_delete)
+
+    def cleanup(self, name):
+        if self.ct:
+            try:
+                self.user_cloud.delete_cluster_template(self.ct['name'])
+            except Exception:
+                pass
+
+        # delete keypair
+        self.user_cloud.delete_keypair('testkey')
+        os.unlink('/tmp/.ssh/id_rsa_shade')
+        os.unlink('/tmp/.ssh/id_rsa_shade.pub')
diff --git a/openstack/tests/functional/cloud/test_compute.py b/openstack/tests/functional/cloud/test_compute.py
new file mode 100644
index 000000000..627c5adb0
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_compute.py
@@ -0,0 +1,460 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_compute
+----------------------------------
+
+Functional tests for `shade` compute methods.
+"""
+
+from fixtures import TimeoutException
+import six
+
+from openstack.cloud import _utils
+from openstack.cloud import exc
+from openstack.tests.functional import base
+from openstack.tests.functional.util import pick_flavor
+
+
+class TestCompute(base.BaseFunctionalTestCase):
+    def setUp(self):
+        # OS_TEST_TIMEOUT is 60 sec by default
+        # but on a bad day, test_attach_detach_volume can take more time.
+        self.TIMEOUT_SCALING_FACTOR = 1.5
+
+        super(TestCompute, self).setUp()
+        self.flavor = pick_flavor(
+            self.user_cloud.list_flavors(get_extra=False))
+        if self.flavor is None:
+            self.assertFalse('no sensible flavor available')
+        self.image = self.pick_image()
+        self.server_name = self.getUniqueString()
+
+    def _cleanup_servers_and_volumes(self, server_name):
+        """Delete the named server and any attached volumes.
+
+        Adding separate cleanup calls for servers and volumes can be tricky
+        since they need to be done in the proper order, and deleting a server
+        can itself start the deletion of a volume that the server was booted
+        from. This method encapsulates that logic.
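+
+        The server is deleted first (waiting for it to finish), and any of
+        its volumes that did not already start deleting along with it are
+        then removed explicitly.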
+ """ + server = self.user_cloud.get_server(server_name) + if not server: + return + volumes = self.user_cloud.get_volumes(server) + try: + self.user_cloud.delete_server(server.name, wait=True) + for volume in volumes: + if volume.status != 'deleting': + self.user_cloud.delete_volume(volume.id, wait=True) + except (exc.OpenStackCloudTimeout, TimeoutException): + # Ups, some timeout occured during process of deletion server + # or volumes, so now we will try to call delete each of them + # once again and we will try to live with it + self.user_cloud.delete_server(server.name) + for volume in volumes: + self.operator_cloud.delete_volume( + volume.id, wait=False, force=True) + + def test_create_and_delete_server(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.id, server['flavor']['id']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True)) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + + def test_create_and_delete_server_auto_ip_delete_ips(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + auto_ip=True, + wait=True) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.id, server['flavor']['id']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server( + self.server_name, wait=True, delete_ips=True)) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + + def test_attach_detach_volume(self): + server_name = self.getUniqueString() + self.addCleanup(self._cleanup_servers_and_volumes, server_name) + server = self.user_cloud.create_server( + name=server_name, image=self.image, flavor=self.flavor, + wait=True) + volume = self.user_cloud.create_volume(1) + vol_attachment = self.user_cloud.attach_volume(server, volume) + for key in ('device', 'serverId', 'volumeId'): + self.assertIn(key, vol_attachment) + self.assertTrue(vol_attachment[key]) # assert string is not empty + self.assertIsNone(self.user_cloud.detach_volume(server, volume)) + + def test_create_and_delete_server_with_config_drive(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + config_drive=True, + wait=True) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.id, server['flavor']['id']) + self.assertTrue(server['has_config_drive']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True)) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + + def test_create_and_delete_server_with_config_drive_none(self): + # check that we're not sending invalid values for config_drive + # if it's passed in explicitly as None - which nodepool does if it's + # not set in the config + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + 
name=self.server_name, + image=self.image, + flavor=self.flavor, + config_drive=None, + wait=True) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.id, server['flavor']['id']) + self.assertFalse(server['has_config_drive']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server( + self.server_name, wait=True)) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + + def test_list_all_servers(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True) + # We're going to get servers from other tests, but that's ok, as long + # as we get the server we created with the demo user. + found_server = False + for s in self.operator_cloud.list_servers(all_projects=True): + if s.name == server.name: + found_server = True + self.assertTrue(found_server) + + def test_list_all_servers_bad_permissions(self): + # Normal users are not allowed to pass all_projects=True + self.assertRaises( + exc.OpenStackCloudException, + self.user_cloud.list_servers, + all_projects=True) + + def test_create_server_image_flavor_dict(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image={'id': self.image.id}, + flavor={'id': self.flavor.id}, + wait=True) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.id, server['flavor']['id']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True)) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + + def test_get_server_console(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True) + # _get_server_console_output does not trap HTTP exceptions, so this + # returning a string tests that the call is correct. Testing that + # the cloud returns actual data in the output is out of scope. 
+        log = self.user_cloud._get_server_console_output(server_id=server.id)
+        self.assertTrue(isinstance(log, six.string_types))
+
+    def test_get_server_console_name_or_id(self):
+        self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
+        self.user_cloud.create_server(
+            name=self.server_name,
+            image=self.image,
+            flavor=self.flavor,
+            wait=True)
+        log = self.user_cloud.get_server_console(server=self.server_name)
+        self.assertTrue(isinstance(log, six.string_types))
+
+    def test_list_availability_zone_names(self):
+        self.assertEqual(
+            ['nova'], self.user_cloud.list_availability_zone_names())
+
+    def test_get_server_console_bad_server(self):
+        self.assertRaises(
+            exc.OpenStackCloudException,
+            self.user_cloud.get_server_console,
+            server=self.server_name)
+
+    def test_create_and_delete_server_with_admin_pass(self):
+        self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
+        server = self.user_cloud.create_server(
+            name=self.server_name,
+            image=self.image,
+            flavor=self.flavor,
+            admin_pass='sheiqu9loegahSh',
+            wait=True)
+        self.assertEqual(self.server_name, server['name'])
+        self.assertEqual(self.image.id, server['image']['id'])
+        self.assertEqual(self.flavor.id, server['flavor']['id'])
+        self.assertEqual(server['adminPass'], 'sheiqu9loegahSh')
+        self.assertTrue(
+            self.user_cloud.delete_server(self.server_name, wait=True))
+        self.assertIsNone(self.user_cloud.get_server(self.server_name))
+
+    def test_get_image_id(self):
+        self.assertEqual(
+            self.image.id, self.user_cloud.get_image_id(self.image.id))
+        self.assertEqual(
+            self.image.id, self.user_cloud.get_image_id(self.image.name))
+
+    def test_get_image_name(self):
+        self.assertEqual(
+            self.image.name, self.user_cloud.get_image_name(self.image.id))
+        self.assertEqual(
+            self.image.name, self.user_cloud.get_image_name(self.image.name))
+
+    def _assert_volume_attach(self, server, volume_id=None, image=''):
+        self.assertEqual(self.server_name, server['name'])
+        self.assertEqual(image, server['image'])
+        self.assertEqual(self.flavor.id, server['flavor']['id'])
+        volumes = self.user_cloud.get_volumes(server)
+        self.assertEqual(1, len(volumes))
+        volume = volumes[0]
+        if volume_id:
+            self.assertEqual(volume_id, volume['id'])
+        else:
+            volume_id = volume['id']
+        self.assertEqual(1, len(volume['attachments']))
+        self.assertEqual(server['id'], volume['attachments'][0]['server_id'])
+        return volume_id
+
+    def test_create_boot_from_volume_image(self):
+        if not self.user_cloud.has_service('volume'):
+            self.skipTest('volume service not supported by cloud')
+        self.addCleanup(self._cleanup_servers_and_volumes, self.server_name)
+        server = self.user_cloud.create_server(
+            name=self.server_name,
+            image=self.image,
+            flavor=self.flavor,
+            boot_from_volume=True,
+            volume_size=1,
+            wait=True)
+        volume_id = self._assert_volume_attach(server)
+        volume = self.user_cloud.get_volume(volume_id)
+        self.assertIsNotNone(volume)
+        self.assertEqual(volume['name'], volume['display_name'])
+        self.assertTrue(volume['bootable'])
+        self.assertEqual(server['id'], volume['attachments'][0]['server_id'])
+        self.assertTrue(self.user_cloud.delete_server(server.id, wait=True))
+        self._wait_for_detach(volume.id)
+        self.assertTrue(self.user_cloud.delete_volume(volume.id, wait=True))
+        self.assertIsNone(self.user_cloud.get_server(server.id))
+        self.assertIsNone(self.user_cloud.get_volume(volume.id))
+
+    def _wait_for_detach(self, volume_id):
+        # Volumes do not show up as unattached for a bit immediately after
+        # deleting a server that had had a volume
attached. Yay for eventual + # consistency! + for count in _utils._iterate_timeout( + 60, + 'Timeout waiting for volume {volume_id} to detach'.format( + volume_id=volume_id)): + volume = self.user_cloud.get_volume(volume_id) + if volume.status in ( + 'available', 'error', + 'error_restoring', 'error_extending'): + return + + def test_create_terminate_volume_image(self): + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + boot_from_volume=True, + terminate_volume=True, + volume_size=1, + wait=True) + volume_id = self._assert_volume_attach(server) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True)) + volume = self.user_cloud.get_volume(volume_id) + # We can either get None (if the volume delete was quick), or a volume + # that is in the process of being deleted. + if volume: + self.assertEqual('deleting', volume.status) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + + def test_create_boot_from_volume_preexisting(self): + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + volume = self.user_cloud.create_volume( + size=1, name=self.server_name, image=self.image, wait=True) + self.addCleanup(self.user_cloud.delete_volume, volume.id) + server = self.user_cloud.create_server( + name=self.server_name, + image=None, + flavor=self.flavor, + boot_volume=volume, + volume_size=1, + wait=True) + volume_id = self._assert_volume_attach(server, volume_id=volume['id']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True)) + volume = self.user_cloud.get_volume(volume_id) + self.assertIsNotNone(volume) + self.assertEqual(volume['name'], volume['display_name']) + self.assertTrue(volume['bootable']) + self.assertEqual([], volume['attachments']) + self._wait_for_detach(volume.id) + self.assertTrue(self.user_cloud.delete_volume(volume_id)) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + self.assertIsNone(self.user_cloud.get_volume(volume_id)) + + def test_create_boot_attach_volume(self): + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + volume = self.user_cloud.create_volume( + size=1, name=self.server_name, image=self.image, wait=True) + self.addCleanup(self.user_cloud.delete_volume, volume['id']) + server = self.user_cloud.create_server( + name=self.server_name, + flavor=self.flavor, + image=self.image, + boot_from_volume=False, + volumes=[volume], + wait=True) + volume_id = self._assert_volume_attach( + server, volume_id=volume['id'], image={'id': self.image['id']}) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True)) + volume = self.user_cloud.get_volume(volume_id) + self.assertIsNotNone(volume) + self.assertEqual(volume['name'], volume['display_name']) + self.assertEqual([], volume['attachments']) + self._wait_for_detach(volume.id) + self.assertTrue(self.user_cloud.delete_volume(volume_id)) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + self.assertIsNone(self.user_cloud.get_volume(volume_id)) + + def test_create_boot_from_volume_preexisting_terminate(self): + if not 
self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + volume = self.user_cloud.create_volume( + size=1, name=self.server_name, image=self.image, wait=True) + server = self.user_cloud.create_server( + name=self.server_name, + image=None, + flavor=self.flavor, + boot_volume=volume, + terminate_volume=True, + volume_size=1, + wait=True) + volume_id = self._assert_volume_attach(server, volume_id=volume['id']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True)) + volume = self.user_cloud.get_volume(volume_id) + # We can either get None (if the volume delete was quick), or a volume + # that is in the process of being deleted. + if volume: + self.assertEqual('deleting', volume.status) + self.assertIsNone(self.user_cloud.get_server(self.server_name)) + + def test_create_image_snapshot_wait_active(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + admin_pass='sheiqu9loegahSh', + wait=True) + image = self.user_cloud.create_image_snapshot('test-snapshot', server, + wait=True) + self.addCleanup(self.user_cloud.delete_image, image['id']) + self.assertEqual('active', image['status']) + + def test_set_and_delete_metadata(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True) + self.user_cloud.set_server_metadata(self.server_name, + {'key1': 'value1', + 'key2': 'value2'}) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual(set(updated_server.metadata.items()), + set({'key1': 'value1', 'key2': 'value2'}.items())) + + self.user_cloud.set_server_metadata(self.server_name, + {'key2': 'value3'}) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual(set(updated_server.metadata.items()), + set({'key1': 'value1', 'key2': 'value3'}.items())) + + self.user_cloud.delete_server_metadata(self.server_name, ['key2']) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual(set(updated_server.metadata.items()), + set({'key1': 'value1'}.items())) + + self.user_cloud.delete_server_metadata(self.server_name, ['key1']) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual(set(updated_server.metadata.items()), set([])) + + self.assertRaises( + exc.OpenStackCloudURINotFound, + self.user_cloud.delete_server_metadata, + self.server_name, ['key1']) + + def test_update_server(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True) + server_updated = self.user_cloud.update_server( + self.server_name, + name='new_name' + ) + self.assertEqual('new_name', server_updated['name']) diff --git a/openstack/tests/functional/cloud/test_devstack.py b/openstack/tests/functional/cloud/test_devstack.py new file mode 100644 index 000000000..9aeda3606 --- /dev/null +++ b/openstack/tests/functional/cloud/test_devstack.py @@ -0,0 +1,53 @@ +# Copyright (c) 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_devstack +------------- + +Throw errors if we do not actually detect the services we're supposed to. +""" +import os + +from testscenarios import load_tests_apply_scenarios as load_tests # noqa + +from openstack.tests.functional import base + + +class TestDevstack(base.BaseFunctionalTestCase): + + scenarios = [ + ('designate', dict(env='DESIGNATE', service='dns')), + ('heat', dict(env='HEAT', service='orchestration')), + ('magnum', dict(env='MAGNUM', service='container-infra')), + ('neutron', dict(env='NEUTRON', service='network')), + ('swift', dict(env='SWIFT', service='object-store')), + ] + + def test_has_service(self): + if os.environ.get( + 'OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0') == '1': + self.assertTrue(self.user_cloud.has_service(self.service)) + + +class TestKeystoneVersion(base.BaseFunctionalTestCase): + + def test_keystone_version(self): + use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False) + if use_keystone_v2 and use_keystone_v2 != '0': + self.assertEqual('2.0', self.identity_version) + else: + self.assertEqual('3', self.identity_version) diff --git a/openstack/tests/functional/cloud/test_domain.py b/openstack/tests/functional/cloud/test_domain.py new file mode 100644 index 000000000..bcda7209d --- /dev/null +++ b/openstack/tests/functional/cloud/test_domain.py @@ -0,0 +1,125 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_domain +---------------------------------- + +Functional tests for `shade` keystone domain resource. 
+""" + +import openstack.cloud +from openstack.tests.functional import base + + +class TestDomain(base.BaseFunctionalTestCase): + + def setUp(self): + super(TestDomain, self).setUp() + i_ver = self.operator_cloud.cloud_config.get_api_version('identity') + if i_ver in ('2', '2.0'): + self.skipTest('Identity service does not support domains') + self.domain_prefix = self.getUniqueString('domain') + self.addCleanup(self._cleanup_domains) + + def _cleanup_domains(self): + exception_list = list() + for domain in self.operator_cloud.list_domains(): + if domain['name'].startswith(self.domain_prefix): + try: + self.operator_cloud.delete_domain(domain['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise openstack.OpenStackCloudException( + '\n'.join(exception_list)) + + def test_search_domains(self): + domain_name = self.domain_prefix + '_search' + + # Shouldn't find any domain with this name yet + results = self.operator_cloud.search_domains( + filters=dict(name=domain_name)) + self.assertEqual(0, len(results)) + + # Now create a new domain + domain = self.operator_cloud.create_domain(domain_name) + self.assertEqual(domain_name, domain['name']) + + # Now we should find only the new domain + results = self.operator_cloud.search_domains( + filters=dict(name=domain_name)) + self.assertEqual(1, len(results)) + self.assertEqual(domain_name, results[0]['name']) + + # Now we search by name with name_or_id, should find only new domain + results = self.operator_cloud.search_domains(name_or_id=domain_name) + self.assertEqual(1, len(results)) + self.assertEqual(domain_name, results[0]['name']) + + def test_update_domain(self): + domain = self.operator_cloud.create_domain( + self.domain_prefix, 'description') + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + updated = self.operator_cloud.update_domain( + domain['id'], name='updated name', + description='updated description', enabled=False) + self.assertEqual('updated name', updated['name']) + self.assertEqual('updated description', updated['description']) + self.assertFalse(updated['enabled']) + + # Now we update domain by name with name_or_id + updated = self.operator_cloud.update_domain( + None, + name_or_id='updated name', + name='updated name 2', + description='updated description 2', + enabled=True) + self.assertEqual('updated name 2', updated['name']) + self.assertEqual('updated description 2', updated['description']) + self.assertTrue(updated['enabled']) + + def test_delete_domain(self): + domain = self.operator_cloud.create_domain(self.domain_prefix, + 'description') + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + deleted = self.operator_cloud.delete_domain(domain['id']) + self.assertTrue(deleted) + + # Now we delete domain by name with name_or_id + domain = self.operator_cloud.create_domain( + self.domain_prefix, 'description') + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + deleted = self.operator_cloud.delete_domain(None, domain['name']) + self.assertTrue(deleted) + + # Finally, we assert we get False from delete_domain if domain does + # not exist + domain = self.operator_cloud.create_domain( + self.domain_prefix, 
'description') + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + deleted = self.operator_cloud.delete_domain(None, 'bogus_domain') + self.assertFalse(deleted) diff --git a/openstack/tests/functional/cloud/test_endpoints.py b/openstack/tests/functional/cloud/test_endpoints.py new file mode 100644 index 000000000..d053818e5 --- /dev/null +++ b/openstack/tests/functional/cloud/test_endpoints.py @@ -0,0 +1,198 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_endpoint +---------------------------------- + +Functional tests for `shade` endpoint resource. +""" + +import string +import random + +from openstack.cloud.exc import OpenStackCloudException +from openstack.cloud.exc import OpenStackCloudUnavailableFeature +from openstack.tests.functional import base + + +class TestEndpoints(base.KeystoneBaseFunctionalTestCase): + + endpoint_attributes = ['id', 'region', 'publicurl', 'internalurl', + 'service_id', 'adminurl'] + + def setUp(self): + super(TestEndpoints, self).setUp() + + # Generate a random name for services and regions in this test + self.new_item_name = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5)) + + self.addCleanup(self._cleanup_services) + self.addCleanup(self._cleanup_endpoints) + + def _cleanup_endpoints(self): + exception_list = list() + for e in self.operator_cloud.list_endpoints(): + if e.get('region') is not None and \ + e['region'].startswith(self.new_item_name): + try: + self.operator_cloud.delete_endpoint(id=e['id']) + except Exception as e: + # We were unable to delete a service, let's try with next + exception_list.append(str(e)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def _cleanup_services(self): + exception_list = list() + for s in self.operator_cloud.list_services(): + if s['name'] is not None and \ + s['name'].startswith(self.new_item_name): + try: + self.operator_cloud.delete_service(name_or_id=s['id']) + except Exception as e: + # We were unable to delete a service, let's try with next + exception_list.append(str(e)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_endpoint(self): + service_name = self.new_item_name + '_create' + + service = self.operator_cloud.create_service( + name=service_name, type='test_type', + description='this is a test description') + + endpoints = self.operator_cloud.create_endpoint( + service_name_or_id=service['id'], + public_url='http://public.test/', + internal_url='http://internal.test/', + admin_url='http://admin.url/', + region=service_name) + + self.assertNotEqual([], endpoints) + self.assertIsNotNone(endpoints[0].get('id')) + + # Test None parameters + 
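+        # (i.e. leave internal_url and admin_url unset so they default to
+        # None and the endpoint is created from public_url alone)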
+        endpoints = self.operator_cloud.create_endpoint(
+            service_name_or_id=service['id'],
+            public_url='http://public.test/',
+            region=service_name)
+
+        self.assertNotEqual([], endpoints)
+        self.assertIsNotNone(endpoints[0].get('id'))
+
+    def test_update_endpoint(self):
+        ver = self.operator_cloud.cloud_config.get_api_version('identity')
+        if ver.startswith('2'):
+            # NOTE(SamYaple): Update endpoint only works with v3 api
+            self.assertRaises(OpenStackCloudUnavailableFeature,
+                              self.operator_cloud.update_endpoint,
+                              'endpoint_id1')
+        else:
+            service = self.operator_cloud.create_service(
+                name='service1', type='test_type')
+            endpoint = self.operator_cloud.create_endpoint(
+                service_name_or_id=service['id'],
+                url='http://admin.url/',
+                interface='admin',
+                region='orig_region',
+                enabled=False)[0]
+
+            new_service = self.operator_cloud.create_service(
+                name='service2', type='test_type')
+            new_endpoint = self.operator_cloud.update_endpoint(
+                endpoint.id,
+                service_name_or_id=new_service.id,
+                url='http://public.url/',
+                interface='public',
+                region='update_region',
+                enabled=True)
+
+            self.assertEqual(new_endpoint.url, 'http://public.url/')
+            self.assertEqual(new_endpoint.interface, 'public')
+            self.assertEqual(new_endpoint.region, 'update_region')
+            self.assertEqual(new_endpoint.service_id, new_service.id)
+            self.assertTrue(new_endpoint.enabled)
+
+    def test_list_endpoints(self):
+        service_name = self.new_item_name + '_list'
+
+        service = self.operator_cloud.create_service(
+            name=service_name, type='test_type',
+            description='this is a test description')
+
+        endpoints = self.operator_cloud.create_endpoint(
+            service_name_or_id=service['id'],
+            public_url='http://public.test/',
+            internal_url='http://internal.test/',
+            region=service_name)
+
+        observed_endpoints = self.operator_cloud.list_endpoints()
+        found = False
+        for e in observed_endpoints:
+            # Test all attributes are returned
+            for endpoint in endpoints:
+                if e['id'] == endpoint['id']:
+                    found = True
+                    self.assertEqual(service['id'], e['service_id'])
+                    if 'interface' in e:
+                        if e['interface'] == 'internal':
+                            self.assertEqual('http://internal.test/', e['url'])
+                        elif e['interface'] == 'public':
+                            self.assertEqual('http://public.test/', e['url'])
+                    else:
+                        self.assertEqual('http://public.test/',
+                                         e['publicurl'])
+                        self.assertEqual('http://internal.test/',
+                                         e['internalurl'])
+                    self.assertEqual(service_name, e['region'])
+
+        self.assertTrue(found, msg='new endpoint not found in endpoints list!')
+
+    def test_delete_endpoint(self):
+        service_name = self.new_item_name + '_delete'
+
+        service = self.operator_cloud.create_service(
+            name=service_name, type='test_type',
+            description='this is a test description')
+
+        endpoints = self.operator_cloud.create_endpoint(
+            service_name_or_id=service['id'],
+            public_url='http://public.test/',
+            internal_url='http://internal.test/',
+            region=service_name)
+
+        self.assertNotEqual([], endpoints)
+        for endpoint in endpoints:
+            self.operator_cloud.delete_endpoint(endpoint['id'])
+
+        observed_endpoints = self.operator_cloud.list_endpoints()
+        found = False
+        for e in observed_endpoints:
+            for endpoint in endpoints:
+                if e['id'] == endpoint['id']:
+                    found = True
+                    break
+        self.assertFalse(found, 'new endpoint was not deleted!')
diff --git a/openstack/tests/functional/cloud/test_flavor.py b/openstack/tests/functional/cloud/test_flavor.py
new file mode 100644
index 000000000..742117582
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_flavor.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2015
Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_flavor +---------------------------------- + +Functional tests for `shade` flavor resource. +""" + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.functional import base + + +class TestFlavor(base.BaseFunctionalTestCase): + + def setUp(self): + super(TestFlavor, self).setUp() + + # Generate a random name for flavors in this test + self.new_item_name = self.getUniqueString('flavor') + + self.addCleanup(self._cleanup_flavors) + + def _cleanup_flavors(self): + exception_list = list() + for f in self.operator_cloud.list_flavors(get_extra=False): + if f['name'].startswith(self.new_item_name): + try: + self.operator_cloud.delete_flavor(f['id']) + except Exception as e: + # We were unable to delete a flavor, let's try with next + exception_list.append(str(e)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_flavor(self): + flavor_name = self.new_item_name + '_create' + flavor_kwargs = dict( + name=flavor_name, ram=1024, vcpus=2, disk=10, ephemeral=5, + swap=100, rxtx_factor=1.5, is_public=True + ) + + flavor = self.operator_cloud.create_flavor(**flavor_kwargs) + + self.assertIsNotNone(flavor['id']) + + # When properly normalized, we should always get an extra_specs + # and expect empty dict on create. + self.assertIn('extra_specs', flavor) + self.assertEqual({}, flavor['extra_specs']) + + # We should also always have ephemeral and public attributes + self.assertIn('ephemeral', flavor) + self.assertIn('OS-FLV-EXT-DATA:ephemeral', flavor) + self.assertEqual(5, flavor['ephemeral']) + self.assertIn('is_public', flavor) + self.assertIn('os-flavor-access:is_public', flavor) + self.assertTrue(flavor['is_public']) + + for key in flavor_kwargs.keys(): + self.assertIn(key, flavor) + for key, value in flavor_kwargs.items(): + self.assertEqual(value, flavor[key]) + + def test_list_flavors(self): + pub_flavor_name = self.new_item_name + '_public' + priv_flavor_name = self.new_item_name + '_private' + public_kwargs = dict( + name=pub_flavor_name, ram=1024, vcpus=2, disk=10, is_public=True + ) + private_kwargs = dict( + name=priv_flavor_name, ram=1024, vcpus=2, disk=10, is_public=False + ) + + # Create a public and private flavor. We expect both to be listed + # for an operator. + self.operator_cloud.create_flavor(**public_kwargs) + self.operator_cloud.create_flavor(**private_kwargs) + + flavors = self.operator_cloud.list_flavors(get_extra=False) + + # Flavor list will include the standard devstack flavors. We just want + # to make sure both of the flavors we just created are present. 
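+        # (Note that extra_specs is asserted on every listed flavor, not
+        # just the two created above, since list_flavors() is expected to
+        # normalize all of them even with get_extra=False.)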
+ found = [] + for f in flavors: + # extra_specs should be added within list_flavors() + self.assertIn('extra_specs', f) + if f['name'] in (pub_flavor_name, priv_flavor_name): + found.append(f) + self.assertEqual(2, len(found)) + + def test_flavor_access(self): + priv_flavor_name = self.new_item_name + '_private' + private_kwargs = dict( + name=priv_flavor_name, ram=1024, vcpus=2, disk=10, is_public=False + ) + new_flavor = self.operator_cloud.create_flavor(**private_kwargs) + + # Validate the 'demo' user cannot see the new flavor + flavors = self.user_cloud.search_flavors(priv_flavor_name) + self.assertEqual(0, len(flavors)) + + # We need the tenant ID for the 'demo' user + project = self.operator_cloud.get_project('demo') + self.assertIsNotNone(project) + + # Now give 'demo' access + self.operator_cloud.add_flavor_access(new_flavor['id'], project['id']) + + # Now see if the 'demo' user has access to it + flavors = self.user_cloud.search_flavors(priv_flavor_name) + self.assertEqual(1, len(flavors)) + self.assertEqual(priv_flavor_name, flavors[0]['name']) + + # Now see if the 'demo' user has access to it without needing + # the demo_cloud access. + acls = self.operator_cloud.list_flavor_access(new_flavor['id']) + self.assertEqual(1, len(acls)) + self.assertEqual(project['id'], acls[0]['project_id']) + + # Now revoke the access and make sure we can't find it + self.operator_cloud.remove_flavor_access(new_flavor['id'], + project['id']) + flavors = self.user_cloud.search_flavors(priv_flavor_name) + self.assertEqual(0, len(flavors)) + + def test_set_unset_flavor_specs(self): + """ + Test setting and unsetting flavor extra specs + """ + flavor_name = self.new_item_name + '_spec_test' + kwargs = dict( + name=flavor_name, ram=1024, vcpus=2, disk=10 + ) + new_flavor = self.operator_cloud.create_flavor(**kwargs) + + # Expect no extra_specs + self.assertEqual({}, new_flavor['extra_specs']) + + # Now set them + extra_specs = {'foo': 'aaa', 'bar': 'bbb'} + self.operator_cloud.set_flavor_specs(new_flavor['id'], extra_specs) + mod_flavor = self.operator_cloud.get_flavor(new_flavor['id']) + + # Verify extra_specs were set + self.assertIn('extra_specs', mod_flavor) + self.assertEqual(extra_specs, mod_flavor['extra_specs']) + + # Unset the 'foo' value + self.operator_cloud.unset_flavor_specs(mod_flavor['id'], ['foo']) + mod_flavor = self.operator_cloud.get_flavor_by_id(new_flavor['id']) + + # Verify 'foo' is unset and 'bar' is still set + self.assertEqual({'bar': 'bbb'}, mod_flavor['extra_specs']) diff --git a/openstack/tests/functional/cloud/test_floating_ip.py b/openstack/tests/functional/cloud/test_floating_ip.py new file mode 100644 index 000000000..f2d2fc773 --- /dev/null +++ b/openstack/tests/functional/cloud/test_floating_ip.py @@ -0,0 +1,284 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_floating_ip +---------------------------------- + +Functional tests for floating IP resource. 
+""" + +import pprint + +from testtools import content + +from openstack.cloud import _utils +from openstack.cloud import meta +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.functional import base +from openstack.tests.functional.util import pick_flavor + + +class TestFloatingIP(base.BaseFunctionalTestCase): + timeout = 60 + + def setUp(self): + super(TestFloatingIP, self).setUp() + self.flavor = pick_flavor( + self.user_cloud.list_flavors(get_extra=False)) + if self.flavor is None: + self.assertFalse('no sensible flavor available') + self.image = self.pick_image() + + # Generate a random name for these tests + self.new_item_name = self.getUniqueString() + + self.addCleanup(self._cleanup_network) + self.addCleanup(self._cleanup_servers) + + def _cleanup_network(self): + exception_list = list() + + # Delete stale networks as well as networks created for this test + if self.user_cloud.has_service('network'): + # Delete routers + for r in self.user_cloud.list_routers(): + try: + if r['name'].startswith(self.new_item_name): + self.user_cloud.update_router( + r['id'], ext_gateway_net_id=None) + for s in self.user_cloud.list_subnets(): + if s['name'].startswith(self.new_item_name): + try: + self.user_cloud.remove_router_interface( + r, subnet_id=s['id']) + except Exception: + pass + self.user_cloud.delete_router(name_or_id=r['id']) + except Exception as e: + exception_list.append(str(e)) + continue + # Delete subnets + for s in self.user_cloud.list_subnets(): + if s['name'].startswith(self.new_item_name): + try: + self.user_cloud.delete_subnet(name_or_id=s['id']) + except Exception as e: + exception_list.append(str(e)) + continue + # Delete networks + for n in self.user_cloud.list_networks(): + if n['name'].startswith(self.new_item_name): + try: + self.user_cloud.delete_network(name_or_id=n['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def _cleanup_servers(self): + exception_list = list() + + # Delete stale servers as well as server created for this test + for i in self.user_cloud.list_servers(bare=True): + if i.name.startswith(self.new_item_name): + try: + self.user_cloud.delete_server(i, wait=True) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def _cleanup_ips(self, server): + + exception_list = list() + + fixed_ip = meta.get_server_private_ip(server) + + for ip in self.user_cloud.list_floating_ips(): + if (ip.get('fixed_ip', None) == fixed_ip + or ip.get('fixed_ip_address', None) == fixed_ip): + try: + self.user_cloud.delete_floating_ip(ip['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def _setup_networks(self): + if self.user_cloud.has_service('network'): + # Create a network + self.test_net = self.user_cloud.create_network( + name=self.new_item_name + '_net') + # Create a subnet on it + self.test_subnet = self.user_cloud.create_subnet( + subnet_name=self.new_item_name + '_subnet', + network_name_or_id=self.test_net['id'], + cidr='10.24.4.0/24', + enable_dhcp=True + ) + # Create a router + self.test_router = 
self.user_cloud.create_router( + name=self.new_item_name + '_router') + # Attach the router to an external network + ext_nets = self.user_cloud.search_networks( + filters={'router:external': True}) + self.user_cloud.update_router( + name_or_id=self.test_router['id'], + ext_gateway_net_id=ext_nets[0]['id']) + # Attach the router to the internal subnet + self.user_cloud.add_router_interface( + self.test_router, subnet_id=self.test_subnet['id']) + + # Select the network for creating new servers + self.nic = {'net-id': self.test_net['id']} + self.addDetail( + 'networks-neutron', + content.text_content(pprint.pformat( + self.user_cloud.list_networks()))) + else: + # Find network names for nova-net + data = self.user_cloud._compute_client.get('/os-tenant-networks') + nets = meta.get_and_munchify('networks', data) + self.addDetail( + 'networks-nova', + content.text_content(pprint.pformat( + nets))) + self.nic = {'net-id': nets[0].id} + + def test_private_ip(self): + self._setup_networks() + + new_server = self.user_cloud.get_openstack_vars( + self.user_cloud.create_server( + wait=True, name=self.new_item_name + '_server', + image=self.image, + flavor=self.flavor, nics=[self.nic])) + + self.addDetail( + 'server', content.text_content(pprint.pformat(new_server))) + self.assertNotEqual(new_server['private_v4'], '') + + def test_add_auto_ip(self): + self._setup_networks() + + new_server = self.user_cloud.create_server( + wait=True, name=self.new_item_name + '_server', + image=self.image, + flavor=self.flavor, nics=[self.nic]) + + # ToDo: remove the following iteration when create_server waits for + # the IP to be attached + ip = None + for _ in _utils._iterate_timeout( + self.timeout, "Timeout waiting for IP address to be attached"): + ip = meta.get_server_external_ipv4(self.user_cloud, new_server) + if ip is not None: + break + new_server = self.user_cloud.get_server(new_server.id) + + self.addCleanup(self._cleanup_ips, new_server) + + def test_detach_ip_from_server(self): + self._setup_networks() + + new_server = self.user_cloud.create_server( + wait=True, name=self.new_item_name + '_server', + image=self.image, + flavor=self.flavor, nics=[self.nic]) + + # ToDo: remove the following iteration when create_server waits for + # the IP to be attached + ip = None + for _ in _utils._iterate_timeout( + self.timeout, "Timeout waiting for IP address to be attached"): + ip = meta.get_server_external_ipv4(self.user_cloud, new_server) + if ip is not None: + break + new_server = self.user_cloud.get_server(new_server.id) + + self.addCleanup(self._cleanup_ips, new_server) + + f_ip = self.user_cloud.get_floating_ip( + id=None, filters={'floating_ip_address': ip}) + self.user_cloud.detach_ip_from_server( + server_id=new_server.id, floating_ip_id=f_ip['id']) + + def test_list_floating_ips(self): + fip_admin = self.operator_cloud.create_floating_ip() + self.addCleanup(self.operator_cloud.delete_floating_ip, fip_admin.id) + fip_user = self.user_cloud.create_floating_ip() + self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) + + # Get all the floating ips. + fip_id_list = [ + fip.id for fip in self.operator_cloud.list_floating_ips() + ] + if self.user_cloud.has_service('network'): + # Neutron returns all FIP for all projects by default + self.assertIn(fip_admin.id, fip_id_list) + self.assertIn(fip_user.id, fip_id_list) + + # Ask Neutron for only a subset of all the FIPs. 
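+            # (The filters dict below is passed through to the Neutron
+            # listing call, so the tenant_id scoping happens server-side
+            # rather than by filtering the full list in this process.)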
+ filtered_fip_id_list = [ + fip.id for fip in self.operator_cloud.list_floating_ips( + {'tenant_id': self.user_cloud.current_project_id} + ) + ] + self.assertNotIn(fip_admin.id, filtered_fip_id_list) + self.assertIn(fip_user.id, filtered_fip_id_list) + + else: + self.assertIn(fip_admin.id, fip_id_list) + # By default, Nova returns only the FIPs that belong to the + # project which made the listing request. + self.assertNotIn(fip_user.id, fip_id_list) + self.assertRaisesRegex( + ValueError, "Nova-network don't support server-side.*", + self.operator_cloud.list_floating_ips, filters={'foo': 'bar'} + ) + + def test_search_floating_ips(self): + fip_user = self.user_cloud.create_floating_ip() + self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) + + self.assertIn( + fip_user['id'], + [fip.id for fip in self.user_cloud.search_floating_ips( + filters={"attached": False})] + ) + self.assertNotIn( + fip_user['id'], + [fip.id for fip in self.user_cloud.search_floating_ips( + filters={"attached": True})] + ) + + def test_get_floating_ip_by_id(self): + fip_user = self.user_cloud.create_floating_ip() + self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) + + ret_fip = self.user_cloud.get_floating_ip_by_id(fip_user.id) + self.assertEqual(fip_user, ret_fip) diff --git a/openstack/tests/functional/cloud/test_floating_ip_pool.py b/openstack/tests/functional/cloud/test_floating_ip_pool.py new file mode 100644 index 000000000..38935d08e --- /dev/null +++ b/openstack/tests/functional/cloud/test_floating_ip_pool.py @@ -0,0 +1,50 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_floating_ip_pool +---------------------------------- + +Functional tests for floating IP pool resource (managed by nova) +""" + +from openstack.tests.functional import base + + +# When using nova-network, floating IP pools are created with nova-manage +# command. +# When using Neutron, floating IP pools in Nova are mapped from external +# network names. This only if the floating-ip-pools nova extension is +# available. +# For instance, for current implementation of hpcloud that's not true: +# nova floating-ip-pool-list returns 404. 
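+# A minimal sketch of the guard this implies for callers (assuming the
+# same private _has_nova_extension helper the test below relies on; the
+# function name here is hypothetical):
+#
+#     def list_pools_if_supported(cloud):
+#         # Treat "extension missing" as "no pools" instead of letting
+#         # the underlying call surface a 404.
+#         if not cloud._has_nova_extension('os-floating-ip-pools'):
+#             return []
+#         return cloud.list_floating_ip_pools()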
+
+
+class TestFloatingIPPool(base.BaseFunctionalTestCase):
+    def setUp(self):
+        super(TestFloatingIPPool, self).setUp()
+
+        if not self.user_cloud._has_nova_extension('os-floating-ip-pools'):
+            # Skip this test if the floating-ip-pools extension is not
+            # available on the testing cloud
+            self.skipTest(
+                'Floating IP pools extension is not available')
+
+    def test_list_floating_ip_pools(self):
+        pools = self.user_cloud.list_floating_ip_pools()
+        if not pools:
+            self.fail('no floating-ip pool available')
+
+        for pool in pools:
+            self.assertIn('name', pool)
diff --git a/openstack/tests/functional/cloud/test_groups.py b/openstack/tests/functional/cloud/test_groups.py
new file mode 100644
index 000000000..df84c9662
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_groups.py
@@ -0,0 +1,109 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+test_groups
+----------------------------------
+
+Functional tests for `shade` keystone group resource.
+"""
+
+import openstack.cloud
+from openstack.tests.functional import base
+
+
+class TestGroup(base.BaseFunctionalTestCase):
+
+    def setUp(self):
+        super(TestGroup, self).setUp()
+        i_ver = self.operator_cloud.cloud_config.get_api_version('identity')
+        if i_ver in ('2', '2.0'):
+            self.skipTest('Identity service does not support groups')
+        self.group_prefix = self.getUniqueString('group')
+        self.addCleanup(self._cleanup_groups)
+
+    def _cleanup_groups(self):
+        exception_list = list()
+        for group in self.operator_cloud.list_groups():
+            if group['name'].startswith(self.group_prefix):
+                try:
+                    self.operator_cloud.delete_group(group['id'])
+                except Exception as e:
+                    exception_list.append(str(e))
+                    continue
+
+        if exception_list:
+            # Raise an error: we must make users aware that something went
+            # wrong
+            raise openstack.OpenStackCloudException(
+                '\n'.join(exception_list))
+
+    def test_create_group(self):
+        group_name = self.group_prefix + '_create'
+        group = self.operator_cloud.create_group(group_name, 'test group')
+
+        for key in ('id', 'name', 'description', 'domain_id'):
+            self.assertIn(key, group)
+        self.assertEqual(group_name, group['name'])
+        self.assertEqual('test group', group['description'])
+
+    def test_delete_group(self):
+        group_name = self.group_prefix + '_delete'
+
+        group = self.operator_cloud.create_group(group_name, 'test group')
+        self.assertIsNotNone(group)
+
+        self.assertTrue(self.operator_cloud.delete_group(group_name))
+
+        results = self.operator_cloud.search_groups(
+            filters=dict(name=group_name))
+        self.assertEqual(0, len(results))
+
+    def test_delete_group_not_exists(self):
+        self.assertFalse(self.operator_cloud.delete_group('xInvalidGroupx'))
+
+    def test_search_groups(self):
+        group_name = self.group_prefix + '_search'
+
+        # Shouldn't find any group with this name yet
+        results = self.operator_cloud.search_groups(
+            filters=dict(name=group_name))
+        self.assertEqual(0, len(results))
+
+        # Now create a new group
+        group = self.operator_cloud.create_group(group_name, 'test group')
+        self.assertEqual(group_name,
group['name']) + + # Now we should find only the new group + results = self.operator_cloud.search_groups( + filters=dict(name=group_name)) + self.assertEqual(1, len(results)) + self.assertEqual(group_name, results[0]['name']) + + def test_update_group(self): + group_name = self.group_prefix + '_update' + group_desc = 'test group' + + group = self.operator_cloud.create_group(group_name, group_desc) + self.assertEqual(group_name, group['name']) + self.assertEqual(group_desc, group['description']) + + updated_group_name = group_name + '_xyz' + updated_group_desc = group_desc + ' updated' + updated_group = self.operator_cloud.update_group( + group_name, + name=updated_group_name, + description=updated_group_desc) + self.assertEqual(updated_group_name, updated_group['name']) + self.assertEqual(updated_group_desc, updated_group['description']) diff --git a/openstack/tests/functional/cloud/test_identity.py b/openstack/tests/functional/cloud/test_identity.py new file mode 100644 index 000000000..d61463c3c --- /dev/null +++ b/openstack/tests/functional/cloud/test_identity.py @@ -0,0 +1,250 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_identity +---------------------------------- + +Functional tests for `shade` identity methods. +""" + +import random +import string + +from openstack import OpenStackCloudException +from openstack.tests.functional import base + + +class TestIdentity(base.KeystoneBaseFunctionalTestCase): + def setUp(self): + super(TestIdentity, self).setUp() + self.role_prefix = 'test_role' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5)) + self.user_prefix = self.getUniqueString('user') + self.group_prefix = self.getUniqueString('group') + + self.addCleanup(self._cleanup_users) + if self.identity_version not in ('2', '2.0'): + self.addCleanup(self._cleanup_groups) + self.addCleanup(self._cleanup_roles) + + def _cleanup_groups(self): + exception_list = list() + for group in self.operator_cloud.list_groups(): + if group['name'].startswith(self.group_prefix): + try: + self.operator_cloud.delete_group(group['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def _cleanup_users(self): + exception_list = list() + for user in self.operator_cloud.list_users(): + if user['name'].startswith(self.user_prefix): + try: + self.operator_cloud.delete_user(user['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def _cleanup_roles(self): + exception_list = list() + for role in self.operator_cloud.list_roles(): + if role['name'].startswith(self.role_prefix): + try: + self.operator_cloud.delete_role(role['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def 
_create_user(self, **kwargs): + domain_id = None + if self.identity_version not in ('2', '2.0'): + domain = self.operator_cloud.get_domain('default') + domain_id = domain['id'] + return self.operator_cloud.create_user(domain_id=domain_id, **kwargs) + + def test_list_roles(self): + roles = self.operator_cloud.list_roles() + self.assertIsNotNone(roles) + self.assertNotEqual([], roles) + + def test_get_role(self): + role = self.operator_cloud.get_role('admin') + self.assertIsNotNone(role) + self.assertIn('id', role) + self.assertIn('name', role) + self.assertEqual('admin', role['name']) + + def test_search_roles(self): + roles = self.operator_cloud.search_roles(filters={'name': 'admin'}) + self.assertIsNotNone(roles) + self.assertEqual(1, len(roles)) + self.assertEqual('admin', roles[0]['name']) + + def test_create_role(self): + role_name = self.role_prefix + '_create_role' + role = self.operator_cloud.create_role(role_name) + self.assertIsNotNone(role) + self.assertIn('id', role) + self.assertIn('name', role) + self.assertEqual(role_name, role['name']) + + def test_delete_role(self): + role_name = self.role_prefix + '_delete_role' + role = self.operator_cloud.create_role(role_name) + self.assertIsNotNone(role) + self.assertTrue(self.operator_cloud.delete_role(role_name)) + + # TODO(Shrews): Once we can support assigning roles within shade, we + # need to make this test a little more specific, and add more for testing + # filtering functionality. + def test_list_role_assignments(self): + if self.identity_version in ('2', '2.0'): + self.skipTest("Identity service does not support role assignments") + assignments = self.operator_cloud.list_role_assignments() + self.assertIsInstance(assignments, list) + self.assertGreater(len(assignments), 0) + + def test_list_role_assignments_v2(self): + user = self.operator_cloud.get_user('demo') + project = self.operator_cloud.get_project('demo') + assignments = self.operator_cloud.list_role_assignments( + filters={'user': user['id'], 'project': project['id']}) + self.assertIsInstance(assignments, list) + self.assertGreater(len(assignments), 0) + + def test_grant_revoke_role_user_project(self): + user_name = self.user_prefix + '_user_project' + user_email = 'nobody@nowhere.com' + role_name = self.role_prefix + '_grant_user_project' + role = self.operator_cloud.create_role(role_name) + user = self._create_user(name=user_name, + email=user_email, + default_project='demo') + self.assertTrue(self.operator_cloud.grant_role( + role_name, user=user['id'], project='demo', wait=True)) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'user': user['id'], + 'project': self.operator_cloud.get_project('demo')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue(self.operator_cloud.revoke_role( + role_name, user=user['id'], project='demo', wait=True)) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'user': user['id'], + 'project': self.operator_cloud.get_project('demo')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_group_project(self): + if self.identity_version in ('2', '2.0'): + self.skipTest("Identity service does not support group") + role_name = self.role_prefix + '_grant_group_project' + role = self.operator_cloud.create_role(role_name) + group_name = self.group_prefix + '_group_project' + group = self.operator_cloud.create_group( + name=group_name, + 
description='test group', + domain='default') + self.assertTrue(self.operator_cloud.grant_role( + role_name, group=group['id'], project='demo')) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'group': group['id'], + 'project': self.operator_cloud.get_project('demo')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue(self.operator_cloud.revoke_role( + role_name, group=group['id'], project='demo')) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'group': group['id'], + 'project': self.operator_cloud.get_project('demo')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_user_domain(self): + if self.identity_version in ('2', '2.0'): + self.skipTest("Identity service does not support domain") + role_name = self.role_prefix + '_grant_user_domain' + role = self.operator_cloud.create_role(role_name) + user_name = self.user_prefix + '_user_domain' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, + email=user_email, + default_project='demo') + self.assertTrue(self.operator_cloud.grant_role( + role_name, user=user['id'], domain='default')) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'user': user['id'], + 'domain': self.operator_cloud.get_domain('default')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue(self.operator_cloud.revoke_role( + role_name, user=user['id'], domain='default')) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'user': user['id'], + 'domain': self.operator_cloud.get_domain('default')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_group_domain(self): + if self.identity_version in ('2', '2.0'): + self.skipTest("Identity service does not support domain or group") + role_name = self.role_prefix + '_grant_group_domain' + role = self.operator_cloud.create_role(role_name) + group_name = self.group_prefix + '_group_domain' + group = self.operator_cloud.create_group( + name=group_name, + description='test group', + domain='default') + self.assertTrue(self.operator_cloud.grant_role( + role_name, group=group['id'], domain='default')) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'group': group['id'], + 'domain': self.operator_cloud.get_domain('default')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue(self.operator_cloud.revoke_role( + role_name, group=group['id'], domain='default')) + assignments = self.operator_cloud.list_role_assignments({ + 'role': role['id'], + 'group': group['id'], + 'domain': self.operator_cloud.get_domain('default')['id'] + }) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) diff --git a/openstack/tests/functional/cloud/test_image.py b/openstack/tests/functional/cloud/test_image.py new file mode 100644 index 000000000..fdd41ad48 --- /dev/null +++ b/openstack/tests/functional/cloud/test_image.py @@ -0,0 +1,170 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_compute +---------------------------------- + +Functional tests for `shade` image methods. +""" + +import filecmp +import os +import tempfile + +from openstack.tests.functional import base + + +class TestImage(base.BaseFunctionalTestCase): + def setUp(self): + super(TestImage, self).setUp() + self.image = self.pick_image() + + def test_create_image(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True) + finally: + self.user_cloud.delete_image(image_name, wait=True) + + def test_download_image(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + self.addCleanup(os.remove, test_image.name) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True) + self.addCleanup(self.user_cloud.delete_image, image_name, wait=True) + output = os.path.join(tempfile.gettempdir(), self.getUniqueString()) + self.user_cloud.download_image(image_name, output) + self.addCleanup(os.remove, output) + self.assertTrue(filecmp.cmp(test_image.name, output), + "Downloaded contents don't match created image") + + def test_create_image_skip_duplicate(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + first_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True) + second_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True) + self.assertEqual(first_image.id, second_image.id) + finally: + self.user_cloud.delete_image(image_name, wait=True) + + def test_create_image_force_duplicate(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + first_image = None + second_image = None + try: + first_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True) + second_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + allow_duplicates=True, + wait=True) + self.assertNotEqual(first_image.id, second_image.id) + finally: + if first_image: + self.user_cloud.delete_image(first_image.id, wait=True) + if second_image: + self.user_cloud.delete_image(second_image.id, wait=True) + + def 
test_create_image_update_properties(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True) + self.user_cloud.update_image_properties( + image=image, + name=image_name, + foo='bar') + image = self.user_cloud.get_image(image_name) + self.assertIn('foo', image.properties) + self.assertEqual(image.properties['foo'], 'bar') + finally: + self.user_cloud.delete_image(image_name, wait=True) + + def test_get_image_by_id(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True) + image = self.user_cloud.get_image_by_id(image.id) + self.assertEqual(image_name, image.name) + self.assertEqual('raw', image.disk_format) + finally: + self.user_cloud.delete_image(image_name, wait=True) diff --git a/openstack/tests/functional/cloud/test_inventory.py b/openstack/tests/functional/cloud/test_inventory.py new file mode 100644 index 000000000..477d80a70 --- /dev/null +++ b/openstack/tests/functional/cloud/test_inventory.py @@ -0,0 +1,93 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_inventory +---------------------------------- + +Functional tests for `shade` inventory methods. +""" + +from openstack.cloud import inventory + +from openstack.tests.functional import base +from openstack.tests.functional.util import pick_flavor + + +class TestInventory(base.BaseFunctionalTestCase): + def setUp(self): + super(TestInventory, self).setUp() + # This needs to use an admin account, otherwise a public IP + # is not allocated from devstack. 
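+        # (That is why the server below is created with operator_cloud
+        # rather than user_cloud.)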
+ self.inventory = inventory.OpenStackInventory() + self.server_name = self.getUniqueString('inventory') + self.flavor = pick_flavor( + self.user_cloud.list_flavors(get_extra=False)) + if self.flavor is None: + self.assertTrue(False, 'no sensible flavor available') + self.image = self.pick_image() + self.addCleanup(self._cleanup_server) + server = self.operator_cloud.create_server( + name=self.server_name, image=self.image, flavor=self.flavor, + wait=True, auto_ip=True) + self.server_id = server['id'] + + def _cleanup_server(self): + self.user_cloud.delete_server(self.server_id, wait=True) + + def _test_host_content(self, host): + self.assertEqual(host['image']['id'], self.image.id) + self.assertNotIn('links', host['image']) + self.assertEqual(host['flavor']['id'], self.flavor.id) + self.assertNotIn('links', host['flavor']) + self.assertNotIn('links', host) + self.assertIsInstance(host['volumes'], list) + self.assertIsInstance(host['metadata'], dict) + self.assertIn('interface_ip', host) + + def _test_expanded_host_content(self, host): + self.assertEqual(host['image']['name'], self.image.name) + self.assertEqual(host['flavor']['name'], self.flavor.name) + + def test_get_host(self): + host = self.inventory.get_host(self.server_id) + self.assertIsNotNone(host) + self.assertEqual(host['name'], self.server_name) + self._test_host_content(host) + self._test_expanded_host_content(host) + host_found = False + for host in self.inventory.list_hosts(): + if host['id'] == self.server_id: + host_found = True + self._test_host_content(host) + self.assertTrue(host_found) + + def test_get_host_no_detail(self): + host = self.inventory.get_host(self.server_id, expand=False) + self.assertIsNotNone(host) + self.assertEqual(host['name'], self.server_name) + + self.assertEqual(host['image']['id'], self.image.id) + self.assertNotIn('links', host['image']) + self.assertNotIn('name', host['name']) + self.assertEqual(host['flavor']['id'], self.flavor.id) + self.assertNotIn('links', host['flavor']) + self.assertNotIn('name', host['flavor']) + + host_found = False + for host in self.inventory.list_hosts(expand=False): + if host['id'] == self.server_id: + host_found = True + self._test_host_content(host) + self.assertTrue(host_found) diff --git a/openstack/tests/functional/cloud/test_keypairs.py b/openstack/tests/functional/cloud/test_keypairs.py new file mode 100644 index 000000000..98d591aed --- /dev/null +++ b/openstack/tests/functional/cloud/test_keypairs.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+"""
+test_keypairs
+----------------------------------
+
+Functional tests for `shade` keypairs methods.
+"""
+from openstack.tests import fakes
+from openstack.tests.functional import base
+
+
+class TestKeypairs(base.BaseFunctionalTestCase):
+
+    def test_create_and_delete(self):
+        '''Test creating and deleting a generated keypair'''
+        name = self.getUniqueString('keypair')
+        self.addCleanup(self.user_cloud.delete_keypair, name)
+        keypair = self.user_cloud.create_keypair(name=name)
+        self.assertEqual(keypair['name'], name)
+        self.assertIsNotNone(keypair['public_key'])
+        self.assertIsNotNone(keypair['private_key'])
+        self.assertIsNotNone(keypair['fingerprint'])
+        self.assertEqual(keypair['type'], 'ssh')
+
+        keypairs = self.user_cloud.list_keypairs()
+        self.assertIn(name, [k['name'] for k in keypairs])
+
+        self.user_cloud.delete_keypair(name)
+
+        keypairs = self.user_cloud.list_keypairs()
+        self.assertNotIn(name, [k['name'] for k in keypairs])
+
+    def test_create_and_delete_with_key(self):
+        '''Test creating and deleting a keypair from an existing public key'''
+        name = self.getUniqueString('keypair')
+        self.addCleanup(self.user_cloud.delete_keypair, name)
+        keypair = self.user_cloud.create_keypair(
+            name=name, public_key=fakes.FAKE_PUBLIC_KEY)
+        self.assertEqual(keypair['name'], name)
+        self.assertIsNotNone(keypair['public_key'])
+        self.assertIsNone(keypair['private_key'])
+        self.assertIsNotNone(keypair['fingerprint'])
+        self.assertEqual(keypair['type'], 'ssh')
+
+        keypairs = self.user_cloud.list_keypairs()
+        self.assertIn(name, [k['name'] for k in keypairs])
+
+        self.user_cloud.delete_keypair(name)
+
+        keypairs = self.user_cloud.list_keypairs()
+        self.assertNotIn(name, [k['name'] for k in keypairs])
diff --git a/openstack/tests/functional/cloud/test_limits.py b/openstack/tests/functional/cloud/test_limits.py
new file mode 100644
index 000000000..b5b7e7d5d
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_limits.py
@@ -0,0 +1,40 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
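+#
+# Note: compute limits are the effective per-project ceilings derived from
+# quotas, which is why these tests only read them; changing quotas is
+# covered separately in test_quotas.py.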
+
+"""
+test_limits
+----------------------------------
+
+Functional tests for `shade` limits methods.
+"""
+from openstack.tests.functional import base
+
+
+class TestUsage(base.BaseFunctionalTestCase):
+
+    def test_get_our_limits(self):
+        '''Test limits functionality'''
+        limits = self.user_cloud.get_compute_limits()
+        self.assertIsNotNone(limits)
+        self.assertTrue(hasattr(limits, 'max_server_meta'))
+
+        # Test normalize limits
+        self.assertFalse(hasattr(limits, 'maxImageMeta'))
+
+    def test_get_other_limits(self):
+        '''Test limits functionality'''
+        limits = self.operator_cloud.get_compute_limits('demo')
+        self.assertIsNotNone(limits)
+        self.assertTrue(hasattr(limits, 'max_server_meta'))
+
+        # Test normalize limits
+        self.assertFalse(hasattr(limits, 'maxImageMeta'))
diff --git a/openstack/tests/functional/cloud/test_magnum_services.py b/openstack/tests/functional/cloud/test_magnum_services.py
new file mode 100644
index 000000000..914f62f21
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_magnum_services.py
@@ -0,0 +1,40 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_magnum_services
+--------------------
+
+Functional tests for `shade` magnum services methods.
+"""
+
+from openstack.tests.functional import base
+
+
+class TestMagnumServices(base.BaseFunctionalTestCase):
+
+    def setUp(self):
+        super(TestMagnumServices, self).setUp()
+        if not self.operator_cloud.has_service('container-infra'):
+            self.skipTest('Container service not supported by cloud')
+
+    def test_magnum_services(self):
+        '''Test magnum services functionality'''
+
+        # Test that we can list services
+        services = self.operator_cloud.list_magnum_services()
+
+        self.assertEqual(1, len(services))
+        self.assertEqual(services[0]['id'], 1)
+        self.assertEqual('up', services[0]['state'])
+        self.assertEqual('magnum-conductor', services[0]['binary'])
+        self.assertGreater(services[0]['report_count'], 0)
diff --git a/openstack/tests/functional/cloud/test_network.py b/openstack/tests/functional/cloud/test_network.py
new file mode 100644
index 000000000..361d7e999
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_network.py
@@ -0,0 +1,109 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_network
+----------------------------------
+
+Functional tests for `shade` network methods.
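+
+All of these tests use the operator cloud: creating shared or external
+networks and setting provider attributes are admin-only operations under
+the default Neutron policy.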
+""" + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.functional import base + + +class TestNetwork(base.BaseFunctionalTestCase): + def setUp(self): + super(TestNetwork, self).setUp() + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + self.network_name = self.getUniqueString('network') + self.addCleanup(self._cleanup_networks) + + def _cleanup_networks(self): + exception_list = list() + for network in self.operator_cloud.list_networks(): + if network['name'].startswith(self.network_name): + try: + self.operator_cloud.delete_network(network['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_network_basic(self): + net1 = self.operator_cloud.create_network(name=self.network_name) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertFalse(net1['shared']) + self.assertFalse(net1['router:external']) + self.assertTrue(net1['admin_state_up']) + + def test_get_network_by_id(self): + net1 = self.operator_cloud.create_network(name=self.network_name) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertFalse(net1['shared']) + self.assertFalse(net1['router:external']) + self.assertTrue(net1['admin_state_up']) + + ret_net1 = self.operator_cloud.get_network_by_id(net1.id) + self.assertIn('id', ret_net1) + self.assertEqual(self.network_name, ret_net1['name']) + self.assertFalse(ret_net1['shared']) + self.assertFalse(ret_net1['router:external']) + self.assertTrue(ret_net1['admin_state_up']) + + def test_create_network_advanced(self): + net1 = self.operator_cloud.create_network( + name=self.network_name, + shared=True, + external=True, + admin_state_up=False, + ) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertTrue(net1['router:external']) + self.assertTrue(net1['shared']) + self.assertFalse(net1['admin_state_up']) + + def test_create_network_provider_flat(self): + existing_public = self.operator_cloud.search_networks( + filters={'provider:network_type': 'flat'}) + if existing_public: + self.skipTest('Physical network already allocated') + net1 = self.operator_cloud.create_network( + name=self.network_name, + shared=True, + provider={ + 'physical_network': 'public', + 'network_type': 'flat', + } + ) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertEqual('flat', net1['provider:network_type']) + self.assertEqual('public', net1['provider:physical_network']) + self.assertIsNone(net1['provider:segmentation_id']) + + def test_list_networks_filtered(self): + net1 = self.operator_cloud.create_network(name=self.network_name) + self.assertIsNotNone(net1) + net2 = self.operator_cloud.create_network( + name=self.network_name + 'other') + self.assertIsNotNone(net2) + match = self.operator_cloud.list_networks( + filters=dict(name=self.network_name)) + self.assertEqual(1, len(match)) + self.assertEqual(net1['name'], match[0]['name']) diff --git a/openstack/tests/functional/cloud/test_object.py b/openstack/tests/functional/cloud/test_object.py new file mode 100644 index 000000000..8a97ed3da --- /dev/null +++ b/openstack/tests/functional/cloud/test_object.py @@ -0,0 +1,166 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_object
+----------------------------------
+
+Functional tests for `shade` object methods.
+"""
+
+import random
+import string
+import tempfile
+
+from testtools import content
+
+from openstack.cloud import exc
+from openstack.tests.functional import base
+
+
+class TestObject(base.BaseFunctionalTestCase):
+
+    def setUp(self):
+        super(TestObject, self).setUp()
+        if not self.user_cloud.has_service('object-store'):
+            self.skipTest('Object service not supported by cloud')
+
+    def test_create_object(self):
+        '''Test uploading single-segment and segmented objects.'''
+        container_name = self.getUniqueString('container')
+        self.addDetail('container', content.text_content(container_name))
+        self.addCleanup(self.user_cloud.delete_container, container_name)
+        self.user_cloud.create_container(container_name)
+        self.assertEqual(
+            container_name, self.user_cloud.list_containers()[0]['name'])
+        sizes = (
+            (64 * 1024, 1),  # 64K, one segment
+            (64 * 1024, 5),  # 64K, five segments
+        )
+        for size, nseg in sizes:
+            segment_size = int(round(size / nseg))
+            with tempfile.NamedTemporaryFile() as fake_file:
+                fake_content = ''.join(random.SystemRandom().choice(
+                    string.ascii_uppercase + string.digits)
+                    for _ in range(size)).encode('latin-1')
+
+                fake_file.write(fake_content)
+                fake_file.flush()
+                name = 'test-%d' % size
+                self.addCleanup(
+                    self.user_cloud.delete_object, container_name, name)
+                self.user_cloud.create_object(
+                    container_name, name,
+                    fake_file.name,
+                    segment_size=segment_size,
+                    metadata={'foo': 'bar'})
+                self.assertFalse(self.user_cloud.is_object_stale(
+                    container_name, name,
+                    fake_file.name
+                ))
+                self.assertEqual(
+                    'bar', self.user_cloud.get_object_metadata(
+                        container_name, name)['x-object-meta-foo']
+                )
+                self.user_cloud.update_object(
+                    container=container_name, name=name,
+                    metadata={'testk': 'testv'})
+                self.assertEqual(
+                    'testv', self.user_cloud.get_object_metadata(
+                        container_name, name)['x-object-meta-testk']
+                )
+                try:
+                    self.assertIsNotNone(
+                        self.user_cloud.get_object(container_name, name))
+                except exc.OpenStackCloudException as e:
+                    self.addDetail(
+                        'failed_response',
+                        content.text_content(str(e.response.headers)))
+                    self.addDetail(
+                        'failed_response',
+                        content.text_content(e.response.text))
+                    raise
+                self.assertEqual(
+                    name,
+                    self.user_cloud.list_objects(container_name)[0]['name'])
+                self.assertTrue(
+                    self.user_cloud.delete_object(container_name, name))
+                self.assertEqual(
+                    [], self.user_cloud.list_objects(container_name))
+        self.assertEqual(
+            container_name, self.user_cloud.list_containers()[0]['name'])
+        self.user_cloud.delete_container(container_name)
+
+    def test_download_object_to_file(self):
+        '''Test downloading single-segment and segmented objects.'''
+        container_name = self.getUniqueString('container')
+        self.addDetail('container', content.text_content(container_name))
+        self.addCleanup(self.user_cloud.delete_container, container_name)
+        self.user_cloud.create_container(container_name)
+        self.assertEqual(
+            container_name, self.user_cloud.list_containers()[0]['name'])
+        sizes = (
+            (64 * 1024, 1),  # 64K, one segment
+            (64 * 1024, 5),  # 64K, five segments
+        )
+        for size, nseg in sizes:
+            segment_size = int(round(size / nseg))
+            with tempfile.NamedTemporaryFile() as fake_file:
+                fake_content = ''.join(random.SystemRandom().choice(
+                    string.ascii_uppercase + string.digits)
+                    for _ in range(size)).encode('latin-1')
+
+                fake_file.write(fake_content)
+                fake_file.flush()
+                name = 'test-%d' % size
+                self.addCleanup(
+                    self.user_cloud.delete_object, container_name, name)
+                self.user_cloud.create_object(
+                    container_name, name,
+                    fake_file.name,
+                    segment_size=segment_size,
+                    metadata={'foo': 'bar'})
+                self.assertFalse(self.user_cloud.is_object_stale(
+                    container_name, name,
+                    fake_file.name
+                ))
+                self.assertEqual(
+                    'bar', self.user_cloud.get_object_metadata(
+                        container_name, name)['x-object-meta-foo']
+                )
+                self.user_cloud.update_object(
+                    container=container_name, name=name,
+                    metadata={'testk': 'testv'})
+                self.assertEqual(
+                    'testv', self.user_cloud.get_object_metadata(
+                        container_name, name)['x-object-meta-testk']
+                )
+                try:
+                    with tempfile.NamedTemporaryFile() as out_file:
+                        self.user_cloud.get_object(
+                            container_name, name, outfile=out_file.name)
+                        with open(out_file.name, 'rb') as downloaded:
+                            self.assertEqual(fake_content, downloaded.read())
+                except exc.OpenStackCloudException as e:
+                    self.addDetail(
+                        'failed_response',
+                        content.text_content(str(e.response.headers)))
+                    self.addDetail(
+                        'failed_response',
+                        content.text_content(e.response.text))
+                    raise
+                self.assertEqual(
+                    name,
+                    self.user_cloud.list_objects(container_name)[0]['name'])
+                self.assertTrue(
+                    self.user_cloud.delete_object(container_name, name))
+                self.assertEqual(
+                    [], self.user_cloud.list_objects(container_name))
+        self.assertEqual(
+            container_name, self.user_cloud.list_containers()[0]['name'])
+        self.user_cloud.delete_container(container_name)
diff --git a/openstack/tests/functional/cloud/test_port.py b/openstack/tests/functional/cloud/test_port.py
new file mode 100644
index 000000000..a25a8797c
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_port.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+test_port
+----------------------------------
+
+Functional tests for `shade` port resource.
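+
+Ports are created on the first network returned by list_networks(), so the
+tests assume the cloud has at least one usable network.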
+""" + +import string +import random + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.functional import base + + +class TestPort(base.BaseFunctionalTestCase): + + def setUp(self): + super(TestPort, self).setUp() + # Skip Neutron tests if neutron is not present + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + + # Generate a unique port name to allow concurrent tests + self.new_port_name = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5)) + + self.addCleanup(self._cleanup_ports) + + def _cleanup_ports(self): + exception_list = list() + + for p in self.operator_cloud.list_ports(): + if p['name'].startswith(self.new_port_name): + try: + self.operator_cloud.delete_port(name_or_id=p['id']) + except Exception as e: + # We were unable to delete this port, let's try with next + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_port(self): + port_name = self.new_port_name + '_create' + + networks = self.operator_cloud.list_networks() + if not networks: + self.assertFalse('no sensible network available') + + port = self.operator_cloud.create_port( + network_id=networks[0]['id'], name=port_name) + self.assertIsInstance(port, dict) + self.assertIn('id', port) + self.assertEqual(port.get('name'), port_name) + + def test_get_port(self): + port_name = self.new_port_name + '_get' + + networks = self.operator_cloud.list_networks() + if not networks: + self.assertFalse('no sensible network available') + + port = self.operator_cloud.create_port( + network_id=networks[0]['id'], name=port_name) + self.assertIsInstance(port, dict) + self.assertIn('id', port) + self.assertEqual(port.get('name'), port_name) + + updated_port = self.operator_cloud.get_port(name_or_id=port['id']) + # extra_dhcp_opts is added later by Neutron... + if 'extra_dhcp_opts' in updated_port and 'extra_dhcp_opts' not in port: + del updated_port['extra_dhcp_opts'] + self.assertEqual(port, updated_port) + + def test_get_port_by_id(self): + port_name = self.new_port_name + '_get_by_id' + + networks = self.operator_cloud.list_networks() + if not networks: + self.assertFalse('no sensible network available') + + port = self.operator_cloud.create_port( + network_id=networks[0]['id'], name=port_name) + self.assertIsInstance(port, dict) + self.assertIn('id', port) + self.assertEqual(port.get('name'), port_name) + + updated_port = self.operator_cloud.get_port_by_id(port['id']) + # extra_dhcp_opts is added later by Neutron... 
+        if 'extra_dhcp_opts' in updated_port and 'extra_dhcp_opts' not in port:
+            del updated_port['extra_dhcp_opts']
+        self.assertEqual(port, updated_port)
+
+    def test_update_port(self):
+        port_name = self.new_port_name + '_update'
+        new_port_name = port_name + '_new'
+
+        networks = self.operator_cloud.list_networks()
+        if not networks:
+            self.fail('no sensible network available')
+
+        self.operator_cloud.create_port(
+            network_id=networks[0]['id'], name=port_name)
+
+        port = self.operator_cloud.update_port(
+            name_or_id=port_name, name=new_port_name)
+        self.assertIsInstance(port, dict)
+        self.assertEqual(port.get('name'), new_port_name)
+
+        updated_port = self.operator_cloud.get_port(name_or_id=port['id'])
+        self.assertEqual(port.get('name'), new_port_name)
+        self.assertEqual(port, updated_port)
+
+    def test_delete_port(self):
+        port_name = self.new_port_name + '_delete'
+
+        networks = self.operator_cloud.list_networks()
+        if not networks:
+            self.fail('no sensible network available')
+
+        port = self.operator_cloud.create_port(
+            network_id=networks[0]['id'], name=port_name)
+        self.assertIsInstance(port, dict)
+        self.assertIn('id', port)
+        self.assertEqual(port.get('name'), port_name)
+
+        updated_port = self.operator_cloud.get_port(name_or_id=port['id'])
+        self.assertIsNotNone(updated_port)
+
+        self.operator_cloud.delete_port(name_or_id=port_name)
+
+        updated_port = self.operator_cloud.get_port(name_or_id=port['id'])
+        self.assertIsNone(updated_port)
diff --git a/openstack/tests/functional/cloud/test_project.py b/openstack/tests/functional/cloud/test_project.py
new file mode 100644
index 000000000..7aeb714d6
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_project.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2016 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+test_project
+----------------------------------
+
+Functional tests for `shade` project resource.
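+
+Identity v3 requires an explicit domain_id when creating projects, so each
+test adds one when self.identity_version is '3'.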
+""" + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.functional import base + + +class TestProject(base.KeystoneBaseFunctionalTestCase): + + def setUp(self): + super(TestProject, self).setUp() + self.new_project_name = self.getUniqueString('project') + self.addCleanup(self._cleanup_projects) + + def _cleanup_projects(self): + exception_list = list() + for p in self.operator_cloud.list_projects(): + if p['name'].startswith(self.new_project_name): + try: + self.operator_cloud.delete_project(p['id']) + except Exception as e: + exception_list.append(str(e)) + continue + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_project(self): + project_name = self.new_project_name + '_create' + + params = { + 'name': project_name, + 'description': 'test_create_project', + } + if self.identity_version == '3': + params['domain_id'] = \ + self.operator_cloud.get_domain('default')['id'] + + project = self.operator_cloud.create_project(**params) + + self.assertIsNotNone(project) + self.assertEqual(project_name, project['name']) + self.assertEqual('test_create_project', project['description']) + + def test_update_project(self): + project_name = self.new_project_name + '_update' + + params = { + 'name': project_name, + 'description': 'test_update_project', + 'enabled': True + } + if self.identity_version == '3': + params['domain_id'] = \ + self.operator_cloud.get_domain('default')['id'] + + project = self.operator_cloud.create_project(**params) + updated_project = self.operator_cloud.update_project( + project_name, enabled=False, description='new') + self.assertIsNotNone(updated_project) + self.assertEqual(project['id'], updated_project['id']) + self.assertEqual(project['name'], updated_project['name']) + self.assertEqual(updated_project['description'], 'new') + self.assertTrue(project['enabled']) + self.assertFalse(updated_project['enabled']) + + # Revert the description and verify the project is still disabled + updated_project = self.operator_cloud.update_project( + project_name, description=params['description']) + self.assertIsNotNone(updated_project) + self.assertEqual(project['id'], updated_project['id']) + self.assertEqual(project['name'], updated_project['name']) + self.assertEqual(project['description'], + updated_project['description']) + self.assertTrue(project['enabled']) + self.assertFalse(updated_project['enabled']) + + def test_delete_project(self): + project_name = self.new_project_name + '_delete' + params = {'name': project_name} + if self.identity_version == '3': + params['domain_id'] = \ + self.operator_cloud.get_domain('default')['id'] + project = self.operator_cloud.create_project(**params) + self.assertIsNotNone(project) + self.assertTrue(self.operator_cloud.delete_project(project['id'])) + + def test_delete_project_not_found(self): + self.assertFalse(self.operator_cloud.delete_project('doesNotExist')) diff --git a/openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py b/openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py new file mode 100644 index 000000000..90e7f193c --- /dev/null +++ b/openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py @@ -0,0 +1,106 @@ +# Copyright 2017 OVH SAS +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_qos_bandwidth_limit_rule
+----------------------------------
+
+Functional tests for `shade` QoS bandwidth limit methods.
+"""
+
+from openstack.cloud.exc import OpenStackCloudException
+from openstack.tests.functional import base
+
+
+class TestQosBandwidthLimitRule(base.BaseFunctionalTestCase):
+    def setUp(self):
+        super(TestQosBandwidthLimitRule, self).setUp()
+        if not self.operator_cloud.has_service('network'):
+            self.skipTest('Network service not supported by cloud')
+        if not self.operator_cloud._has_neutron_extension('qos'):
+            self.skipTest('QoS network extension not supported by cloud')
+
+        policy_name = self.getUniqueString('qos_policy')
+        self.policy = self.operator_cloud.create_qos_policy(name=policy_name)
+
+        self.addCleanup(self._cleanup_qos_policy)
+
+    def _cleanup_qos_policy(self):
+        try:
+            self.operator_cloud.delete_qos_policy(self.policy['id'])
+        except Exception as e:
+            raise OpenStackCloudException(e)
+
+    def test_qos_bandwidth_limit_rule_lifecycle(self):
+        max_kbps = 1500
+        max_burst_kbps = 500
+        updated_max_kbps = 2000
+
+        # Create bw limit rule
+        rule = self.operator_cloud.create_qos_bandwidth_limit_rule(
+            self.policy['id'],
+            max_kbps=max_kbps,
+            max_burst_kbps=max_burst_kbps)
+        self.assertIn('id', rule)
+        self.assertEqual(max_kbps, rule['max_kbps'])
+        self.assertEqual(max_burst_kbps, rule['max_burst_kbps'])
+
+        # Now try to update rule
+        updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule(
+            self.policy['id'],
+            rule['id'],
+            max_kbps=updated_max_kbps)
+        self.assertIn('id', updated_rule)
+        self.assertEqual(updated_max_kbps, updated_rule['max_kbps'])
+        self.assertEqual(max_burst_kbps, updated_rule['max_burst_kbps'])
+
+        # List rules from policy
+        policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules(
+            self.policy['id'])
+        self.assertEqual([updated_rule], policy_rules)
+
+        # Delete rule
+        self.operator_cloud.delete_qos_bandwidth_limit_rule(
+            self.policy['id'], updated_rule['id'])
+
+        # Check that there are no rules left in the policy
+        policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules(
+            self.policy['id'])
+        self.assertEqual([], policy_rules)
+
+    def test_create_qos_bandwidth_limit_rule_direction(self):
+        if not self.operator_cloud._has_neutron_extension(
+                'qos-bw-limit-direction'):
+            self.skipTest("'qos-bw-limit-direction' network extension "
+                          "not supported by cloud")
+        max_kbps = 1500
+        direction = "ingress"
+        updated_direction = "egress"
+
+        # Create bw limit rule
+        rule = self.operator_cloud.create_qos_bandwidth_limit_rule(
+            self.policy['id'],
+            max_kbps=max_kbps,
+            direction=direction)
+        self.assertIn('id', rule)
+        self.assertEqual(max_kbps, rule['max_kbps'])
+        self.assertEqual(direction, rule['direction'])
+
+        # Now try to update direction in rule
+        updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule(
+            self.policy['id'],
+            rule['id'],
+            direction=updated_direction)
+        self.assertIn('id', updated_rule)
+        self.assertEqual(max_kbps, updated_rule['max_kbps'])
+        self.assertEqual(updated_direction, updated_rule['direction'])
diff --git a/openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py b/openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py
new file mode 100644
index 000000000..ec289a665
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py
@@ -0,0 +1,75 @@
+# Copyright 2017 OVH SAS
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_qos_dscp_marking_rule
+----------------------------------
+
+Functional tests for `shade` QoS DSCP marking rule methods.
+"""
+
+from openstack.cloud.exc import OpenStackCloudException
+from openstack.tests.functional import base
+
+
+class TestQosDscpMarkingRule(base.BaseFunctionalTestCase):
+    def setUp(self):
+        super(TestQosDscpMarkingRule, self).setUp()
+        if not self.operator_cloud.has_service('network'):
+            self.skipTest('Network service not supported by cloud')
+        if not self.operator_cloud._has_neutron_extension('qos'):
+            self.skipTest('QoS network extension not supported by cloud')
+
+        policy_name = self.getUniqueString('qos_policy')
+        self.policy = self.operator_cloud.create_qos_policy(name=policy_name)
+
+        self.addCleanup(self._cleanup_qos_policy)
+
+    def _cleanup_qos_policy(self):
+        try:
+            self.operator_cloud.delete_qos_policy(self.policy['id'])
+        except Exception as e:
+            raise OpenStackCloudException(e)
+
+    def test_qos_dscp_marking_rule_lifecycle(self):
+        dscp_mark = 16
+        updated_dscp_mark = 32
+
+        # Create DSCP marking rule
+        rule = self.operator_cloud.create_qos_dscp_marking_rule(
+            self.policy['id'],
+            dscp_mark=dscp_mark)
+        self.assertIn('id', rule)
+        self.assertEqual(dscp_mark, rule['dscp_mark'])
+
+        # Now try to update rule
+        updated_rule = self.operator_cloud.update_qos_dscp_marking_rule(
+            self.policy['id'],
+            rule['id'],
+            dscp_mark=updated_dscp_mark)
+        self.assertIn('id', updated_rule)
+        self.assertEqual(updated_dscp_mark, updated_rule['dscp_mark'])
+
+        # List rules from policy
+        policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
+            self.policy['id'])
+        self.assertEqual([updated_rule], policy_rules)
+
+        # Delete rule
+        self.operator_cloud.delete_qos_dscp_marking_rule(
+            self.policy['id'], updated_rule['id'])
+
+        # Check that there are no rules left in the policy
+        policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
+            self.policy['id'])
+        self.assertEqual([], policy_rules)
diff --git a/openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py b/openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py
new file mode 100644
index 000000000..5ca30e6d7
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py
@@ -0,0 +1,75 @@
+# Copyright 2017 OVH SAS
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_qos_minimum_bandwidth_rule
+----------------------------------
+
+Functional tests for `shade` QoS minimum bandwidth methods.
+"""
+
+from openstack.cloud.exc import OpenStackCloudException
+from openstack.tests.functional import base
+
+
+class TestQosMinimumBandwidthRule(base.BaseFunctionalTestCase):
+    def setUp(self):
+        super(TestQosMinimumBandwidthRule, self).setUp()
+        if not self.operator_cloud.has_service('network'):
+            self.skipTest('Network service not supported by cloud')
+        if not self.operator_cloud._has_neutron_extension('qos'):
+            self.skipTest('QoS network extension not supported by cloud')
+
+        policy_name = self.getUniqueString('qos_policy')
+        self.policy = self.operator_cloud.create_qos_policy(name=policy_name)
+
+        self.addCleanup(self._cleanup_qos_policy)
+
+    def _cleanup_qos_policy(self):
+        try:
+            self.operator_cloud.delete_qos_policy(self.policy['id'])
+        except Exception as e:
+            raise OpenStackCloudException(e)
+
+    def test_qos_minimum_bandwidth_rule_lifecycle(self):
+        min_kbps = 1500
+        updated_min_kbps = 2000
+
+        # Create min bw rule
+        rule = self.operator_cloud.create_qos_minimum_bandwidth_rule(
+            self.policy['id'],
+            min_kbps=min_kbps)
+        self.assertIn('id', rule)
+        self.assertEqual(min_kbps, rule['min_kbps'])
+
+        # Now try to update rule
+        updated_rule = self.operator_cloud.update_qos_minimum_bandwidth_rule(
+            self.policy['id'],
+            rule['id'],
+            min_kbps=updated_min_kbps)
+        self.assertIn('id', updated_rule)
+        self.assertEqual(updated_min_kbps, updated_rule['min_kbps'])
+
+        # List rules from policy
+        policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules(
+            self.policy['id'])
+        self.assertEqual([updated_rule], policy_rules)
+
+        # Delete rule
+        self.operator_cloud.delete_qos_minimum_bandwidth_rule(
+            self.policy['id'], updated_rule['id'])
+
+        # Check that there are no rules left in the policy
+        policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules(
+            self.policy['id'])
+        self.assertEqual([], policy_rules)
diff --git a/openstack/tests/functional/cloud/test_qos_policy.py b/openstack/tests/functional/cloud/test_qos_policy.py
new file mode 100644
index 000000000..08ac57d5c
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_qos_policy.py
@@ -0,0 +1,95 @@
+# Copyright 2017 OVH SAS
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_qos_policy
+----------------------------------
+
+Functional tests for `shade` QoS policy methods.
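+
+Each policy gets a unique name prefix and is removed again by a
+prefix-matching cleanup handler, so parallel test runs do not collide.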
+""" + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.functional import base + + +class TestQosPolicy(base.BaseFunctionalTestCase): + def setUp(self): + super(TestQosPolicy, self).setUp() + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + if not self.operator_cloud._has_neutron_extension('qos'): + self.skipTest('QoS network extension not supported by cloud') + self.policy_name = self.getUniqueString('qos_policy') + self.addCleanup(self._cleanup_policies) + + def _cleanup_policies(self): + exception_list = list() + for policy in self.operator_cloud.list_qos_policies(): + if policy['name'].startswith(self.policy_name): + try: + self.operator_cloud.delete_qos_policy(policy['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_qos_policy_basic(self): + policy = self.operator_cloud.create_qos_policy(name=self.policy_name) + self.assertIn('id', policy) + self.assertEqual(self.policy_name, policy['name']) + self.assertFalse(policy['shared']) + self.assertFalse(policy['is_default']) + + def test_create_qos_policy_shared(self): + policy = self.operator_cloud.create_qos_policy( + name=self.policy_name, shared=True) + self.assertIn('id', policy) + self.assertEqual(self.policy_name, policy['name']) + self.assertTrue(policy['shared']) + self.assertFalse(policy['is_default']) + + def test_create_qos_policy_default(self): + if not self.operator_cloud._has_neutron_extension('qos-default'): + self.skipTest("'qos-default' network extension not supported " + "by cloud") + policy = self.operator_cloud.create_qos_policy( + name=self.policy_name, default=True) + self.assertIn('id', policy) + self.assertEqual(self.policy_name, policy['name']) + self.assertFalse(policy['shared']) + self.assertTrue(policy['is_default']) + + def test_update_qos_policy(self): + policy = self.operator_cloud.create_qos_policy(name=self.policy_name) + self.assertEqual(self.policy_name, policy['name']) + self.assertFalse(policy['shared']) + self.assertFalse(policy['is_default']) + + updated_policy = self.operator_cloud.update_qos_policy( + policy['id'], shared=True, default=True) + self.assertEqual(self.policy_name, updated_policy['name']) + self.assertTrue(updated_policy['shared']) + self.assertTrue(updated_policy['is_default']) + + def test_list_qos_policies_filtered(self): + policy1 = self.operator_cloud.create_qos_policy(name=self.policy_name) + self.assertIsNotNone(policy1) + policy2 = self.operator_cloud.create_qos_policy( + name=self.policy_name + 'other') + self.assertIsNotNone(policy2) + match = self.operator_cloud.list_qos_policies( + filters=dict(name=self.policy_name)) + self.assertEqual(1, len(match)) + self.assertEqual(policy1['name'], match[0]['name']) diff --git a/openstack/tests/functional/cloud/test_quotas.py b/openstack/tests/functional/cloud/test_quotas.py new file mode 100644 index 000000000..b246c3217 --- /dev/null +++ b/openstack/tests/functional/cloud/test_quotas.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_quotas
+----------------------------------
+
+Functional tests for `shade` quotas methods.
+"""
+
+from openstack.tests.functional import base
+
+
+class TestComputeQuotas(base.BaseFunctionalTestCase):
+
+    def test_quotas(self):
+        '''Test quotas functionality'''
+        quotas = self.operator_cloud.get_compute_quotas('demo')
+        cores = quotas['cores']
+        self.operator_cloud.set_compute_quotas('demo', cores=cores + 1)
+        self.assertEqual(
+            cores + 1,
+            self.operator_cloud.get_compute_quotas('demo')['cores'])
+        self.operator_cloud.delete_compute_quotas('demo')
+        self.assertEqual(
+            cores, self.operator_cloud.get_compute_quotas('demo')['cores'])
+
+
+class TestVolumeQuotas(base.BaseFunctionalTestCase):
+
+    def setUp(self):
+        super(TestVolumeQuotas, self).setUp()
+        if not self.operator_cloud.has_service('volume'):
+            self.skipTest('volume service not supported by cloud')
+
+    def test_quotas(self):
+        '''Test quotas functionality'''
+        quotas = self.operator_cloud.get_volume_quotas('demo')
+        volumes = quotas['volumes']
+        self.operator_cloud.set_volume_quotas('demo', volumes=volumes + 1)
+        self.assertEqual(
+            volumes + 1,
+            self.operator_cloud.get_volume_quotas('demo')['volumes'])
+        self.operator_cloud.delete_volume_quotas('demo')
+        self.assertEqual(
+            volumes,
+            self.operator_cloud.get_volume_quotas('demo')['volumes'])
+
+
+class TestNetworkQuotas(base.BaseFunctionalTestCase):
+
+    def setUp(self):
+        super(TestNetworkQuotas, self).setUp()
+        if not self.operator_cloud.has_service('network'):
+            self.skipTest('network service not supported by cloud')
+
+    def test_quotas(self):
+        '''Test quotas functionality'''
+        quotas = self.operator_cloud.get_network_quotas('demo')
+        network = quotas['network']
+        self.operator_cloud.set_network_quotas('demo', network=network + 1)
+        self.assertEqual(
+            network + 1,
+            self.operator_cloud.get_network_quotas('demo')['network'])
+        self.operator_cloud.delete_network_quotas('demo')
+        self.assertEqual(
+            network,
+            self.operator_cloud.get_network_quotas('demo')['network'])
+
+    def test_get_quotas_details(self):
+        '''Test getting details about quota usage'''
+        expected_keys = ['limit', 'used', 'reserved']
+        quota_details = self.operator_cloud.get_network_quotas(
+            'demo', details=True)
+        for quota_values in quota_details.values():
+            for expected_key in expected_keys:
+                self.assertIn(expected_key, quota_values)
diff --git a/openstack/tests/functional/cloud/test_range_search.py b/openstack/tests/functional/cloud/test_range_search.py
new file mode 100644
index 000000000..d9dce2f5d
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_range_search.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2016 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# +# See the License for the specific language governing permissions and +# limitations under the License. + + +from openstack.cloud import exc +from openstack.tests.functional import base + + +class TestRangeSearch(base.BaseFunctionalTestCase): + + def _filter_m1_flavors(self, results): + """The m1 flavors are the original devstack flavors""" + new_results = [] + for flavor in results: + if flavor['name'].startswith("m1."): + new_results.append(flavor) + return new_results + + def test_range_search_bad_range(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + self.assertRaises( + exc.OpenStackCloudException, + self.user_cloud.range_search, flavors, {"ram": "<1a0"}) + + def test_range_search_exact(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "4096"}) + self.assertIsInstance(result, list) + # should only be 1 m1 flavor with 4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(1, len(result)) + self.assertEqual("m1.medium", result[0]['name']) + + def test_range_search_min(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "MIN"}) + self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + # older devstack does not have cirros256 + self.assertIn(result[0]['name'], ('cirros256', 'm1.tiny')) + + def test_range_search_max(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "MAX"}) + self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + self.assertEqual("m1.xlarge", result[0]['name']) + + def test_range_search_lt(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "<1024"}) + self.assertIsInstance(result, list) + # should only be 1 m1 flavor with <1024 ram + result = self._filter_m1_flavors(result) + self.assertEqual(1, len(result)) + self.assertEqual("m1.tiny", result[0]['name']) + + def test_range_search_gt(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": ">4096"}) + self.assertIsInstance(result, list) + # should only be 2 m1 flavors with >4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(2, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.large", flavor_names) + self.assertIn("m1.xlarge", flavor_names) + + def test_range_search_le(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "<=4096"}) + self.assertIsInstance(result, list) + # should only be 3 m1 flavors with <=4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(3, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.tiny", flavor_names) + self.assertIn("m1.small", flavor_names) + self.assertIn("m1.medium", flavor_names) + + def test_range_search_ge(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": ">=4096"}) + self.assertIsInstance(result, list) + # should only be 3 m1 flavors with >=4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(3, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.medium", flavor_names) + self.assertIn("m1.large", flavor_names) + self.assertIn("m1.xlarge", flavor_names) + + def test_range_search_multi_1(self): + 
flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": "MIN", "vcpus": "MIN"}) + self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + # older devstack does not have cirros256 + self.assertIn(result[0]['name'], ('cirros256', 'm1.tiny')) + + def test_range_search_multi_2(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": "<1024", "vcpus": "MIN"}) + self.assertIsInstance(result, list) + result = self._filter_m1_flavors(result) + self.assertEqual(1, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.tiny", flavor_names) + + def test_range_search_multi_3(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": ">=4096", "vcpus": "<6"}) + self.assertIsInstance(result, list) + result = self._filter_m1_flavors(result) + self.assertEqual(2, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.medium", flavor_names) + self.assertIn("m1.large", flavor_names) + + def test_range_search_multi_4(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": ">=4096", "vcpus": "MAX"}) + self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + # This is the only result that should have max vcpu + self.assertEqual("m1.xlarge", result[0]['name']) diff --git a/openstack/tests/functional/cloud/test_recordset.py b/openstack/tests/functional/cloud/test_recordset.py new file mode 100644 index 000000000..92528d697 --- /dev/null +++ b/openstack/tests/functional/cloud/test_recordset.py @@ -0,0 +1,93 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_recordset +---------------------------------- + +Functional tests for `shade` recordset methods. +""" + +from testtools import content + +from openstack.tests.functional import base + + +class TestRecordset(base.BaseFunctionalTestCase): + + def setUp(self): + super(TestRecordset, self).setUp() + if not self.user_cloud.has_service('dns'): + self.skipTest('dns service not supported by cloud') + + def test_recordsets(self): + '''Test DNS recordsets functionality''' + zone = 'example2.net.' + email = 'test@example2.net' + name = 'www' + type_ = 'a' + description = 'Test recordset' + ttl = 3600 + records = ['192.168.1.1'] + + self.addDetail('zone', content.text_content(zone)) + self.addDetail('recordset', content.text_content(name)) + self.addCleanup(self.cleanup, zone, name) + + # Create a zone to hold the tested recordset + zone_obj = self.user_cloud.create_zone(name=zone, email=email) + + # Test we can create a recordset and we get it returned + created_recordset = self.user_cloud.create_recordset(zone, name, type_, + records, + description, ttl) + self.assertEqual(created_recordset['zone_id'], zone_obj['id']) + self.assertEqual(created_recordset['name'], name + '.' 
+ zone) + self.assertEqual(created_recordset['type'], type_.upper()) + self.assertEqual(created_recordset['records'], records) + self.assertEqual(created_recordset['description'], description) + self.assertEqual(created_recordset['ttl'], ttl) + + # Test that we can list recordsets + recordsets = self.user_cloud.list_recordsets(zone) + self.assertIsNotNone(recordsets) + + # Test we get the same recordset with the get_recordset method + get_recordset = self.user_cloud.get_recordset(zone, + created_recordset['id']) + self.assertEqual(get_recordset['id'], created_recordset['id']) + + # Test the get method also works by name + get_recordset = self.user_cloud.get_recordset(zone, name + '.' + zone) + self.assertEqual(get_recordset['id'], created_recordset['id']) + + # Test we can update a field on the recordset and only that field + # is updated + updated_recordset = self.user_cloud.update_recordset(zone_obj['id'], + name + '.' + zone, + ttl=7200) + self.assertEqual(updated_recordset['id'], created_recordset['id']) + self.assertEqual(updated_recordset['name'], name + '.' + zone) + self.assertEqual(updated_recordset['type'], type_.upper()) + self.assertEqual(updated_recordset['records'], records) + self.assertEqual(updated_recordset['description'], description) + self.assertEqual(updated_recordset['ttl'], 7200) + + # Test we can delete and get True returned + deleted_recordset = self.user_cloud.delete_recordset( + zone, name + '.' + zone) + self.assertTrue(deleted_recordset) + + def cleanup(self, zone_name, recordset_name): + self.user_cloud.delete_recordset( + zone_name, recordset_name + '.' + zone_name) + self.user_cloud.delete_zone(zone_name) diff --git a/openstack/tests/functional/cloud/test_router.py b/openstack/tests/functional/cloud/test_router.py new file mode 100644 index 000000000..c70cbf84c --- /dev/null +++ b/openstack/tests/functional/cloud/test_router.py @@ -0,0 +1,340 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_router +---------------------------------- + +Functional tests for `shade` router methods. +""" + +import ipaddress + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.functional import base + + +EXPECTED_TOPLEVEL_FIELDS = ( + 'id', 'name', 'admin_state_up', 'external_gateway_info', + 'tenant_id', 'routes', 'status' +) + +EXPECTED_GW_INFO_FIELDS = ('network_id', 'enable_snat', 'external_fixed_ips') + + +class TestRouter(base.BaseFunctionalTestCase): + def setUp(self): + super(TestRouter, self).setUp() + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + + self.router_prefix = self.getUniqueString('router') + self.network_prefix = self.getUniqueString('network') + self.subnet_prefix = self.getUniqueString('subnet') + + # NOTE(Shrews): Order matters! 
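+        # addCleanup() callbacks run in LIFO order, so routers are deleted
+        # first, then subnets, then networks, matching their dependencies.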
+ self.addCleanup(self._cleanup_networks) + self.addCleanup(self._cleanup_subnets) + self.addCleanup(self._cleanup_routers) + + def _cleanup_routers(self): + exception_list = list() + for router in self.operator_cloud.list_routers(): + if router['name'].startswith(self.router_prefix): + try: + self.operator_cloud.delete_router(router['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def _cleanup_networks(self): + exception_list = list() + for network in self.operator_cloud.list_networks(): + if network['name'].startswith(self.network_prefix): + try: + self.operator_cloud.delete_network(network['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def _cleanup_subnets(self): + exception_list = list() + for subnet in self.operator_cloud.list_subnets(): + if subnet['name'].startswith(self.subnet_prefix): + try: + self.operator_cloud.delete_subnet(subnet['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_router_basic(self): + net1_name = self.network_prefix + '_net1' + net1 = self.operator_cloud.create_network( + name=net1_name, external=True) + + router_name = self.router_prefix + '_create_basic' + router = self.operator_cloud.create_router( + name=router_name, + admin_state_up=True, + ext_gateway_net_id=net1['id'], + ) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, router) + + ext_gw_info = router['external_gateway_info'] + for field in EXPECTED_GW_INFO_FIELDS: + self.assertIn(field, ext_gw_info) + + self.assertEqual(router_name, router['name']) + self.assertEqual('ACTIVE', router['status']) + self.assertEqual(net1['id'], ext_gw_info['network_id']) + self.assertTrue(ext_gw_info['enable_snat']) + + def test_create_router_project(self): + project = self.operator_cloud.get_project('demo') + self.assertIsNotNone(project) + proj_id = project['id'] + net1_name = self.network_prefix + '_net1' + net1 = self.operator_cloud.create_network( + name=net1_name, external=True, project_id=proj_id) + + router_name = self.router_prefix + '_create_project' + router = self.operator_cloud.create_router( + name=router_name, + admin_state_up=True, + ext_gateway_net_id=net1['id'], + project_id=proj_id + ) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, router) + + ext_gw_info = router['external_gateway_info'] + for field in EXPECTED_GW_INFO_FIELDS: + self.assertIn(field, ext_gw_info) + + self.assertEqual(router_name, router['name']) + self.assertEqual('ACTIVE', router['status']) + self.assertEqual(proj_id, router['tenant_id']) + self.assertEqual(net1['id'], ext_gw_info['network_id']) + self.assertTrue(ext_gw_info['enable_snat']) + + def _create_and_verify_advanced_router(self, + external_cidr, + external_gateway_ip=None): + # external_cidr must be passed in as unicode (u'') + # NOTE(Shrews): The arguments are needed because these tests + # will run in parallel and we want to make sure that each test + # is using different resources to prevent race conditions. 
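+        # Each caller therefore passes a distinct external_cidr
+        # (10.2.2.0/24, 10.3.3.0/24, ...) so the external subnets created
+        # here never overlap.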
+ net1_name = self.network_prefix + '_net1' + sub1_name = self.subnet_prefix + '_sub1' + net1 = self.operator_cloud.create_network( + name=net1_name, external=True) + sub1 = self.operator_cloud.create_subnet( + net1['id'], external_cidr, subnet_name=sub1_name, + gateway_ip=external_gateway_ip + ) + + ip_net = ipaddress.IPv4Network(external_cidr) + last_ip = str(list(ip_net.hosts())[-1]) + + router_name = self.router_prefix + '_create_advanced' + router = self.operator_cloud.create_router( + name=router_name, + admin_state_up=False, + ext_gateway_net_id=net1['id'], + enable_snat=False, + ext_fixed_ips=[ + {'subnet_id': sub1['id'], 'ip_address': last_ip} + ] + ) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, router) + + ext_gw_info = router['external_gateway_info'] + for field in EXPECTED_GW_INFO_FIELDS: + self.assertIn(field, ext_gw_info) + + self.assertEqual(router_name, router['name']) + self.assertEqual('ACTIVE', router['status']) + self.assertFalse(router['admin_state_up']) + + self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) + self.assertEqual( + sub1['id'], + ext_gw_info['external_fixed_ips'][0]['subnet_id'] + ) + self.assertEqual( + last_ip, + ext_gw_info['external_fixed_ips'][0]['ip_address'] + ) + + return router + + def test_create_router_advanced(self): + self._create_and_verify_advanced_router(external_cidr=u'10.2.2.0/24') + + def test_add_remove_router_interface(self): + router = self._create_and_verify_advanced_router( + external_cidr=u'10.3.3.0/24') + net_name = self.network_prefix + '_intnet1' + sub_name = self.subnet_prefix + '_intsub1' + net = self.operator_cloud.create_network(name=net_name) + sub = self.operator_cloud.create_subnet( + net['id'], '10.4.4.0/24', subnet_name=sub_name, + gateway_ip='10.4.4.1' + ) + + iface = self.operator_cloud.add_router_interface( + router, subnet_id=sub['id']) + self.assertIsNone( + self.operator_cloud.remove_router_interface( + router, subnet_id=sub['id']) + ) + + # Test return values *after* the interface is detached so the + # resources we've created can be cleaned up if these asserts fail. + self.assertIsNotNone(iface) + for key in ('id', 'subnet_id', 'port_id', 'tenant_id'): + self.assertIn(key, iface) + self.assertEqual(router['id'], iface['id']) + self.assertEqual(sub['id'], iface['subnet_id']) + + def test_list_router_interfaces(self): + router = self._create_and_verify_advanced_router( + external_cidr=u'10.5.5.0/24') + net_name = self.network_prefix + '_intnet1' + sub_name = self.subnet_prefix + '_intsub1' + net = self.operator_cloud.create_network(name=net_name) + sub = self.operator_cloud.create_subnet( + net['id'], '10.6.6.0/24', subnet_name=sub_name, + gateway_ip='10.6.6.1' + ) + + iface = self.operator_cloud.add_router_interface( + router, subnet_id=sub['id']) + all_ifaces = self.operator_cloud.list_router_interfaces(router) + int_ifaces = self.operator_cloud.list_router_interfaces( + router, interface_type='internal') + ext_ifaces = self.operator_cloud.list_router_interfaces( + router, interface_type='external') + self.assertIsNone( + self.operator_cloud.remove_router_interface( + router, subnet_id=sub['id']) + ) + + # Test return values *after* the interface is detached so the + # resources we've created can be cleaned up if these asserts fail. 
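+        # 'external' matches the gateway port created with the router,
+        # 'internal' the port added by add_router_interface() above.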
+ self.assertIsNotNone(iface) + self.assertEqual(2, len(all_ifaces)) + self.assertEqual(1, len(int_ifaces)) + self.assertEqual(1, len(ext_ifaces)) + + ext_fixed_ips = router['external_gateway_info']['external_fixed_ips'] + self.assertEqual(ext_fixed_ips[0]['subnet_id'], + ext_ifaces[0]['fixed_ips'][0]['subnet_id']) + self.assertEqual(sub['id'], int_ifaces[0]['fixed_ips'][0]['subnet_id']) + + def test_update_router_name(self): + router = self._create_and_verify_advanced_router( + external_cidr=u'10.7.7.0/24') + + new_name = self.router_prefix + '_update_name' + updated = self.operator_cloud.update_router( + router['id'], name=new_name) + self.assertIsNotNone(updated) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, updated) + + # Name is the only change we expect + self.assertEqual(new_name, updated['name']) + + # Validate nothing else changed + self.assertEqual(router['status'], updated['status']) + self.assertEqual(router['admin_state_up'], updated['admin_state_up']) + self.assertEqual(router['external_gateway_info'], + updated['external_gateway_info']) + + def test_update_router_admin_state(self): + router = self._create_and_verify_advanced_router( + external_cidr=u'10.8.8.0/24') + + updated = self.operator_cloud.update_router( + router['id'], admin_state_up=True) + self.assertIsNotNone(updated) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, updated) + + # admin_state_up is the only change we expect + self.assertTrue(updated['admin_state_up']) + self.assertNotEqual(router['admin_state_up'], + updated['admin_state_up']) + + # Validate nothing else changed + self.assertEqual(router['status'], updated['status']) + self.assertEqual(router['name'], updated['name']) + self.assertEqual(router['external_gateway_info'], + updated['external_gateway_info']) + + def test_update_router_ext_gw_info(self): + router = self._create_and_verify_advanced_router( + external_cidr=u'10.9.9.0/24') + + # create a new subnet + existing_net_id = router['external_gateway_info']['network_id'] + sub_name = self.subnet_prefix + '_update' + sub = self.operator_cloud.create_subnet( + existing_net_id, '10.10.10.0/24', subnet_name=sub_name, + gateway_ip='10.10.10.1' + ) + + updated = self.operator_cloud.update_router( + router['id'], + ext_gateway_net_id=existing_net_id, + ext_fixed_ips=[ + {'subnet_id': sub['id'], 'ip_address': '10.10.10.77'} + ] + ) + self.assertIsNotNone(updated) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, updated) + + # external_gateway_info is the only change we expect + ext_gw_info = updated['external_gateway_info'] + self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) + self.assertEqual( + sub['id'], + ext_gw_info['external_fixed_ips'][0]['subnet_id'] + ) + self.assertEqual( + '10.10.10.77', + ext_gw_info['external_fixed_ips'][0]['ip_address'] + ) + + # Validate nothing else changed + self.assertEqual(router['status'], updated['status']) + self.assertEqual(router['name'], updated['name']) + self.assertEqual(router['admin_state_up'], updated['admin_state_up']) diff --git a/openstack/tests/functional/cloud/test_security_groups.py b/openstack/tests/functional/cloud/test_security_groups.py new file mode 100644 index 000000000..23c33aafb --- /dev/null +++ b/openstack/tests/functional/cloud/test_security_groups.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_security_groups +---------------------------------- + +Functional tests for `shade` security_groups resource. +""" + +from openstack.tests.functional import base + + +class TestSecurityGroups(base.BaseFunctionalTestCase): + def test_create_list_security_groups(self): + sg1 = self.user_cloud.create_security_group( + name="sg1", description="sg1") + self.addCleanup(self.user_cloud.delete_security_group, sg1['id']) + sg2 = self.operator_cloud.create_security_group( + name="sg2", description="sg2") + self.addCleanup(self.operator_cloud.delete_security_group, sg2['id']) + + if self.user_cloud.has_service('network'): + # Neutron defaults to all_tenants=1 when admin + sg_list = self.operator_cloud.list_security_groups() + self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) + + # Filter by tenant_id (filtering by project_id won't work with + # Keystone V2) + sg_list = self.operator_cloud.list_security_groups( + filters={'tenant_id': self.user_cloud.current_project_id}) + self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) + self.assertNotIn(sg2['id'], [sg['id'] for sg in sg_list]) + + else: + # Nova does not list all tenants by default + sg_list = self.operator_cloud.list_security_groups() + self.assertIn(sg2['id'], [sg['id'] for sg in sg_list]) + self.assertNotIn(sg1['id'], [sg['id'] for sg in sg_list]) + + sg_list = self.operator_cloud.list_security_groups( + filters={'all_tenants': 1}) + self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) + + def test_get_security_group_by_id(self): + sg = self.user_cloud.create_security_group(name='sg', description='sg') + self.addCleanup(self.user_cloud.delete_security_group, sg['id']) + + ret_sg = self.user_cloud.get_security_group_by_id(sg['id']) + self.assertEqual(sg, ret_sg) diff --git a/openstack/tests/functional/cloud/test_server_group.py b/openstack/tests/functional/cloud/test_server_group.py new file mode 100644 index 000000000..9a83fd6e8 --- /dev/null +++ b/openstack/tests/functional/cloud/test_server_group.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_server_group +---------------------------------- + +Functional tests for `shade` server_group resource. 
+""" + +from openstack.tests.functional import base + + +class TestServerGroup(base.BaseFunctionalTestCase): + + def test_server_group(self): + server_group_name = self.getUniqueString() + self.addCleanup(self.cleanup, server_group_name) + server_group = self.user_cloud.create_server_group( + server_group_name, ['affinity']) + + server_group_ids = [v['id'] + for v in self.user_cloud.list_server_groups()] + self.assertIn(server_group['id'], server_group_ids) + + self.user_cloud.delete_server_group(server_group_name) + + def cleanup(self, server_group_name): + server_group = self.user_cloud.get_server_group(server_group_name) + if server_group: + self.user_cloud.delete_server_group(server_group['id']) diff --git a/openstack/tests/functional/cloud/test_services.py b/openstack/tests/functional/cloud/test_services.py new file mode 100644 index 000000000..705c7057a --- /dev/null +++ b/openstack/tests/functional/cloud/test_services.py @@ -0,0 +1,131 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_services +---------------------------------- + +Functional tests for `shade` service resource. +""" + +import string +import random + +from openstack.cloud.exc import OpenStackCloudException +from openstack.cloud.exc import OpenStackCloudUnavailableFeature +from openstack.tests.functional import base + + +class TestServices(base.KeystoneBaseFunctionalTestCase): + + service_attributes = ['id', 'name', 'type', 'description'] + + def setUp(self): + super(TestServices, self).setUp() + + # Generate a random name for services in this test + self.new_service_name = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5)) + + self.addCleanup(self._cleanup_services) + + def _cleanup_services(self): + exception_list = list() + for s in self.operator_cloud.list_services(): + if s['name'] is not None and \ + s['name'].startswith(self.new_service_name): + try: + self.operator_cloud.delete_service(name_or_id=s['id']) + except Exception as e: + # We were unable to delete a service, let's try with next + exception_list.append(str(e)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise OpenStackCloudException('\n'.join(exception_list)) + + def test_create_service(self): + service = self.operator_cloud.create_service( + name=self.new_service_name + '_create', type='test_type', + description='this is a test description') + self.assertIsNotNone(service.get('id')) + + def test_update_service(self): + ver = self.operator_cloud.cloud_config.get_api_version('identity') + if ver.startswith('2'): + # NOTE(SamYaple): Update service only works with v3 api + self.assertRaises(OpenStackCloudUnavailableFeature, + self.operator_cloud.update_service, + 'service_id', name='new name') + else: + service = self.operator_cloud.create_service( + name=self.new_service_name + '_create', type='test_type', + description='this is a test description', enabled=True) + new_service = 
self.operator_cloud.update_service(
+                service.id,
+                name=self.new_service_name + '_update',
+                description='this is an updated description',
+                enabled=False
+            )
+            self.assertEqual(new_service.name,
+                             self.new_service_name + '_update')
+            self.assertEqual(new_service.description,
+                             'this is an updated description')
+            self.assertFalse(new_service.enabled)
+            self.assertEqual(service.id, new_service.id)
+
+    def test_list_services(self):
+        service = self.operator_cloud.create_service(
+            name=self.new_service_name + '_list', type='test_type')
+        observed_services = self.operator_cloud.list_services()
+        self.assertIsInstance(observed_services, list)
+        found = False
+        for s in observed_services:
+            # Test all attributes are returned
+            if s['id'] == service['id']:
+                self.assertEqual(self.new_service_name + '_list',
+                                 s.get('name'))
+                self.assertEqual('test_type', s.get('type'))
+                found = True
+        self.assertTrue(found, msg='new service not found in service list!')
+
+    def test_delete_service_by_name(self):
+        # Test delete by name
+        service = self.operator_cloud.create_service(
+            name=self.new_service_name + '_delete_by_name',
+            type='test_type')
+        self.operator_cloud.delete_service(name_or_id=service['name'])
+        observed_services = self.operator_cloud.list_services()
+        found = False
+        for s in observed_services:
+            if s['id'] == service['id']:
+                found = True
+                break
+        self.assertFalse(found, 'service was not deleted!')
+
+    def test_delete_service_by_id(self):
+        # Test delete by id
+        service = self.operator_cloud.create_service(
+            name=self.new_service_name + '_delete_by_id',
+            type='test_type')
+        self.operator_cloud.delete_service(name_or_id=service['id'])
+        observed_services = self.operator_cloud.list_services()
+        found = False
+        for s in observed_services:
+            if s['id'] == service['id']:
+                found = True
+        self.assertFalse(found, 'service was not deleted!')
diff --git a/openstack/tests/functional/cloud/test_stack.py b/openstack/tests/functional/cloud/test_stack.py
new file mode 100644
index 000000000..0186513bb
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_stack.py
@@ -0,0 +1,175 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_stack
+----------------------------------
+
+Functional tests for `shade` stack methods.
+""" + +import tempfile + +from openstack.cloud import exc +from openstack.tests import fakes +from openstack.tests.functional import base + +simple_template = '''heat_template_version: 2014-10-16 +parameters: + length: + type: number + default: 10 + +resources: + my_rand: + type: OS::Heat::RandomString + properties: + length: {get_param: length} +outputs: + rand: + value: + get_attr: [my_rand, value] +''' + +root_template = '''heat_template_version: 2014-10-16 +parameters: + length: + type: number + default: 10 + count: + type: number + default: 5 + +resources: + my_rands: + type: OS::Heat::ResourceGroup + properties: + count: {get_param: count} + resource_def: + type: My::Simple::Template + properties: + length: {get_param: length} +outputs: + rands: + value: + get_attr: [my_rands, attributes, rand] +''' + +environment = ''' +resource_registry: + My::Simple::Template: %s +''' + +validate_template = '''heat_template_version: asdf-no-such-version ''' + + +class TestStack(base.BaseFunctionalTestCase): + + def setUp(self): + super(TestStack, self).setUp() + if not self.user_cloud.has_service('orchestration'): + self.skipTest('Orchestration service not supported by cloud') + + def _cleanup_stack(self): + self.user_cloud.delete_stack(self.stack_name, wait=True) + self.assertIsNone(self.user_cloud.get_stack(self.stack_name)) + + def test_stack_validation(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(validate_template.encode('utf-8')) + test_template.close() + stack_name = self.getUniqueString('validate_template') + self.assertRaises(exc.OpenStackCloudException, + self.user_cloud.create_stack, + name=stack_name, + template_file=test_template.name) + + def test_stack_simple(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + self.stack_name = self.getUniqueString('simple_stack') + self.addCleanup(self._cleanup_stack) + stack = self.user_cloud.create_stack( + name=self.stack_name, + template_file=test_template.name, + wait=True) + + # assert expected values in stack + self.assertEqual('CREATE_COMPLETE', stack['stack_status']) + rand = stack['outputs'][0]['output_value'] + self.assertEqual(10, len(rand)) + + # assert get_stack matches returned create_stack + stack = self.user_cloud.get_stack(self.stack_name) + self.assertEqual('CREATE_COMPLETE', stack['stack_status']) + self.assertEqual(rand, stack['outputs'][0]['output_value']) + + # assert stack is in list_stacks + stacks = self.user_cloud.list_stacks() + stack_ids = [s['id'] for s in stacks] + self.assertIn(stack['id'], stack_ids) + + # update with no changes + stack = self.user_cloud.update_stack( + self.stack_name, + template_file=test_template.name, + wait=True) + + # assert no change in updated stack + self.assertEqual('UPDATE_COMPLETE', stack['stack_status']) + rand = stack['outputs'][0]['output_value'] + self.assertEqual(rand, stack['outputs'][0]['output_value']) + + # update with changes + stack = self.user_cloud.update_stack( + self.stack_name, + template_file=test_template.name, + wait=True, + length=12) + + # assert changed output in updated stack + stack = self.user_cloud.get_stack(self.stack_name) + self.assertEqual('UPDATE_COMPLETE', stack['stack_status']) + new_rand = stack['outputs'][0]['output_value'] + self.assertNotEqual(rand, new_rand) + self.assertEqual(12, len(new_rand)) + + def test_stack_nested(self): + + test_template = tempfile.NamedTemporaryFile( + suffix='.yaml', delete=False) + 
test_template.write(root_template.encode('utf-8')) + test_template.close() + + simple_tmpl = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False) + simple_tmpl.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + simple_tmpl.close() + + env = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False) + expanded_env = environment % simple_tmpl.name + env.write(expanded_env.encode('utf-8')) + env.close() + + self.stack_name = self.getUniqueString('nested_stack') + self.addCleanup(self._cleanup_stack) + stack = self.user_cloud.create_stack( + name=self.stack_name, + template_file=test_template.name, + environment_files=[env.name], + wait=True) + + # assert expected values in stack + self.assertEqual('CREATE_COMPLETE', stack['stack_status']) + rands = stack['outputs'][0]['output_value'] + self.assertEqual(['0', '1', '2', '3', '4'], sorted(rands.keys())) + for rand in rands.values(): + self.assertEqual(10, len(rand)) diff --git a/openstack/tests/functional/cloud/test_usage.py b/openstack/tests/functional/cloud/test_usage.py new file mode 100644 index 000000000..e3467081f --- /dev/null +++ b/openstack/tests/functional/cloud/test_usage.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_usage +---------------------------------- + +Functional tests for `shade` usage method +""" +import datetime + +from openstack.tests.functional import base + + +class TestUsage(base.BaseFunctionalTestCase): + + def test_get_compute_usage(self): + '''Test usage functionality''' + start = datetime.datetime.now() - datetime.timedelta(seconds=5) + usage = self.operator_cloud.get_compute_usage('demo', start) + self.add_info_on_exception('usage', usage) + self.assertIsNotNone(usage) + self.assertIn('total_hours', usage) + self.assertIn('started_at', usage) + self.assertEqual(start.isoformat(), usage['started_at']) + self.assertIn('location', usage) diff --git a/openstack/tests/functional/cloud/test_users.py b/openstack/tests/functional/cloud/test_users.py new file mode 100644 index 000000000..2a3f6246f --- /dev/null +++ b/openstack/tests/functional/cloud/test_users.py @@ -0,0 +1,165 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_users +---------------------------------- + +Functional tests for `shade` user methods. 
+""" + +from openstack.cloud import operator_cloud +from openstack import OpenStackCloudException +from openstack.tests.functional import base + + +class TestUsers(base.KeystoneBaseFunctionalTestCase): + def setUp(self): + super(TestUsers, self).setUp() + self.user_prefix = self.getUniqueString('user') + self.addCleanup(self._cleanup_users) + + def _cleanup_users(self): + exception_list = list() + for user in self.operator_cloud.list_users(): + if user['name'].startswith(self.user_prefix): + try: + self.operator_cloud.delete_user(user['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise OpenStackCloudException('\n'.join(exception_list)) + + def _create_user(self, **kwargs): + domain_id = None + i_ver = self.operator_cloud.cloud_config.get_api_version('identity') + if i_ver not in ('2', '2.0'): + domain = self.operator_cloud.get_domain('default') + domain_id = domain['id'] + return self.operator_cloud.create_user(domain_id=domain_id, **kwargs) + + def test_list_users(self): + users = self.operator_cloud.list_users() + self.assertIsNotNone(users) + self.assertNotEqual([], users) + + def test_get_user(self): + user = self.operator_cloud.get_user('admin') + self.assertIsNotNone(user) + self.assertIn('id', user) + self.assertIn('name', user) + self.assertEqual('admin', user['name']) + + def test_search_users(self): + users = self.operator_cloud.search_users(filters={'enabled': True}) + self.assertIsNotNone(users) + + def test_search_users_jmespath(self): + users = self.operator_cloud.search_users(filters="[?enabled]") + self.assertIsNotNone(users) + + def test_create_user(self): + user_name = self.user_prefix + '_create' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, email=user_email) + self.assertIsNotNone(user) + self.assertEqual(user_name, user['name']) + self.assertEqual(user_email, user['email']) + self.assertTrue(user['enabled']) + + def test_delete_user(self): + user_name = self.user_prefix + '_delete' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, email=user_email) + self.assertIsNotNone(user) + self.assertTrue(self.operator_cloud.delete_user(user['id'])) + + def test_delete_user_not_found(self): + self.assertFalse(self.operator_cloud.delete_user('does_not_exist')) + + def test_update_user(self): + user_name = self.user_prefix + '_updatev3' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, email=user_email) + self.assertIsNotNone(user) + self.assertTrue(user['enabled']) + + # Pass some keystone v3 params. This should work no matter which + # version of keystone we are testing against. 
+ new_user = self.operator_cloud.update_user( + user['id'], + name=user_name + '2', + email='somebody@nowhere.com', + enabled=False, + password='secret', + description='') + self.assertIsNotNone(new_user) + self.assertEqual(user['id'], new_user['id']) + self.assertEqual(user_name + '2', new_user['name']) + self.assertEqual('somebody@nowhere.com', new_user['email']) + self.assertFalse(new_user['enabled']) + + def test_update_user_password(self): + user_name = self.user_prefix + '_password' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, + email=user_email, + password='old_secret') + self.assertIsNotNone(user) + self.assertTrue(user['enabled']) + + # This should work for both v2 and v3 + new_user = self.operator_cloud.update_user( + user['id'], password='new_secret') + self.assertIsNotNone(new_user) + self.assertEqual(user['id'], new_user['id']) + self.assertEqual(user_name, new_user['name']) + self.assertEqual(user_email, new_user['email']) + self.assertTrue(new_user['enabled']) + self.assertTrue(self.operator_cloud.grant_role( + 'Member', user=user['id'], project='demo', wait=True)) + self.addCleanup( + self.operator_cloud.revoke_role, + 'Member', user=user['id'], project='demo', wait=True) + self.assertIsNotNone(operator_cloud( + cloud=self._demo_name, + username=user_name, password='new_secret').service_catalog) + + def test_users_and_groups(self): + i_ver = self.operator_cloud.cloud_config.get_api_version('identity') + if i_ver in ('2', '2.0'): + self.skipTest('Identity service does not support groups') + + group_name = self.getUniqueString('group') + self.addCleanup(self.operator_cloud.delete_group, group_name) + + # Create a group + group = self.operator_cloud.create_group(group_name, 'test group') + self.assertIsNotNone(group) + + # Create a user + user_name = self.user_prefix + '_ug' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, email=user_email) + self.assertIsNotNone(user) + + # Add the user to the group + self.operator_cloud.add_user_to_group(user_name, group_name) + self.assertTrue( + self.operator_cloud.is_user_in_group(user_name, group_name)) + + # Remove them from the group + self.operator_cloud.remove_user_from_group(user_name, group_name) + self.assertFalse( + self.operator_cloud.is_user_in_group(user_name, group_name)) diff --git a/openstack/tests/functional/cloud/test_volume.py b/openstack/tests/functional/cloud/test_volume.py new file mode 100644 index 000000000..5140c6df1 --- /dev/null +++ b/openstack/tests/functional/cloud/test_volume.py @@ -0,0 +1,150 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_volume +---------------------------------- + +Functional tests for `shade` block storage methods. 
+""" + +from fixtures import TimeoutException +from testtools import content + +from openstack.cloud import _utils +from openstack.cloud import exc +from openstack.tests.functional import base + + +class TestVolume(base.BaseFunctionalTestCase): + + # Creating and deleting volumes is slow + TIMEOUT_SCALING_FACTOR = 1.5 + + def setUp(self): + super(TestVolume, self).setUp() + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + + def test_volumes(self): + '''Test volume and snapshot functionality''' + volume_name = self.getUniqueString() + snapshot_name = self.getUniqueString() + self.addDetail('volume', content.text_content(volume_name)) + self.addCleanup(self.cleanup, volume_name, snapshot_name=snapshot_name) + volume = self.user_cloud.create_volume( + display_name=volume_name, size=1) + snapshot = self.user_cloud.create_volume_snapshot( + volume['id'], + display_name=snapshot_name + ) + + ret_volume = self.user_cloud.get_volume_by_id(volume['id']) + self.assertEqual(volume['id'], ret_volume['id']) + + volume_ids = [v['id'] for v in self.user_cloud.list_volumes()] + self.assertIn(volume['id'], volume_ids) + + snapshot_list = self.user_cloud.list_volume_snapshots() + snapshot_ids = [s['id'] for s in snapshot_list] + self.assertIn(snapshot['id'], snapshot_ids) + + ret_snapshot = self.user_cloud.get_volume_snapshot_by_id( + snapshot['id']) + self.assertEqual(snapshot['id'], ret_snapshot['id']) + + self.user_cloud.delete_volume_snapshot(snapshot_name, wait=True) + self.user_cloud.delete_volume(volume_name, wait=True) + + def test_volume_to_image(self): + '''Test volume export to image functionality''' + volume_name = self.getUniqueString() + image_name = self.getUniqueString() + self.addDetail('volume', content.text_content(volume_name)) + self.addCleanup(self.cleanup, volume_name, image_name=image_name) + volume = self.user_cloud.create_volume( + display_name=volume_name, size=1) + image = self.user_cloud.create_image( + image_name, volume=volume, wait=True) + + volume_ids = [v['id'] for v in self.user_cloud.list_volumes()] + self.assertIn(volume['id'], volume_ids) + + image_list = self.user_cloud.list_images() + image_ids = [s['id'] for s in image_list] + self.assertIn(image['id'], image_ids) + + self.user_cloud.delete_image(image_name, wait=True) + self.user_cloud.delete_volume(volume_name, wait=True) + + def cleanup(self, volume, snapshot_name=None, image_name=None): + # Need to delete snapshots before volumes + if snapshot_name: + snapshot = self.user_cloud.get_volume_snapshot(snapshot_name) + if snapshot: + self.user_cloud.delete_volume_snapshot( + snapshot_name, wait=True) + if image_name: + image = self.user_cloud.get_image(image_name) + if image: + self.user_cloud.delete_image(image_name, wait=True) + if not isinstance(volume, list): + self.user_cloud.delete_volume(volume, wait=True) + else: + # We have more than one volume to clean up - submit all of the + # deletes without wait, then poll until none of them are found + # in the volume list anymore + for v in volume: + self.user_cloud.delete_volume(v, wait=False) + try: + for count in _utils._iterate_timeout( + 180, "Timeout waiting for volume cleanup"): + found = False + for existing in self.user_cloud.list_volumes(): + for v in volume: + if v['id'] == existing['id']: + found = True + break + if found: + break + if not found: + break + except (exc.OpenStackCloudTimeout, TimeoutException): + # NOTE(slaweq): ups, some volumes are still not removed + # so we should try to force delete 
it once again and move + # forward + for existing in self.user_cloud.list_volumes(): + for v in volume: + if v['id'] == existing['id']: + self.operator_cloud.delete_volume( + v, wait=False, force=True) + + def test_list_volumes_pagination(self): + '''Test pagination for list volumes functionality''' + + volumes = [] + # the number of created volumes needs to be higher than + # CONF.osapi_max_limit but not higher than volume quotas for + # the test user in the tenant(default quotas is set to 10) + num_volumes = 8 + for i in range(num_volumes): + name = self.getUniqueString() + v = self.user_cloud.create_volume(display_name=name, size=1) + volumes.append(v) + self.addCleanup(self.cleanup, volumes) + result = [] + for i in self.user_cloud.list_volumes(): + if i['name'] and i['name'].startswith(self.id()): + result.append(i['id']) + self.assertEqual( + sorted([i['id'] for i in volumes]), + sorted(result)) diff --git a/openstack/tests/functional/cloud/test_volume_backup.py b/openstack/tests/functional/cloud/test_volume_backup.py new file mode 100644 index 000000000..ec2c72c3c --- /dev/null +++ b/openstack/tests/functional/cloud/test_volume_backup.py @@ -0,0 +1,73 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack.tests.functional import base + + +class TestVolume(base.BaseFunctionalTestCase): + # Creating a volume backup is incredibly slow. + TIMEOUT_SCALING_FACTOR = 1.5 + + def setUp(self): + super(TestVolume, self).setUp() + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + + if not self.user_cloud.has_service('object-store'): + self.skipTest('volume backups require swift') + + def test_create_get_delete_volume_backup(self): + volume = self.user_cloud.create_volume( + display_name=self.getUniqueString(), size=1) + self.addCleanup(self.user_cloud.delete_volume, volume['id']) + + backup_name_1 = self.getUniqueString() + backup_desc_1 = self.getUniqueString() + backup = self.user_cloud.create_volume_backup( + volume_id=volume['id'], name=backup_name_1, + description=backup_desc_1, wait=True) + self.assertEqual(backup_name_1, backup['name']) + + backup = self.user_cloud.get_volume_backup(backup['id']) + self.assertEqual("available", backup['status']) + self.assertEqual(backup_desc_1, backup['description']) + + self.user_cloud.delete_volume_backup(backup['id'], wait=True) + self.assertIsNone(self.user_cloud.get_volume_backup(backup['id'])) + + def test_list_volume_backups(self): + vol1 = self.user_cloud.create_volume( + display_name=self.getUniqueString(), size=1) + self.addCleanup(self.user_cloud.delete_volume, vol1['id']) + + # We create 2 volumes to create 2 backups. We could have created 2 + # backups from the same volume but taking 2 successive backups seems + # to be race-condition prone. And I didn't want to use an ugly sleep() + # here. 
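+        # (The deterministic alternative to an arbitrary sleep() is the
+        # poll-with-deadline pattern used elsewhere in these tests, e.g.:
+        #
+        #     for count in _utils._iterate_timeout(
+        #             180, "Timeout waiting for backup to be available"):
+        #         if backup_is_available():
+        #             break
+        #
+        # where backup_is_available() is an illustrative placeholder, not
+        # a real helper; create_volume_backup(wait=True) covers this.)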
+ vol2 = self.user_cloud.create_volume( + display_name=self.getUniqueString(), size=1) + self.addCleanup(self.user_cloud.delete_volume, vol2['id']) + + backup_name_1 = self.getUniqueString() + backup = self.user_cloud.create_volume_backup( + volume_id=vol1['id'], name=backup_name_1) + self.addCleanup(self.user_cloud.delete_volume_backup, backup['id']) + + backup = self.user_cloud.create_volume_backup(volume_id=vol2['id']) + self.addCleanup(self.user_cloud.delete_volume_backup, backup['id']) + + backups = self.user_cloud.list_volume_backups() + self.assertEqual(2, len(backups)) + + backups = self.user_cloud.list_volume_backups( + search_opts={"name": backup_name_1}) + self.assertEqual(1, len(backups)) + self.assertEqual(backup_name_1, backups[0]['name']) diff --git a/openstack/tests/functional/cloud/test_volume_type.py b/openstack/tests/functional/cloud/test_volume_type.py new file mode 100644 index 000000000..3bd48a43f --- /dev/null +++ b/openstack/tests/functional/cloud/test_volume_type.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_volume +---------------------------------- + +Functional tests for `shade` block storage methods. +""" +import testtools +from openstack.cloud import exc +from openstack.tests.functional import base + + +class TestVolumeType(base.BaseFunctionalTestCase): + + def _assert_project(self, volume_name_or_id, project_id, allowed=True): + acls = self.operator_cloud.get_volume_type_access(volume_name_or_id) + allowed_projects = [x.get('project_id') for x in acls] + self.assertEqual(allowed, project_id in allowed_projects) + + def setUp(self): + super(TestVolumeType, self).setUp() + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + volume_type = { + "name": 'test-volume-type', + "description": None, + "os-volume-type-access:is_public": False} + self.operator_cloud._volume_client.post( + '/types', json={'volume_type': volume_type}) + + def tearDown(self): + ret = self.operator_cloud.get_volume_type('test-volume-type') + if ret.get('id'): + self.operator_cloud._volume_client.delete( + '/types/{volume_type_id}'.format(volume_type_id=ret.id)) + super(TestVolumeType, self).tearDown() + + def test_list_volume_types(self): + volume_types = self.operator_cloud.list_volume_types() + self.assertTrue(volume_types) + self.assertTrue(any( + x for x in volume_types if x.name == 'test-volume-type')) + + def test_add_remove_volume_type_access(self): + volume_type = self.operator_cloud.get_volume_type('test-volume-type') + self.assertEqual('test-volume-type', volume_type.name) + + self.operator_cloud.add_volume_type_access( + 'test-volume-type', + self.operator_cloud.current_project_id) + self._assert_project( + 'test-volume-type', self.operator_cloud.current_project_id, + allowed=True) + + self.operator_cloud.remove_volume_type_access( + 'test-volume-type', + self.operator_cloud.current_project_id) + self._assert_project( + 'test-volume-type', 
self.operator_cloud.current_project_id,
+            allowed=False)
+
+    def test_add_volume_type_access_missing_project(self):
+        # The project id is not validated, so it may not exist.
+        self.operator_cloud.add_volume_type_access(
+            'test-volume-type',
+            '00000000000000000000000000000000')
+
+        self.operator_cloud.remove_volume_type_access(
+            'test-volume-type',
+            '00000000000000000000000000000000')
+
+    def test_add_volume_type_access_missing_volume(self):
+        with testtools.ExpectedException(
+                exc.OpenStackCloudException,
+                "VolumeType not found.*"
+        ):
+            self.operator_cloud.add_volume_type_access(
+                'MISSING_VOLUME_TYPE',
+                self.operator_cloud.current_project_id)
+
+    def test_remove_volume_type_access_missing_volume(self):
+        with testtools.ExpectedException(
+                exc.OpenStackCloudException,
+                "VolumeType not found.*"
+        ):
+            self.operator_cloud.remove_volume_type_access(
+                'MISSING_VOLUME_TYPE',
+                self.operator_cloud.current_project_id)
+
+    def test_add_volume_type_access_bad_project(self):
+        with testtools.ExpectedException(
+                exc.OpenStackCloudBadRequest,
+                "Unable to authorize.*"
+        ):
+            self.operator_cloud.add_volume_type_access(
+                'test-volume-type',
+                'BAD_PROJECT_ID')
+
+    def test_remove_volume_type_access_missing_project(self):
+        with testtools.ExpectedException(
+                exc.OpenStackCloudURINotFound,
+                "Unable to revoke.*"
+        ):
+            self.operator_cloud.remove_volume_type_access(
+                'test-volume-type',
+                '00000000000000000000000000000000')
diff --git a/openstack/tests/functional/cloud/test_zone.py b/openstack/tests/functional/cloud/test_zone.py
new file mode 100644
index 000000000..ecb95e842
--- /dev/null
+++ b/openstack/tests/functional/cloud/test_zone.py
@@ -0,0 +1,84 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_zone
+----------------------------------
+
+Functional tests for `shade` zone methods.
+"""
+
+from testtools import content
+
+from openstack.tests.functional import base
+
+
+class TestZone(base.BaseFunctionalTestCase):
+
+    def setUp(self):
+        super(TestZone, self).setUp()
+        if not self.user_cloud.has_service('dns'):
+            self.skipTest('dns service not supported by cloud')
+
+    def test_zones(self):
+        '''Test DNS zones functionality'''
+        name = 'example.net.'
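+        # Designate zone names are fully qualified, hence the trailing
+        # dot on the name above.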
+ zone_type = 'primary' + email = 'test@example.net' + description = 'Test zone' + ttl = 3600 + masters = None + + self.addDetail('zone', content.text_content(name)) + self.addCleanup(self.cleanup, name) + + # Test we can create a zone and we get it returned + zone = self.user_cloud.create_zone( + name=name, zone_type=zone_type, email=email, + description=description, ttl=ttl, + masters=masters) + self.assertEqual(zone['name'], name) + self.assertEqual(zone['type'], zone_type.upper()) + self.assertEqual(zone['email'], email) + self.assertEqual(zone['description'], description) + self.assertEqual(zone['ttl'], ttl) + self.assertEqual(zone['masters'], []) + + # Test that we can list zones + zones = self.user_cloud.list_zones() + self.assertIsNotNone(zones) + + # Test we get the same zone with the get_zone method + zone_get = self.user_cloud.get_zone(zone['id']) + self.assertEqual(zone_get['id'], zone['id']) + + # Test the get method also works by name + zone_get = self.user_cloud.get_zone(name) + self.assertEqual(zone_get['name'], zone['name']) + + # Test we can update a field on the zone and only that field + # is updated + zone_update = self.user_cloud.update_zone(zone['id'], ttl=7200) + self.assertEqual(zone_update['id'], zone['id']) + self.assertEqual(zone_update['name'], zone['name']) + self.assertEqual(zone_update['type'], zone['type']) + self.assertEqual(zone_update['email'], zone['email']) + self.assertEqual(zone_update['description'], zone['description']) + self.assertEqual(zone_update['ttl'], 7200) + self.assertEqual(zone_update['masters'], zone['masters']) + + # Test we can delete and get True returned + zone_delete = self.user_cloud.delete_zone(zone['id']) + self.assertTrue(zone_delete) + + def cleanup(self, name): + self.user_cloud.delete_zone(name) diff --git a/openstack/tests/functional/cloud/util.py b/openstack/tests/functional/cloud/util.py new file mode 100644 index 000000000..fef67190f --- /dev/null +++ b/openstack/tests/functional/cloud/util.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +util +-------------------------------- + +Util methods for functional tests +""" +import operator +import os + + +def pick_flavor(flavors): + """Given a flavor list pick the smallest one.""" + # Enable running functional tests against rax - which requires + # performance flavors be used for boot from volume + flavor_name = os.environ.get('OPENSTACKSDK_FLAVOR') + if flavor_name: + for flavor in flavors: + if flavor.name == flavor_name: + return flavor + return None + + for flavor in sorted( + flavors, + key=operator.attrgetter('ram')): + if 'performance' in flavor.name: + return flavor + for flavor in sorted( + flavors, + key=operator.attrgetter('ram')): + return flavor diff --git a/openstack/tests/unit/base.py b/openstack/tests/unit/base.py index eae41c16d..275afc07f 100644 --- a/openstack/tests/unit/base.py +++ b/openstack/tests/unit/base.py @@ -13,39 +13,631 @@ # License for the specific language governing permissions and limitations # under the License. 
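+# The fake-data namedtuples defined below bundle each identity resource
+# with the JSON bodies a test should send and receive, e.g. (values
+# invented for illustration):
+#
+#     p = self._get_project_data(project_name='demo')
+#     p.json_response  # -> {'project': {'id': ..., 'name': 'demo', ...}}
+#     p.json_request   # -> {'project': {'name': 'demo', ...}}
+#
+# so a test can register the response with requests_mock and validate
+# the request body via assert_calls().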
-import os +import collections +import time +import uuid import fixtures -import testtools +import mock +import os +import openstack.config as occ +from requests import structures +from requests_mock.contrib import fixture as rm_fixture +from six.moves import urllib +import tempfile -_TRUE_VALUES = ('true', '1', 'yes') +import openstack +from openstack.tests import base -class TestCase(testtools.TestCase): +_ProjectData = collections.namedtuple( + 'ProjectData', + 'project_id, project_name, enabled, domain_id, description, ' + 'json_response, json_request') - """Test case base class for all unit tests.""" - def setUp(self): +_UserData = collections.namedtuple( + 'UserData', + 'user_id, password, name, email, description, domain_id, enabled, ' + 'json_response, json_request') + + +_GroupData = collections.namedtuple( + 'GroupData', + 'group_id, group_name, domain_id, description, json_response, ' + 'json_request') + + +_DomainData = collections.namedtuple( + 'DomainData', + 'domain_id, domain_name, description, json_response, ' + 'json_request') + + +_ServiceData = collections.namedtuple( + 'Servicedata', + 'service_id, service_name, service_type, description, enabled, ' + 'json_response_v3, json_response_v2, json_request') + + +_EndpointDataV3 = collections.namedtuple( + 'EndpointData', + 'endpoint_id, service_id, interface, region, url, enabled, ' + 'json_response, json_request') + + +_EndpointDataV2 = collections.namedtuple( + 'EndpointData', + 'endpoint_id, service_id, region, public_url, internal_url, ' + 'admin_url, v3_endpoint_list, json_response, ' + 'json_request') + + +# NOTE(notmorgan): Shade does not support domain-specific roles +# This should eventually be fixed if it becomes a main-stream feature. +_RoleData = collections.namedtuple( + 'RoleData', + 'role_id, role_name, json_response, json_request') + + +class BaseTestCase(base.TestCase): + + def setUp(self, cloud_config_fixture='clouds.yaml'): """Run before each test method to initialize test environment.""" - super(TestCase, self).setUp() - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. 
- test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + super(BaseTestCase, self).setUp() - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) + # Sleeps are for real testing, but unit tests shouldn't need them + realsleep = time.sleep - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + def _nosleep(seconds): + return realsleep(seconds * 0.0001) - self.log_fixture = self.useFixture(fixtures.FakeLogger()) + self.sleep_fixture = self.useFixture(fixtures.MonkeyPatch( + 'time.sleep', + _nosleep)) + self.fixtures_directory = 'openstack/tests/unit/fixtures' + + # Isolate os-client-config from test environment + config = tempfile.NamedTemporaryFile(delete=False) + cloud_path = '%s/clouds/%s' % (self.fixtures_directory, + cloud_config_fixture) + with open(cloud_path, 'rb') as f: + content = f.read() + config.write(content) + config.close() + + vendor = tempfile.NamedTemporaryFile(delete=False) + vendor.write(b'{}') + vendor.close() + + test_cloud = os.environ.get('OPENSTACKSDK_OS_CLOUD', '_test_cloud_') + self.config = occ.OpenStackConfig( + config_files=[config.name], + vendor_files=[vendor.name], + secure_files=['non-existant']) + self.cloud_config = self.config.get_one_cloud( + cloud=test_cloud, validate=False) + self.cloud = openstack.OpenStackCloud( + cloud_config=self.cloud_config, + log_inner_exceptions=True) + self.strict_cloud = openstack.OpenStackCloud( + cloud_config=self.cloud_config, + log_inner_exceptions=True, + strict=True) + self.op_cloud = openstack.OperatorCloud( + cloud_config=self.cloud_config, + log_inner_exceptions=True) + + +# TODO(shade) Remove this and rename RequestsMockTestCase to TestCase. +# There are still a few places, like test_normalize, that assume +# this mocking is in place rather than having the correct +# requests_mock entries set up that need to be converted. +class TestCase(BaseTestCase): + + def setUp(self, cloud_config_fixture='clouds.yaml'): + + super(TestCase, self).setUp(cloud_config_fixture=cloud_config_fixture) + self.session_fixture = self.useFixture(fixtures.MonkeyPatch( + 'openstack.config.cloud_config.CloudConfig.get_session', + mock.Mock())) + + +class RequestsMockTestCase(BaseTestCase): + + def setUp(self, cloud_config_fixture='clouds.yaml'): + + super(RequestsMockTestCase, self).setUp( + cloud_config_fixture=cloud_config_fixture) + + # FIXME(notmorgan): Convert the uri_registry, discovery.json, and + # use of keystone_v3/v2 to a proper fixtures.Fixture. For now this + # is acceptable, but eventually this should become it's own fixture + # that encapsulates the registry, registering the URIs, and + # assert_calls (and calling assert_calls every test case that uses + # it on cleanup). Subclassing here could be 100% eliminated in the + # future allowing any class to simply + # self.useFixture(openstack.cloud.RequestsMockFixture) and get all + # the benefits. + + # NOTE(notmorgan): use an ordered dict here to ensure we preserve the + # order in which items are added to the uri_registry. 
This makes + # the behavior more consistent when dealing with ensuring the + # requests_mock uri/query_string matchers are ordered and parse the + # request in the correct orders. + self._uri_registry = collections.OrderedDict() + self.discovery_json = os.path.join( + self.fixtures_directory, 'discovery.json') + self.use_keystone_v3() + self.__register_uris_called = False + + def get_mock_url(self, service_type, interface='public', resource=None, + append=None, base_url_append=None, + qs_elements=None): + endpoint_url = self.cloud.endpoint_for( + service_type=service_type, interface=interface) + # Strip trailing slashes, so as not to produce double-slashes below + if endpoint_url.endswith('/'): + endpoint_url = endpoint_url[:-1] + to_join = [endpoint_url] + qs = '' + if base_url_append: + to_join.append(base_url_append) + if resource: + to_join.append(resource) + to_join.extend(append or []) + if qs_elements is not None: + qs = '?%s' % '&'.join(qs_elements) + return '%(uri)s%(qs)s' % {'uri': '/'.join(to_join), 'qs': qs} + + def mock_for_keystone_projects(self, project=None, v3=True, + list_get=False, id_get=False, + project_list=None, project_count=None): + if project: + assert not (project_list or project_count) + elif project_list: + assert not (project or project_count) + elif project_count: + assert not (project or project_list) + else: + raise Exception('Must specify a project, project_list, ' + 'or project_count') + assert list_get or id_get + + base_url_append = 'v3' if v3 else None + if project: + project_list = [project] + elif project_count: + # Generate multiple projects + project_list = [self._get_project_data(v3=v3) + for c in range(0, project_count)] + uri_mock_list = [] + if list_get: + uri_mock_list.append( + dict(method='GET', + uri=self.get_mock_url( + service_type='identity', + interface='admin', + resource='projects', + base_url_append=base_url_append), + status_code=200, + json={'projects': [p.json_response['project'] + for p in project_list]}) + ) + if id_get: + for p in project_list: + uri_mock_list.append( + dict(method='GET', + uri=self.get_mock_url( + service_type='identity', + interface='admin', + resource='projects', + append=[p.project_id], + base_url_append=base_url_append), + status_code=200, + json=p.json_response) + ) + self.__do_register_uris(uri_mock_list) + return project_list + + def _get_project_data(self, project_name=None, enabled=None, + domain_id=None, description=None, v3=True, + project_id=None): + project_name = project_name or self.getUniqueString('projectName') + project_id = uuid.UUID(project_id or uuid.uuid4().hex).hex + response = {'id': project_id, 'name': project_name} + request = {'name': project_name} + domain_id = (domain_id or uuid.uuid4().hex) if v3 else None + if domain_id: + request['domain_id'] = domain_id + response['domain_id'] = domain_id + if enabled is not None: + enabled = bool(enabled) + response['enabled'] = enabled + request['enabled'] = enabled + response.setdefault('enabled', True) + request.setdefault('enabled', True) + if description: + response['description'] = description + request['description'] = description + request.setdefault('description', None) + if v3: + project_key = 'project' + else: + project_key = 'tenant' + return _ProjectData(project_id, project_name, enabled, domain_id, + description, {project_key: response}, + {project_key: request}) + + def _get_group_data(self, name=None, domain_id=None, description=None): + group_id = uuid.uuid4().hex + name = name or self.getUniqueString('groupname') + domain_id 
= uuid.UUID(domain_id or uuid.uuid4().hex).hex + response = {'id': group_id, 'name': name, 'domain_id': domain_id} + request = {'name': name, 'domain_id': domain_id} + if description is not None: + response['description'] = description + request['description'] = description + + return _GroupData(group_id, name, domain_id, description, + {'group': response}, {'group': request}) + + def _get_user_data(self, name=None, password=None, **kwargs): + + name = name or self.getUniqueString('username') + password = password or self.getUniqueString('user_password') + user_id = uuid.uuid4().hex + + response = {'name': name, 'id': user_id} + request = {'name': name, 'password': password} + + if kwargs.get('domain_id'): + kwargs['domain_id'] = uuid.UUID(kwargs['domain_id']).hex + response['domain_id'] = kwargs.pop('domain_id') + request['domain_id'] = response['domain_id'] + + response['email'] = kwargs.pop('email', None) + request['email'] = response['email'] + + response['enabled'] = kwargs.pop('enabled', True) + request['enabled'] = response['enabled'] + + response['description'] = kwargs.pop('description', None) + if response['description']: + request['description'] = response['description'] + + self.assertIs(0, len(kwargs), message='extra key-word args received ' + 'on _get_user_data') + + return _UserData(user_id, password, name, response['email'], + response['description'], response.get('domain_id'), + response.get('enabled'), {'user': response}, + {'user': request}) + + def _get_domain_data(self, domain_name=None, description=None, + enabled=None): + domain_id = uuid.uuid4().hex + domain_name = domain_name or self.getUniqueString('domainName') + response = {'id': domain_id, 'name': domain_name} + request = {'name': domain_name} + if enabled is not None: + request['enabled'] = bool(enabled) + response['enabled'] = bool(enabled) + if description: + response['description'] = description + request['description'] = description + response.setdefault('enabled', True) + return _DomainData(domain_id, domain_name, description, + {'domain': response}, {'domain': request}) + + def _get_service_data(self, type=None, name=None, description=None, + enabled=True): + service_id = uuid.uuid4().hex + name = name or uuid.uuid4().hex + type = type or uuid.uuid4().hex + + response = {'id': service_id, 'name': name, 'type': type, + 'enabled': enabled} + if description is not None: + response['description'] = description + request = response.copy() + request.pop('id') + return _ServiceData(service_id, name, type, description, enabled, + {'service': response}, + {'OS-KSADM:service': response}, request) + + def _get_endpoint_v3_data(self, service_id=None, region=None, + url=None, interface=None, enabled=True): + endpoint_id = uuid.uuid4().hex + service_id = service_id or uuid.uuid4().hex + region = region or uuid.uuid4().hex + url = url or 'https://example.com/' + interface = interface or uuid.uuid4().hex + + response = {'id': endpoint_id, 'service_id': service_id, + 'region': region, 'interface': interface, + 'url': url, 'enabled': enabled} + request = response.copy() + request.pop('id') + response['region_id'] = response['region'] + return _EndpointDataV3(endpoint_id, service_id, interface, region, + url, enabled, {'endpoint': response}, + {'endpoint': request}) + + def _get_endpoint_v2_data(self, service_id=None, region=None, + public_url=None, admin_url=None, + internal_url=None): + endpoint_id = uuid.uuid4().hex + service_id = service_id or uuid.uuid4().hex + region = region or uuid.uuid4().hex + response = {'id': 
endpoint_id, 'service_id': service_id, + 'region': region} + v3_endpoints = {} + request = response.copy() + request.pop('id') + if admin_url: + response['adminURL'] = admin_url + v3_endpoints['admin'] = self._get_endpoint_v3_data( + service_id, region, public_url, interface='admin') + if internal_url: + response['internalURL'] = internal_url + v3_endpoints['internal'] = self._get_endpoint_v3_data( + service_id, region, internal_url, interface='internal') + if public_url: + response['publicURL'] = public_url + v3_endpoints['public'] = self._get_endpoint_v3_data( + service_id, region, public_url, interface='public') + request = response.copy() + request.pop('id') + for u in ('publicURL', 'internalURL', 'adminURL'): + if request.get(u): + request[u.lower()] = request.pop(u) + return _EndpointDataV2(endpoint_id, service_id, region, public_url, + internal_url, admin_url, v3_endpoints, + {'endpoint': response}, {'endpoint': request}) + + def _get_role_data(self, role_name=None): + role_id = uuid.uuid4().hex + role_name = role_name or uuid.uuid4().hex + request = {'name': role_name} + response = request.copy() + response['id'] = role_id + return _RoleData(role_id, role_name, {'role': response}, + {'role': request}) + + def use_keystone_v3(self, catalog='catalog-v3.json'): + self.adapter = self.useFixture(rm_fixture.Fixture()) + self.calls = [] + self._uri_registry.clear() + self.__do_register_uris([ + dict(method='GET', uri='https://identity.example.com/', + text=open(self.discovery_json, 'r').read()), + dict(method='POST', + uri='https://identity.example.com/v3/auth/tokens', + headers={ + 'X-Subject-Token': self.getUniqueString('KeystoneToken')}, + text=open(os.path.join( + self.fixtures_directory, catalog), 'r').read() + ), + ]) + self._make_test_cloud(identity_api_version='3') + + def use_keystone_v2(self): + self.adapter = self.useFixture(rm_fixture.Fixture()) + self.calls = [] + self._uri_registry.clear() + + self.__do_register_uris([ + dict(method='GET', uri='https://identity.example.com/', + text=open(self.discovery_json, 'r').read()), + dict(method='POST', uri='https://identity.example.com/v2.0/tokens', + text=open(os.path.join( + self.fixtures_directory, 'catalog-v2.json'), 'r').read() + ), + ]) + + self._make_test_cloud(cloud_name='_test_cloud_v2_', + identity_api_version='2.0') + + def _make_test_cloud(self, cloud_name='_test_cloud_', **kwargs): + test_cloud = os.environ.get('OPENSTACKSDK_OS_CLOUD', cloud_name) + self.cloud_config = self.config.get_one_cloud( + cloud=test_cloud, validate=True, **kwargs) + self.cloud = openstack.OpenStackCloud( + cloud_config=self.cloud_config, + log_inner_exceptions=True) + self.op_cloud = openstack.OperatorCloud( + cloud_config=self.cloud_config, + log_inner_exceptions=True) + + def get_glance_discovery_mock_dict( + self, image_version_json='image-version.json'): + discovery_fixture = os.path.join( + self.fixtures_directory, image_version_json) + return dict(method='GET', uri='https://image.example.com/', + status_code=300, + text=open(discovery_fixture, 'r').read()) + + def get_designate_discovery_mock_dict(self): + discovery_fixture = os.path.join( + self.fixtures_directory, "dns.json") + return dict(method='GET', uri="https://dns.example.com/", + text=open(discovery_fixture, 'r').read()) + + def get_ironic_discovery_mock_dict(self): + discovery_fixture = os.path.join( + self.fixtures_directory, "baremetal.json") + return dict(method='GET', uri="https://bare-metal.example.com/", + text=open(discovery_fixture, 'r').read()) + + def use_glance(self, 
image_version_json='image-version.json'): + # NOTE(notmorgan): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_glance is meant to be used during an + # actual test case, use .get_glance_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([ + self.get_glance_discovery_mock_dict(image_version_json)]) + + def use_designate(self): + # NOTE(slaweq): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_designate is meant to be used during an + # actual test case, use .get_designate_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([ + self.get_designate_discovery_mock_dict()]) + + def use_ironic(self): + # NOTE(TheJulia): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_ironic is meant to be used during an + # actual test case, use .get_ironic_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([ + self.get_ironic_discovery_mock_dict()]) + + def register_uris(self, uri_mock_list=None): + """Mock a list of URIs and responses via requests mock. + + This method may be called only once per test-case to avoid odd + and difficult to debug interactions. Discovery and Auth request mocking + happens separately from this method. + + :param uri_mock_list: List of dictionaries that template out what is + passed to requests_mock fixture's `register_uri`. + Format is: + {'method': , + 'uri': , + ... + } + + Common keys to pass in the dictionary: + * json: the json response (dict) + * status_code: the HTTP status (int) + * validate: The request body (dict) to + validate with assert_calls + all key-word arguments that are valid to send to + requests_mock are supported. + + This list should be in the order in which calls + are made. When `assert_calls` is executed, order + here will be validated. Duplicate URIs and + Methods are allowed and will be collapsed into a + single matcher. Each response will be returned + in order as the URI+Method is hit. + :type uri_mock_list: list + :return: None + """ + assert not self.__register_uris_called + self.__do_register_uris(uri_mock_list or []) + self.__register_uris_called = True + + def __do_register_uris(self, uri_mock_list=None): + for to_mock in uri_mock_list: + kw_params = {k: to_mock.pop(k) + for k in ('request_headers', 'complete_qs', + '_real_http') + if k in to_mock} + + method = to_mock.pop('method') + uri = to_mock.pop('uri') + # NOTE(notmorgan): make sure the delimiter is non-url-safe, in this + # case "|" is used so that the split can be a bit easier on + # maintainers of this code. 
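+            # For illustration only (this example is not produced by any
+            # call in this file as written): a mocked GET on
+            # https://compute.example.com/v2.1/servers with no extra
+            # matcher kwargs would be keyed as
+            #   "GET|https://compute.example.com/v2.1/servers|{}"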
+            key = '{method}|{uri}|{params}'.format(
+                method=method, uri=uri, params=kw_params)
+            validate = to_mock.pop('validate', {})
+            valid_keys = set(['json', 'headers', 'params'])
+            invalid_keys = set(validate.keys()) - valid_keys
+            if invalid_keys:
+                raise TypeError(
+                    "Invalid values passed to validate: {keys}".format(
+                        keys=invalid_keys))
+            headers = structures.CaseInsensitiveDict(to_mock.pop('headers',
+                                                                 {}))
+            if 'content-type' not in headers:
+                headers[u'content-type'] = 'application/json'
+
+            to_mock['headers'] = headers
+
+            self.calls += [
+                dict(
+                    method=method,
+                    url=uri, **validate)
+            ]
+            self._uri_registry.setdefault(
+                key, {'response_list': [], 'kw_params': kw_params})
+            if self._uri_registry[key]['kw_params'] != kw_params:
+                raise AssertionError(
+                    'PROGRAMMING ERROR: keyword params '
+                    'should be part of the uri_key and cannot change, '
+                    'it will affect the matcher in requests_mock. '
+                    '%(old)r != %(new)r' %
+                    {'old': self._uri_registry[key]['kw_params'],
+                     'new': kw_params})
+            self._uri_registry[key]['response_list'].append(to_mock)
+
+        for mocked, params in self._uri_registry.items():
+            mock_method, mock_uri, _ignored = mocked.split('|', 2)
+            self.adapter.register_uri(
+                mock_method, mock_uri, params['response_list'],
+                **params['kw_params'])
+
+    def assert_calls(self, stop_after=None, do_count=True):
+        for (x, (call, history)) in enumerate(
+                zip(self.calls, self.adapter.request_history)):
+            if stop_after and x > stop_after:
+                break
+
+            call_uri_parts = urllib.parse.urlparse(call['url'])
+            history_uri_parts = urllib.parse.urlparse(history.url)
+            self.assertEqual(
+                (call['method'], call_uri_parts.scheme, call_uri_parts.netloc,
+                 call_uri_parts.path, call_uri_parts.params,
+                 urllib.parse.parse_qs(call_uri_parts.query)),
+                (history.method, history_uri_parts.scheme,
+                 history_uri_parts.netloc, history_uri_parts.path,
+                 history_uri_parts.params,
+                 urllib.parse.parse_qs(history_uri_parts.query)),
+                ('REST mismatch on call %(index)d. Expected %(call)r. '
+                 'Got %(history)r. '
+                 "NOTE: query string order differences won't cause mismatch" %
+                 {
+                     'index': x,
+                     'call': '{method} {url}'.format(method=call['method'],
+                                                     url=call['url']),
+                     'history': '{method} {url}'.format(
+                         method=history.method,
+                         url=history.url)})
+            )
+            if 'json' in call:
+                self.assertEqual(
+                    call['json'], history.json(),
+                    'json content mismatch in call {index}'.format(index=x))
+            # the headers check isn't exhaustive - it makes sure a specific
+            # header or headers are there, not that they are the only
+            # headers
+            if 'headers' in call:
+                for key, value in call['headers'].items():
+                    self.assertEqual(
+                        value, history.headers[key],
+                        'header mismatch in call {index}'.format(index=x))
+        if do_count:
+            self.assertEqual(
+                len(self.calls), len(self.adapter.request_history))
+
+
+class IronicTestCase(RequestsMockTestCase):
+
+    def setUp(self):
+        super(IronicTestCase, self).setUp()
+        self.use_ironic()
+        self.uuid = str(uuid.uuid4())
+        self.name = self.getUniqueString('name')
+
+    def get_mock_url(self, resource=None, append=None, qs_elements=None):
+        return super(IronicTestCase, self).get_mock_url(
+            service_type='baremetal', interface='public', resource=resource,
+            append=append, base_url_append='v1', qs_elements=qs_elements)
diff --git a/openstack/tests/unit/cloud/__init__.py b/openstack/tests/unit/cloud/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/openstack/tests/unit/cloud/test__adapter.py b/openstack/tests/unit/cloud/test__adapter.py
new file mode 100644
index 000000000..9722b02c8
--- /dev/null
+++ b/openstack/tests/unit/cloud/test__adapter.py
@@ -0,0 +1,38 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from testscenarios import load_tests_apply_scenarios as load_tests  # noqa
+
+from openstack.cloud import _adapter
+from openstack.tests.unit import base
+
+
+class TestExtractName(base.TestCase):
+
+    scenarios = [
+        ('slash_servers_bare', dict(url='/servers', parts=['servers'])),
+        ('slash_servers_arg', dict(url='/servers/1', parts=['servers'])),
+        ('servers_bare', dict(url='servers', parts=['servers'])),
+        ('servers_arg', dict(url='servers/1', parts=['servers'])),
+        ('networks_bare', dict(url='/v2.0/networks', parts=['networks'])),
+        ('networks_arg', dict(url='/v2.0/networks/1', parts=['networks'])),
+        ('tokens', dict(url='/v3/tokens', parts=['tokens'])),
+        ('discovery', dict(url='/', parts=['discovery'])),
+        ('secgroups', dict(
+            url='/servers/1/os-security-groups',
+            parts=['servers', 'os-security-groups'])),
+    ]
+
+    def test_extract_name(self):
+
+        results = _adapter.extract_name(self.url)
+        self.assertEqual(self.parts, results)
diff --git a/openstack/tests/unit/cloud/test__utils.py b/openstack/tests/unit/cloud/test__utils.py
new file mode 100644
index 000000000..ec7ac6150
--- /dev/null
+++ b/openstack/tests/unit/cloud/test__utils.py
@@ -0,0 +1,380 @@
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random +import string +import tempfile +from uuid import uuid4 + +import mock +import testtools + +from openstack.cloud import _utils +from openstack.cloud import exc +from openstack.tests.unit import base + + +RANGE_DATA = [ + dict(id=1, key1=1, key2=5), + dict(id=2, key1=1, key2=20), + dict(id=3, key1=2, key2=10), + dict(id=4, key1=2, key2=30), + dict(id=5, key1=3, key2=40), + dict(id=6, key1=3, key2=40), +] + + +class TestUtils(base.TestCase): + + def test__filter_list_name_or_id(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto') + data = [el1, el2] + ret = _utils._filter_list(data, 'donald', None) + self.assertEqual([el1], ret) + + def test__filter_list_name_or_id_special(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto[2017-01-10]', None) + self.assertEqual([el2], ret) + + def test__filter_list_name_or_id_partial_bad(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto[2017-01]', None) + self.assertEqual([], ret) + + def test__filter_list_name_or_id_partial_glob(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto*', None) + self.assertEqual([el2], ret) + + def test__filter_list_name_or_id_non_glob_glob(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto', None) + self.assertEqual([], ret) + + def test__filter_list_name_or_id_glob(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto') + el3 = dict(id=200, name='pluto-2') + data = [el1, el2, el3] + ret = _utils._filter_list(data, 'pluto*', None) + self.assertEqual([el2, el3], ret) + + def test__filter_list_name_or_id_glob_not_found(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto') + el3 = dict(id=200, name='pluto-2') + data = [el1, el2, el3] + ret = _utils._filter_list(data, 'q*', None) + self.assertEqual([], ret) + + def test__filter_list_unicode(self): + el1 = dict(id=100, name=u'中文', last='duck', + other=dict(category='duck', financial=dict(status='poor'))) + el2 = dict(id=200, name=u'中文', last='trump', + other=dict(category='human', financial=dict(status='rich'))) + el3 = dict(id=300, name='donald', last='ronald mac', + other=dict(category='clown', financial=dict(status='rich'))) + data = [el1, el2, el3] + ret = _utils._filter_list( + data, u'中文', + {'other': { + 'financial': {'status': 'rich'} + }}) + self.assertEqual([el2], ret) + + def test__filter_list_filter(self): + el1 = dict(id=100, name='donald', other='duck') + el2 = dict(id=200, name='donald', other='trump') + data = [el1, el2] + ret = _utils._filter_list(data, 'donald', {'other': 'duck'}) + self.assertEqual([el1], ret) + + def test__filter_list_filter_jmespath(self): + el1 = dict(id=100, name='donald', other='duck') + el2 = dict(id=200, name='donald', other='trump') + data = [el1, el2] 
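+        # A string (rather than dict) filter is treated as a jmespath
+        # expression applied to the matched results; this one keeps only
+        # entries whose 'other' field equals 'duck'.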
+ ret = _utils._filter_list(data, 'donald', "[?other == `duck`]") + self.assertEqual([el1], ret) + + def test__filter_list_dict1(self): + el1 = dict(id=100, name='donald', last='duck', + other=dict(category='duck')) + el2 = dict(id=200, name='donald', last='trump', + other=dict(category='human')) + el3 = dict(id=300, name='donald', last='ronald mac', + other=dict(category='clown')) + data = [el1, el2, el3] + ret = _utils._filter_list( + data, 'donald', {'other': {'category': 'clown'}}) + self.assertEqual([el3], ret) + + def test__filter_list_dict2(self): + el1 = dict(id=100, name='donald', last='duck', + other=dict(category='duck', financial=dict(status='poor'))) + el2 = dict(id=200, name='donald', last='trump', + other=dict(category='human', financial=dict(status='rich'))) + el3 = dict(id=300, name='donald', last='ronald mac', + other=dict(category='clown', financial=dict(status='rich'))) + data = [el1, el2, el3] + ret = _utils._filter_list( + data, 'donald', + {'other': { + 'financial': {'status': 'rich'} + }}) + self.assertEqual([el2, el3], ret) + + def test_safe_dict_min_ints(self): + """Test integer comparison""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_strs(self): + """Test integer as strings comparison""" + data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_None(self): + """Test None values""" + data = [{'f1': 3}, {'f1': None}, {'f1': 1}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_key_missing(self): + """Test missing key for an entry still works""" + data = [{'f1': 3}, {'x': 2}, {'f1': 1}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_key_not_found(self): + """Test key not found in any elements returns None""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_min('doesnotexist', data) + self.assertIsNone(retval) + + def test_safe_dict_min_not_int(self): + """Test non-integer key value raises OSCE""" + data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}] + with testtools.ExpectedException( + exc.OpenStackCloudException, + "Search for minimum value failed. 
" + "Value for f1 is not an integer: aaa" + ): + _utils.safe_dict_min('f1', data) + + def test_safe_dict_max_ints(self): + """Test integer comparison""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_strs(self): + """Test integer as strings comparison""" + data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_None(self): + """Test None values""" + data = [{'f1': 3}, {'f1': None}, {'f1': 1}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_key_missing(self): + """Test missing key for an entry still works""" + data = [{'f1': 3}, {'x': 2}, {'f1': 1}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_key_not_found(self): + """Test key not found in any elements returns None""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_max('doesnotexist', data) + self.assertIsNone(retval) + + def test_safe_dict_max_not_int(self): + """Test non-integer key value raises OSCE""" + data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}] + with testtools.ExpectedException( + exc.OpenStackCloudException, + "Search for maximum value failed. " + "Value for f1 is not an integer: aaa" + ): + _utils.safe_dict_max('f1', data) + + def test_parse_range_None(self): + self.assertIsNone(_utils.parse_range(None)) + + def test_parse_range_invalid(self): + self.assertIsNone(_utils.parse_range("1024") + self.assertIsInstance(retval, tuple) + self.assertEqual(">", retval[0]) + self.assertEqual(1024, retval[1]) + + def test_parse_range_le(self): + retval = _utils.parse_range("<=1024") + self.assertIsInstance(retval, tuple) + self.assertEqual("<=", retval[0]) + self.assertEqual(1024, retval[1]) + + def test_parse_range_ge(self): + retval = _utils.parse_range(">=1024") + self.assertIsInstance(retval, tuple) + self.assertEqual(">=", retval[0]) + self.assertEqual(1024, retval[1]) + + def test_range_filter_min(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "min") + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual(RANGE_DATA[:2], retval) + + def test_range_filter_max(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "max") + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual(RANGE_DATA[-2:], retval) + + def test_range_filter_range(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "<3") + self.assertIsInstance(retval, list) + self.assertEqual(4, len(retval)) + self.assertEqual(RANGE_DATA[:4], retval) + + def test_range_filter_exact(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "2") + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual(RANGE_DATA[2:4], retval) + + def test_range_filter_invalid_int(self): + with testtools.ExpectedException( + exc.OpenStackCloudException, + "Invalid range value: <1A0" + ): + _utils.range_filter(RANGE_DATA, "key1", "<1A0") + + def test_range_filter_invalid_op(self): + with testtools.ExpectedException( + exc.OpenStackCloudException, + "Invalid range value: <>100" + ): + _utils.range_filter(RANGE_DATA, "key1", "<>100") + + def test_file_segment(self): + file_size = 4200 + content = ''.join(random.SystemRandom().choice( + string.ascii_uppercase + string.digits) + for _ in range(file_size)).encode('latin-1') + self.imagefile = 
tempfile.NamedTemporaryFile(delete=False) + self.imagefile.write(content) + self.imagefile.close() + + segments = self.cloud._get_file_segments( + endpoint='test_container/test_image', + filename=self.imagefile.name, + file_size=file_size, + segment_size=1000) + self.assertEqual(len(segments), 5) + segment_content = b'' + for (index, (name, segment)) in enumerate(segments.items()): + self.assertEqual( + 'test_container/test_image/{index:0>6}'.format(index=index), + name) + segment_content += segment.read() + self.assertEqual(content, segment_content) + + def test_get_entity_pass_object(self): + obj = mock.Mock(id=uuid4().hex) + self.cloud.use_direct_get = True + self.assertEqual(obj, _utils._get_entity(self.cloud, '', obj, {})) + + def test_get_entity_no_use_direct_get(self): + # test we are defaulting to the search_ methods + # if the use_direct_get flag is set to False(default). + uuid = uuid4().hex + resource = 'network' + func = 'search_%ss' % resource + filters = {} + with mock.patch.object(self.cloud, func) as search: + _utils._get_entity(self.cloud, resource, uuid, filters) + search.assert_called_once_with(uuid, filters) + + def test_get_entity_no_uuid_like(self): + # test we are defaulting to the search_ methods + # if the name_or_id param is a name(string) but not a uuid. + self.cloud.use_direct_get = True + name = 'name_no_uuid' + resource = 'network' + func = 'search_%ss' % resource + filters = {} + with mock.patch.object(self.cloud, func) as search: + _utils._get_entity(self.cloud, resource, name, filters) + search.assert_called_once_with(name, filters) + + def test_get_entity_pass_uuid(self): + uuid = uuid4().hex + self.cloud.use_direct_get = True + resources = ['flavor', 'image', 'volume', 'network', + 'subnet', 'port', 'floating_ip', 'security_group'] + for r in resources: + f = 'get_%s_by_id' % r + with mock.patch.object(self.cloud, f) as get: + _utils._get_entity(self.cloud, r, uuid, {}) + get.assert_called_once_with(uuid) + + def test_get_entity_pass_search_methods(self): + self.cloud.use_direct_get = True + resources = ['flavor', 'image', 'volume', 'network', + 'subnet', 'port', 'floating_ip', 'security_group'] + filters = {} + name = 'name_no_uuid' + for r in resources: + f = 'search_%ss' % r + with mock.patch.object(self.cloud, f) as search: + _utils._get_entity(self.cloud, r, name, {}) + search.assert_called_once_with(name, filters) + + def test_get_entity_get_and_search(self): + resources = ['flavor', 'image', 'volume', 'network', + 'subnet', 'port', 'floating_ip', 'security_group'] + for r in resources: + self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % r)) + self.assertTrue(hasattr(self.cloud, 'search_%ss' % r)) diff --git a/openstack/tests/unit/cloud/test_aggregate.py b/openstack/tests/unit/cloud/test_aggregate.py new file mode 100644 index 000000000..6f0e9513d --- /dev/null +++ b/openstack/tests/unit/cloud/test_aggregate.py @@ -0,0 +1,182 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
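+
+# NOTE: Every test below follows the same pattern: register the exact
+# sequence of HTTP calls the method under test should make, invoke the
+# method, then verify the recorded requests with assert_calls(). A minimal
+# sketch of that shape (the GET below is illustrative only, not one of the
+# real tests in this file):
+#
+#     self.register_uris([
+#         dict(method='GET',
+#              uri=self.get_mock_url(
+#                  'compute', 'public', append=['os-aggregates']),
+#              json={'aggregates': []}),
+#     ])
+#     self.op_cloud.list_aggregates()
+#     self.assert_calls()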
+ +from openstack.tests.unit import base +from openstack.tests import fakes + + +class TestAggregate(base.RequestsMockTestCase): + + def setUp(self): + super(TestAggregate, self).setUp() + self.aggregate_name = self.getUniqueString('aggregate') + self.fake_aggregate = fakes.make_fake_aggregate(1, self.aggregate_name) + + def test_create_aggregate(self): + create_aggregate = self.fake_aggregate.copy() + del create_aggregate['metadata'] + del create_aggregate['hosts'] + + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregate': create_aggregate}, + validate=dict(json={ + 'aggregate': { + 'name': self.aggregate_name, + 'availability_zone': None, + }})), + ]) + self.op_cloud.create_aggregate(name=self.aggregate_name) + + self.assert_calls() + + def test_create_aggregate_with_az(self): + availability_zone = 'az1' + az_aggregate = fakes.make_fake_aggregate( + 1, self.aggregate_name, availability_zone=availability_zone) + + create_aggregate = az_aggregate.copy() + del create_aggregate['metadata'] + del create_aggregate['hosts'] + + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregate': create_aggregate}, + validate=dict(json={ + 'aggregate': { + 'name': self.aggregate_name, + 'availability_zone': availability_zone, + }})), + ]) + + self.op_cloud.create_aggregate( + name=self.aggregate_name, availability_zone=availability_zone) + + self.assert_calls() + + def test_delete_aggregate(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregates': [self.fake_aggregate]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'])), + ]) + + self.assertTrue(self.op_cloud.delete_aggregate('1')) + + self.assert_calls() + + def test_update_aggregate_set_az(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregates': [self.fake_aggregate]}), + dict(method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1']), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={ + 'aggregate': { + 'availability_zone': 'az', + }})), + ]) + + self.op_cloud.update_aggregate(1, availability_zone='az') + + self.assert_calls() + + def test_update_aggregate_unset_az(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregates': [self.fake_aggregate]}), + dict(method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1']), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={ + 'aggregate': { + 'availability_zone': None, + }})), + ]) + + self.op_cloud.update_aggregate(1, availability_zone=None) + + self.assert_calls() + + def test_set_aggregate_metadata(self): + metadata = {'key': 'value'} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregates': [self.fake_aggregate]}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-aggregates', '1', 'action']), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={'set_metadata': {'metadata': metadata}})), + ]) + self.op_cloud.set_aggregate_metadata('1', metadata) + + self.assert_calls() + + def 
test_add_host_to_aggregate(self): + hostname = 'host1' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregates': [self.fake_aggregate]}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-aggregates', '1', 'action']), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={'add_host': {'host': hostname}})), + ]) + self.op_cloud.add_host_to_aggregate('1', hostname) + + self.assert_calls() + + def test_remove_host_from_aggregate(self): + hostname = 'host1' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates']), + json={'aggregates': [self.fake_aggregate]}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-aggregates', '1', 'action']), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={'remove_host': {'host': hostname}})), + ]) + self.op_cloud.remove_host_from_aggregate('1', hostname) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_availability_zones.py b/openstack/tests/unit/cloud/test_availability_zones.py new file mode 100644 index 000000000..965c41470 --- /dev/null +++ b/openstack/tests/unit/cloud/test_availability_zones.py @@ -0,0 +1,79 @@ +# Copyright (c) 2017 Red Hat, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
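+
+# The _fake_zone_list fixture below mirrors the shape of the compute
+# os-availability-zone response: one entry per zone, each carrying its
+# hosts and an 'available' state flag.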
+ + +from openstack.tests.unit import base +from openstack.tests import fakes + + +_fake_zone_list = { + "availabilityZoneInfo": [ + { + "hosts": None, + "zoneName": "az1", + "zoneState": { + "available": True + } + }, + { + "hosts": None, + "zoneName": "nova", + "zoneState": { + "available": False + } + } + ] +} + + +class TestAvailabilityZoneNames(base.RequestsMockTestCase): + + def test_list_availability_zone_names(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-availability-zone'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json=_fake_zone_list), + ]) + + self.assertEqual( + ['az1'], self.cloud.list_availability_zone_names()) + + self.assert_calls() + + def test_unauthorized_availability_zone_names(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-availability-zone'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + status_code=403), + ]) + + self.assertEqual( + [], self.cloud.list_availability_zone_names()) + + self.assert_calls() + + def test_list_all_availability_zone_names(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-availability-zone'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json=_fake_zone_list), + ]) + + self.assertEqual( + ['az1', 'nova'], + self.cloud.list_availability_zone_names(unavailable=True)) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_baremetal_node.py b/openstack/tests/unit/cloud/test_baremetal_node.py new file mode 100644 index 000000000..837b51917 --- /dev/null +++ b/openstack/tests/unit/cloud/test_baremetal_node.py @@ -0,0 +1,915 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +test_baremetal_node +---------------------------------- + +Tests for baremetal node related operations +""" + +import uuid + +from testscenarios import load_tests_apply_scenarios as load_tests # noqa + +from openstack.cloud import exc +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestBaremetalNode(base.IronicTestCase): + + def setUp(self): + super(TestBaremetalNode, self).setUp() + # TODO(shade) Fix this when we get ironic update to REST + self.skipTest("Ironic operations not supported yet") + self.fake_baremetal_node = fakes.make_fake_machine( + self.name, self.uuid) + + def test_list_machines(self): + fake_baremetal_two = fakes.make_fake_machine('two', str(uuid.uuid4())) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='nodes'), + json={'nodes': [self.fake_baremetal_node, + fake_baremetal_two]}), + ]) + + machines = self.op_cloud.list_machines() + self.assertEqual(2, len(machines)) + self.assertEqual(self.fake_baremetal_node, machines[0]) + self.assert_calls() + + def test_get_machine(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + + machine = self.op_cloud.get_machine(self.fake_baremetal_node['uuid']) + self.assertEqual(machine['uuid'], + self.fake_baremetal_node['uuid']) + self.assert_calls() + + def test_get_machine_by_mac(self): + mac_address = '00:01:02:03:04:05' + url_address = 'detail?address=%s' % mac_address + node_uuid = self.fake_baremetal_node['uuid'] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='ports', + append=[url_address]), + json={'ports': [{'address': mac_address, + 'node_uuid': node_uuid}]}), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + + machine = self.op_cloud.get_machine_by_mac(mac_address) + self.assertEqual(machine['uuid'], + self.fake_baremetal_node['uuid']) + self.assert_calls() + + def test_validate_node(self): + # NOTE(TheJulia): Note: These are only the interfaces + # that are validated, and both must be true for an + # exception to not be raised. + # This should be fixed at some point, as some interfaces + # are important in some cases and should be validated, + # such as storage. + validate_return = { + 'deploy': { + 'result': True, + }, + 'power': { + 'result': True, + }, + 'foo': { + 'result': False, + }} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'validate']), + json=validate_return), + ]) + self.op_cloud.validate_node(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + # FIXME(TheJulia): So, this doesn't presently fail, but should fail. + # Placing the test here, so we can sort out the issue in the actual + # method later. 
+ # def test_validate_node_raises_exception(self): + # validate_return = { + # 'deploy': { + # 'result': False, + # 'reason': 'error!', + # }, + # 'power': { + # 'result': False, + # 'reason': 'meow!', + # }, + # 'foo': { + # 'result': True + # }} + # self.register_uris([ + # dict(method='GET', + # uri=self.get_mock_url( + # resource='nodes', + # append=[self.fake_baremetal_node['uuid'], + # 'validate']), + # json=validate_return), + # ]) + # self.assertRaises( + # Exception, + # self.op_cloud.validate_node, + # self.fake_baremetal_node['uuid']) + # + # self.assert_calls() + + def test_patch_machine(self): + test_patch = [{ + 'op': 'remove', + 'path': '/instance_info'}] + self.fake_baremetal_node['instance_info'] = {} + self.register_uris([ + dict(method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node, + validate=dict(json=test_patch)), + ]) + self.op_cloud.patch_machine(self.fake_baremetal_node['uuid'], + test_patch) + + self.assert_calls() + + def test_set_node_instance_info(self): + test_patch = [{ + 'op': 'add', + 'path': '/foo', + 'value': 'bar'}] + self.register_uris([ + dict(method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node, + validate=dict(json=test_patch)), + ]) + self.op_cloud.set_node_instance_info( + self.fake_baremetal_node['uuid'], test_patch) + + self.assert_calls() + + def test_purge_node_instance_info(self): + test_patch = [{ + 'op': 'remove', + 'path': '/instance_info'}] + self.fake_baremetal_node['instance_info'] = {} + self.register_uris([ + dict(method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node, + validate=dict(json=test_patch)), + ]) + self.op_cloud.purge_node_instance_info( + self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_fail_active(self): + self.fake_baremetal_node['provision_state'] = 'active' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.op_cloud.inspect_machine, + self.fake_baremetal_node['uuid'], + wait=True, + timeout=1) + + self.assert_calls() + + def test_inspect_machine_failed(self): + inspecting_node = self.fake_baremetal_node.copy() + self.fake_baremetal_node['provision_state'] = 'inspect failed' + self.fake_baremetal_node['last_error'] = 'kaboom!' 
+ inspecting_node['provision_state'] = 'inspecting' + self.register_uris([ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'inspect'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=inspecting_node) + ]) + + self.op_cloud.inspect_machine(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_manageable(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + self.register_uris([ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'inspect'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=inspecting_node), + ]) + self.op_cloud.inspect_machine(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_available(self): + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + manageable_node = self.fake_baremetal_node.copy() + manageable_node['provision_state'] = 'manageable' + + self.register_uris([ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=available_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'manage'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=manageable_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'inspect'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=manageable_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'provide'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=available_node), + ]) + self.op_cloud.inspect_machine(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_available_wait(self): + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + manageable_node = self.fake_baremetal_node.copy() + manageable_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + + self.register_uris([ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=available_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + 
append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'manage'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=available_node), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=manageable_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'inspect'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=inspecting_node), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=manageable_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'provide'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=available_node), + ]) + self.op_cloud.inspect_machine(self.fake_baremetal_node['uuid'], + wait=True, timeout=1) + + self.assert_calls() + + def test_inspect_machine_wait(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + self.register_uris([ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'inspect'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=inspecting_node), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=inspecting_node), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + self.op_cloud.inspect_machine(self.fake_baremetal_node['uuid'], + wait=True, timeout=1) + + self.assert_calls() + + def test_inspect_machine_inspect_failed(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + inspect_fail_node = self.fake_baremetal_node.copy() + inspect_fail_node['provision_state'] = 'inspect failed' + inspect_fail_node['last_error'] = 'Earth Imploded' + self.register_uris([ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'inspect'})), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=inspecting_node), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=inspect_fail_node), + ]) + self.assertRaises(exc.OpenStackCloudException, + self.op_cloud.inspect_machine, + 
self.fake_baremetal_node['uuid'],
+                          wait=True, timeout=1)
+
+        self.assert_calls()
+
+    def test_set_machine_maintenance_state(self):
+        self.register_uris([
+            dict(
+                method='PUT',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'maintenance']),
+                validate=dict(json={'reason': 'no reason'})),
+        ])
+        self.op_cloud.set_machine_maintenance_state(
+            self.fake_baremetal_node['uuid'], True, reason='no reason')
+
+        self.assert_calls()
+
+    def test_set_machine_maintenance_state_false(self):
+        self.register_uris([
+            dict(
+                method='DELETE',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'maintenance'])),
+        ])
+        self.op_cloud.set_machine_maintenance_state(
+            self.fake_baremetal_node['uuid'], False)
+
+        self.assert_calls()
+
+    def test_remove_machine_from_maintenance(self):
+        self.register_uris([
+            dict(
+                method='DELETE',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'maintenance'])),
+        ])
+        self.op_cloud.remove_machine_from_maintenance(
+            self.fake_baremetal_node['uuid'])
+
+        self.assert_calls()
+
+    def test_set_machine_power_on(self):
+        self.register_uris([
+            dict(
+                method='PUT',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'states', 'power']),
+                validate=dict(json={'target': 'power on'})),
+        ])
+        return_value = self.op_cloud.set_machine_power_on(
+            self.fake_baremetal_node['uuid'])
+        self.assertIsNone(return_value)
+
+        self.assert_calls()
+
+    def test_set_machine_power_off(self):
+        self.register_uris([
+            dict(
+                method='PUT',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'states', 'power']),
+                validate=dict(json={'target': 'power off'})),
+        ])
+        return_value = self.op_cloud.set_machine_power_off(
+            self.fake_baremetal_node['uuid'])
+        self.assertIsNone(return_value)
+
+        self.assert_calls()
+
+    def test_set_machine_power_reboot(self):
+        self.register_uris([
+            dict(
+                method='PUT',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'states', 'power']),
+                validate=dict(json={'target': 'rebooting'})),
+        ])
+        return_value = self.op_cloud.set_machine_power_reboot(
+            self.fake_baremetal_node['uuid'])
+        self.assertIsNone(return_value)
+
+        self.assert_calls()
+
+    def test_set_machine_power_reboot_failure(self):
+        self.register_uris([
+            dict(
+                method='PUT',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'states', 'power']),
+                status_code=400,
+                json={'error': 'invalid'},
+                validate=dict(json={'target': 'rebooting'})),
+        ])
+        self.assertRaises(exc.OpenStackCloudException,
+                          self.op_cloud.set_machine_power_reboot,
+                          self.fake_baremetal_node['uuid'])
+
+        self.assert_calls()
+
+    def test_node_set_provision_state(self):
+        deploy_node = self.fake_baremetal_node.copy()
+        deploy_node['provision_state'] = 'deploying'
+        active_node = self.fake_baremetal_node.copy()
+        active_node['provision_state'] = 'active'
+        self.register_uris([
+            dict(
+                method='PUT',
+                uri=self.get_mock_url(
+                    resource='nodes',
+                    append=[self.fake_baremetal_node['uuid'],
+                            'states', 'provision']),
+                validate=dict(json={'target': 'active',
+                                    'configdrive': 'http://host/file'})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     resource='nodes',
+                     append=[self.fake_baremetal_node['uuid']]),
+                 json=self.fake_baremetal_node),
+        ])
+        self.op_cloud.node_set_provision_state(
+            self.fake_baremetal_node['uuid'],
+            'active',
+            configdrive='http://host/file')
+
+ self.assert_calls() + + def test_node_set_provision_state_wait_timeout(self): + deploy_node = self.fake_baremetal_node.copy() + deploy_node['provision_state'] = 'deploying' + active_node = self.fake_baremetal_node.copy() + active_node['provision_state'] = 'active' + self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris([ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'active'})), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=deploy_node), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=active_node), + ]) + return_value = self.op_cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], + 'active', + wait=True) + + self.assertEqual(active_node, return_value) + self.assert_calls() + + def test_node_set_provision_state_wait_timeout_fails(self): + # Intentionally time out. + self.register_uris([ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'active'})), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + + self.assertRaises( + exc.OpenStackCloudException, + self.op_cloud.node_set_provision_state, + self.fake_baremetal_node['uuid'], + 'active', + wait=True, + timeout=0.001) + + self.assert_calls() + + def test_node_set_provision_state_wait_success(self): + self.fake_baremetal_node['provision_state'] = 'active' + self.register_uris([ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'active'})), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + + return_value = self.op_cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], + 'active', + wait=True) + + self.assertEqual(self.fake_baremetal_node, return_value) + self.assert_calls() + + def test_node_set_provision_state_wait_failure_cases(self): + self.fake_baremetal_node['provision_state'] = 'foo failed' + self.register_uris([ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'active'})), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + + self.assertRaises( + exc.OpenStackCloudException, + self.op_cloud.node_set_provision_state, + self.fake_baremetal_node['uuid'], + 'active', + wait=True, + timeout=300) + + self.assert_calls() + + def test_node_set_provision_state_wait_provide(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + self.register_uris([ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + 
validate=dict(json={'target': 'provide'})), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=available_node), + ]) + return_value = self.op_cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], + 'provide', + wait=True) + + self.assertEqual(available_node, return_value) + self.assert_calls() + + def test_activate_node(self): + self.fake_baremetal_node['provision_state'] = 'active' + self.register_uris([ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'active', + 'configdrive': 'http://host/file'})), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + return_value = self.op_cloud.activate_node( + self.fake_baremetal_node['uuid'], + configdrive='http://host/file', + wait=True) + + self.assertIsNone(return_value) + self.assert_calls() + + def test_deactivate_node(self): + self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris([ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], + 'states', 'provision']), + validate=dict(json={'target': 'deleted'})), + dict(method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + return_value = self.op_cloud.deactivate_node( + self.fake_baremetal_node['uuid'], + wait=True) + + self.assertIsNone(return_value) + self.assert_calls() + + def test_update_machine_patch_no_action(self): + self.register_uris([dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ]) + # NOTE(TheJulia): This is just testing mechanics. + update_dict = self.op_cloud.update_machine( + self.fake_baremetal_node['uuid']) + self.assertIsNone(update_dict['changes']) + self.assertDictEqual(self.fake_baremetal_node, update_dict['node']) + + self.assert_calls() + + +class TestUpdateMachinePatch(base.IronicTestCase): + # NOTE(TheJulia): As appears, and mordred describes, + # this class utilizes black magic, which ultimately + # results in additional test runs being executed with + # the scenario name appended. Useful for lots of + # variables that need to be tested. + + def setUp(self): + super(TestUpdateMachinePatch, self).setUp() + # TODO(shade) Fix this when we get ironic update to REST + self.skipTest("Ironic operations not supported yet") + self.fake_baremetal_node = fakes.make_fake_machine( + self.name, self.uuid) + + def test_update_machine_patch(self): + # The model has evolved over time, create the field if + # we don't already have it. 
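+        # testscenarios runs this method once per entry in the scenarios
+        # list at the bottom of this class; e.g. the ('driver_changed', ...)
+        # entry runs it with self.field_name = 'driver' and
+        # self.changed = True.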
+ if self.field_name not in self.fake_baremetal_node: + self.fake_baremetal_node[self.field_name] = None + value_to_send = self.fake_baremetal_node[self.field_name] + if self.changed: + value_to_send = 'meow' + uris = [dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node), + ] + if self.changed: + test_patch = [{ + 'op': 'replace', + 'path': '/' + self.field_name, + 'value': 'meow'}] + uris.append( + dict( + method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']]), + json=self.fake_baremetal_node, + validate=dict(json=test_patch))) + + self.register_uris(uris) + + call_args = {self.field_name: value_to_send} + update_dict = self.op_cloud.update_machine( + self.fake_baremetal_node['uuid'], **call_args) + + if not self.changed: + self.assertIsNone(update_dict['changes']) + self.assertDictEqual(self.fake_baremetal_node, update_dict['node']) + + self.assert_calls() + + scenarios = [ + ('chassis_uuid', dict(field_name='chassis_uuid', changed=False)), + ('chassis_uuid_changed', + dict(field_name='chassis_uuid', changed=True)), + ('driver', dict(field_name='driver', changed=False)), + ('driver_changed', dict(field_name='driver', changed=True)), + ('driver_info', dict(field_name='driver_info', changed=False)), + ('driver_info_changed', dict(field_name='driver_info', changed=True)), + ('instance_info', dict(field_name='instance_info', changed=False)), + ('instance_info_changed', + dict(field_name='instance_info', changed=True)), + ('instance_uuid', dict(field_name='instance_uuid', changed=False)), + ('instance_uuid_changed', + dict(field_name='instance_uuid', changed=True)), + ('name', dict(field_name='name', changed=False)), + ('name_changed', dict(field_name='name', changed=True)), + ('properties', dict(field_name='properties', changed=False)), + ('properties_changed', dict(field_name='properties', changed=True)) + ] diff --git a/openstack/tests/unit/cloud/test_caching.py b/openstack/tests/unit/cloud/test_caching.py new file mode 100644 index 000000000..aa26f11eb --- /dev/null +++ b/openstack/tests/unit/cloud/test_caching.py @@ -0,0 +1,561 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
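+
+# NOTE: The tests below exercise the caching layer enabled by the
+# 'clouds_cache.yaml' config fixture: repeated list_* calls are served from
+# cache until .invalidate() is called or a mutating call (such as
+# create_volume) invalidates the cached entry.
+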
+import concurrent +import time + +import mock +import munch +import testtools + +import openstack +from openstack.cloud import exc +from openstack.cloud import meta +from openstack.tests import fakes +from openstack.tests.unit import base + + +# Mock out the gettext function so that the task schema can be copypasta +def _(msg): + return msg + + +_TASK_PROPERTIES = { + "id": { + "description": _("An identifier for the task"), + "pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' + '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), + "type": "string" + }, + "type": { + "description": _("The type of task represented by this content"), + "enum": [ + "import", + ], + "type": "string" + }, + "status": { + "description": _("The current status of this task"), + "enum": [ + "pending", + "processing", + "success", + "failure" + ], + "type": "string" + }, + "input": { + "description": _("The parameters required by task, JSON blob"), + "type": ["null", "object"], + }, + "result": { + "description": _("The result of current task, JSON blob"), + "type": ["null", "object"], + }, + "owner": { + "description": _("An identifier for the owner of this task"), + "type": "string" + }, + "message": { + "description": _("Human-readable informative message only included" + " when appropriate (usually on failure)"), + "type": "string", + }, + "expires_at": { + "description": _("Datetime when this resource would be" + " subject to removal"), + "type": ["null", "string"] + }, + "created_at": { + "description": _("Datetime when this resource was created"), + "type": "string" + }, + "updated_at": { + "description": _("Datetime when this resource was updated"), + "type": "string" + }, + 'self': {'type': 'string'}, + 'schema': {'type': 'string'} +} +_TASK_SCHEMA = dict( + name='Task', properties=_TASK_PROPERTIES, + additionalProperties=False, +) + + +class TestMemoryCache(base.RequestsMockTestCase): + + def setUp(self): + super(TestMemoryCache, self).setUp( + cloud_config_fixture='clouds_cache.yaml') + + def _image_dict(self, fake_image): + return self.cloud._normalize_image(meta.obj_to_munch(fake_image)) + + def _munch_images(self, fake_image): + return self.cloud._normalize_images([fake_image]) + + def test_openstack_cloud(self): + self.assertIsInstance(self.cloud, openstack.OpenStackCloud) + + def test_list_projects_v3(self): + project_one = self._get_project_data() + project_two = self._get_project_data() + project_list = [project_one, project_two] + + first_response = {'projects': [project_one.json_response['project']]} + second_response = {'projects': [p.json_response['project'] + for p in project_list]} + + mock_uri = self.get_mock_url( + service_type='identity', interface='admin', resource='projects', + base_url_append='v3') + + self.register_uris([ + dict(method='GET', uri=mock_uri, status_code=200, + json=first_response), + dict(method='GET', uri=mock_uri, status_code=200, + json=second_response)]) + + self.assertEqual( + self.cloud._normalize_projects( + meta.obj_list_to_munch(first_response['projects'])), + self.cloud.list_projects()) + self.assertEqual( + self.cloud._normalize_projects( + meta.obj_list_to_munch(first_response['projects'])), + self.cloud.list_projects()) + # invalidate the list_projects cache + self.cloud.list_projects.invalidate(self.cloud) + # ensure the new values are now retrieved + self.assertEqual( + self.cloud._normalize_projects( + meta.obj_list_to_munch(second_response['projects'])), + self.cloud.list_projects()) + self.assert_calls() + + def test_list_projects_v2(self): + 
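+        # Mirrors test_list_projects_v3 above: the v2 'tenants' listing is
+        # cached until list_projects.invalidate() is called.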
self.use_keystone_v2() + project_one = self._get_project_data(v3=False) + project_two = self._get_project_data(v3=False) + project_list = [project_one, project_two] + + first_response = {'tenants': [project_one.json_response['tenant']]} + second_response = {'tenants': [p.json_response['tenant'] + for p in project_list]} + + mock_uri = self.get_mock_url( + service_type='identity', interface='admin', resource='tenants') + + self.register_uris([ + dict(method='GET', uri=mock_uri, status_code=200, + json=first_response), + dict(method='GET', uri=mock_uri, status_code=200, + json=second_response)]) + + self.assertEqual( + self.cloud._normalize_projects( + meta.obj_list_to_munch(first_response['tenants'])), + self.cloud.list_projects()) + self.assertEqual( + self.cloud._normalize_projects( + meta.obj_list_to_munch(first_response['tenants'])), + self.cloud.list_projects()) + # invalidate the list_projects cache + self.cloud.list_projects.invalidate(self.cloud) + # ensure the new values are now retrieved + self.assertEqual( + self.cloud._normalize_projects( + meta.obj_list_to_munch(second_response['tenants'])), + self.cloud.list_projects()) + self.assert_calls() + + def test_list_servers_no_herd(self): + self.cloud._SERVER_AGE = 2 + fake_server = fakes.make_fake_server('1234', 'name') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [fake_server]}), + ]) + with concurrent.futures.ThreadPoolExecutor(16) as pool: + for i in range(16): + pool.submit(lambda: self.cloud.list_servers(bare=True)) + # It's possible to race-condition 16 threads all in the + # single initial lock without a tiny sleep + time.sleep(0.001) + + self.assert_calls() + + def test_list_volumes(self): + fake_volume = fakes.FakeVolume('volume1', 'available', + 'Volume 1 Display Name') + fake_volume_dict = meta.obj_to_munch(fake_volume) + fake_volume2 = fakes.FakeVolume('volume2', 'available', + 'Volume 2 Display Name') + fake_volume2_dict = meta.obj_to_munch(fake_volume2) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volume_dict]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volume_dict, fake_volume2_dict]})]) + self.assertEqual( + [self.cloud._normalize_volume(fake_volume_dict)], + self.cloud.list_volumes()) + # this call should hit the cache + self.assertEqual( + [self.cloud._normalize_volume(fake_volume_dict)], + self.cloud.list_volumes()) + self.cloud.list_volumes.invalidate(self.cloud) + self.assertEqual( + [self.cloud._normalize_volume(fake_volume_dict), + self.cloud._normalize_volume(fake_volume2_dict)], + self.cloud.list_volumes()) + self.assert_calls() + + def test_list_volumes_creating_invalidates(self): + fake_volume = fakes.FakeVolume('volume1', 'creating', + 'Volume 1 Display Name') + fake_volume_dict = meta.obj_to_munch(fake_volume) + fake_volume2 = fakes.FakeVolume('volume2', 'available', + 'Volume 2 Display Name') + fake_volume2_dict = meta.obj_to_munch(fake_volume2) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volume_dict]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volume_dict, fake_volume2_dict]})]) + self.assertEqual( + 
[self.cloud._normalize_volume(fake_volume_dict)], + self.cloud.list_volumes()) + self.assertEqual( + [self.cloud._normalize_volume(fake_volume_dict), + self.cloud._normalize_volume(fake_volume2_dict)], + self.cloud.list_volumes()) + self.assert_calls() + + def test_create_volume_invalidates(self): + fake_volb4 = meta.obj_to_munch( + fakes.FakeVolume('volume1', 'available', '')) + _id = '12345' + fake_vol_creating = meta.obj_to_munch( + fakes.FakeVolume(_id, 'creating', '')) + fake_vol_avail = meta.obj_to_munch( + fakes.FakeVolume(_id, 'available', '')) + + def now_deleting(request, context): + fake_vol_avail['status'] = 'deleting' + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volb4]}), + dict(method='POST', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes']), + json={'volume': fake_vol_creating}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volb4, fake_vol_creating]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volb4, fake_vol_avail]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volb4, fake_vol_avail]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', _id]), + json=now_deleting), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [fake_volb4]})]) + + self.assertEqual( + [self.cloud._normalize_volume(fake_volb4)], + self.cloud.list_volumes()) + volume = dict(display_name='junk_vol', + size=1, + display_description='test junk volume') + self.cloud.create_volume(wait=True, timeout=None, **volume) + # If cache was not invalidated, we would not see our own volume here + # because the first volume was available and thus would already be + # cached. 
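+        # (The list_volumes call above primed the cache with the
+        # all-'available' listing, so only the invalidation done by
+        # create_volume lets the new volume show up here.)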
+        self.assertEqual(
+            [self.cloud._normalize_volume(fake_volb4),
+             self.cloud._normalize_volume(fake_vol_avail)],
+            self.cloud.list_volumes())
+        self.cloud.delete_volume(_id)
+        # And now delete and check same thing since list is cached as all
+        # available
+        self.assertEqual(
+            [self.cloud._normalize_volume(fake_volb4)],
+            self.cloud.list_volumes())
+        self.assert_calls()
+
+    def test_list_users(self):
+        user_data = self._get_user_data(email='test@example.com')
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     service_type='identity',
+                     interface='admin',
+                     resource='users',
+                     base_url_append='v3'),
+                 status_code=200,
+                 json={'users': [user_data.json_response['user']]})])
+        users = self.cloud.list_users()
+        self.assertEqual(1, len(users))
+        self.assertEqual(user_data.user_id, users[0]['id'])
+        self.assertEqual(user_data.name, users[0]['name'])
+        self.assertEqual(user_data.email, users[0]['email'])
+        self.assert_calls()
+
+    def test_modify_user_invalidates_cache(self):
+        self.use_keystone_v2()
+
+        user_data = self._get_user_data(email='test@example.com')
+        new_resp = {'user': user_data.json_response['user'].copy()}
+        new_resp['user']['email'] = 'Nope@Nope.Nope'
+        new_req = {'user': {'email': new_resp['user']['email']}}
+
+        mock_users_url = self.get_mock_url(
+            service_type='identity',
+            interface='admin',
+            resource='users')
+        mock_user_resource_url = self.get_mock_url(
+            service_type='identity',
+            interface='admin',
+            resource='users',
+            append=[user_data.user_id])
+
+        empty_user_list_resp = {'users': []}
+        users_list_resp = {'users': [user_data.json_response['user']]}
+        updated_users_list_resp = {'users': [new_resp['user']]}
+
+        # Password is None in the original create below
+        user_data.json_request['user']['password'] = None
+
+        uris_to_mock = [
+            # Initial User List is Empty
+            dict(method='GET', uri=mock_users_url, status_code=200,
+                 json=empty_user_list_resp),
+            # POST to create the user
+            # GET to get the user data after POST
+            dict(method='POST', uri=mock_users_url, status_code=200,
+                 json=user_data.json_response,
+                 validate=dict(json=user_data.json_request)),
+            # List Users Call
+            dict(method='GET', uri=mock_users_url, status_code=200,
+                 json=users_list_resp),
+            # List users to get ID for update
+            # Get user using user_id from list
+            # Update user
+            # Get updated user
+            dict(method='GET', uri=mock_users_url, status_code=200,
+                 json=users_list_resp),
+            dict(method='PUT', uri=mock_user_resource_url, status_code=200,
+                 json=new_resp, validate=dict(json=new_req)),
+            # List Users Call
+            dict(method='GET', uri=mock_users_url, status_code=200,
+                 json=updated_users_list_resp),
+            # List User to get ID for delete
+            # Get user using user_id from list
+            # delete user
+            dict(method='GET', uri=mock_users_url, status_code=200,
+                 json=updated_users_list_resp),
+            dict(method='GET', uri=mock_user_resource_url, status_code=200,
+                 json=new_resp),
+            dict(method='DELETE', uri=mock_user_resource_url, status_code=204),
+            # List Users Call (empty post delete)
+            dict(method='GET', uri=mock_users_url, status_code=200,
+                 json=empty_user_list_resp)
+        ]
+
+        self.register_uris(uris_to_mock)
+
+        # first cache an empty list
+        self.assertEqual([], self.cloud.list_users())
+
+        # now add one
+        created = self.cloud.create_user(name=user_data.name,
+                                         email=user_data.email)
+        self.assertEqual(user_data.user_id, created['id'])
+        self.assertEqual(user_data.name, created['name'])
+        self.assertEqual(user_data.email, created['email'])
+        # Cache should have been invalidated
+        users = self.cloud.list_users()
+ self.assertEqual(user_data.user_id, users[0]['id']) + self.assertEqual(user_data.name, users[0]['name']) + self.assertEqual(user_data.email, users[0]['email']) + + # Update and check to see if it is updated + updated = self.cloud.update_user(user_data.user_id, + email=new_resp['user']['email']) + self.assertEqual(user_data.user_id, updated.id) + self.assertEqual(user_data.name, updated.name) + self.assertEqual(new_resp['user']['email'], updated.email) + users = self.cloud.list_users() + self.assertEqual(1, len(users)) + self.assertEqual(user_data.user_id, users[0]['id']) + self.assertEqual(user_data.name, users[0]['name']) + self.assertEqual(new_resp['user']['email'], users[0]['email']) + # Now delete and ensure it disappears + self.cloud.delete_user(user_data.user_id) + self.assertEqual([], self.cloud.list_users()) + self.assert_calls() + + def test_list_flavors(self): + mock_uri = '{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT) + + uris_to_mock = [ + dict(method='GET', uri=mock_uri, json={'flavors': []}), + dict(method='GET', uri=mock_uri, + json={'flavors': fakes.FAKE_FLAVOR_LIST}) + ] + uris_to_mock.extend([ + dict(method='GET', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']), + json={'extra_specs': {}}) + for flavor in fakes.FAKE_FLAVOR_LIST]) + + self.register_uris(uris_to_mock) + + self.assertEqual([], self.cloud.list_flavors()) + + self.assertEqual([], self.cloud.list_flavors()) + + fake_flavor_dicts = self.cloud._normalize_flavors( + fakes.FAKE_FLAVOR_LIST) + self.cloud.list_flavors.invalidate(self.cloud) + self.assertEqual(fake_flavor_dicts, self.cloud.list_flavors()) + + self.assert_calls() + + def test_list_images(self): + + self.use_glance() + fake_image = fakes.make_fake_image(image_id='42') + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('image', 'public', + append=['v2', 'images']), + json={'images': []}), + dict(method='GET', + uri=self.get_mock_url('image', 'public', + append=['v2', 'images']), + json={'images': [fake_image]}), + ]) + + self.assertEqual([], self.cloud.list_images()) + self.assertEqual([], self.cloud.list_images()) + self.cloud.list_images.invalidate(self.cloud) + self.assertEqual( + self._munch_images(fake_image), self.cloud.list_images()) + + self.assert_calls() + + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_list_images_ignores_unsteady_status(self, mock_image_client): + steady_image = munch.Munch(id='68', name='Jagr', status='active') + for status in ('queued', 'saving', 'pending_delete'): + active_image = munch.Munch( + id=self.getUniqueString(), name=self.getUniqueString(), + status=status) + mock_image_client.get.return_value = [active_image] + + self.assertEqual( + self._munch_images(active_image), + self.cloud.list_images()) + mock_image_client.get.return_value = [ + active_image, steady_image] + # Should expect steady_image to appear if active wasn't cached + self.assertEqual( + [self._image_dict(active_image), + self._image_dict(steady_image)], + self.cloud.list_images()) + + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_list_images_caches_steady_status(self, mock_image_client): + steady_image = munch.Munch(id='91', name='Federov', status='active') + first_image = None + for status in ('active', 'deleted', 'killed'): + active_image = munch.Munch( + id=self.getUniqueString(), name=self.getUniqueString(), + status=status) + mock_image_client.get.return_value = 
[active_image] + if not first_image: + first_image = active_image + self.assertEqual( + self._munch_images(first_image), + self.cloud.list_images()) + mock_image_client.get.return_value = [ + active_image, steady_image] + # because we skipped the create_image code path, no invalidation + # was done, so we _SHOULD_ expect steady state images to cache and + # therefore we should _not_ expect to see the new one here + self.assertEqual( + self._munch_images(first_image), + self.cloud.list_images()) + + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_cache_no_cloud_name(self, mock_image_client): + + self.cloud.name = None + fi = munch.Munch( + id='1', name='None Test Image', + status='active', visibility='private') + mock_image_client.get.return_value = [fi] + self.assertEqual( + self._munch_images(fi), + self.cloud.list_images()) + # Now test that the list was cached + fi2 = munch.Munch( + id='2', name='None Test Image', + status='active', visibility='private') + mock_image_client.get.return_value = [fi, fi2] + self.assertEqual( + self._munch_images(fi), + self.cloud.list_images()) + # Invalidation too + self.cloud.list_images.invalidate(self.cloud) + self.assertEqual( + [self._image_dict(fi), self._image_dict(fi2)], + self.cloud.list_images()) + + +class TestBogusAuth(base.TestCase): + + def setUp(self): + super(TestBogusAuth, self).setUp( + cloud_config_fixture='clouds_cache.yaml') + + def test_get_auth_bogus(self): + with testtools.ExpectedException(exc.OpenStackCloudException): + openstack.openstack_cloud( + cloud='_bogus_test_', config=self.config) diff --git a/openstack/tests/unit/cloud/test_cluster_templates.py b/openstack/tests/unit/cloud/test_cluster_templates.py new file mode 100644 index 000000000..8beff5dee --- /dev/null +++ b/openstack/tests/unit/cloud/test_cluster_templates.py @@ -0,0 +1,201 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
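+
+"""
+test_cluster_templates
+----------------------------------
+
+Tests for cluster template operations, backed by the container-infra
+baymodels API.
+"""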
+ + +import munch + +import openstack.cloud +import testtools +from openstack.tests.unit import base + + +cluster_template_obj = munch.Munch( + apiserver_port=12345, + cluster_distro='fake-distro', + coe='fake-coe', + created_at='fake-date', + dns_nameserver='8.8.8.8', + docker_volume_size=1, + external_network_id='public', + fixed_network=None, + flavor_id='fake-flavor', + https_proxy=None, + human_id=None, + image_id='fake-image', + insecure_registry='https://192.168.0.10', + keypair_id='fake-key', + labels={}, + links={}, + master_flavor_id=None, + name='fake-cluster-template', + network_driver='fake-driver', + no_proxy=None, + public=False, + registry_enabled=False, + server_type='vm', + tls_disabled=False, + updated_at=None, + uuid='fake-uuid', + volume_driver=None, +) + + +class TestClusterTemplates(base.RequestsMockTestCase): + + def test_list_cluster_templates_without_detail(self): + + self.register_uris([dict( + method='GET', + uri='https://container-infra.example.com/v1/baymodels/detail', + json=dict(baymodels=[cluster_template_obj.toDict()]))]) + cluster_templates_list = self.cloud.list_cluster_templates() + self.assertEqual( + cluster_templates_list[0], + self.cloud._normalize_cluster_template(cluster_template_obj)) + self.assert_calls() + + def test_list_cluster_templates_with_detail(self): + self.register_uris([dict( + method='GET', + uri='https://container-infra.example.com/v1/baymodels/detail', + json=dict(baymodels=[cluster_template_obj.toDict()]))]) + cluster_templates_list = self.cloud.list_cluster_templates(detail=True) + self.assertEqual( + cluster_templates_list[0], + self.cloud._normalize_cluster_template(cluster_template_obj)) + self.assert_calls() + + def test_search_cluster_templates_by_name(self): + self.register_uris([dict( + method='GET', + uri='https://container-infra.example.com/v1/baymodels/detail', + json=dict(baymodels=[cluster_template_obj.toDict()]))]) + + cluster_templates = self.cloud.search_cluster_templates( + name_or_id='fake-cluster-template') + + self.assertEqual(1, len(cluster_templates)) + self.assertEqual('fake-uuid', cluster_templates[0]['uuid']) + self.assert_calls() + + def test_search_cluster_templates_not_found(self): + + self.register_uris([dict( + method='GET', + uri='https://container-infra.example.com/v1/baymodels/detail', + json=dict(baymodels=[cluster_template_obj.toDict()]))]) + + cluster_templates = self.cloud.search_cluster_templates( + name_or_id='non-existent') + + self.assertEqual(0, len(cluster_templates)) + self.assert_calls() + + def test_get_cluster_template(self): + self.register_uris([dict( + method='GET', + uri='https://container-infra.example.com/v1/baymodels/detail', + json=dict(baymodels=[cluster_template_obj.toDict()]))]) + + r = self.cloud.get_cluster_template('fake-cluster-template') + self.assertIsNotNone(r) + self.assertDictEqual( + r, self.cloud._normalize_cluster_template(cluster_template_obj)) + self.assert_calls() + + def test_get_cluster_template_not_found(self): + self.register_uris([dict( + method='GET', + uri='https://container-infra.example.com/v1/baymodels/detail', + json=dict(baymodels=[]))]) + r = self.cloud.get_cluster_template('doesNotExist') + self.assertIsNone(r) + self.assert_calls() + + def test_create_cluster_template(self): + self.register_uris([dict( + method='POST', + uri='https://container-infra.example.com/v1/baymodels', + json=dict(baymodels=[cluster_template_obj.toDict()]), + validate=dict(json={ + 'coe': 'fake-coe', + 'image_id': 'fake-image', + 'keypair_id': 'fake-key', + 'name': 
'fake-cluster-template'}),
+        )])
+        self.cloud.create_cluster_template(
+            name=cluster_template_obj.name,
+            image_id=cluster_template_obj.image_id,
+            keypair_id=cluster_template_obj.keypair_id,
+            coe=cluster_template_obj.coe)
+        self.assert_calls()
+
+    def test_create_cluster_template_exception(self):
+        self.register_uris([dict(
+            method='POST',
+            uri='https://container-infra.example.com/v1/baymodels',
+            status_code=403)])
+        # TODO(mordred) requests here doesn't give us a great story
+        # for matching the old error message text. Investigate plumbing
+        # an error message in to the adapter call so that we can give a
+        # more informative error. Also, the test was originally catching
+        # OpenStackCloudException - but for some reason testtools will not
+        # match the more specific HTTPError, even though it's a subclass
+        # of OpenStackCloudException.
+        with testtools.ExpectedException(
+                openstack.OpenStackCloudHTTPError):
+            self.cloud.create_cluster_template('fake-cluster-template')
+        self.assert_calls()
+
+    def test_delete_cluster_template(self):
+        uri = 'https://container-infra.example.com/v1/baymodels/fake-uuid'
+        self.register_uris([
+            dict(
+                method='GET',
+                uri='https://container-infra.example.com/v1/baymodels/detail',
+                json=dict(baymodels=[cluster_template_obj.toDict()])),
+            dict(
+                method='DELETE',
+                uri=uri),
+        ])
+        self.cloud.delete_cluster_template('fake-uuid')
+        self.assert_calls()
+
+    def test_update_cluster_template(self):
+        uri = 'https://container-infra.example.com/v1/baymodels/fake-uuid'
+        self.register_uris([
+            dict(
+                method='GET',
+                uri='https://container-infra.example.com/v1/baymodels/detail',
+                json=dict(baymodels=[cluster_template_obj.toDict()])),
+            dict(
+                method='PATCH',
+                uri=uri,
+                status_code=200,
+                validate=dict(
+                    json=[{
+                        u'op': u'replace',
+                        u'path': u'/name',
+                        u'value': u'new-cluster-template'
+                    }]
+                )),
+            dict(
+                method='GET',
+                uri='https://container-infra.example.com/v1/baymodels/detail',
+                # This json value is not meaningful to the test - it just has
+                # to be valid.
+                json=dict(baymodels=[cluster_template_obj.toDict()])),
+        ])
+        new_name = 'new-cluster-template'
+        self.cloud.update_cluster_template(
+            'fake-uuid', 'replace', name=new_name)
+        self.assert_calls()
diff --git a/openstack/tests/unit/cloud/test_create_server.py b/openstack/tests/unit/cloud/test_create_server.py
new file mode 100644
index 000000000..ba969011a
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_create_server.py
@@ -0,0 +1,813 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_create_server
+----------------------------------
+
+Tests for the `create_server` command.
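+
+All REST interactions here are mocked out with register_uris, so the
+tests run without contacting a real cloud.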
+""" +import base64 +import uuid + +import mock + +import openstack.cloud +from openstack.cloud import exc +from openstack.cloud import meta +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestCreateServer(base.RequestsMockTestCase): + + def test_create_server_with_get_exception(self): + """ + Test that a bad status code when attempting to get the server instance + raises an exception in create_server. + """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': u'flavor-id', + u'imageRef': u'image-id', + u'max_count': 1, + u'min_count': 1, + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234']), + status_code=404), + ]) + self.assertRaises( + exc.OpenStackCloudException, self.cloud.create_server, + 'server-name', {'id': 'image-id'}, {'id': 'flavor-id'}) + self.assert_calls() + + def test_create_server_with_server_error(self): + """ + Test that a server error before we return or begin waiting for the + server instance spawn raises an exception in create_server. + """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + error_server = fakes.make_fake_server('1234', '', 'ERROR') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': u'flavor-id', + u'imageRef': u'image-id', + u'max_count': 1, + u'min_count': 1, + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234']), + json={'server': error_server}), + ]) + self.assertRaises( + exc.OpenStackCloudException, self.cloud.create_server, + 'server-name', {'id': 'image-id'}, {'id': 'flavor-id'}) + self.assert_calls() + + def test_create_server_wait_server_error(self): + """ + Test that a server error while waiting for the server to spawn + raises an exception in create_server. 
+ """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + error_server = fakes.make_fake_server('1234', '', 'ERROR') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': u'flavor-id', + u'imageRef': u'image-id', + u'max_count': 1, + u'min_count': 1, + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [build_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [error_server]}), + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.create_server, + 'server-name', dict(id='image-id'), + dict(id='flavor-id'), wait=True) + + self.assert_calls() + + def test_create_server_with_timeout(self): + """ + Test that a timeout while waiting for the server to spawn raises an + exception in create_server. + """ + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers']), + json={'server': fake_server}, + validate=dict( + json={'server': { + u'flavorRef': u'flavor-id', + u'imageRef': u'image-id', + u'max_count': 1, + u'min_count': 1, + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [fake_server]}), + ]) + self.assertRaises( + exc.OpenStackCloudTimeout, + self.cloud.create_server, + 'server-name', + dict(id='image-id'), dict(id='flavor-id'), + wait=True, timeout=0.01) + # We poll at the end, so we don't know real counts + self.assert_calls(do_count=False) + + def test_create_server_no_wait(self): + """ + Test that create_server with no wait and no exception in the + create call returns the server instance. 
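+
+        The result should match the normalized and expanded server dict.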
+        """
+        fake_server = fakes.make_fake_server('1234', '', 'BUILD')
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': fake_server},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'name': u'server-name'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234']),
+                 json={'server': fake_server}),
+        ])
+        normalized = self.cloud._expand_server(
+            self.cloud._normalize_server(fake_server), False, False)
+        self.assertEqual(
+            normalized,
+            self.cloud.create_server(
+                name='server-name',
+                image=dict(id='image-id'),
+                flavor=dict(id='flavor-id')))
+
+        self.assert_calls()
+
+    def test_create_server_config_drive(self):
+        """
+        Test that config_drive gets passed in properly
+        """
+        fake_server = fakes.make_fake_server('1234', '', 'BUILD')
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': fake_server},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'config_drive': True,
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'name': u'server-name'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234']),
+                 json={'server': fake_server}),
+        ])
+        normalized = self.cloud._expand_server(
+            self.cloud._normalize_server(fake_server), False, False)
+        self.assertEqual(
+            normalized,
+            self.cloud.create_server(
+                name='server-name',
+                image=dict(id='image-id'),
+                flavor=dict(id='flavor-id'),
+                config_drive=True))
+
+        self.assert_calls()
+
+    def test_create_server_config_drive_none(self):
+        """
+        Test that config_drive is not sent to the API when it is None
+        """
+        fake_server = fakes.make_fake_server('1234', '', 'BUILD')
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': fake_server},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'name': u'server-name'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234']),
+                 json={'server': fake_server}),
+        ])
+        normalized = self.cloud._expand_server(
+            self.cloud._normalize_server(fake_server), False, False)
+        self.assertEqual(
+            normalized,
+            self.cloud.create_server(
+                name='server-name',
+                image=dict(id='image-id'),
+                flavor=dict(id='flavor-id'),
+                config_drive=None))
+
+        self.assert_calls()
+
+    def test_create_server_with_admin_pass_no_wait(self):
+        """
+        Test that a server created with an admin_pass returns the password
+        """
+        admin_pass = self.getUniqueString('password')
+        fake_server = fakes.make_fake_server('1234', '', 'BUILD')
+        fake_create_server = fakes.make_fake_server(
+            '1234', '', 'BUILD', admin_pass=admin_pass)
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': fake_create_server},
+                 validate=dict(
+                     json={'server': {
+                         u'adminPass': admin_pass,
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'name': u'server-name'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234']),
+                 json={'server': fake_server}),
+        ])
+        self.assertEqual(
+            self.cloud._normalize_server(fake_create_server)['adminPass'],
+            self.cloud.create_server(
+                name='server-name', image=dict(id='image-id'),
+                flavor=dict(id='flavor-id'),
+                admin_pass=admin_pass)['adminPass'])
+
+        self.assert_calls()
+
+    @mock.patch.object(openstack.OpenStackCloud, "wait_for_server")
+    def test_create_server_with_admin_pass_wait(self, mock_wait):
+        """
+        Test that a server created with an admin_pass returns the
+        password when wait=True
+        """
+        admin_pass = self.getUniqueString('password')
+        fake_server = fakes.make_fake_server('1234', '', 'BUILD')
+        fake_server_with_pass = fakes.make_fake_server(
+            '1234', '', 'BUILD', admin_pass=admin_pass)
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': fake_server_with_pass},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'adminPass': admin_pass,
+                         u'name': u'server-name'}})),
+        ])
+
+        # The wait returns non-password server
+        mock_wait.return_value = self.cloud._normalize_server(fake_server)
+
+        server = self.cloud.create_server(
+            name='server-name', image=dict(id='image-id'),
+            flavor=dict(id='flavor-id'),
+            admin_pass=admin_pass, wait=True)
+
+        # Assert that we did wait
+        self.assertTrue(mock_wait.called)
+
+        # Even with the wait, we should still get back a passworded server
+        self.assertEqual(
+            server['adminPass'],
+            self.cloud._normalize_server(fake_server_with_pass)['adminPass']
+        )
+        self.assert_calls()
+
+    def test_create_server_user_data_base64(self):
+        """
+        Test that user-data passed to a server is sent base64 encoded.
+        """
+        user_data = self.getUniqueString('user_data')
+        user_data_b64 = base64.b64encode(
+            user_data.encode('utf-8')).decode('utf-8')
+        fake_server = fakes.make_fake_server('1234', '', 'BUILD')
+        fake_server['user_data'] = user_data
+
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': fake_server},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'user_data': user_data_b64,
+                         u'name': u'server-name'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234']),
+                 json={'server': fake_server}),
+        ])
+
+        self.cloud.create_server(
+            name='server-name', image=dict(id='image-id'),
+            flavor=dict(id='flavor-id'),
+            userdata=user_data, wait=False)
+
+        self.assert_calls()
+
+    @mock.patch.object(openstack.OpenStackCloud, "get_active_server")
+    @mock.patch.object(openstack.OpenStackCloud, "get_server")
+    def test_wait_for_server(self, mock_get_server, mock_get_active_server):
+        """
+        Test that waiting for a server returns the server instance when
+        its status changes to "ACTIVE".
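+
+        get_server and get_active_server are both mocked out, so this
+        only exercises the polling loop in wait_for_server itself.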
+        """
+        # TODO(mordred) Rework this to not mock methods
+        building_server = {'id': 'fake_server_id', 'status': 'BUILDING'}
+        active_server = {'id': 'fake_server_id', 'status': 'ACTIVE'}
+
+        mock_get_server.side_effect = iter([building_server, active_server])
+        mock_get_active_server.side_effect = iter([
+            building_server, active_server])
+
+        server = self.cloud.wait_for_server(building_server)
+
+        self.assertEqual(2, mock_get_server.call_count)
+        mock_get_server.assert_has_calls([
+            mock.call(building_server['id']),
+            mock.call(active_server['id']),
+        ])
+
+        self.assertEqual(2, mock_get_active_server.call_count)
+        mock_get_active_server.assert_has_calls([
+            mock.call(server=building_server, reuse=True, auto_ip=True,
+                      ips=None, ip_pool=None, wait=True, timeout=mock.ANY,
+                      nat_destination=None),
+            mock.call(server=active_server, reuse=True, auto_ip=True,
+                      ips=None, ip_pool=None, wait=True, timeout=mock.ANY,
+                      nat_destination=None),
+        ])
+
+        self.assertEqual('ACTIVE', server['status'])
+
+    @mock.patch.object(openstack.OpenStackCloud, 'wait_for_server')
+    def test_create_server_wait(self, mock_wait):
+        """
+        Test that create_server with a wait actually does the wait.
+        """
+        # TODO(mordred) Make this a full proper response
+        fake_server = fakes.make_fake_server('1234', '', 'BUILD')
+
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': fake_server},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'name': u'server-name'}})),
+        ])
+        self.cloud.create_server(
+            'server-name',
+            dict(id='image-id'), dict(id='flavor-id'), wait=True)
+
+        mock_wait.assert_called_once_with(
+            fake_server,
+            auto_ip=True, ips=None,
+            ip_pool=None, reuse=True, timeout=180,
+            nat_destination=None,
+        )
+        self.assert_calls()
+
+    @mock.patch.object(openstack.OpenStackCloud, 'add_ips_to_server')
+    @mock.patch('time.sleep')
+    def test_create_server_no_addresses(
+            self, mock_sleep, mock_add_ips_to_server):
+        """
+        Test that create_server with a wait throws an exception if the
+        server doesn't have addresses.
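+
+        The failed server is also expected to be cleaned up, per the
+        registered DELETE call.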
+        """
+        build_server = fakes.make_fake_server('1234', '', 'BUILD')
+        fake_server = fakes.make_fake_server(
+            '1234', '', 'ACTIVE', addresses={})
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': []}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': build_server},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'name': u'server-name'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': [build_server]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': [fake_server]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'ports.json'],
+                     qs_elements=['device_id=1234']),
+                 json={'ports': []}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234'])),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': []}),
+        ])
+        mock_add_ips_to_server.return_value = fake_server
+        self.cloud._SERVER_AGE = 0
+
+        self.assertRaises(
+            exc.OpenStackCloudException, self.cloud.create_server,
+            'server-name', {'id': 'image-id'}, {'id': 'flavor-id'},
+            wait=True)
+
+        self.assert_calls()
+
+    def test_create_server_network_with_no_nics(self):
+        """
+        Verify that if 'network' is supplied, and 'nics' is not, we
+        attempt to get the network for the server.
+        """
+        build_server = fakes.make_fake_server('1234', '', 'BUILD')
+        network = {
+            'id': 'network-id',
+            'name': 'network-name'
+        }
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': [network]}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers']),
+                 json={'server': build_server},
+                 validate=dict(
+                     json={'server': {
+                         u'flavorRef': u'flavor-id',
+                         u'imageRef': u'image-id',
+                         u'max_count': 1,
+                         u'min_count': 1,
+                         u'networks': [{u'uuid': u'network-id'}],
+                         u'name': u'server-name'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234']),
+                 json={'server': build_server}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': [network]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'subnets.json']),
+                 json={'subnets': []}),
+        ])
+        self.cloud.create_server(
+            'server-name',
+            dict(id='image-id'), dict(id='flavor-id'), network='network-name')
+        self.assert_calls()
+
+    def test_create_server_network_with_empty_nics(self):
+        """
+        Verify that if 'network' is supplied, along with an empty 'nics' list,
+        it's treated the same as if 'nics' were not included.
+ """ + network = { + 'id': 'network-id', + 'name': 'network-name' + } + build_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [network]}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': u'flavor-id', + u'imageRef': u'image-id', + u'max_count': 1, + u'min_count': 1, + u'networks': [{u'uuid': u'network-id'}], + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234']), + json={'server': build_server}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [network]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': []}), + ]) + self.cloud.create_server( + 'server-name', dict(id='image-id'), dict(id='flavor-id'), + network='network-name', nics=[]) + self.assert_calls() + + def test_create_server_get_flavor_image(self): + self.use_glance() + image_id = str(uuid.uuid4()) + fake_image_dict = fakes.make_fake_image(image_id=image_id) + fake_image_search_return = {'images': [fake_image_dict]} + + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images', + json=fake_image_search_return), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['flavors', 'detail'], + qs_elements=['is_public=None']), + json={'flavors': fakes.FAKE_FLAVOR_LIST}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': fakes.FLAVOR_ID, + u'imageRef': image_id, + u'max_count': 1, + u'min_count': 1, + u'networks': [{u'uuid': u'some-network'}], + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234']), + json={'server': active_server}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + + self.cloud.create_server( + 'server-name', image_id, 'vanilla', + nics=[{'net-id': 'some-network'}], wait=False) + + self.assert_calls() + + def test_create_server_nics_port_id(self): + '''Verify port-id in nics input turns into port in REST.''' + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + image_id = uuid.uuid4().hex + port_id = uuid.uuid4().hex + + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': fakes.FLAVOR_ID, + u'imageRef': image_id, + u'max_count': 1, + u'min_count': 1, + u'networks': [{u'port': port_id}], + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234']), + json={'server': active_server}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + + self.cloud.create_server( + 'server-name', dict(id=image_id), 
dict(id=fakes.FLAVOR_ID), + nics=[{'port-id': port_id}], wait=False) + + self.assert_calls() + + def test_create_boot_attach_volume(self): + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + + vol = {'id': 'volume001', 'status': 'available', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-volumes_boot']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': 'flavor-id', + u'imageRef': 'image-id', + u'max_count': 1, + u'min_count': 1, + u'block_device_mapping_v2': [ + { + u'boot_index': 0, + u'delete_on_termination': True, + u'destination_type': u'local', + u'source_type': u'image', + u'uuid': u'image-id' + }, + { + u'boot_index': u'-1', + u'delete_on_termination': False, + u'destination_type': u'volume', + u'source_type': u'volume', + u'uuid': u'volume001' + } + ], + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234']), + json={'server': active_server}), + ]) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + boot_from_volume=False, + volumes=[volume], + wait=False) + + self.assert_calls() + + def test_create_boot_from_volume_image_terminate(self): + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-volumes_boot']), + json={'server': build_server}, + validate=dict( + json={'server': { + u'flavorRef': 'flavor-id', + u'imageRef': '', + u'max_count': 1, + u'min_count': 1, + u'block_device_mapping_v2': [{ + u'boot_index': u'0', + u'delete_on_termination': True, + u'destination_type': u'volume', + u'source_type': u'image', + u'uuid': u'image-id', + u'volume_size': u'1'}], + u'name': u'server-name'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234']), + json={'server': active_server}), + ]) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + boot_from_volume=True, + terminate_volume=True, + volume_size=1, + wait=False) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_create_volume_snapshot.py b/openstack/tests/unit/cloud/test_create_volume_snapshot.py new file mode 100644 index 000000000..49386e3bd --- /dev/null +++ b/openstack/tests/unit/cloud/test_create_volume_snapshot.py @@ -0,0 +1,127 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+"""
+test_create_volume_snapshot
+----------------------------------
+
+Tests for the `create_volume_snapshot` command.
+"""
+
+from openstack.cloud import exc
+from openstack.cloud import meta
+from openstack.tests import fakes
+from openstack.tests.unit import base
+
+
+class TestCreateVolumeSnapshot(base.RequestsMockTestCase):
+
+    def test_create_volume_snapshot_wait(self):
+        """
+        Test that create_volume_snapshot with a wait returns the volume
+        snapshot when its status changes to "available".
+        """
+        snapshot_id = '5678'
+        volume_id = '1234'
+        build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating',
+                                                  'foo', 'derpysnapshot')
+        build_snapshot_dict = meta.obj_to_munch(build_snapshot)
+        fake_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'available',
+                                                 'foo', 'derpysnapshot')
+        fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
+
+        self.register_uris([
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'volumev2', 'public', append=['snapshots']),
+                 json={'snapshot': build_snapshot_dict},
+                 validate=dict(json={
+                     'snapshot': {'force': False, 'volume_id': '1234'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url('volumev2', 'public',
+                                       append=['snapshots', snapshot_id]),
+                 json={'snapshot': build_snapshot_dict}),
+            dict(method='GET',
+                 uri=self.get_mock_url('volumev2', 'public',
+                                       append=['snapshots', snapshot_id]),
+                 json={'snapshot': fake_snapshot_dict})])
+
+        self.assertEqual(
+            self.cloud._normalize_volume(fake_snapshot_dict),
+            self.cloud.create_volume_snapshot(volume_id=volume_id, wait=True)
+        )
+        self.assert_calls()
+
+    def test_create_volume_snapshot_with_timeout(self):
+        """
+        Test that a timeout while waiting for the volume snapshot to be
+        created raises an exception in create_volume_snapshot.
+        """
+        snapshot_id = '5678'
+        volume_id = '1234'
+        build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating',
+                                                  'foo', 'derpysnapshot')
+        build_snapshot_dict = meta.obj_to_munch(build_snapshot)
+
+        self.register_uris([
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'volumev2', 'public', append=['snapshots']),
+                 json={'snapshot': build_snapshot_dict},
+                 validate=dict(json={
+                     'snapshot': {'force': False, 'volume_id': '1234'}})),
+            dict(method='GET',
+                 uri=self.get_mock_url('volumev2', 'public',
+                                       append=['snapshots', snapshot_id]),
+                 json={'snapshot': build_snapshot_dict})])
+
+        self.assertRaises(
+            exc.OpenStackCloudTimeout,
+            self.cloud.create_volume_snapshot, volume_id=volume_id,
+            wait=True, timeout=0.01)
+        self.assert_calls(do_count=False)
+
+    def test_create_volume_snapshot_with_error(self):
+        """
+        Test that an error status while waiting for the volume snapshot to
+        be created raises an exception in create_volume_snapshot.
+ """ + snapshot_id = '5678' + volume_id = '1234' + build_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'creating', + 'bar', 'derpysnapshot') + build_snapshot_dict = meta.obj_to_munch(build_snapshot) + error_snapshot = fakes.FakeVolumeSnapshot(snapshot_id, 'error', + 'blah', 'derpysnapshot') + error_snapshot_dict = meta.obj_to_munch(error_snapshot) + + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'volumev2', 'public', append=['snapshots']), + json={'snapshot': build_snapshot_dict}, + validate=dict(json={ + 'snapshot': {'force': False, 'volume_id': '1234'}})), + dict(method='GET', + uri=self.get_mock_url('volumev2', 'public', + append=['snapshots', snapshot_id]), + json={'snapshot': build_snapshot_dict}), + dict(method='GET', + uri=self.get_mock_url('volumev2', 'public', + append=['snapshots', snapshot_id]), + json={'snapshot': error_snapshot_dict})]) + + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.create_volume_snapshot, volume_id=volume_id, + wait=True, timeout=5) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_delete_server.py b/openstack/tests/unit/cloud/test_delete_server.py new file mode 100644 index 000000000..304818ae1 --- /dev/null +++ b/openstack/tests/unit/cloud/test_delete_server.py @@ -0,0 +1,265 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_delete_server +---------------------------------- + +Tests for the `delete_server` command. 
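+
+Covers the wait behavior as well as the delete_ips floating-ip
+cleanup paths.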
+""" +import uuid + +from openstack.cloud import exc as shade_exc +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestDeleteServer(base.RequestsMockTestCase): + + def test_delete_server(self): + """ + Test that server delete is called when wait=False + """ + server = fakes.make_fake_server('1234', 'daffy', 'ACTIVE') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [server]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'])), + ]) + self.assertTrue(self.cloud.delete_server('daffy', wait=False)) + + self.assert_calls() + + def test_delete_server_already_gone(self): + """ + Test that we return immediately when server is already gone + """ + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': []}), + ]) + self.assertFalse(self.cloud.delete_server('tweety', wait=False)) + + self.assert_calls() + + def test_delete_server_already_gone_wait(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': []}), + ]) + self.assertFalse(self.cloud.delete_server('speedy', wait=True)) + self.assert_calls() + + def test_delete_server_wait_for_deleted(self): + """ + Test that delete_server waits for the server to be gone + """ + server = fakes.make_fake_server('9999', 'wily', 'ACTIVE') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [server]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '9999'])), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [server]}), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': []}), + ]) + self.assertTrue(self.cloud.delete_server('wily', wait=True)) + + self.assert_calls() + + def test_delete_server_fails(self): + """ + Test that delete_server raises non-404 exceptions + """ + server = fakes.make_fake_server('1212', 'speedy', 'ACTIVE') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [server]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1212']), + status_code=400), + ]) + + self.assertRaises( + shade_exc.OpenStackCloudException, + self.cloud.delete_server, 'speedy', + wait=False) + + self.assert_calls() + + def test_delete_server_no_cinder(self): + """ + Test that deleting server works when cinder is not available + """ + orig_has_service = self.cloud.has_service + + def fake_has_service(service_type): + if service_type == 'volume': + return False + return orig_has_service(service_type) + self.cloud.has_service = fake_has_service + + server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [server]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'])), + ]) + self.assertTrue(self.cloud.delete_server('porky', wait=False)) + + self.assert_calls() + + def test_delete_server_delete_ips(self): + """ + Test that deleting server 
and fips works
+        """
+        server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
+        fip_id = uuid.uuid4().hex
+
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': [server]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'floatingips.json'],
+                     qs_elements=['floating_ip_address=172.24.5.5']),
+                 complete_qs=True,
+                 json={'floatingips': [{
+                     'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f',
+                     'tenant_id': '4969c491a3c74ee4af974e6d800c62de',
+                     'floating_network_id': '376da547-b977-4cfe-9cba7',
+                     'fixed_ip_address': '10.0.0.4',
+                     'floating_ip_address': '172.24.5.5',
+                     'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
+                     'id': fip_id,
+                     'status': 'ACTIVE'}]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'floatingips',
+                             '{fip_id}.json'.format(fip_id=fip_id)])),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'floatingips.json']),
+                 complete_qs=True,
+                 json={'floatingips': []}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234'])),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': []}),
+        ])
+        self.assertTrue(self.cloud.delete_server(
+            'porky', wait=True, delete_ips=True))
+
+        self.assert_calls()
+
+    def test_delete_server_delete_ips_bad_neutron(self):
+        """
+        Test that deleting server with a borked neutron doesn't bork
+        """
+        server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
+
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': [server]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'floatingips.json'],
+                     qs_elements=['floating_ip_address=172.24.5.5']),
+                 complete_qs=True,
+                 status_code=404),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234'])),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': []}),
+        ])
+        self.assertTrue(self.cloud.delete_server(
+            'porky', wait=True, delete_ips=True))
+
+        self.assert_calls()
+
+    def test_delete_server_delete_fips_nova(self):
+        """
+        Test that deleting a server with nova-managed floating ips works
+        """
+        self.cloud._floating_ip_source = 'nova'
+        server = fakes.make_fake_server('1234', 'porky', 'ACTIVE')
+
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': [server]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['os-floating-ips']),
+                 json={'floating_ips': [
+                     {
+                         'fixed_ip': None,
+                         'id': 1,
+                         'instance_id': None,
+                         'ip': '172.24.5.5',
+                         'pool': 'nova'
+                     }]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['os-floating-ips', '1'])),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['os-floating-ips']),
+                 json={'floating_ips': []}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', '1234'])),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': []}),
+        ])
+        self.assertTrue(self.cloud.delete_server(
+            'porky', wait=True, delete_ips=True))
+
+        self.assert_calls()
diff --git
a/openstack/tests/unit/cloud/test_delete_volume_snapshot.py b/openstack/tests/unit/cloud/test_delete_volume_snapshot.py
new file mode 100644
index 000000000..92783f205
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_delete_volume_snapshot.py
@@ -0,0 +1,101 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_delete_volume_snapshot
+----------------------------------
+
+Tests for the `delete_volume_snapshot` command.
+"""
+
+from openstack.cloud import exc
+from openstack.cloud import meta
+from openstack.tests import fakes
+from openstack.tests.unit import base
+
+
+class TestDeleteVolumeSnapshot(base.RequestsMockTestCase):
+
+    def test_delete_volume_snapshot(self):
+        """
+        Test that delete_volume_snapshot without a wait returns True
+        when the volume snapshot deletes.
+        """
+        fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available',
+                                                 'foo', 'derpysnapshot')
+        fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
+
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'volumev2', 'public',
+                     append=['snapshots', 'detail']),
+                 json={'snapshots': [fake_snapshot_dict]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'volumev2', 'public',
+                     append=['snapshots', fake_snapshot_dict['id']]))])
+
+        self.assertTrue(
+            self.cloud.delete_volume_snapshot(name_or_id='1234', wait=False))
+        self.assert_calls()
+
+    def test_delete_volume_snapshot_with_error(self):
+        """
+        Test that an exception while deleting a volume snapshot causes an
+        OpenStackCloudException.
+        """
+        fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available',
+                                                 'foo', 'derpysnapshot')
+        fake_snapshot_dict = meta.obj_to_munch(fake_snapshot)
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'volumev2', 'public',
+                     append=['snapshots', 'detail']),
+                 json={'snapshots': [fake_snapshot_dict]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'volumev2', 'public',
+                     append=['snapshots', fake_snapshot_dict['id']]),
+                 status_code=404)])
+
+        self.assertRaises(
+            exc.OpenStackCloudException,
+            self.cloud.delete_volume_snapshot, name_or_id='1234')
+        self.assert_calls()
+
+    def test_delete_volume_snapshot_with_timeout(self):
+        """
+        Test that a timeout while waiting for the volume snapshot to delete
+        raises an exception in delete_volume_snapshot.
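+
+        The wait behaviour under test can be sketched roughly as follows
+        (a simplified stand-in with assumed names, not the SDK's actual
+        wait helper)::
+
+            import time
+
+            def wait_for_gone(fetch, timeout, interval=0.1):
+                # Poll until the resource disappears or time runs out,
+                # then fail the way delete_volume_snapshot is expected
+                # to: by raising.
+                deadline = time.time() + timeout
+                while time.time() < deadline:
+                    if fetch() is None:
+                        return True
+                    time.sleep(interval)
+                raise TimeoutError('still present after %s seconds'
+                                   % timeout)
+
+        With timeout=0.01 the deadline passes almost immediately, which is
+        also why assert_calls(do_count=False) is used below: the number of
+        polling GETs is not deterministic.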
+ """ + fake_snapshot = fakes.FakeVolumeSnapshot('1234', 'available', + 'foo', 'derpysnapshot') + fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['snapshots', 'detail']), + json={'snapshots': [fake_snapshot_dict]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['snapshots', fake_snapshot_dict['id']]))]) + + self.assertRaises( + exc.OpenStackCloudTimeout, + self.cloud.delete_volume_snapshot, name_or_id='1234', + wait=True, timeout=0.01) + self.assert_calls(do_count=False) diff --git a/openstack/tests/unit/cloud/test_domain_params.py b/openstack/tests/unit/cloud/test_domain_params.py new file mode 100644 index 000000000..e01dd88a0 --- /dev/null +++ b/openstack/tests/unit/cloud/test_domain_params.py @@ -0,0 +1,73 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +import munch + +import openstack.cloud +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestDomainParams(base.TestCase): + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, 'get_project') + def test_identity_params_v3(self, mock_get_project, + mock_is_client_version): + mock_get_project.return_value = munch.Munch(id=1234) + mock_is_client_version.return_value = True + + ret = self.cloud._get_identity_params(domain_id='5678', project='bar') + self.assertIn('default_project_id', ret) + self.assertEqual(ret['default_project_id'], 1234) + self.assertIn('domain_id', ret) + self.assertEqual(ret['domain_id'], '5678') + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, 'get_project') + def test_identity_params_v3_no_domain( + self, mock_get_project, mock_is_client_version): + mock_get_project.return_value = munch.Munch(id=1234) + mock_is_client_version.return_value = True + + self.assertRaises( + exc.OpenStackCloudException, + self.cloud._get_identity_params, + domain_id=None, project='bar') + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, 'get_project') + def test_identity_params_v2(self, mock_get_project, + mock_is_client_version): + mock_get_project.return_value = munch.Munch(id=1234) + mock_is_client_version.return_value = False + + ret = self.cloud._get_identity_params(domain_id='foo', project='bar') + self.assertIn('tenant_id', ret) + self.assertEqual(ret['tenant_id'], 1234) + self.assertNotIn('domain', ret) + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, 'get_project') + def test_identity_params_v2_no_domain(self, mock_get_project, + mock_is_client_version): + mock_get_project.return_value = munch.Munch(id=1234) + mock_is_client_version.return_value = False + + ret = self.cloud._get_identity_params(domain_id=None, project='bar') + api_calls = [mock.call('identity', 3), 
mock.call('identity', 3)] + mock_is_client_version.assert_has_calls(api_calls) + self.assertIn('tenant_id', ret) + self.assertEqual(ret['tenant_id'], 1234) + self.assertNotIn('domain', ret) diff --git a/openstack/tests/unit/cloud/test_domains.py b/openstack/tests/unit/cloud/test_domains.py new file mode 100644 index 000000000..d531768b8 --- /dev/null +++ b/openstack/tests/unit/cloud/test_domains.py @@ -0,0 +1,210 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +import testtools +from testtools import matchers + +import openstack.cloud +from openstack.tests.unit import base + + +class TestDomains(base.RequestsMockTestCase): + + def get_mock_url(self, service_type='identity', + interface='admin', resource='domains', + append=None, base_url_append='v3'): + return super(TestDomains, self).get_mock_url( + service_type=service_type, interface=interface, resource=resource, + append=append, base_url_append=base_url_append) + + def test_list_domains(self): + domain_data = self._get_domain_data() + self.register_uris([ + dict(method='GET', uri=self.get_mock_url(), status_code=200, + json={'domains': [domain_data.json_response['domain']]})]) + domains = self.op_cloud.list_domains() + self.assertThat(len(domains), matchers.Equals(1)) + self.assertThat(domains[0].name, + matchers.Equals(domain_data.domain_name)) + self.assertThat(domains[0].id, + matchers.Equals(domain_data.domain_id)) + self.assert_calls() + + def test_get_domain(self): + domain_data = self._get_domain_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(append=[domain_data.domain_id]), + status_code=200, + json=domain_data.json_response)]) + domain = self.op_cloud.get_domain(domain_id=domain_data.domain_id) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assert_calls() + + def test_get_domain_with_name_or_id(self): + domain_data = self._get_domain_data() + response = {'domains': [domain_data.json_response['domain']]} + self.register_uris([ + dict(method='GET', uri=self.get_mock_url(), status_code=200, + json=response), + dict(method='GET', uri=self.get_mock_url(), status_code=200, + json=response)]) + domain = self.op_cloud.get_domain(name_or_id=domain_data.domain_id) + domain_by_name = self.op_cloud.get_domain( + name_or_id=domain_data.domain_name) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat(domain_by_name.id, + matchers.Equals(domain_data.domain_id)) + self.assertThat(domain_by_name.name, + matchers.Equals(domain_data.domain_name)) + self.assert_calls() + + def test_create_domain(self): + domain_data = self._get_domain_data(description=uuid.uuid4().hex, + enabled=True) + self.register_uris([ + dict(method='POST', uri=self.get_mock_url(), status_code=200, + json=domain_data.json_response, + 
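+                 # validate= makes the fixture assert on the exact JSON
+                 # body that create_domain sends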
validate=dict(json=domain_data.json_request))]) + domain = self.op_cloud.create_domain( + domain_data.domain_name, domain_data.description) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat( + domain.description, matchers.Equals(domain_data.description)) + self.assert_calls() + + def test_create_domain_exception(self): + domain_data = self._get_domain_data(domain_name='domain_name', + enabled=True) + with testtools.ExpectedException( + openstack.OpenStackCloudBadRequest, + "Failed to create domain domain_name" + ): + self.register_uris([ + dict(method='POST', uri=self.get_mock_url(), status_code=400, + json=domain_data.json_response, + validate=dict(json=domain_data.json_request))]) + self.op_cloud.create_domain('domain_name') + self.assert_calls() + + def test_delete_domain(self): + domain_data = self._get_domain_data() + new_resp = domain_data.json_response.copy() + new_resp['domain']['enabled'] = False + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris([ + dict(method='PATCH', uri=domain_resource_uri, status_code=200, + json=new_resp, + validate=dict(json={'domain': {'enabled': False}})), + dict(method='DELETE', uri=domain_resource_uri, status_code=204)]) + self.op_cloud.delete_domain(domain_data.domain_id) + self.assert_calls() + + def test_delete_domain_name_or_id(self): + domain_data = self._get_domain_data() + new_resp = domain_data.json_response.copy() + new_resp['domain']['enabled'] = False + + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris([ + dict(method='GET', uri=self.get_mock_url(), status_code=200, + json={'domains': [domain_data.json_response['domain']]}), + dict(method='PATCH', uri=domain_resource_uri, status_code=200, + json=new_resp, + validate=dict(json={'domain': {'enabled': False}})), + dict(method='DELETE', uri=domain_resource_uri, status_code=204)]) + self.op_cloud.delete_domain(name_or_id=domain_data.domain_id) + self.assert_calls() + + def test_delete_domain_exception(self): + # NOTE(notmorgan): This test does not reflect the case where the domain + # cannot be updated to be disabled, Shade raises that as an unable + # to update domain even though it is called via delete_domain. This + # should be fixed in shade to catch either a failure on PATCH, + # subsequent GET, or DELETE call(s). 
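+        # The flow being broken here is the usual disable-then-delete
+        # sequence shown in test_delete_domain above:
+        #     PATCH /v3/domains/{id}   {'domain': {'enabled': False}}
+        #     DELETE /v3/domains/{id}
+        # This test lets the PATCH succeed and makes the DELETE 404 to
+        # exercise the error branch.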
+ domain_data = self._get_domain_data() + new_resp = domain_data.json_response.copy() + new_resp['domain']['enabled'] = False + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris([ + dict(method='PATCH', uri=domain_resource_uri, status_code=200, + json=new_resp, + validate=dict(json={'domain': {'enabled': False}})), + dict(method='DELETE', uri=domain_resource_uri, status_code=404)]) + with testtools.ExpectedException( + openstack.OpenStackCloudURINotFound, + "Failed to delete domain %s" % domain_data.domain_id + ): + self.op_cloud.delete_domain(domain_data.domain_id) + self.assert_calls() + + def test_update_domain(self): + domain_data = self._get_domain_data( + description=self.getUniqueString('domainDesc')) + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris([ + dict(method='PATCH', uri=domain_resource_uri, status_code=200, + json=domain_data.json_response, + validate=dict(json=domain_data.json_request))]) + domain = self.op_cloud.update_domain( + domain_data.domain_id, + name=domain_data.domain_name, + description=domain_data.description) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat( + domain.description, matchers.Equals(domain_data.description)) + self.assert_calls() + + def test_update_domain_name_or_id(self): + domain_data = self._get_domain_data( + description=self.getUniqueString('domainDesc')) + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris([ + dict(method='GET', uri=self.get_mock_url(), status_code=200, + json={'domains': [domain_data.json_response['domain']]}), + dict(method='PATCH', uri=domain_resource_uri, status_code=200, + json=domain_data.json_response, + validate=dict(json=domain_data.json_request))]) + domain = self.op_cloud.update_domain( + name_or_id=domain_data.domain_id, + name=domain_data.domain_name, + description=domain_data.description) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat( + domain.description, matchers.Equals(domain_data.description)) + self.assert_calls() + + def test_update_domain_exception(self): + domain_data = self._get_domain_data( + description=self.getUniqueString('domainDesc')) + self.register_uris([ + dict(method='PATCH', + uri=self.get_mock_url(append=[domain_data.domain_id]), + status_code=409, + json=domain_data.json_response, + validate=dict(json={'domain': {'enabled': False}}))]) + with testtools.ExpectedException( + openstack.OpenStackCloudHTTPError, + "Error in updating domain %s" % domain_data.domain_id + ): + self.op_cloud.delete_domain(domain_data.domain_id) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_endpoints.py b/openstack/tests/unit/cloud/test_endpoints.py new file mode 100644 index 000000000..e0df712a9 --- /dev/null +++ b/openstack/tests/unit/cloud/test_endpoints.py @@ -0,0 +1,373 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_cloud_endpoints +---------------------------------- + +Tests Keystone endpoints commands. +""" + +import uuid + +from openstack.cloud.exc import OpenStackCloudException +from openstack.cloud.exc import OpenStackCloudUnavailableFeature +from openstack.tests.unit import base +from testtools import matchers + + +class TestCloudEndpoints(base.RequestsMockTestCase): + + def get_mock_url(self, service_type='identity', interface='admin', + resource='endpoints', append=None, base_url_append='v3'): + return super(TestCloudEndpoints, self).get_mock_url( + service_type, interface, resource, append, base_url_append) + + def _dummy_url(self): + return 'https://%s.example.com/' % uuid.uuid4().hex + + def test_create_endpoint_v2(self): + self.use_keystone_v2() + service_data = self._get_service_data() + endpoint_data = self._get_endpoint_v2_data( + service_data.service_id, public_url=self._dummy_url(), + internal_url=self._dummy_url(), admin_url=self._dummy_url()) + other_endpoint_data = self._get_endpoint_v2_data( + service_data.service_id, region=endpoint_data.region, + public_url=endpoint_data.public_url) + # correct the keys + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='services', base_url_append='OS-KSADM'), + status_code=200, + json={'OS-KSADM:services': [ + service_data.json_response_v2['OS-KSADM:service']]}), + dict(method='POST', + uri=self.get_mock_url(base_url_append=None), + status_code=200, + json=endpoint_data.json_response, + validate=dict(json=endpoint_data.json_request)), + dict(method='GET', + uri=self.get_mock_url( + resource='services', base_url_append='OS-KSADM'), + status_code=200, + json={'OS-KSADM:services': [ + service_data.json_response_v2['OS-KSADM:service']]}), + # NOTE(notmorgan): There is a stupid happening here, we do two + # gets on the services for some insane reason (read: keystoneclient + # is bad and should feel bad). 
+ dict(method='GET', + uri=self.get_mock_url( + resource='services', base_url_append='OS-KSADM'), + status_code=200, + json={'OS-KSADM:services': [ + service_data.json_response_v2['OS-KSADM:service']]}), + dict(method='POST', + uri=self.get_mock_url(base_url_append=None), + status_code=200, + json=other_endpoint_data.json_response, + validate=dict(json=other_endpoint_data.json_request)) + ]) + + endpoints = self.op_cloud.create_endpoint( + service_name_or_id=service_data.service_id, + region=endpoint_data.region, + public_url=endpoint_data.public_url, + internal_url=endpoint_data.internal_url, + admin_url=endpoint_data.admin_url + ) + + self.assertThat(endpoints[0].id, + matchers.Equals(endpoint_data.endpoint_id)) + self.assertThat(endpoints[0].region, + matchers.Equals(endpoint_data.region)) + self.assertThat(endpoints[0].publicURL, + matchers.Equals(endpoint_data.public_url)) + self.assertThat(endpoints[0].internalURL, + matchers.Equals(endpoint_data.internal_url)) + self.assertThat(endpoints[0].adminURL, + matchers.Equals(endpoint_data.admin_url)) + + # test v3 semantics on v2.0 endpoint + self.assertRaises(OpenStackCloudException, + self.op_cloud.create_endpoint, + service_name_or_id='service1', + interface='mock_admin_url', + url='admin') + + endpoints_3on2 = self.op_cloud.create_endpoint( + service_name_or_id=service_data.service_id, + region=endpoint_data.region, + interface='public', + url=endpoint_data.public_url + ) + + # test keys and values are correct + self.assertThat( + endpoints_3on2[0].region, + matchers.Equals(other_endpoint_data.region)) + self.assertThat( + endpoints_3on2[0].publicURL, + matchers.Equals(other_endpoint_data.public_url)) + self.assertThat(endpoints_3on2[0].get('internalURL'), + matchers.Equals(None)) + self.assertThat(endpoints_3on2[0].get('adminURL'), + matchers.Equals(None)) + self.assert_calls() + + def test_create_endpoint_v3(self): + service_data = self._get_service_data() + public_endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, interface='public', + url=self._dummy_url()) + public_endpoint_data_disabled = self._get_endpoint_v3_data( + service_id=service_data.service_id, interface='public', + url=self._dummy_url(), enabled=False) + admin_endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, interface='admin', + url=self._dummy_url(), region=public_endpoint_data.region) + internal_endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, interface='internal', + url=self._dummy_url(), region=public_endpoint_data.region) + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='services'), + status_code=200, + json={'services': [ + service_data.json_response_v3['service']]}), + dict(method='POST', + uri=self.get_mock_url(), + status_code=200, + json=public_endpoint_data_disabled.json_response, + validate=dict( + json=public_endpoint_data_disabled.json_request)), + dict(method='GET', + uri=self.get_mock_url(resource='services'), + status_code=200, + json={'services': [ + service_data.json_response_v3['service']]}), + dict(method='POST', + uri=self.get_mock_url(), + status_code=200, + json=public_endpoint_data.json_response, + validate=dict(json=public_endpoint_data.json_request)), + dict(method='POST', + uri=self.get_mock_url(), + status_code=200, + json=internal_endpoint_data.json_response, + validate=dict(json=internal_endpoint_data.json_request)), + dict(method='POST', + uri=self.get_mock_url(), + status_code=200, + 
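+                 # last of the three POSTs made by the v2-style call:
+                 # public, then internal, then admin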
json=admin_endpoint_data.json_response, + validate=dict(json=admin_endpoint_data.json_request)), + ]) + + endpoints = self.op_cloud.create_endpoint( + service_name_or_id=service_data.service_id, + region=public_endpoint_data_disabled.region, + url=public_endpoint_data_disabled.url, + interface=public_endpoint_data_disabled.interface, + enabled=False) + + # Test endpoint values + self.assertThat( + endpoints[0].id, + matchers.Equals(public_endpoint_data_disabled.endpoint_id)) + self.assertThat(endpoints[0].url, + matchers.Equals(public_endpoint_data_disabled.url)) + self.assertThat( + endpoints[0].interface, + matchers.Equals(public_endpoint_data_disabled.interface)) + self.assertThat( + endpoints[0].region, + matchers.Equals(public_endpoint_data_disabled.region)) + self.assertThat( + endpoints[0].region_id, + matchers.Equals(public_endpoint_data_disabled.region)) + self.assertThat(endpoints[0].enabled, + matchers.Equals(public_endpoint_data_disabled.enabled)) + + endpoints_2on3 = self.op_cloud.create_endpoint( + service_name_or_id=service_data.service_id, + region=public_endpoint_data.region, + public_url=public_endpoint_data.url, + internal_url=internal_endpoint_data.url, + admin_url=admin_endpoint_data.url) + + # Three endpoints should be returned, public, internal, and admin + self.assertThat(len(endpoints_2on3), matchers.Equals(3)) + + # test keys and values are correct for each endpoint created + for result, reference in zip( + endpoints_2on3, [public_endpoint_data, + internal_endpoint_data, + admin_endpoint_data] + ): + self.assertThat(result.id, matchers.Equals(reference.endpoint_id)) + self.assertThat(result.url, matchers.Equals(reference.url)) + self.assertThat(result.interface, + matchers.Equals(reference.interface)) + self.assertThat(result.region, + matchers.Equals(reference.region)) + self.assertThat(result.enabled, matchers.Equals(reference.enabled)) + self.assert_calls() + + def test_update_endpoint_v2(self): + self.use_keystone_v2() + self.assertRaises(OpenStackCloudUnavailableFeature, + self.op_cloud.update_endpoint, 'endpoint_id') + + def test_update_endpoint_v3(self): + service_data = self._get_service_data() + dummy_url = self._dummy_url() + endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, interface='admin', + enabled=False) + reference_request = endpoint_data.json_request.copy() + reference_request['endpoint']['url'] = dummy_url + self.register_uris([ + dict(method='PATCH', + uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), + status_code=200, + json=endpoint_data.json_response, + validate=dict(json=reference_request)) + ]) + endpoint = self.op_cloud.update_endpoint( + endpoint_data.endpoint_id, + service_name_or_id=service_data.service_id, + region=endpoint_data.region, + url=dummy_url, + interface=endpoint_data.interface, + enabled=False + ) + + # test keys and values are correct + self.assertThat(endpoint.id, + matchers.Equals(endpoint_data.endpoint_id)) + self.assertThat(endpoint.service_id, + matchers.Equals(service_data.service_id)) + self.assertThat(endpoint.url, + matchers.Equals(endpoint_data.url)) + self.assertThat(endpoint.interface, + matchers.Equals(endpoint_data.interface)) + + self.assert_calls() + + def test_list_endpoints(self): + endpoints_data = [self._get_endpoint_v3_data() for e in range(1, 10)] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'endpoints': [e.json_response['endpoint'] + for e in endpoints_data]}) + ]) + + endpoints = 
self.op_cloud.list_endpoints() + # test we are getting exactly len(self.mock_endpoints) elements + self.assertThat(len(endpoints), matchers.Equals(len(endpoints_data))) + + # test keys and values are correct + for i, ep in enumerate(endpoints_data): + self.assertThat(endpoints[i].id, + matchers.Equals(ep.endpoint_id)) + self.assertThat(endpoints[i].service_id, + matchers.Equals(ep.service_id)) + self.assertThat(endpoints[i].url, + matchers.Equals(ep.url)) + self.assertThat(endpoints[i].interface, + matchers.Equals(ep.interface)) + + self.assert_calls() + + def test_search_endpoints(self): + endpoints_data = [self._get_endpoint_v3_data(region='region1') + for e in range(0, 2)] + endpoints_data.extend([self._get_endpoint_v3_data() + for e in range(1, 8)]) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'endpoints': [e.json_response['endpoint'] + for e in endpoints_data]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'endpoints': [e.json_response['endpoint'] + for e in endpoints_data]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'endpoints': [e.json_response['endpoint'] + for e in endpoints_data]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'endpoints': [e.json_response['endpoint'] + for e in endpoints_data]}) + ]) + + # Search by id + endpoints = self.op_cloud.search_endpoints( + id=endpoints_data[-1].endpoint_id) + # # test we are getting exactly 1 element + self.assertEqual(1, len(endpoints)) + self.assertThat(endpoints[0].id, + matchers.Equals(endpoints_data[-1].endpoint_id)) + self.assertThat(endpoints[0].service_id, + matchers.Equals(endpoints_data[-1].service_id)) + self.assertThat(endpoints[0].url, + matchers.Equals(endpoints_data[-1].url)) + self.assertThat(endpoints[0].interface, + matchers.Equals(endpoints_data[-1].interface)) + + # Not found + endpoints = self.op_cloud.search_endpoints(id='!invalid!') + self.assertEqual(0, len(endpoints)) + + # Multiple matches + endpoints = self.op_cloud.search_endpoints( + filters={'region_id': 'region1'}) + # # test we are getting exactly 2 elements + self.assertEqual(2, len(endpoints)) + + # test we are getting the correct response for region/region_id compat + endpoints = self.op_cloud.search_endpoints( + filters={'region': 'region1'}) + # # test we are getting exactly 2 elements, this is v3 + self.assertEqual(2, len(endpoints)) + + self.assert_calls() + + def test_delete_endpoint(self): + endpoint_data = self._get_endpoint_v3_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'endpoints': [ + endpoint_data.json_response['endpoint']]}), + dict(method='DELETE', + uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), + status_code=204) + ]) + + # Delete by id + self.op_cloud.delete_endpoint(id=endpoint_data.endpoint_id) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_flavors.py b/openstack/tests/unit/cloud/test_flavors.py new file mode 100644 index 000000000..eff2d7601 --- /dev/null +++ b/openstack/tests/unit/cloud/test_flavors.py @@ -0,0 +1,258 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import openstack.cloud +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFlavors(base.RequestsMockTestCase): + + def test_create_flavor(self): + + self.register_uris([ + dict(method='POST', + uri='{endpoint}/flavors'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavor': fakes.FAKE_FLAVOR}, + validate=dict( + json={ + 'flavor': { + "name": "vanilla", + "ram": 65536, + "vcpus": 24, + "swap": 0, + "os-flavor-access:is_public": True, + "rxtx_factor": 1.0, + "OS-FLV-EXT-DATA:ephemeral": 0, + "disk": 1600, + "id": None}}))]) + + self.op_cloud.create_flavor( + 'vanilla', ram=65536, disk=1600, vcpus=24, + ) + self.assert_calls() + + def test_delete_flavor(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavors': fakes.FAKE_FLAVOR_LIST}), + dict(method='DELETE', + uri='{endpoint}/flavors/{id}'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID))]) + self.assertTrue(self.op_cloud.delete_flavor('vanilla')) + + self.assert_calls() + + def test_delete_flavor_not_found(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavors': fakes.FAKE_FLAVOR_LIST})]) + + self.assertFalse(self.op_cloud.delete_flavor('invalid')) + + self.assert_calls() + + def test_delete_flavor_exception(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavors': fakes.FAKE_FLAVOR_LIST}), + dict(method='DELETE', + uri='{endpoint}/flavors/{id}'.format( + endpoint=fakes.FAKE_FLAVOR_LIST, id=fakes.FLAVOR_ID), + status_code=503)]) + + self.assertRaises(openstack.OpenStackCloudException, + self.op_cloud.delete_flavor, 'vanilla') + + def test_list_flavors(self): + uris_to_mock = [ + dict(method='GET', + uri='{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavors': fakes.FAKE_FLAVOR_LIST}), + ] + uris_to_mock.extend([ + dict(method='GET', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']), + json={'extra_specs': {}}) + for flavor in fakes.FAKE_FLAVOR_LIST]) + self.register_uris(uris_to_mock) + + flavors = self.cloud.list_flavors() + + # test that new flavor is created correctly + found = False + for flavor in flavors: + if flavor['name'] == 'vanilla': + found = True + break + self.assertTrue(found) + needed_keys = {'name', 'ram', 'vcpus', 'id', 'is_public', 'disk'} + if found: + # check flavor content + self.assertTrue(needed_keys.issubset(flavor.keys())) + self.assert_calls() + + def test_get_flavor_by_ram(self): + uris_to_mock = [ + dict(method='GET', + uri='{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavors': fakes.FAKE_FLAVOR_LIST}), + ] + uris_to_mock.extend([ + dict(method='GET', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']), + 
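+                 # one os-extra_specs GET is expected per flavor in the
+                 # fake flavor list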
json={'extra_specs': {}}) + for flavor in fakes.FAKE_FLAVOR_LIST]) + self.register_uris(uris_to_mock) + + flavor = self.cloud.get_flavor_by_ram(ram=250) + self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id']) + + def test_get_flavor_by_ram_and_include(self): + uris_to_mock = [ + dict(method='GET', + uri='{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavors': fakes.FAKE_FLAVOR_LIST}), + ] + uris_to_mock.extend([ + dict(method='GET', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id']), + json={'extra_specs': {}}) + for flavor in fakes.FAKE_FLAVOR_LIST]) + self.register_uris(uris_to_mock) + flavor = self.cloud.get_flavor_by_ram(ram=150, include='strawberry') + self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id']) + + def test_get_flavor_by_ram_not_found(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'flavors': []})]) + self.assertRaises( + openstack.OpenStackCloudException, + self.cloud.get_flavor_by_ram, + ram=100) + + def test_get_flavor_string_and_int(self): + flavor_list_uri = '{endpoint}/flavors/detail?is_public=None'.format( + endpoint=fakes.COMPUTE_ENDPOINT) + flavor_resource_uri = '{endpoint}/flavors/1/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT) + flavor_list_json = {'flavors': [fakes.make_fake_flavor( + '1', 'vanilla')]} + flavor_json = {'extra_specs': {}} + + self.register_uris([ + dict(method='GET', uri=flavor_list_uri, json=flavor_list_json), + dict(method='GET', uri=flavor_resource_uri, json=flavor_json), + dict(method='GET', uri=flavor_list_uri, json=flavor_list_json), + dict(method='GET', uri=flavor_resource_uri, json=flavor_json)]) + + flavor1 = self.cloud.get_flavor('1') + self.assertEqual('1', flavor1['id']) + flavor2 = self.cloud.get_flavor(1) + self.assertEqual('1', flavor2['id']) + + def test_set_flavor_specs(self): + extra_specs = dict(key1='value1') + self.register_uris([ + dict(method='POST', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=1), + json=dict(extra_specs=extra_specs))]) + + self.op_cloud.set_flavor_specs(1, extra_specs) + self.assert_calls() + + def test_unset_flavor_specs(self): + keys = ['key1', 'key2'] + self.register_uris([ + dict(method='DELETE', + uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key)) + for key in keys]) + + self.op_cloud.unset_flavor_specs(1, keys) + self.assert_calls() + + def test_add_flavor_access(self): + self.register_uris([ + dict(method='POST', + uri='{endpoint}/flavors/{id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'), + json={ + 'flavor_access': [{ + 'flavor_id': 'flavor_id', 'tenant_id': 'tenant_id'}]}, + validate=dict( + json={'addTenantAccess': {'tenant': 'tenant_id'}}))]) + + self.op_cloud.add_flavor_access('flavor_id', 'tenant_id') + self.assert_calls() + + def test_remove_flavor_access(self): + self.register_uris([ + dict(method='POST', + uri='{endpoint}/flavors/{id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id'), + json={'flavor_access': []}, + validate=dict( + json={'removeTenantAccess': {'tenant': 'tenant_id'}}))]) + + self.op_cloud.remove_flavor_access('flavor_id', 'tenant_id') + self.assert_calls() + + def test_list_flavor_access(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/flavors/vanilla/os-flavor-access'.format( + 
endpoint=fakes.COMPUTE_ENDPOINT), + json={ + 'flavor_access': [ + {'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'}]}) + ]) + self.op_cloud.list_flavor_access('vanilla') + self.assert_calls() + + def test_get_flavor_by_id(self): + flavor_uri = '{endpoint}/flavors/1'.format( + endpoint=fakes.COMPUTE_ENDPOINT) + flavor_extra_uri = '{endpoint}/flavors/1/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT) + flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} + flavor_extra_json = {'extra_specs': {'name': 'test'}} + + self.register_uris([ + dict(method='GET', uri=flavor_uri, json=flavor_json), + dict(method='GET', uri=flavor_extra_uri, json=flavor_extra_json), + ]) + + flavor1 = self.cloud.get_flavor_by_id('1') + self.assertEqual('1', flavor1['id']) + self.assertEqual({'name': 'test'}, flavor1.extra_specs) + flavor2 = self.cloud.get_flavor_by_id('1', get_extra=False) + self.assertEqual('1', flavor2['id']) + self.assertEqual({}, flavor2.extra_specs) diff --git a/openstack/tests/unit/cloud/test_floating_ip_common.py b/openstack/tests/unit/cloud/test_floating_ip_common.py new file mode 100644 index 000000000..f4c3a0fc3 --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_common.py @@ -0,0 +1,211 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_floating_ip_common +---------------------------------- + +Tests floating IP resource methods for Neutron and Nova-network. 
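+
+These tests stub OpenStackCloud methods out directly with mock.patch.object
+instead of mocking HTTP the way the RequestsMockTestCase suites do. A
+minimal, self-contained sketch of that technique, using a toy class and the
+stdlib unittest.mock rather than the real OpenStackCloud::
+
+    from unittest import mock
+
+    class Cloud(object):
+        def available_floating_ip(self, network=None):
+            raise RuntimeError('would talk to a real cloud')
+
+    with mock.patch.object(Cloud, 'available_floating_ip',
+                           return_value={'id': 'fip-1', 'attached': False}):
+        fip = Cloud().available_floating_ip(network='public')
+        assert fip['id'] == 'fip-1'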
+""" + +from mock import patch + +from openstack.cloud import meta +from openstack import OpenStackCloud +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFloatingIP(base.TestCase): + + @patch.object(OpenStackCloud, 'get_floating_ip') + @patch.object(OpenStackCloud, '_attach_ip_to_server') + @patch.object(OpenStackCloud, 'available_floating_ip') + def test_add_auto_ip( + self, mock_available_floating_ip, mock_attach_ip_to_server, + mock_get_floating_ip): + server_dict = fakes.make_fake_server( + server_id='server-id', name='test-server', status="ACTIVE", + addresses={} + ) + floating_ip_dict = { + "id": "this-is-a-floating-ip-id", + "fixed_ip_address": None, + "internal_network": None, + "floating_ip_address": "203.0.113.29", + "network": "this-is-a-net-or-pool-id", + "attached": False, + "status": "ACTIVE" + } + + mock_available_floating_ip.return_value = floating_ip_dict + + self.cloud.add_auto_ip(server=server_dict) + + mock_attach_ip_to_server.assert_called_with( + timeout=60, wait=False, server=server_dict, + floating_ip=floating_ip_dict, skip_attach=False) + + @patch.object(OpenStackCloud, '_add_ip_from_pool') + def test_add_ips_to_server_pool(self, mock_add_ip_from_pool): + server_dict = fakes.make_fake_server( + server_id='romeo', name='test-server', status="ACTIVE", + addresses={}) + pool = 'nova' + + self.cloud.add_ips_to_server(server_dict, ip_pool=pool) + + mock_add_ip_from_pool.assert_called_with( + server_dict, pool, reuse=True, wait=False, timeout=60, + fixed_address=None, nat_destination=None) + + @patch.object(OpenStackCloud, 'has_service') + @patch.object(OpenStackCloud, 'get_floating_ip') + @patch.object(OpenStackCloud, '_add_auto_ip') + def test_add_ips_to_server_ipv6_only( + self, mock_add_auto_ip, + mock_get_floating_ip, + mock_has_service): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + mock_has_service.return_value = False + server = fakes.make_fake_server( + server_id='server-id', name='test-server', status="ACTIVE", + addresses={ + 'private': [{ + 'addr': "10.223.160.141", + 'version': 4 + }], + 'public': [{ + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42', + u'OS-EXT-IPS:type': u'fixed', + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6 + }] + } + ) + server_dict = meta.add_server_interfaces(self.cloud, server) + + new_server = self.cloud.add_ips_to_server(server=server_dict) + mock_get_floating_ip.assert_not_called() + mock_add_auto_ip.assert_not_called() + self.assertEqual( + new_server['interface_ip'], + '2001:4800:7819:103:be76:4eff:fe05:8525') + self.assertEqual(new_server['private_v4'], '10.223.160.141') + self.assertEqual(new_server['public_v4'], '') + self.assertEqual( + new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525') + + @patch.object(OpenStackCloud, 'has_service') + @patch.object(OpenStackCloud, 'get_floating_ip') + @patch.object(OpenStackCloud, '_add_auto_ip') + def test_add_ips_to_server_rackspace( + self, mock_add_auto_ip, + mock_get_floating_ip, + mock_has_service): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + mock_has_service.return_value = False + server = fakes.make_fake_server( + server_id='server-id', name='test-server', status="ACTIVE", + addresses={ + 'private': [{ + 'addr': "10.223.160.141", + 'version': 4 + }], + 'public': [{ + 'addr': "104.130.246.91", + 'version': 4 + }, { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6 + }] + 
} + ) + server_dict = meta.add_server_interfaces(self.cloud, server) + + new_server = self.cloud.add_ips_to_server(server=server_dict) + mock_get_floating_ip.assert_not_called() + mock_add_auto_ip.assert_not_called() + self.assertEqual( + new_server['interface_ip'], + '2001:4800:7819:103:be76:4eff:fe05:8525') + + @patch.object(OpenStackCloud, 'has_service') + @patch.object(OpenStackCloud, 'get_floating_ip') + @patch.object(OpenStackCloud, '_add_auto_ip') + def test_add_ips_to_server_rackspace_local_ipv4( + self, mock_add_auto_ip, + mock_get_floating_ip, + mock_has_service): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = False + mock_has_service.return_value = False + server = fakes.make_fake_server( + server_id='server-id', name='test-server', status="ACTIVE", + addresses={ + 'private': [{ + 'addr': "10.223.160.141", + 'version': 4 + }], + 'public': [{ + 'addr': "104.130.246.91", + 'version': 4 + }, { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6 + }] + } + ) + server_dict = meta.add_server_interfaces(self.cloud, server) + + new_server = self.cloud.add_ips_to_server(server=server_dict) + mock_get_floating_ip.assert_not_called() + mock_add_auto_ip.assert_not_called() + self.assertEqual(new_server['interface_ip'], '104.130.246.91') + + @patch.object(OpenStackCloud, 'add_ip_list') + def test_add_ips_to_server_ip_list(self, mock_add_ip_list): + server_dict = fakes.make_fake_server( + server_id='server-id', name='test-server', status="ACTIVE", + addresses={}) + ips = ['203.0.113.29', '172.24.4.229'] + + self.cloud.add_ips_to_server(server_dict, ips=ips) + + mock_add_ip_list.assert_called_with( + server_dict, ips, wait=False, timeout=60, fixed_address=None) + + @patch.object(OpenStackCloud, '_needs_floating_ip') + @patch.object(OpenStackCloud, '_add_auto_ip') + def test_add_ips_to_server_auto_ip( + self, mock_add_auto_ip, mock_needs_floating_ip): + server_dict = fakes.make_fake_server( + server_id='server-id', name='test-server', status="ACTIVE", + addresses={}) + + # TODO(mordred) REMOVE THIS MOCK WHEN THE NEXT PATCH LANDS + # SERIOUSLY THIS TIME. NEXT PATCH - WHICH SHOULD ADD MOCKS FOR + # list_ports AND list_networks AND list_subnets. BUT THAT WOULD + # BE NOT ACTUALLY RELATED TO THIS PATCH. SO DO IT NEXT PATCH + mock_needs_floating_ip.return_value = True + + self.cloud.add_ips_to_server(server_dict) + + mock_add_auto_ip.assert_called_with( + server_dict, wait=False, timeout=60, reuse=True) diff --git a/openstack/tests/unit/cloud/test_floating_ip_neutron.py b/openstack/tests/unit/cloud/test_floating_ip_neutron.py new file mode 100644 index 000000000..08fb752cd --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_neutron.py @@ -0,0 +1,1001 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
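+
+# A rough sketch of the floating-ip normalization that the tests below pin
+# down; the field handling is inferred from the assertions, not taken from
+# the real openstack.cloud implementation:
+#
+#     def normalize(fip):
+#         out = dict(fip)
+#         out.setdefault('status', 'UNKNOWN')       # see test_float_no_status
+#         out['project_id'] = out.get('tenant_id')  # see test_get_floating_ip
+#         return out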
+ +""" +test_floating_ip_neutron +---------------------------------- + +Tests Floating IP resource methods for Neutron +""" + +import copy +import datetime +import munch + +from openstack.cloud import exc +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFloatingIP(base.RequestsMockTestCase): + mock_floating_ip_list_rep = { + 'floatingips': [ + { + 'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', + 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', + 'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57', + 'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.4.229', + 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', + 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda7', + 'status': 'ACTIVE' + }, + { + 'router_id': None, + 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', + 'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57', + 'fixed_ip_address': None, + 'floating_ip_address': '203.0.113.30', + 'port_id': None, + 'id': '61cea855-49cb-4846-997d-801b70c71bdd', + 'status': 'DOWN' + } + ] + } + + mock_floating_ip_new_rep = { + 'floatingip': { + 'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.4.229', + 'floating_network_id': 'my-network-id', + 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8', + 'port_id': None, + 'router_id': None, + 'status': 'ACTIVE', + 'tenant_id': '4969c491a3c74ee4af974e6d800c62df' + } + } + + mock_floating_ip_port_rep = { + 'floatingip': { + 'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.4.229', + 'floating_network_id': 'my-network-id', + 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8', + 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', + 'router_id': None, + 'status': 'ACTIVE', + 'tenant_id': '4969c491a3c74ee4af974e6d800c62df' + } + } + + mock_get_network_rep = { + 'status': 'ACTIVE', + 'subnets': [ + '54d6f61d-db07-451c-9ab3-b9609b6b6f0b' + ], + 'name': 'my-network', + 'provider:physical_network': None, + 'admin_state_up': True, + 'tenant_id': '4fd44f30292945e481c7b8a0c8908869', + 'provider:network_type': 'local', + 'router:external': True, + 'shared': True, + 'id': 'my-network-id', + 'provider:segmentation_id': None + } + + mock_search_ports_rep = [ + { + 'status': 'ACTIVE', + 'binding:host_id': 'devstack', + 'name': 'first-port', + 'created_at': datetime.datetime.now().isoformat(), + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3', + 'tenant_id': '', + 'extra_dhcp_opts': [], + 'binding:vif_details': { + 'port_filter': True, + 'ovs_hybrid_plug': True + }, + 'binding:vif_type': 'ovs', + 'device_owner': 'compute:None', + 'mac_address': 'fa:16:3e:58:42:ed', + 'binding:profile': {}, + 'binding:vnic_type': 'normal', + 'fixed_ips': [ + { + 'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062', + 'ip_address': u'172.24.4.2' + } + ], + 'id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', + 'security_groups': [], + 'device_id': 'server-id' + } + ] + + def assertAreInstances(self, elements, elem_type): + for e in elements: + self.assertIsInstance(e, elem_type) + + def setUp(self): + super(TestFloatingIP, self).setUp() + + self.fake_server = fakes.make_fake_server( + 'server-id', '', 'ACTIVE', + addresses={u'test_pnztt_net': [{ + u'OS-EXT-IPS:type': u'fixed', + u'addr': '192.0.2.129', + u'version': 4, + u'OS-EXT-IPS-MAC:mac_addr': + u'fa:16:3e:ae:7d:42'}]}) + self.floating_ip = self.cloud._normalize_floating_ips( + self.mock_floating_ip_list_rep['floatingips'])[0] + + def test_float_no_status(self): + floating_ips = [ + { + 
'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.4.229', + 'floating_network_id': 'my-network-id', + 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8', + 'port_id': None, + 'router_id': None, + 'tenant_id': '4969c491a3c74ee4af974e6d800c62df' + } + ] + normalized = self.cloud._normalize_floating_ips(floating_ips) + self.assertEqual('UNKNOWN', normalized[0]['status']) + + def test_list_floating_ips(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/floatingips.json', + json=self.mock_floating_ip_list_rep)]) + + floating_ips = self.cloud.list_floating_ips() + + self.assertIsInstance(floating_ips, list) + self.assertAreInstances(floating_ips, dict) + self.assertEqual(2, len(floating_ips)) + + self.assert_calls() + + def test_list_floating_ips_with_filters(self): + + self.register_uris([ + dict(method='GET', + uri=('https://network.example.com/v2.0/floatingips.json?' + 'Foo=42'), + json={'floatingips': []})]) + + self.cloud.list_floating_ips(filters={'Foo': 42}) + + self.assert_calls() + + def test_search_floating_ips(self): + self.register_uris([ + dict(method='GET', + uri=('https://network.example.com/v2.0/floatingips.json' + '?attached=False'), + json=self.mock_floating_ip_list_rep)]) + + floating_ips = self.cloud.search_floating_ips( + filters={'attached': False}) + + self.assertIsInstance(floating_ips, list) + self.assertAreInstances(floating_ips, dict) + self.assertEqual(1, len(floating_ips)) + self.assert_calls() + + def test_get_floating_ip(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/floatingips.json', + json=self.mock_floating_ip_list_rep)]) + + floating_ip = self.cloud.get_floating_ip( + id='2f245a7b-796b-4f26-9cf9-9e82d248fda7') + + self.assertIsInstance(floating_ip, dict) + self.assertEqual('172.24.4.229', floating_ip['floating_ip_address']) + self.assertEqual( + self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'], + floating_ip['project_id'] + ) + self.assertEqual( + self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'], + floating_ip['tenant_id'] + ) + self.assertIn('location', floating_ip) + self.assert_calls() + + def test_get_floating_ip_not_found(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/floatingips.json', + json=self.mock_floating_ip_list_rep)]) + + floating_ip = self.cloud.get_floating_ip(id='non-existent') + + self.assertIsNone(floating_ip) + self.assert_calls() + + def test_get_floating_ip_by_id(self): + fid = self.mock_floating_ip_new_rep['floatingip']['id'] + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/floatingips/' + '{id}'.format(id=fid), + json=self.mock_floating_ip_new_rep)]) + + floating_ip = self.cloud.get_floating_ip_by_id(id=fid) + + self.assertIsInstance(floating_ip, dict) + self.assertEqual('172.24.4.229', floating_ip['floating_ip_address']) + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['tenant_id'], + floating_ip['project_id'] + ) + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['tenant_id'], + floating_ip['tenant_id'] + ) + self.assertIn('location', floating_ip) + self.assert_calls() + + def test_create_floating_ip(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [self.mock_get_network_rep]}), + dict(method='POST', + uri='https://network.example.com/v2.0/floatingips.json', + json=self.mock_floating_ip_new_rep, + validate=dict( + 
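+                     # create_floating_ip should POST only the target
+                     # network id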
json={'floatingip': { + 'floating_network_id': 'my-network-id'}})) + ]) + ip = self.cloud.create_floating_ip(network='my-network') + + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], + ip['floating_ip_address']) + self.assert_calls() + + def test_create_floating_ip_port_bad_response(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [self.mock_get_network_rep]}), + dict(method='POST', + uri='https://network.example.com/v2.0/floatingips.json', + json=self.mock_floating_ip_new_rep, + validate=dict( + json={'floatingip': { + 'floating_network_id': 'my-network-id', + 'port_id': u'ce705c24-c1ef-408a-bda3-7bbd946164ab'}})) + ]) + + # Fails because we requested a port and the returned FIP has no port + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.create_floating_ip, + network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ab') + self.assert_calls() + + def test_create_floating_ip_port(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [self.mock_get_network_rep]}), + dict(method='POST', + uri='https://network.example.com/v2.0/floatingips.json', + json=self.mock_floating_ip_port_rep, + validate=dict( + json={'floatingip': { + 'floating_network_id': 'my-network-id', + 'port_id': u'ce705c24-c1ef-408a-bda3-7bbd946164ac'}})) + ]) + + ip = self.cloud.create_floating_ip( + network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ac') + + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], + ip['floating_ip_address']) + self.assert_calls() + + def test_neutron_available_floating_ips(self): + """ + Test without specifying a network name. + """ + fips_mock_uri = 'https://network.example.com/v2.0/floatingips.json' + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [self.mock_get_network_rep]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': []}), + dict(method='GET', uri=fips_mock_uri, json={'floatingips': []}), + dict(method='POST', uri=fips_mock_uri, + json=self.mock_floating_ip_new_rep, + validate=dict(json={ + 'floatingip': { + 'floating_network_id': self.mock_get_network_rep['id'] + }})) + ]) + + # Test if first network is selected if no network is given + self.cloud._neutron_available_floating_ips() + self.assert_calls() + + def test_neutron_available_floating_ips_network(self): + """ + Test with specifying a network name. + """ + fips_mock_uri = 'https://network.example.com/v2.0/floatingips.json' + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [self.mock_get_network_rep]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': []}), + dict(method='GET', uri=fips_mock_uri, json={'floatingips': []}), + dict(method='POST', uri=fips_mock_uri, + json=self.mock_floating_ip_new_rep, + validate=dict(json={ + 'floatingip': { + 'floating_network_id': self.mock_get_network_rep['id'] + }})) + ]) + + # Test if first network is selected if no network is given + self.cloud._neutron_available_floating_ips( + network=self.mock_get_network_rep['name'] + ) + self.assert_calls() + + def test_neutron_available_floating_ips_invalid_network(self): + """ + Test with an invalid network name. 
+ """ + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [self.mock_get_network_rep]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': []}) + ]) + + self.assertRaises( + exc.OpenStackCloudException, + self.cloud._neutron_available_floating_ips, + network='INVALID') + + self.assert_calls() + + def test_auto_ip_pool_no_reuse(self): + # payloads taken from citycloud + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={"networks": [{ + "status": "ACTIVE", + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7"], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "ext-net", + "admin_state_up": True, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa + "mtu": 0, + "is_default": False, + "router:external": True, + "ipv4_address_scope": None, + "shared": False, + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", + "description": None + }, { + "status": "ACTIVE", + "subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "private", + "admin_state_up": True, + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "created_at": "2016-10-22T13:46:26", + "tags": [], + "updated_at": "2016-10-22T13:46:26", + "ipv6_address_scope": None, + "router:external": False, + "ipv4_address_scope": None, + "shared": False, + "mtu": 1450, + "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "description": "" + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/ports.json' + '?device_id=f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7', + json={"ports": [{ + "status": "ACTIVE", + "created_at": "2017-02-06T20:59:45", + "description": "", + "allowed_address_pairs": [], + "admin_state_up": True, + "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "dns_name": None, + "extra_dhcp_opts": [], + "mac_address": "fa:16:3e:e8:7f:03", + "updated_at": "2017-02-06T20:59:49", + "name": "", + "device_owner": "compute:None", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "binding:vnic_type": "normal", + "fixed_ips": [{ + "subnet_id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", + "ip_address": "10.4.0.16"}], + "id": "a767944e-057a-47d1-a669-824a21b8fb7b", + "security_groups": [ + "9fb5ba44-5c46-4357-8e60-8b55526cab54"], + "device_id": "f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7", + }]}), + + dict(method='POST', + uri='https://network.example.com/v2.0/floatingips.json', + json={"floatingip": { + "router_id": "9de9c787-8f89-4a53-8468-a5533d6d7fd1", + "status": "DOWN", + "description": "", + "dns_domain": "", + "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa + "fixed_ip_address": "10.4.0.16", + "floating_ip_address": "89.40.216.153", + "port_id": "a767944e-057a-47d1-a669-824a21b8fb7b", + "id": "e69179dc-a904-4c9a-a4c9-891e2ecb984c", + "dns_name": "", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394" + }}, + validate=dict(json={"floatingip": { + "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa + "fixed_ip_address": "10.4.0.16", + "port_id": "a767944e-057a-47d1-a669-824a21b8fb7b", + }})), + dict(method='GET', + uri='{endpoint}/servers/detail'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={"servers": [{ + "status": "ACTIVE", + "updated": "2017-02-06T20:59:49Z", + 
"addresses": { + "private": [{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", + "version": 4, + "addr": "10.4.0.16", + "OS-EXT-IPS:type": "fixed" + }, { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", + "version": 4, + "addr": "89.40.216.153", + "OS-EXT-IPS:type": "floating" + }]}, + "key_name": None, + "image": {"id": "95e4c449-8abf-486e-97d9-dc3f82417d2d"}, + "OS-EXT-STS:task_state": None, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2017-02-06T20:59:48.000000", + "flavor": {"id": "2186bd79-a05e-4953-9dde-ddefb63c88d4"}, + "id": "f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7", + "security_groups": [{"name": "default"}], + "OS-SRV-USG:terminated_at": None, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "c17534835f8f42bf98fc367e0bf35e09", + "name": "testmt", + "created": "2017-02-06T20:59:44Z", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={"networks": [{ + "status": "ACTIVE", + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7"], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "ext-net", + "admin_state_up": True, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa + "mtu": 0, + "is_default": False, + "router:external": True, + "ipv4_address_scope": None, + "shared": False, + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", + "description": None + }, { + "status": "ACTIVE", + "subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "private", + "admin_state_up": True, + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "created_at": "2016-10-22T13:46:26", + "tags": [], + "updated_at": "2016-10-22T13:46:26", + "ipv6_address_scope": None, + "router:external": False, + "ipv4_address_scope": None, + "shared": False, + "mtu": 1450, + "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "description": "" + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={"subnets": [{ + "description": "", + "enable_dhcp": True, + "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "created_at": "2016-10-22T13:46:26", + "dns_nameservers": [ + "89.36.90.101", + "89.36.90.102"], + "updated_at": "2016-10-22T13:46:26", + "gateway_ip": "10.4.0.1", + "ipv6_ra_mode": None, + "allocation_pools": [{ + "start": "10.4.0.2", + "end": "10.4.0.200"}], + "host_routes": [], + "ip_version": 4, + "ipv6_address_mode": None, + "cidr": "10.4.0.0/24", + "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", + "subnetpool_id": None, + "name": "private-subnet-ipv4", + }]})]) + + self.cloud.add_ips_to_server( + munch.Munch( + id='f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7', + addresses={ + "private": [{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", + "version": 4, + "addr": "10.4.0.16", + "OS-EXT-IPS:type": "fixed" + }]}), + ip_pool='ext-net', reuse=False) + + self.assert_calls() + + def test_available_floating_ip_new(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': 
[self.mock_get_network_rep]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': []}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': []}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + validate=dict( + json={'floatingip': { + 'floating_network_id': 'my-network-id'}}), + json=self.mock_floating_ip_new_rep) + ]) + + ip = self.cloud.available_floating_ip(network='my-network') + + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], + ip['floating_ip_address']) + self.assert_calls() + + def test_delete_floating_ip_existing(self): + fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' + fake_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'ACTIVE', + } + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [fake_fip]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [fake_fip]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': []}), + ]) + + self.assertTrue( + self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2)) + self.assert_calls() + + def test_delete_floating_ip_existing_down(self): + fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' + fake_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'ACTIVE', + } + down_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'DOWN', + } + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [fake_fip]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [down_fip]}), + ]) + + self.assertTrue( + self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2)) + self.assert_calls() + + def test_delete_floating_ip_existing_no_delete(self): + fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' + fake_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'ACTIVE', + } + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [fake_fip]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 
'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [fake_fip]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [fake_fip]}), + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.delete_floating_ip, + floating_ip_id=fip_id, retry=2) + self.assert_calls() + + def test_delete_floating_ip_not_found(self): + self.register_uris([ + dict(method='DELETE', + uri=('https://network.example.com/v2.0/floatingips/' + 'a-wild-id-appears.json'), + status_code=404)]) + + ret = self.cloud.delete_floating_ip( + floating_ip_id='a-wild-id-appears') + + self.assertFalse(ret) + self.assert_calls() + + def test_attach_ip_to_server(self): + fip = self.mock_floating_ip_list_rep['floatingips'][0] + device_id = self.fake_server['id'] + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json'], + qs_elements=["device_id={0}".format(device_id)]), + json={'ports': self.mock_search_ports_rep}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format( + fip['id'])]), + json={'floatingip': fip}, + validate=dict( + json={'floatingip': { + 'port_id': self.mock_search_ports_rep[0]['id'], + 'fixed_ip_address': self.mock_search_ports_rep[0][ + 'fixed_ips'][0]['ip_address']}})), + ]) + + self.cloud._attach_ip_to_server( + server=self.fake_server, + floating_ip=self.floating_ip) + self.assert_calls() + + def test_add_ip_refresh_timeout(self): + device_id = self.fake_server['id'] + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'networks.json']), + json={'networks': [self.mock_get_network_rep]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': []}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json'], + qs_elements=["device_id={0}".format(device_id)]), + json={'ports': self.mock_search_ports_rep}), + dict(method='POST', + uri='https://network.example.com/v2.0/floatingips.json', + json={'floatingip': self.floating_ip}, + validate=dict( + json={'floatingip': { + 'floating_network_id': 'my-network-id', + 'fixed_ip_address': self.mock_search_ports_rep[0][ + 'fixed_ips'][0]['ip_address'], + 'port_id': self.mock_search_ports_rep[0]['id']}})), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [self.floating_ip]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format( + self.floating_ip['id'])]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': []}), + ]) + + self.assertRaises( + exc.OpenStackCloudTimeout, + self.cloud._add_auto_ip, + server=self.fake_server, + wait=True, timeout=0.01, + reuse=False) + self.assert_calls() + + def test_detach_ip_from_server(self): + fip = self.mock_floating_ip_new_rep['floatingip'] + attached_fip = copy.copy(fip) + attached_fip['port_id'] = 'server-port-id' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( 
+ 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [attached_fip]}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format( + fip['id'])]), + json={'floatingip': fip}, + validate=dict( + json={'floatingip': {'port_id': None}})) + ]) + self.cloud.detach_ip_from_server( + server_id='server-id', + floating_ip_id=fip['id']) + self.assert_calls() + + def test_add_ip_from_pool(self): + network = self.mock_get_network_rep + fip = self.mock_floating_ip_new_rep['floatingip'] + fixed_ip = self.mock_search_ports_rep[0]['fixed_ips'][0]['ip_address'] + port_id = self.mock_search_ports_rep[0]['id'] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [network]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': []}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [fip]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingip': fip}, + validate=dict( + json={'floatingip': { + 'floating_network_id': network['id']}})), + dict(method="GET", + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json'], + qs_elements=[ + "device_id={0}".format(self.fake_server['id'])]), + json={'ports': self.mock_search_ports_rep}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format( + fip['id'])]), + json={'floatingip': fip}, + validate=dict( + json={'floatingip': { + 'fixed_ip_address': fixed_ip, + 'port_id': port_id}})), + ]) + + server = self.cloud._add_ip_from_pool( + server=self.fake_server, + network=network['id'], + fixed_address=fixed_ip) + + self.assertEqual(server, self.fake_server) + self.assert_calls() + + def test_cleanup_floating_ips(self): + floating_ips = [{ + "id": "this-is-a-floating-ip-id", + "fixed_ip_address": None, + "internal_network": None, + "floating_ip_address": "203.0.113.29", + "network": "this-is-a-net-or-pool-id", + "port_id": None, + "status": "ACTIVE" + }, { + "id": "this-is-an-attached-floating-ip-id", + "fixed_ip_address": None, + "internal_network": None, + "floating_ip_address": "203.0.113.29", + "network": "this-is-a-net-or-pool-id", + "attached": True, + "port_id": "this-is-id-of-port-with-fip", + "status": "ACTIVE" + }] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': floating_ips}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'floatingips/{0}.json'.format( + floating_ips[0]['id'])]), + json={}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingips': [floating_ips[1]]}), + ]) + self.cloud.delete_unattached_floating_ips() + self.assert_calls() + + def test_create_floating_ip_no_port(self): + server_port = { + "id": "port-id", + "device_id": "some-server", + 'created_at': datetime.datetime.now().isoformat(), + 'fixed_ips': [ + { + 'subnet_id': 'subnet-id', + 'ip_address': '172.24.4.2' + } + ], + } + floating_ip = { + "id": "floating-ip-id", + "port_id": None + } + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 
'networks.json']), + json={'networks': [self.mock_get_network_rep]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': []}), + dict(method="GET", + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json'], + qs_elements=['device_id=some-server']), + json={'ports': [server_port]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips.json']), + json={'floatingip': floating_ip}) + ]) + + self.assertRaises( + exc.OpenStackCloudException, + self.cloud._neutron_create_floating_ip, + server=dict(id='some-server')) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_floating_ip_nova.py b/openstack/tests/unit/cloud/test_floating_ip_nova.py new file mode 100644 index 000000000..dc6261851 --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_nova.py @@ -0,0 +1,321 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_floating_ip_nova +---------------------------------- + +Tests Floating IP resource methods for nova-network +""" + +from openstack.tests import fakes +from openstack.tests.unit import base + + +def get_fake_has_service(has_service): + def fake_has_service(s): + if s == 'network': + return False + return has_service(s) + return fake_has_service + + +class TestFloatingIP(base.RequestsMockTestCase): + mock_floating_ip_list_rep = [ + { + 'fixed_ip': None, + 'id': 1, + 'instance_id': None, + 'ip': '203.0.113.1', + 'pool': 'nova' + }, + { + 'fixed_ip': None, + 'id': 2, + 'instance_id': None, + 'ip': '203.0.113.2', + 'pool': 'nova' + }, + { + 'fixed_ip': '192.0.2.3', + 'id': 29, + 'instance_id': 'myself', + 'ip': '198.51.100.29', + 'pool': 'black_hole' + } + ] + + mock_floating_ip_pools = [ + {'id': 'pool1_id', 'name': 'nova'}, + {'id': 'pool2_id', 'name': 'pool2'}] + + def assertAreInstances(self, elements, elem_type): + for e in elements: + self.assertIsInstance(e, elem_type) + + def setUp(self): + super(TestFloatingIP, self).setUp() + + self.fake_server = fakes.make_fake_server( + 'server-id', '', 'ACTIVE', + addresses={u'test_pnztt_net': [{ + u'OS-EXT-IPS:type': u'fixed', + u'addr': '192.0.2.129', + u'version': 4, + u'OS-EXT-IPS-MAC:mac_addr': + u'fa:16:3e:ae:7d:42'}]}) + + self.cloud.has_service = get_fake_has_service(self.cloud.has_service) + + def test_list_floating_ips(self): + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + ]) + floating_ips = self.cloud.list_floating_ips() + + self.assertIsInstance(floating_ips, list) + self.assertEqual(3, len(floating_ips)) + self.assertAreInstances(floating_ips, dict) + + self.assert_calls() + + def test_list_floating_ips_with_filters(self): + self.assertRaisesRegex( + ValueError, "Nova-network don't support server-side", + self.cloud.list_floating_ips, filters={'Foo': 42} + ) + + def 
test_search_floating_ips(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + ]) + + floating_ips = self.cloud.search_floating_ips( + filters={'attached': False}) + + self.assertIsInstance(floating_ips, list) + self.assertEqual(2, len(floating_ips)) + self.assertAreInstances(floating_ips, dict) + + self.assert_calls() + + def test_get_floating_ip(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + ]) + + floating_ip = self.cloud.get_floating_ip(id='29') + + self.assertIsInstance(floating_ip, dict) + self.assertEqual('198.51.100.29', floating_ip['floating_ip_address']) + + self.assert_calls() + + def test_get_floating_ip_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + ]) + + floating_ip = self.cloud.get_floating_ip(id='666') + + self.assertIsNone(floating_ip) + + self.assert_calls() + + def test_get_floating_ip_by_id(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips', + '1']), + json={'floating_ip': self.mock_floating_ip_list_rep[0]}), + ]) + + floating_ip = self.cloud.get_floating_ip_by_id(id='1') + + self.assertIsInstance(floating_ip, dict) + self.assertEqual('203.0.113.1', floating_ip['floating_ip_address']) + self.assert_calls() + + def test_create_floating_ip(self): + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ip': self.mock_floating_ip_list_rep[1]}, + validate=dict( + json={'pool': 'nova'})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', + append=['os-floating-ips', '2']), + json={'floating_ip': self.mock_floating_ip_list_rep[1]}), + ]) + + self.cloud.create_floating_ip(network='nova') + + self.assert_calls() + + def test_available_floating_ip_existing(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep[:1]}), + ]) + + ip = self.cloud.available_floating_ip(network='nova') + + self.assertEqual(self.mock_floating_ip_list_rep[0]['ip'], + ip['floating_ip_address']) + self.assert_calls() + + def test_available_floating_ip_new(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': []}), + dict(method='POST', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ip': self.mock_floating_ip_list_rep[0]}, + validate=dict( + json={'pool': 'nova'})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', + append=['os-floating-ips', '1']), + json={'floating_ip': self.mock_floating_ip_list_rep[0]}), + ]) + + ip = self.cloud.available_floating_ip(network='nova') + + self.assertEqual(self.mock_floating_ip_list_rep[0]['ip'], + ip['floating_ip_address']) + self.assert_calls() + + def test_delete_floating_ip_existing(self): + + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', + append=['os-floating-ips', 'a-wild-id-appears'])), + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': []}), + ]) + + ret = self.cloud.delete_floating_ip( + 
floating_ip_id='a-wild-id-appears') + + self.assertTrue(ret) + self.assert_calls() + + def test_delete_floating_ip_not_found(self): + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', + append=['os-floating-ips', 'a-wild-id-appears']), + status_code=404), + ]) + + ret = self.cloud.delete_floating_ip( + floating_ip_id='a-wild-id-appears') + + self.assertFalse(ret) + self.assert_calls() + + def test_attach_ip_to_server(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', + append=['servers', self.fake_server['id'], 'action']), + validate=dict( + json={ + "addFloatingIp": { + "address": "203.0.113.1", + "fixed_address": "192.0.2.129", + }})), + ]) + + self.cloud._attach_ip_to_server( + server=self.fake_server, + floating_ip=self.cloud._normalize_floating_ip( + self.mock_floating_ip_list_rep[0]), + fixed_address='192.0.2.129') + + self.assert_calls() + + def test_detach_ip_from_server(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', + append=['servers', self.fake_server['id'], 'action']), + validate=dict( + json={ + "removeFloatingIp": { + "address": "203.0.113.1", + }})), + ]) + + self.cloud.detach_ip_from_server( + server_id='server-id', floating_ip_id=1) + self.assert_calls() + + def test_add_ip_from_pool(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + dict(method='GET', + uri=self.get_mock_url('compute', append=['os-floating-ips']), + json={'floating_ips': self.mock_floating_ip_list_rep}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', + append=['servers', self.fake_server['id'], 'action']), + validate=dict( + json={ + "addFloatingIp": { + "address": "203.0.113.1", + "fixed_address": "192.0.2.129", + }})), + ]) + + server = self.cloud._add_ip_from_pool( + server=self.fake_server, + network='nova', + fixed_address='192.0.2.129') + + self.assertEqual(server, self.fake_server) + self.assert_calls() + + def test_cleanup_floating_ips(self): + # This should not call anything because it's unsafe on nova. + self.cloud.delete_unattached_floating_ips() diff --git a/openstack/tests/unit/cloud/test_floating_ip_pool.py b/openstack/tests/unit/cloud/test_floating_ip_pool.py new file mode 100644 index 000000000..4d3ee8369 --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_pool.py @@ -0,0 +1,78 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
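+
+# NOTE: as in the nova-network tests above, each case below queues one
+# canned response per expected HTTP call via register_uris() and finishes
+# with assert_calls(), which checks the requests actually made against the
+# queued expectations.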
+ +""" +test_floating_ip_pool +---------------------------------- + +Test floating IP pool resource (managed by nova) +""" + +from openstack import OpenStackCloudException +from openstack.tests.unit import base +from openstack.tests import fakes + + +class TestFloatingIPPool(base.RequestsMockTestCase): + pools = [{'name': u'public'}] + + def test_list_floating_ip_pools(self): + + self.register_uris([ + dict(method='GET', + uri='{endpoint}/extensions'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'extensions': [{ + u'alias': u'os-floating-ip-pools', + u'updated': u'2014-12-03T00:00:00Z', + u'name': u'FloatingIpPools', + u'links': [], + u'namespace': + u'http://docs.openstack.org/compute/ext/fake_xml', + u'description': u'Floating IPs support.'}]}), + dict(method='GET', + uri='{endpoint}/os-floating-ip-pools'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={"floating_ip_pools": [{"name": "public"}]}) + ]) + + floating_ip_pools = self.cloud.list_floating_ip_pools() + + self.assertItemsEqual(floating_ip_pools, self.pools) + + self.assert_calls() + + def test_list_floating_ip_pools_exception(self): + + self.register_uris([ + dict(method='GET', + uri='{endpoint}/extensions'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'extensions': [{ + u'alias': u'os-floating-ip-pools', + u'updated': u'2014-12-03T00:00:00Z', + u'name': u'FloatingIpPools', + u'links': [], + u'namespace': + u'http://docs.openstack.org/compute/ext/fake_xml', + u'description': u'Floating IPs support.'}]}), + dict(method='GET', + uri='{endpoint}/os-floating-ip-pools'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + status_code=404)]) + + self.assertRaises( + OpenStackCloudException, self.cloud.list_floating_ip_pools) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_groups.py b/openstack/tests/unit/cloud/test_groups.py new file mode 100644 index 000000000..afa6229aa --- /dev/null +++ b/openstack/tests/unit/cloud/test_groups.py @@ -0,0 +1,97 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
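+
+# NOTE: setUp() below registers assert_calls() via addCleanup(), so the
+# individual tests in this module skip the explicit call; the mocked
+# requests are verified automatically at test teardown.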
+ +from openstack.tests.unit import base + + +class TestGroups(base.RequestsMockTestCase): + def setUp(self, cloud_config_fixture='clouds.yaml'): + super(TestGroups, self).setUp( + cloud_config_fixture=cloud_config_fixture) + self.addCleanup(self.assert_calls) + + def get_mock_url(self, service_type='identity', interface='admin', + resource='groups', append=None, base_url_append='v3'): + return super(TestGroups, self).get_mock_url( + service_type='identity', interface='admin', resource=resource, + append=append, base_url_append=base_url_append) + + def test_list_groups(self): + group_data = self._get_group_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'groups': [group_data.json_response['group']]}) + ]) + self.op_cloud.list_groups() + + def test_get_group(self): + group_data = self._get_group_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'groups': [group_data.json_response['group']]}), + ]) + self.op_cloud.get_group(group_data.group_id) + + def test_delete_group(self): + group_data = self._get_group_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'groups': [group_data.json_response['group']]}), + dict(method='DELETE', + uri=self.get_mock_url(append=[group_data.group_id]), + status_code=204), + ]) + self.assertTrue(self.op_cloud.delete_group(group_data.group_id)) + + def test_create_group(self): + domain_data = self._get_domain_data() + group_data = self._get_group_data(domain_id=domain_data.domain_id) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[domain_data.domain_id]), + status_code=200, + json=domain_data.json_response), + dict(method='POST', + uri=self.get_mock_url(), + status_code=200, + json=group_data.json_response, + validate=dict(json=group_data.json_request)) + ]) + self.op_cloud.create_group( + name=group_data.group_name, description=group_data.description, + domain=group_data.domain_id) + + def test_update_group(self): + group_data = self._get_group_data() + # Domain ID is not sent + group_data.json_request['group'].pop('domain_id') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'groups': [group_data.json_response['group']]}), + dict(method='PATCH', + uri=self.get_mock_url(append=[group_data.group_id]), + status_code=200, + json=group_data.json_response, + validate=dict(json=group_data.json_request)) + ]) + self.op_cloud.update_group(group_data.group_id, group_data.group_name, + group_data.description) diff --git a/openstack/tests/unit/cloud/test_identity_roles.py b/openstack/tests/unit/cloud/test_identity_roles.py new file mode 100644 index 000000000..f891cd84d --- /dev/null +++ b/openstack/tests/unit/cloud/test_identity_roles.py @@ -0,0 +1,284 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
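+
+# NOTE: the keystone v2 variants below switch the mocked identity service
+# with use_keystone_v2(); under v2, role assignments are read from
+# /tenants/<project>/users/<user>/roles rather than /role_assignments,
+# which is why those tests require both project and user filters.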
+ +import testtools + +import openstack.cloud +from openstack.tests.unit import base +from testtools import matchers + + +RAW_ROLE_ASSIGNMENTS = [ + { + "links": {"assignment": "http://example"}, + "role": {"id": "123456"}, + "scope": {"domain": {"id": "161718"}}, + "user": {"id": "313233"} + }, + { + "links": {"assignment": "http://example"}, + "group": {"id": "101112"}, + "role": {"id": "123456"}, + "scope": {"project": {"id": "456789"}} + } +] + + +class TestIdentityRoles(base.RequestsMockTestCase): + + def get_mock_url(self, service_type='identity', interface='admin', + resource='roles', append=None, base_url_append='v3', + qs_elements=None): + return super(TestIdentityRoles, self).get_mock_url( + service_type, interface, resource, append, base_url_append, + qs_elements) + + def test_list_roles(self): + role_data = self._get_role_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'roles': [role_data.json_response['role']]}) + ]) + self.op_cloud.list_roles() + self.assert_calls() + + def test_get_role_by_name(self): + role_data = self._get_role_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'roles': [role_data.json_response['role']]}) + ]) + role = self.op_cloud.get_role(role_data.role_name) + + self.assertIsNotNone(role) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + self.assert_calls() + + def test_get_role_by_id(self): + role_data = self._get_role_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'roles': [role_data.json_response['role']]}) + ]) + role = self.op_cloud.get_role(role_data.role_id) + + self.assertIsNotNone(role) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + self.assert_calls() + + def test_create_role(self): + role_data = self._get_role_data() + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url(), + status_code=200, + json=role_data.json_response, + validate=dict(json=role_data.json_request)) + ]) + + role = self.op_cloud.create_role(role_data.role_name) + + self.assertIsNotNone(role) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assert_calls() + + def test_delete_role_by_id(self): + role_data = self._get_role_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'roles': [role_data.json_response['role']]}), + dict(method='DELETE', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=204) + ]) + role = self.op_cloud.delete_role(role_data.role_id) + self.assertThat(role, matchers.Equals(True)) + self.assert_calls() + + def test_delete_role_by_name(self): + role_data = self._get_role_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'roles': [role_data.json_response['role']]}), + dict(method='DELETE', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=204) + ]) + role = self.op_cloud.delete_role(role_data.role_name) + self.assertThat(role, matchers.Equals(True)) + self.assert_calls() + + def test_list_role_assignments(self): + domain_data = self._get_domain_data() + user_data = self._get_user_data(domain_id=domain_data.domain_id) + group_data = self._get_group_data(domain_id=domain_data.domain_id) + 
project_data = self._get_project_data(domain_id=domain_data.domain_id) + role_data = self._get_role_data() + response = [ + {'links': 'https://example.com', + 'role': {'id': role_data.role_id}, + 'scope': {'domain': {'id': domain_data.domain_id}}, + 'user': {'id': user_data.user_id}}, + {'links': 'https://example.com', + 'role': {'id': role_data.role_id}, + 'scope': {'project': {'id': project_data.project_id}}, + 'group': {'id': group_data.group_id}}, + ] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments'), + status_code=200, + json={'role_assignments': response}, + complete_qs=True) + ]) + ret = self.op_cloud.list_role_assignments() + self.assertThat(len(ret), matchers.Equals(2)) + self.assertThat(ret[0].user, matchers.Equals(user_data.user_id)) + self.assertThat(ret[0].id, matchers.Equals(role_data.role_id)) + self.assertThat(ret[0].domain, matchers.Equals(domain_data.domain_id)) + self.assertThat(ret[1].group, matchers.Equals(group_data.group_id)) + self.assertThat(ret[1].id, matchers.Equals(role_data.role_id)) + self.assertThat(ret[1].project, + matchers.Equals(project_data.project_id)) + + def test_list_role_assignments_filters(self): + domain_data = self._get_domain_data() + user_data = self._get_user_data(domain_id=domain_data.domain_id) + role_data = self._get_role_data() + response = [ + {'links': 'https://example.com', + 'role': {'id': role_data.role_id}, + 'scope': {'domain': {'id': domain_data.domain_id}}, + 'user': {'id': user_data.user_id}} + ] + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=['scope.domain.id=%s' % domain_data.domain_id, + 'user.id=%s' % user_data.user_id, + 'effective=True']), + status_code=200, + json={'role_assignments': response}, + complete_qs=True) + ]) + params = dict(user=user_data.user_id, domain=domain_data.domain_id, + effective=True) + ret = self.op_cloud.list_role_assignments(filters=params) + self.assertThat(len(ret), matchers.Equals(1)) + self.assertThat(ret[0].user, matchers.Equals(user_data.user_id)) + self.assertThat(ret[0].id, matchers.Equals(role_data.role_id)) + self.assertThat(ret[0].domain, matchers.Equals(domain_data.domain_id)) + + def test_list_role_assignments_exception(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='role_assignments'), + status_code=403) + ]) + with testtools.ExpectedException( + openstack.cloud.exc.OpenStackCloudHTTPError, + "Failed to list role assignments" + ): + self.op_cloud.list_role_assignments() + self.assert_calls() + + def test_list_role_assignments_keystone_v2(self): + self.use_keystone_v2() + role_data = self._get_role_data() + user_data = self._get_user_data() + project_data = self._get_project_data(v3=False) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource='tenants', + append=[project_data.project_id, + 'users', + user_data.user_id, + 'roles'], + base_url_append=None), + status_code=200, + json={'roles': [role_data.json_response['role']]}) + ]) + ret = self.op_cloud.list_role_assignments( + filters={ + 'user': user_data.user_id, + 'project': project_data.project_id}) + self.assertThat(len(ret), matchers.Equals(1)) + self.assertThat(ret[0].project, + matchers.Equals(project_data.project_id)) + self.assertThat(ret[0].id, matchers.Equals(role_data.role_id)) + self.assertThat(ret[0].user, matchers.Equals(user_data.user_id)) + self.assert_calls() + + def test_list_role_assignments_keystone_v2_with_role(self): + 
        self.use_keystone_v2()
+        roles_data = [self._get_role_data() for r in range(0, 2)]
+        user_data = self._get_user_data()
+        project_data = self._get_project_data(v3=False)
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     resource='tenants',
+                     append=[project_data.project_id,
+                             'users',
+                             user_data.user_id,
+                             'roles'],
+                     base_url_append=None),
+                 status_code=200,
+                 json={'roles': [r.json_response['role'] for r in roles_data]})
+        ])
+        ret = self.op_cloud.list_role_assignments(
+            filters={
+                'role': roles_data[0].role_id,
+                'user': user_data.user_id,
+                'project': project_data.project_id})
+        self.assertThat(len(ret), matchers.Equals(1))
+        self.assertThat(ret[0].project,
+                        matchers.Equals(project_data.project_id))
+        self.assertThat(ret[0].id, matchers.Equals(roles_data[0].role_id))
+        self.assertThat(ret[0].user, matchers.Equals(user_data.user_id))
+        self.assert_calls()
+
+    def test_list_role_assignments_exception_v2(self):
+        self.use_keystone_v2()
+        with testtools.ExpectedException(
+            openstack.OpenStackCloudException,
+            "Must provide project and user for keystone v2"
+        ):
+            self.op_cloud.list_role_assignments()
+        self.assert_calls()
+
+    def test_list_role_assignments_exception_v2_no_project(self):
+        self.use_keystone_v2()
+        with testtools.ExpectedException(
+            openstack.OpenStackCloudException,
+            "Must provide project and user for keystone v2"
+        ):
+            self.op_cloud.list_role_assignments(filters={'user': '12345'})
+        self.assert_calls()
diff --git a/openstack/tests/unit/cloud/test_image.py b/openstack/tests/unit/cloud/test_image.py
new file mode 100644
index 000000000..a1f19785c
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_image.py
@@ -0,0 +1,829 @@
+# Copyright 2016 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# TODO(mordred) There are mocks of the image_client in here that are not
+# using requests_mock. Eradicate them.
+
+import operator
+import tempfile
+import uuid
+
+import mock
+import munch
+import six
+
+import openstack.cloud
+from openstack.cloud import exc
+from openstack.cloud import meta
+from openstack.tests import fakes
+from openstack.tests.unit import base
+
+
+CINDER_URL = 'https://volume.example.com/v2/1c36b64c840a42cd9e9b931a369337f0'
+
+
+class BaseTestImage(base.RequestsMockTestCase):
+
+    def setUp(self):
+        super(BaseTestImage, self).setUp()
+        self.image_id = str(uuid.uuid4())
+        self.imagefile = tempfile.NamedTemporaryFile(delete=False)
+        self.imagefile.write(b'\0')
+        self.imagefile.close()
+        self.fake_image_dict = fakes.make_fake_image(image_id=self.image_id)
+        self.fake_search_return = {'images': [self.fake_image_dict]}
+        self.output = uuid.uuid4().bytes
+
+
+class TestImage(BaseTestImage):
+
+    def setUp(self):
+        super(TestImage, self).setUp()
+        self.use_glance()
+
+    def test_config_v1(self):
+        self.cloud.cloud_config.config['image_api_version'] = '1'
+        # We override the scheme of the endpoint with the scheme of the service
+        # because glance has a bug where it doesn't return https properly.
+ self.assertEqual( + 'https://image.example.com/v1/', + self.cloud._image_client.get_endpoint()) + self.assertEqual( + '1', self.cloud_config.get_api_version('image')) + + def test_config_v2(self): + self.cloud.cloud_config.config['image_api_version'] = '2' + # We override the scheme of the endpoint with the scheme of the service + # because glance has a bug where it doesn't return https properly. + self.assertEqual( + 'https://image.example.com/v2/', + self.cloud._image_client.get_endpoint()) + self.assertEqual( + '2', self.cloud_config.get_api_version('image')) + + def test_download_image_no_output(self): + self.assertRaises(exc.OpenStackCloudException, + self.cloud.download_image, 'fake_image') + + def test_download_image_two_outputs(self): + fake_fd = six.BytesIO() + self.assertRaises(exc.OpenStackCloudException, + self.cloud.download_image, 'fake_image', + output_path='fake_path', output_file=fake_fd) + + def test_download_image_no_images_found(self): + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images', + json=dict(images=[]))]) + self.assertRaises(exc.OpenStackCloudResourceNotFound, + self.cloud.download_image, 'fake_image', + output_path='fake_path') + self.assert_calls() + + def _register_image_mocks(self): + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images', + json=self.fake_search_return), + dict(method='GET', + uri='https://image.example.com/v2/images/{id}/file'.format( + id=self.image_id), + content=self.output, + headers={'Content-Type': 'application/octet-stream'}) + ]) + + def test_download_image_with_fd(self): + self._register_image_mocks() + output_file = six.BytesIO() + self.cloud.download_image('fake_image', output_file=output_file) + output_file.seek(0) + self.assertEqual(output_file.read(), self.output) + self.assert_calls() + + def test_download_image_with_path(self): + self._register_image_mocks() + output_file = tempfile.NamedTemporaryFile() + self.cloud.download_image('fake_image', output_path=output_file.name) + output_file.seek(0) + self.assertEqual(output_file.read(), self.output) + self.assert_calls() + + def test_empty_list_images(self): + self.register_uris([ + dict(method='GET', uri='https://image.example.com/v2/images', + json={'images': []}) + ]) + self.assertEqual([], self.cloud.list_images()) + self.assert_calls() + + def test_list_images(self): + self.register_uris([ + dict(method='GET', uri='https://image.example.com/v2/images', + json=self.fake_search_return) + ]) + self.assertEqual( + self.cloud._normalize_images([self.fake_image_dict]), + self.cloud.list_images()) + self.assert_calls() + + def test_list_images_show_all(self): + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images?member_status=all', + json=self.fake_search_return) + ]) + self.assertEqual( + self.cloud._normalize_images([self.fake_image_dict]), + self.cloud.list_images(show_all=True)) + self.assert_calls() + + def test_list_images_show_all_deleted(self): + deleted_image = self.fake_image_dict.copy() + deleted_image['status'] = 'deleted' + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images?member_status=all', + json={'images': [self.fake_image_dict, deleted_image]}) + ]) + self.assertEqual( + self.cloud._normalize_images([ + self.fake_image_dict, deleted_image]), + self.cloud.list_images(show_all=True)) + self.assert_calls() + + def test_list_images_no_filter_deleted(self): + deleted_image = self.fake_image_dict.copy() + deleted_image['status'] = 
'deleted' + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images', + json={'images': [self.fake_image_dict, deleted_image]}) + ]) + self.assertEqual( + self.cloud._normalize_images([ + self.fake_image_dict, deleted_image]), + self.cloud.list_images(filter_deleted=False)) + self.assert_calls() + + def test_list_images_filter_deleted(self): + deleted_image = self.fake_image_dict.copy() + deleted_image['status'] = 'deleted' + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images', + json={'images': [self.fake_image_dict, deleted_image]}) + ]) + self.assertEqual( + self.cloud._normalize_images([self.fake_image_dict]), + self.cloud.list_images()) + self.assert_calls() + + def test_list_images_string_properties(self): + image_dict = self.fake_image_dict.copy() + image_dict['properties'] = 'list,of,properties' + self.register_uris([ + dict(method='GET', uri='https://image.example.com/v2/images', + json={'images': [image_dict]}), + ]) + images = self.cloud.list_images() + self.assertEqual( + self.cloud._normalize_images([image_dict]), + images) + self.assertEqual( + images[0]['properties']['properties'], + 'list,of,properties') + self.assert_calls() + + def test_list_images_paginated(self): + marker = str(uuid.uuid4()) + self.register_uris([ + dict(method='GET', uri='https://image.example.com/v2/images', + json={'images': [self.fake_image_dict], + 'next': '/v2/images?marker={marker}'.format( + marker=marker)}), + dict(method='GET', + uri=('https://image.example.com/v2/images?' + 'marker={marker}'.format(marker=marker)), + json=self.fake_search_return) + ]) + self.assertEqual( + self.cloud._normalize_images([ + self.fake_image_dict, self.fake_image_dict]), + self.cloud.list_images()) + self.assert_calls() + + def test_create_image_put_v2(self): + self.cloud.image_api_use_tasks = False + + self.register_uris([ + dict(method='GET', uri='https://image.example.com/v2/images', + json={'images': []}), + dict(method='POST', uri='https://image.example.com/v2/images', + json=self.fake_image_dict, + validate=dict( + json={ + u'container_format': u'bare', + u'disk_format': u'qcow2', + u'name': u'fake_image', + u'owner_specified.openstack.md5': fakes.NO_MD5, + u'owner_specified.openstack.object': + u'images/fake_image', + u'owner_specified.openstack.sha256': fakes.NO_SHA256, + u'visibility': u'private'}) + ), + dict(method='PUT', + uri='https://image.example.com/v2/images/{id}/file'.format( + id=self.image_id), + request_headers={'Content-Type': 'application/octet-stream'}), + dict(method='GET', uri='https://image.example.com/v2/images', + json=self.fake_search_return) + ]) + + self.cloud.create_image( + 'fake_image', self.imagefile.name, wait=True, timeout=1, + is_public=False) + + self.assert_calls() + self.assertEqual(self.adapter.request_history[5].text.read(), b'\x00') + + def test_create_image_task(self): + self.cloud.image_api_use_tasks = True + image_name = 'name-99' + container_name = 'image_upload_v2_test_container' + endpoint = self.cloud._object_store_client.get_endpoint() + + task_id = str(uuid.uuid4()) + args = dict( + id=task_id, + status='success', + type='import', + result={ + 'image_id': self.image_id, + }, + ) + + image_no_checksums = self.fake_image_dict.copy() + del(image_no_checksums['owner_specified.openstack.md5']) + del(image_no_checksums['owner_specified.openstack.sha256']) + del(image_no_checksums['owner_specified.openstack.object']) + + self.register_uris([ + dict(method='GET', uri='https://image.example.com/v2/images', 
+ json={'images': []}), + dict(method='GET', uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': 1000}, + slo={'min_segment_size': 500})), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=endpoint, container=container_name), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}'.format( + endpoint=endpoint, container=container_name), + status_code=201, + headers={'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8'}), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=endpoint, container=container_name), + headers={'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', # noqa + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}), + dict(method='HEAD', + uri='{endpoint}/{container}/{object}'.format( + endpoint=endpoint, container=container_name, + object=image_name), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}/{object}'.format( + endpoint=endpoint, container=container_name, + object=image_name), + status_code=201, + validate=dict( + headers={'x-object-meta-x-sdk-md5': fakes.NO_MD5, + 'x-object-meta-x-sdk-sha256': fakes.NO_SHA256}) + ), + dict(method='GET', uri='https://image.example.com/v2/images', + json={'images': []}), + dict(method='POST', uri='https://image.example.com/v2/tasks', + json=args, + validate=dict( + json=dict( + type='import', input={ + 'import_from': '{container}/{object}'.format( + container=container_name, object=image_name), + 'image_properties': {'name': image_name}})) + ), + dict(method='GET', + uri='https://image.example.com/v2/tasks/{id}'.format( + id=task_id), + status_code=503, text='Random error'), + dict(method='GET', + uri='https://image.example.com/v2/tasks/{id}'.format( + id=task_id), + json={'images': args}), + dict(method='GET', uri='https://image.example.com/v2/images', + json={'images': [image_no_checksums]}), + dict(method='PATCH', + uri='https://image.example.com/v2/images/{id}'.format( + id=self.image_id), + validate=dict( + json=sorted([ + {u'op': u'add', + u'value': '{container}/{object}'.format( + container=container_name, + object=image_name), + u'path': u'/owner_specified.openstack.object'}, + {u'op': u'add', u'value': fakes.NO_MD5, + u'path': u'/owner_specified.openstack.md5'}, + {u'op': u'add', u'value': fakes.NO_SHA256, + u'path': u'/owner_specified.openstack.sha256'}], + key=operator.itemgetter('value')), + headers={ + 'Content-Type': + 'application/openstack-images-v2.1-json-patch'}) + ), + dict(method='GET', uri='https://image.example.com/v2/images', + json=self.fake_search_return) + ]) + + self.cloud.create_image( + image_name, self.imagefile.name, wait=True, timeout=1, + is_public=False, container=container_name) + + self.assert_calls() + + def _image_dict(self, fake_image): + return self.cloud._normalize_image(meta.obj_to_munch(fake_image)) + + def _munch_images(self, fake_image): + return self.cloud._normalize_images([fake_image]) + + def _call_create_image(self, name, **kwargs): + imagefile = tempfile.NamedTemporaryFile(delete=False) + imagefile.write(b'\0') + imagefile.close() + self.cloud.create_image( + name, imagefile.name, wait=True, timeout=1, + is_public=False, **kwargs) + + @mock.patch.object(openstack.OpenStackCloud, 
'_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_create_image_put_v1( + self, mock_image_client, mock_is_client_version): + # TODO(mordred) Fix this to use requests_mock + mock_is_client_version.return_value = False + mock_image_client.get.return_value = [] + self.assertEqual([], self.cloud.list_images()) + + args = {'name': '42 name', + 'container_format': 'bare', 'disk_format': 'qcow2', + 'properties': { + 'owner_specified.openstack.md5': mock.ANY, + 'owner_specified.openstack.sha256': mock.ANY, + 'owner_specified.openstack.object': 'images/42 name', + 'is_public': False}} + ret = munch.Munch(args.copy()) + ret['id'] = '42' + ret['status'] = 'success' + mock_image_client.get.side_effect = [ + [], + [ret], + [ret], + ] + mock_image_client.post.return_value = ret + mock_image_client.put.return_value = ret + self._call_create_image('42 name') + mock_image_client.post.assert_called_with('/images', json=args) + mock_image_client.put.assert_called_with( + '/images/42', data=mock.ANY, + headers={ + 'x-image-meta-checksum': mock.ANY, + 'x-glance-registry-purge-props': 'false' + }) + mock_image_client.get.assert_called_with('/images/detail', params={}) + self.assertEqual( + self._munch_images(ret), self.cloud.list_images()) + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_create_image_put_v1_bad_delete( + self, mock_image_client, mock_is_client_version): + mock_is_client_version.return_value = False + mock_image_client.get.return_value = [] + self.assertEqual([], self.cloud.list_images()) + + args = {'name': '42 name', + 'container_format': 'bare', 'disk_format': 'qcow2', + 'properties': { + 'owner_specified.openstack.md5': mock.ANY, + 'owner_specified.openstack.sha256': mock.ANY, + 'owner_specified.openstack.object': 'images/42 name', + 'is_public': False}} + ret = munch.Munch(args.copy()) + ret['id'] = '42' + ret['status'] = 'success' + mock_image_client.get.side_effect = [ + [], + [ret], + ] + mock_image_client.post.return_value = ret + mock_image_client.put.side_effect = exc.OpenStackCloudHTTPError( + "Some error", {}) + self.assertRaises( + exc.OpenStackCloudHTTPError, + self._call_create_image, + '42 name') + mock_image_client.post.assert_called_with('/images', json=args) + mock_image_client.put.assert_called_with( + '/images/42', data=mock.ANY, + headers={ + 'x-image-meta-checksum': mock.ANY, + 'x-glance-registry-purge-props': 'false' + }) + mock_image_client.delete.assert_called_with('/images/42') + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_update_image_no_patch( + self, mock_image_client, mock_is_client_version): + mock_is_client_version.return_value = True + self.cloud.image_api_use_tasks = False + + mock_image_client.get.return_value = [] + self.assertEqual([], self.cloud.list_images()) + + args = {'name': '42 name', + 'container_format': 'bare', 'disk_format': 'qcow2', + 'owner_specified.openstack.md5': mock.ANY, + 'owner_specified.openstack.sha256': mock.ANY, + 'owner_specified.openstack.object': 'images/42 name', + 'visibility': 'private', + 'min_disk': 0, 'min_ram': 0} + ret = munch.Munch(args.copy()) + ret['id'] = '42' + ret['status'] = 'success' + mock_image_client.get.side_effect = [ + [], + [ret], + [ret], + ] + self.cloud.update_image_properties( + image=self._image_dict(ret), + **{'owner_specified.openstack.object': 'images/42 
name'}) + mock_image_client.get.assert_called_with('/images', params={}) + mock_image_client.patch.assert_not_called() + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_create_image_put_v2_bad_delete( + self, mock_image_client, mock_is_client_version): + mock_is_client_version.return_value = True + self.cloud.image_api_use_tasks = False + + mock_image_client.get.return_value = [] + self.assertEqual([], self.cloud.list_images()) + + args = {'name': '42 name', + 'container_format': 'bare', 'disk_format': 'qcow2', + 'owner_specified.openstack.md5': mock.ANY, + 'owner_specified.openstack.sha256': mock.ANY, + 'owner_specified.openstack.object': 'images/42 name', + 'visibility': 'private', + 'min_disk': 0, 'min_ram': 0} + ret = munch.Munch(args.copy()) + ret['id'] = '42' + ret['status'] = 'success' + mock_image_client.get.side_effect = [ + [], + [ret], + [ret], + ] + mock_image_client.post.return_value = ret + mock_image_client.put.side_effect = exc.OpenStackCloudHTTPError( + "Some error", {}) + self.assertRaises( + exc.OpenStackCloudHTTPError, + self._call_create_image, + '42 name', min_disk='0', min_ram=0) + mock_image_client.post.assert_called_with('/images', json=args) + mock_image_client.put.assert_called_with( + '/images/42/file', + headers={'Content-Type': 'application/octet-stream'}, + data=mock.ANY) + mock_image_client.delete.assert_called_with('/images/42') + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_create_image_put_bad_int( + self, mock_image_client, mock_is_client_version): + mock_is_client_version.return_value = True + self.cloud.image_api_use_tasks = False + + self.assertRaises( + exc.OpenStackCloudException, + self._call_create_image, '42 name', min_disk='fish', min_ram=0) + mock_image_client.post.assert_not_called() + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_create_image_put_user_int( + self, mock_image_client, mock_is_client_version): + mock_is_client_version.return_value = True + self.cloud.image_api_use_tasks = False + + args = {'name': '42 name', + 'container_format': 'bare', 'disk_format': u'qcow2', + 'owner_specified.openstack.md5': mock.ANY, + 'owner_specified.openstack.sha256': mock.ANY, + 'owner_specified.openstack.object': 'images/42 name', + 'int_v': '12345', + 'visibility': 'private', + 'min_disk': 0, 'min_ram': 0} + ret = munch.Munch(args.copy()) + ret['id'] = '42' + ret['status'] = 'success' + mock_image_client.get.side_effect = [ + [], + [ret], + [ret] + ] + mock_image_client.post.return_value = ret + self._call_create_image( + '42 name', min_disk='0', min_ram=0, int_v=12345) + mock_image_client.post.assert_called_with('/images', json=args) + mock_image_client.put.assert_called_with( + '/images/42/file', + headers={'Content-Type': 'application/octet-stream'}, + data=mock.ANY) + mock_image_client.get.assert_called_with('/images', params={}) + self.assertEqual( + self._munch_images(ret), self.cloud.list_images()) + + @mock.patch.object(openstack.OpenStackCloud, '_is_client_version') + @mock.patch.object(openstack.OpenStackCloud, '_image_client') + def test_create_image_put_meta_int( + self, mock_image_client, mock_is_client_version): + mock_is_client_version.return_value = True + self.cloud.image_api_use_tasks = False + + mock_image_client.get.return_value = [] + 
self.assertEqual([], self.cloud.list_images())
+
+        args = {'name': '42 name',
+                'container_format': 'bare', 'disk_format': u'qcow2',
+                'owner_specified.openstack.md5': mock.ANY,
+                'owner_specified.openstack.sha256': mock.ANY,
+                'owner_specified.openstack.object': 'images/42 name',
+                'int_v': 12345,
+                'visibility': 'private',
+                'min_disk': 0, 'min_ram': 0}
+        ret = munch.Munch(args.copy())
+        ret['id'] = '42'
+        ret['status'] = 'success'
+        # Configure the mocks before exercising the code under test so
+        # that the call assertions below are meaningful.
+        mock_image_client.get.side_effect = [
+            [],
+            [ret],
+            [ret],
+        ]
+        mock_image_client.post.return_value = ret
+        self._call_create_image(
+            '42 name', min_disk='0', min_ram=0, meta={'int_v': 12345})
+        mock_image_client.post.assert_called_with('/images', json=args)
+        mock_image_client.get.assert_called_with('/images', params={})
+        self.assertEqual(
+            self._munch_images(ret), self.cloud.list_images())
+
+    @mock.patch.object(openstack.OpenStackCloud, '_is_client_version')
+    @mock.patch.object(openstack.OpenStackCloud, '_image_client')
+    def test_create_image_put_protected(
+            self, mock_image_client, mock_is_client_version):
+        mock_is_client_version.return_value = True
+        self.cloud.image_api_use_tasks = False
+
+        mock_image_client.get.return_value = []
+        self.assertEqual([], self.cloud.list_images())
+
+        args = {'name': '42 name',
+                'container_format': 'bare', 'disk_format': u'qcow2',
+                'owner_specified.openstack.md5': mock.ANY,
+                'owner_specified.openstack.sha256': mock.ANY,
+                'owner_specified.openstack.object': 'images/42 name',
+                'protected': False,
+                'int_v': '12345',
+                'visibility': 'private',
+                'min_disk': 0, 'min_ram': 0}
+        ret = munch.Munch(args.copy())
+        ret['id'] = '42'
+        ret['status'] = 'success'
+        mock_image_client.get.side_effect = [
+            [],
+            [ret],
+            [ret],
+        ]
+        mock_image_client.put.return_value = ret
+        mock_image_client.post.return_value = ret
+        self._call_create_image(
+            '42 name', min_disk='0', min_ram=0, properties={'int_v': 12345},
+            protected=False)
+        mock_image_client.post.assert_called_with('/images', json=args)
+        mock_image_client.put.assert_called_with(
+            '/images/42/file', data=mock.ANY,
+            headers={'Content-Type': 'application/octet-stream'})
+        self.assertEqual(self._munch_images(ret), self.cloud.list_images())
+
+    @mock.patch.object(openstack.OpenStackCloud, '_is_client_version')
+    @mock.patch.object(openstack.OpenStackCloud, '_image_client')
+    def test_create_image_put_user_prop(
+            self, mock_image_client, mock_is_client_version):
+        mock_is_client_version.return_value = True
+        self.cloud.image_api_use_tasks = False
+
+        mock_image_client.get.return_value = []
+        self.assertEqual([], self.cloud.list_images())
+
+        args = {'name': '42 name',
+                'container_format': 'bare', 'disk_format': u'qcow2',
+                'owner_specified.openstack.md5': mock.ANY,
+                'owner_specified.openstack.sha256': mock.ANY,
+                'owner_specified.openstack.object': 'images/42 name',
+                'int_v': '12345',
+                'xenapi_use_agent': 'False',
+                'visibility': 'private',
+                'min_disk': 0, 'min_ram': 0}
+        ret = munch.Munch(args.copy())
+        ret['id'] = '42'
+        ret['status'] = 'success'
+        mock_image_client.get.return_value = [ret]
+        mock_image_client.post.return_value = ret
+        self._call_create_image(
+            '42 name', min_disk='0', min_ram=0, properties={'int_v': 12345})
+        mock_image_client.get.assert_called_with('/images', params={})
+        self.assertEqual(
+            self._munch_images(ret), self.cloud.list_images())
+
+    def test_get_image_by_id(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri='https://image.example.com/v2/images/{id}'.format(
+                     id=self.image_id),
+                 json=self.fake_image_dict)
+        ])
+        self.assertEqual(
+            self.cloud._normalize_image(self.fake_image_dict),
+            
self.cloud.get_image_by_id(self.image_id)) + self.assert_calls() + + +class TestImageV1Only(base.RequestsMockTestCase): + + def setUp(self): + super(TestImageV1Only, self).setUp() + self.use_glance(image_version_json='image-version-v1.json') + + def test_config_v1(self): + + self.cloud.cloud_config.config['image_api_version'] = '1' + # We override the scheme of the endpoint with the scheme of the service + # because glance has a bug where it doesn't return https properly. + self.assertEqual( + 'https://image.example.com/v1/', + self.cloud._image_client.get_endpoint()) + self.assertTrue(self.cloud._is_client_version('image', 1)) + + def test_config_v2(self): + self.cloud.cloud_config.config['image_api_version'] = '2' + # We override the scheme of the endpoint with the scheme of the service + # because glance has a bug where it doesn't return https properly. + self.assertEqual( + 'https://image.example.com/v1/', + self.cloud._image_client.get_endpoint()) + self.assertFalse(self.cloud._is_client_version('image', 2)) + + +class TestImageV2Only(base.RequestsMockTestCase): + + def setUp(self): + super(TestImageV2Only, self).setUp() + self.use_glance(image_version_json='image-version-v2.json') + + def test_config_v1(self): + self.cloud.cloud_config.config['image_api_version'] = '1' + # We override the scheme of the endpoint with the scheme of the service + # because glance has a bug where it doesn't return https properly. + self.assertEqual( + 'https://image.example.com/v2/', + self.cloud._image_client.get_endpoint()) + self.assertTrue(self.cloud._is_client_version('image', 2)) + + def test_config_v2(self): + self.cloud.cloud_config.config['image_api_version'] = '2' + # We override the scheme of the endpoint with the scheme of the service + # because glance has a bug where it doesn't return https properly. 
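+        # Since discovery for this fixture only advertises v2, asking for
+        # image_api_version '2' is satisfiable and resolves to the v2
+        # endpoint, so the version check below holds.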
+ self.assertEqual( + 'https://image.example.com/v2/', + self.cloud._image_client.get_endpoint()) + self.assertTrue(self.cloud._is_client_version('image', 2)) + + +class TestImageVolume(BaseTestImage): + + def test_create_image_volume(self): + + volume_id = 'some-volume' + + self.register_uris([ + dict(method='POST', + uri='{endpoint}/volumes/{id}/action'.format( + endpoint=CINDER_URL, id=volume_id), + json={'os-volume_upload_image': {'image_id': self.image_id}}, + validate=dict(json={ + u'os-volume_upload_image': { + u'container_format': u'bare', + u'disk_format': u'qcow2', + u'force': False, + u'image_name': u'fake_image'}}) + ), + # NOTE(notmorgan): Glance discovery happens here, insert the + # glance discovery mock at this point, DO NOT use the + # .use_glance() method, that is intended only for use in + # .setUp + self.get_glance_discovery_mock_dict(), + dict(method='GET', uri='https://image.example.com/v2/images', + json=self.fake_search_return) + ]) + + self.cloud.create_image( + 'fake_image', self.imagefile.name, wait=True, timeout=1, + volume={'id': volume_id}) + + self.assert_calls() + + def test_create_image_volume_duplicate(self): + + volume_id = 'some-volume' + + self.register_uris([ + dict(method='POST', + uri='{endpoint}/volumes/{id}/action'.format( + endpoint=CINDER_URL, id=volume_id), + json={'os-volume_upload_image': {'image_id': self.image_id}}, + validate=dict(json={ + u'os-volume_upload_image': { + u'container_format': u'bare', + u'disk_format': u'qcow2', + u'force': True, + u'image_name': u'fake_image'}}) + ), + # NOTE(notmorgan): Glance discovery happens here, insert the + # glance discovery mock at this point, DO NOT use the + # .use_glance() method, that is intended only for use in + # .setUp + self.get_glance_discovery_mock_dict(), + dict(method='GET', uri='https://image.example.com/v2/images', + json=self.fake_search_return) + ]) + + self.cloud.create_image( + 'fake_image', self.imagefile.name, wait=True, timeout=1, + volume={'id': volume_id}, allow_duplicates=True) + + self.assert_calls() + + +class TestImageBrokenDiscovery(base.RequestsMockTestCase): + + def setUp(self): + super(TestImageBrokenDiscovery, self).setUp() + self.use_glance(image_version_json='image-version-broken.json') + + def test_url_fix(self): + # image-version-broken.json has both http urls and localhost as the + # host. This is testing that what is discovered is https, because + # that's what's in the catalog, and image.example.com for the same + # reason. + self.register_uris([ + dict(method='GET', + uri='https://image.example.com/v2/images', + json={'images': []}) + ]) + self.assertEqual([], self.cloud.list_images()) + self.assertEqual( + self.cloud._image_client.get_endpoint(), + 'https://image.example.com/v2/') + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_image_snapshot.py b/openstack/tests/unit/cloud/test_image_snapshot.py new file mode 100644 index 000000000..3e5f83fdc --- /dev/null +++ b/openstack/tests/unit/cloud/test_image_snapshot.py @@ -0,0 +1,102 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.cloud import exc +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestImageSnapshot(base.RequestsMockTestCase): + + def setUp(self): + super(TestImageSnapshot, self).setUp() + self.server_id = str(uuid.uuid4()) + self.image_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name) + + def test_create_image_snapshot_wait_until_active_never_active(self): + snapshot_name = 'test-snapshot' + fake_image = fakes.make_fake_image(self.image_id, status='pending') + self.register_uris([ + dict( + method='POST', + uri='{endpoint}/servers/{server_id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + server_id=self.server_id), + headers=dict( + Location='{endpoint}/images/{image_id}'.format( + endpoint='https://images.example.com', + image_id=self.image_id)), + validate=dict( + json={ + "createImage": { + "name": snapshot_name, + "metadata": {}, + }})), + self.get_glance_discovery_mock_dict(), + dict( + method='GET', + uri='https://image.example.com/v2/images', + json=dict(images=[fake_image])), + ]) + + self.assertRaises( + exc.OpenStackCloudTimeout, + self.cloud.create_image_snapshot, + snapshot_name, dict(id=self.server_id), + wait=True, timeout=0.01) + + # After the fifth call, we just keep polling get images for status. + # Due to mocking sleep, we have no clue how many times we'll call it. + self.assert_calls(stop_after=5, do_count=False) + + def test_create_image_snapshot_wait_active(self): + snapshot_name = 'test-snapshot' + pending_image = fakes.make_fake_image(self.image_id, status='pending') + fake_image = fakes.make_fake_image(self.image_id) + self.register_uris([ + dict( + method='POST', + uri='{endpoint}/servers/{server_id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + server_id=self.server_id), + headers=dict( + Location='{endpoint}/images/{image_id}'.format( + endpoint='https://images.example.com', + image_id=self.image_id)), + validate=dict( + json={ + "createImage": { + "name": snapshot_name, + "metadata": {}, + }})), + self.get_glance_discovery_mock_dict(), + dict( + method='GET', + uri='https://image.example.com/v2/images', + json=dict(images=[pending_image])), + dict( + method='GET', + uri='https://image.example.com/v2/images', + json=dict(images=[fake_image])), + ]) + image = self.cloud.create_image_snapshot( + 'test-snapshot', dict(id=self.server_id), wait=True, timeout=2) + self.assertEqual(image['id'], self.image_id) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_inventory.py b/openstack/tests/unit/cloud/test_inventory.py new file mode 100644 index 000000000..621d97e7a --- /dev/null +++ b/openstack/tests/unit/cloud/test_inventory.py @@ -0,0 +1,142 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
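+
+# Note: these tests patch out both OpenStackConfig and OpenStackCloud, so
+# they exercise only the OpenStackInventory wiring itself and run without
+# a clouds.yaml file or a live cloud.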
+ +import mock + +from openstack.cloud import exc +from openstack.cloud import inventory +import openstack.config +from openstack.config import exceptions as occ_exc +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestInventory(base.TestCase): + + def setUp(self): + super(TestInventory, self).setUp() + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.OpenStackCloud") + def test__init(self, mock_cloud, mock_config): + mock_config.return_value.get_all_clouds.return_value = [{}] + + inv = inventory.OpenStackInventory() + + mock_config.assert_called_once_with( + config_files=openstack.config.loader.CONFIG_FILES + ) + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + self.assertTrue(mock_config.return_value.get_all_clouds.called) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.OpenStackCloud") + def test__init_one_cloud(self, mock_cloud, mock_config): + mock_config.return_value.get_one_cloud.return_value = [{}] + + inv = inventory.OpenStackInventory(cloud='supercloud') + + mock_config.assert_called_once_with( + config_files=openstack.config.loader.CONFIG_FILES + ) + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + self.assertFalse(mock_config.return_value.get_all_clouds.called) + mock_config.return_value.get_one_cloud.assert_called_once_with( + 'supercloud') + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.OpenStackCloud") + def test__raise_exception_on_no_cloud(self, mock_cloud, mock_config): + """ + Test that when os-client-config can't find a named cloud, a + shade exception is emitted. + """ + mock_config.return_value.get_one_cloud.side_effect = ( + occ_exc.OpenStackConfigException() + ) + self.assertRaises(exc.OpenStackCloudException, + inventory.OpenStackInventory, + cloud='supercloud') + mock_config.return_value.get_one_cloud.assert_called_once_with( + 'supercloud') + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.OpenStackCloud") + def test_list_hosts(self, mock_cloud, mock_config): + mock_config.return_value.get_all_clouds.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = dict(id='server_id', name='server_name') + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + inv.clouds[0].get_openstack_vars.return_value = server + + ret = inv.list_hosts() + + inv.clouds[0].list_servers.assert_called_once_with(detailed=True) + self.assertFalse(inv.clouds[0].get_openstack_vars.called) + self.assertEqual([server], ret) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.OpenStackCloud") + def test_list_hosts_no_detail(self, mock_cloud, mock_config): + mock_config.return_value.get_all_clouds.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = self.cloud._normalize_server( + fakes.make_fake_server( + '1234', 'test', 'ACTIVE', addresses={})) + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + + inv.list_hosts(expand=False) + + inv.clouds[0].list_servers.assert_called_once_with(detailed=False) + self.assertFalse(inv.clouds[0].get_openstack_vars.called) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.OpenStackCloud") + def test_search_hosts(self, mock_cloud, mock_config): + 
mock_config.return_value.get_all_clouds.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = dict(id='server_id', name='server_name') + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + inv.clouds[0].get_openstack_vars.return_value = server + + ret = inv.search_hosts('server_id') + self.assertEqual([server], ret) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.OpenStackCloud") + def test_get_host(self, mock_cloud, mock_config): + mock_config.return_value.get_all_clouds.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = dict(id='server_id', name='server_name') + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + inv.clouds[0].get_openstack_vars.return_value = server + + ret = inv.get_host('server_id') + self.assertEqual(server, ret) diff --git a/openstack/tests/unit/cloud/test_keypair.py b/openstack/tests/unit/cloud/test_keypair.py new file mode 100644 index 000000000..2884e78c8 --- /dev/null +++ b/openstack/tests/unit/cloud/test_keypair.py @@ -0,0 +1,110 @@ +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from openstack.cloud import exc +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestKeypair(base.RequestsMockTestCase): + + def setUp(self): + super(TestKeypair, self).setUp() + self.keyname = self.getUniqueString('key') + self.key = fakes.make_fake_keypair(self.keyname) + + def test_create_keypair(self): + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs']), + json={'keypair': self.key}, + validate=dict(json={ + 'keypair': { + 'name': self.key['name'], + 'public_key': self.key['public_key']}})), + ]) + + new_key = self.cloud.create_keypair( + self.keyname, self.key['public_key']) + self.assertEqual(new_key, self.cloud._normalize_keypair(self.key)) + + self.assert_calls() + + def test_create_keypair_exception(self): + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs']), + status_code=400, + validate=dict(json={ + 'keypair': { + 'name': self.key['name'], + 'public_key': self.key['public_key']}})), + ]) + + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.create_keypair, + self.keyname, self.key['public_key']) + + self.assert_calls() + + def test_delete_keypair(self): + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-keypairs', self.keyname]), + status_code=202), + ]) + self.assertTrue(self.cloud.delete_keypair(self.keyname)) + + self.assert_calls() + + def test_delete_keypair_not_found(self): + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-keypairs', self.keyname]), + status_code=404), + ]) + self.assertFalse(self.cloud.delete_keypair(self.keyname)) + + 
self.assert_calls() + + def test_list_keypairs(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs']), + json={'keypairs': [{'keypair': self.key}]}), + + ]) + keypairs = self.cloud.list_keypairs() + self.assertEqual(keypairs, self.cloud._normalize_keypairs([self.key])) + self.assert_calls() + + def test_list_keypairs_exception(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs']), + status_code=400), + + ]) + self.assertRaises(exc.OpenStackCloudException, + self.cloud.list_keypairs) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_limits.py b/openstack/tests/unit/cloud/test_limits.py new file mode 100644 index 000000000..8a33a93d6 --- /dev/null +++ b/openstack/tests/unit/cloud/test_limits.py @@ -0,0 +1,95 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.unit import base + + +class TestLimits(base.RequestsMockTestCase): + + def test_get_compute_limits(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['limits']), + json={ + "limits": { + "absolute": { + "maxImageMeta": 128, + "maxPersonality": 5, + "maxPersonalitySize": 10240, + "maxSecurityGroupRules": 20, + "maxSecurityGroups": 10, + "maxServerMeta": 128, + "maxTotalCores": 20, + "maxTotalFloatingIps": 10, + "maxTotalInstances": 10, + "maxTotalKeypairs": 100, + "maxTotalRAMSize": 51200, + "maxServerGroups": 10, + "maxServerGroupMembers": 10, + "totalCoresUsed": 0, + "totalInstancesUsed": 0, + "totalRAMUsed": 0, + "totalSecurityGroupsUsed": 0, + "totalFloatingIpsUsed": 0, + "totalServerGroupsUsed": 0 + }, + "rate": [] + } + }), + ]) + + self.cloud.get_compute_limits() + + self.assert_calls() + + def test_other_get_compute_limits(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['limits'], + qs_elements=[ + 'tenant_id={id}'.format(id=project.project_id) + ]), + json={ + "limits": { + "absolute": { + "maxImageMeta": 128, + "maxPersonality": 5, + "maxPersonalitySize": 10240, + "maxSecurityGroupRules": 20, + "maxSecurityGroups": 10, + "maxServerMeta": 128, + "maxTotalCores": 20, + "maxTotalFloatingIps": 10, + "maxTotalInstances": 10, + "maxTotalKeypairs": 100, + "maxTotalRAMSize": 51200, + "maxServerGroups": 10, + "maxServerGroupMembers": 10, + "totalCoresUsed": 0, + "totalInstancesUsed": 0, + "totalRAMUsed": 0, + "totalSecurityGroupsUsed": 0, + "totalFloatingIpsUsed": 0, + "totalServerGroupsUsed": 0 + }, + "rate": [] + } + }), + ]) + + self.op_cloud.get_compute_limits(project.project_id) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_magnum_services.py b/openstack/tests/unit/cloud/test_magnum_services.py new file mode 100644 index 000000000..914f1b5f4 --- /dev/null +++ b/openstack/tests/unit/cloud/test_magnum_services.py @@ -0,0 +1,40 @@ +# Licensed under the 
Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.unit import base + + +magnum_service_obj = dict( + binary='fake-service', + created_at='2015-08-27T09:49:58-05:00', + disabled_reason=None, + host='fake-host', + human_id=None, + id=1, + report_count=1, + state='up', + updated_at=None, +) + + +class TestMagnumServices(base.RequestsMockTestCase): + + def test_list_magnum_services(self): + self.register_uris([dict( + method='GET', + uri='https://container-infra.example.com/v1/mservices', + json=dict(mservices=[magnum_service_obj]))]) + mservices_list = self.op_cloud.list_magnum_services() + self.assertEqual( + mservices_list[0], + self.cloud._normalize_magnum_service(magnum_service_obj)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_meta.py b/openstack/tests/unit/cloud/test_meta.py new file mode 100644 index 000000000..de3da10d6 --- /dev/null +++ b/openstack/tests/unit/cloud/test_meta.py @@ -0,0 +1,1054 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
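+
+# The address constants defined below use the documentation ranges from
+# RFC 5737 (192.0.2.0/24 and 198.51.100.0/24) and RFC 3849 (2001:db8::/32),
+# so none of these tests reference routable addresses.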
+ +import mock + +import openstack.cloud +from openstack.cloud import meta +from openstack.tests import fakes +from openstack.tests.unit import base + +PRIVATE_V4 = '198.51.100.3' +PUBLIC_V4 = '192.0.2.99' +PUBLIC_V6 = '2001:0db8:face:0da0:face::0b00:1c' # rfc3849 + + +class FakeCloud(object): + region_name = 'test-region' + name = 'test-name' + private = False + force_ipv4 = False + service_val = True + _unused = "useless" + _local_ipv6 = True + + def get_flavor_name(self, id): + return 'test-flavor-name' + + def get_image_name(self, id): + return 'test-image-name' + + def get_volumes(self, server): + return [] + + def has_service(self, service_name): + return self.service_val + + def use_internal_network(self): + return True + + def use_external_network(self): + return True + + def get_internal_networks(self): + return [] + + def get_external_networks(self): + return [] + + def get_internal_ipv4_networks(self): + return [] + + def get_external_ipv4_networks(self): + return [] + + def get_internal_ipv6_networks(self): + return [] + + def get_external_ipv6_networks(self): + return [] + + def list_server_security_groups(self, server): + return [] + + def get_default_network(self): + return None + +standard_fake_server = fakes.make_fake_server( + server_id='test-id-0', + name='test-id-0', + status='ACTIVE', + addresses={'private': [{'OS-EXT-IPS:type': 'fixed', + 'addr': PRIVATE_V4, + 'version': 4}], + 'public': [{'OS-EXT-IPS:type': 'floating', + 'addr': PUBLIC_V4, + 'version': 4}]}, + flavor={'id': '101'}, + image={'id': '471c2475-da2f-47ac-aba5-cb4aa3d546f5'}, +) +standard_fake_server['metadata'] = {'group': 'test-group'} + +SUBNETS_WITH_NAT = [ + { + u'name': u'', + u'enable_dhcp': True, + u'network_id': u'5ef0358f-9403-4f7b-9151-376ca112abf7', + u'tenant_id': u'29c79f394b2946f1a0f8446d715dc301', + u'dns_nameservers': [], + u'ipv6_ra_mode': None, + u'allocation_pools': [ + { + u'start': u'10.10.10.2', + u'end': u'10.10.10.254' + } + ], + u'gateway_ip': u'10.10.10.1', + u'ipv6_address_mode': None, + u'ip_version': 4, + u'host_routes': [], + u'cidr': u'10.10.10.0/24', + u'id': u'14025a85-436e-4418-b0ee-f5b12a50f9b4' + }, +] + +OSIC_NETWORKS = [ + { + u'admin_state_up': True, + u'id': u'7004a83a-13d3-4dcd-8cf5-52af1ace4cae', + u'mtu': 0, + u'name': u'GATEWAY_NET', + u'router:external': True, + u'shared': True, + u'status': u'ACTIVE', + u'subnets': [u'cf785ee0-6cc9-4712-be3d-0bf6c86cf455'], + u'tenant_id': u'7a1ca9f7cc4e4b13ac0ed2957f1e8c32' + }, + { + u'admin_state_up': True, + u'id': u'405abfcc-77dc-49b2-a271-139619ac9b26', + u'mtu': 0, + u'name': u'openstackjenkins-network1', + u'router:external': False, + u'shared': False, + u'status': u'ACTIVE', + u'subnets': [u'a47910bc-f649-45db-98ec-e2421c413f4e'], + u'tenant_id': u'7e9c4d5842b3451d94417bd0af03a0f4' + }, + { + u'admin_state_up': True, + u'id': u'54753d2c-0a58-4928-9b32-084c59dd20a6', + u'mtu': 0, + u'name': u'GATEWAY_NET_V6', + u'router:external': True, + u'shared': True, + u'status': u'ACTIVE', + u'subnets': [u'9c21d704-a8b9-409a-b56d-501cb518d380', + u'7cb0ce07-64c3-4a3d-92d3-6f11419b45b9'], + u'tenant_id': u'7a1ca9f7cc4e4b13ac0ed2957f1e8c32' + } +] + +OSIC_SUBNETS = [ + { + u'allocation_pools': [{ + u'end': u'172.99.106.254', + u'start': u'172.99.106.5'}], + u'cidr': u'172.99.106.0/24', + u'dns_nameservers': [u'69.20.0.164', u'69.20.0.196'], + u'enable_dhcp': True, + u'gateway_ip': u'172.99.106.1', + u'host_routes': [], + u'id': u'cf785ee0-6cc9-4712-be3d-0bf6c86cf455', + u'ip_version': 4, + u'ipv6_address_mode': None, + 
u'ipv6_ra_mode': None, + u'name': u'GATEWAY_NET', + u'network_id': u'7004a83a-13d3-4dcd-8cf5-52af1ace4cae', + u'subnetpool_id': None, + u'tenant_id': u'7a1ca9f7cc4e4b13ac0ed2957f1e8c32' + }, + { + u'allocation_pools': [{ + u'end': u'10.0.1.254', u'start': u'10.0.1.2'}], + u'cidr': u'10.0.1.0/24', + u'dns_nameservers': [u'8.8.8.8', u'8.8.4.4'], + u'enable_dhcp': True, + u'gateway_ip': u'10.0.1.1', + u'host_routes': [], + u'id': u'a47910bc-f649-45db-98ec-e2421c413f4e', + u'ip_version': 4, + u'ipv6_address_mode': None, + u'ipv6_ra_mode': None, + u'name': u'openstackjenkins-subnet1', + u'network_id': u'405abfcc-77dc-49b2-a271-139619ac9b26', + u'subnetpool_id': None, + u'tenant_id': u'7e9c4d5842b3451d94417bd0af03a0f4' + }, + { + u'allocation_pools': [{ + u'end': u'10.255.255.254', u'start': u'10.0.0.2'}], + u'cidr': u'10.0.0.0/8', + u'dns_nameservers': [u'8.8.8.8', u'8.8.4.4'], + u'enable_dhcp': True, + u'gateway_ip': u'10.0.0.1', + u'host_routes': [], + u'id': u'9c21d704-a8b9-409a-b56d-501cb518d380', + u'ip_version': 4, + u'ipv6_address_mode': None, + u'ipv6_ra_mode': None, + u'name': u'GATEWAY_SUBNET_V6V4', + u'network_id': u'54753d2c-0a58-4928-9b32-084c59dd20a6', + u'subnetpool_id': None, + u'tenant_id': u'7a1ca9f7cc4e4b13ac0ed2957f1e8c32' + }, + { + u'allocation_pools': [{ + u'end': u'2001:4800:1ae1:18:ffff:ffff:ffff:ffff', + u'start': u'2001:4800:1ae1:18::2'}], + u'cidr': u'2001:4800:1ae1:18::/64', + u'dns_nameservers': [u'2001:4860:4860::8888'], + u'enable_dhcp': True, + u'gateway_ip': u'2001:4800:1ae1:18::1', + u'host_routes': [], + u'id': u'7cb0ce07-64c3-4a3d-92d3-6f11419b45b9', + u'ip_version': 6, + u'ipv6_address_mode': u'dhcpv6-stateless', + u'ipv6_ra_mode': None, + u'name': u'GATEWAY_SUBNET_V6V6', + u'network_id': u'54753d2c-0a58-4928-9b32-084c59dd20a6', + u'subnetpool_id': None, + u'tenant_id': u'7a1ca9f7cc4e4b13ac0ed2957f1e8c32' + } +] + + +class TestMeta(base.RequestsMockTestCase): + def test_find_nova_addresses_key_name(self): + # Note 198.51.100.0/24 is TEST-NET-2 from rfc5737 + addrs = {'public': [{'addr': '198.51.100.1', 'version': 4}], + 'private': [{'addr': '192.0.2.5', 'version': 4}]} + self.assertEqual( + ['198.51.100.1'], + meta.find_nova_addresses(addrs, key_name='public')) + self.assertEqual([], meta.find_nova_addresses(addrs, key_name='foo')) + + def test_find_nova_addresses_ext_tag(self): + addrs = {'public': [{'OS-EXT-IPS:type': 'fixed', + 'addr': '198.51.100.2', + 'version': 4}]} + self.assertEqual( + ['198.51.100.2'], meta.find_nova_addresses(addrs, ext_tag='fixed')) + self.assertEqual([], meta.find_nova_addresses(addrs, ext_tag='foo')) + + def test_find_nova_addresses_key_name_and_ext_tag(self): + addrs = {'public': [{'OS-EXT-IPS:type': 'fixed', + 'addr': '198.51.100.2', + 'version': 4}]} + self.assertEqual( + ['198.51.100.2'], meta.find_nova_addresses( + addrs, key_name='public', ext_tag='fixed')) + self.assertEqual([], meta.find_nova_addresses( + addrs, key_name='public', ext_tag='foo')) + self.assertEqual([], meta.find_nova_addresses( + addrs, key_name='bar', ext_tag='fixed')) + + def test_find_nova_addresses_all(self): + addrs = {'public': [{'OS-EXT-IPS:type': 'fixed', + 'addr': '198.51.100.2', + 'version': 4}]} + self.assertEqual( + ['198.51.100.2'], meta.find_nova_addresses( + addrs, key_name='public', ext_tag='fixed', version=4)) + self.assertEqual([], meta.find_nova_addresses( + addrs, key_name='public', ext_tag='fixed', version=6)) + + def test_get_server_ip(self): + srv = meta.obj_to_munch(standard_fake_server) + self.assertEqual( + PRIVATE_V4, 
meta.get_server_ip(srv, ext_tag='fixed')) + self.assertEqual( + PUBLIC_V4, meta.get_server_ip(srv, ext_tag='floating')) + + def test_get_server_private_ip(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [{ + 'id': 'test-net-id', + 'name': 'test-net-name'}]} + ), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}) + ]) + + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'private': [{'OS-EXT-IPS:type': 'fixed', + 'addr': PRIVATE_V4, + 'version': 4}], + 'public': [{'OS-EXT-IPS:type': 'floating', + 'addr': PUBLIC_V4, + 'version': 4}]} + ) + + self.assertEqual( + PRIVATE_V4, meta.get_server_private_ip(srv, self.cloud)) + self.assert_calls() + + def test_get_server_multiple_private_ip(self): + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [{ + 'id': 'test-net-id', + 'name': 'test-net'}]} + ), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}) + ]) + + shared_mac = '11:22:33:44:55:66' + distinct_mac = '66:55:44:33:22:11' + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'test-net': [{'OS-EXT-IPS:type': 'fixed', + 'OS-EXT-IPS-MAC:mac_addr': distinct_mac, + 'addr': '10.0.0.100', + 'version': 4}, + {'OS-EXT-IPS:type': 'fixed', + 'OS-EXT-IPS-MAC:mac_addr': shared_mac, + 'addr': '10.0.0.101', + 'version': 4}], + 'public': [{'OS-EXT-IPS:type': 'floating', + 'OS-EXT-IPS-MAC:mac_addr': shared_mac, + 'addr': PUBLIC_V4, + 'version': 4}]} + ) + + self.assertEqual( + '10.0.0.101', meta.get_server_private_ip(srv, self.cloud)) + self.assert_calls() + + @mock.patch.object(openstack.OpenStackCloud, 'has_service') + @mock.patch.object(openstack.OpenStackCloud, 'get_volumes') + @mock.patch.object(openstack.OpenStackCloud, 'get_image_name') + @mock.patch.object(openstack.OpenStackCloud, 'get_flavor_name') + def test_get_server_private_ip_devstack( + self, + mock_get_flavor_name, mock_get_image_name, + mock_get_volumes, mock_has_service): + + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + mock_has_service.return_value = True + + self.register_uris([ + dict(method='GET', + uri=('https://network.example.com/v2.0/ports.json?' 
+ 'device_id=test-id'), + json={'ports': [{ + 'id': 'test_port_id', + 'mac_address': 'fa:16:3e:ae:7d:42', + 'device_id': 'test-id'}]} + ), + dict(method='GET', + uri=('https://network.example.com/v2.0/' + 'floatingips.json?port_id=test_port_id'), + json={'floatingips': []}), + + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [ + {'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False + }, + {'id': 'private', + 'name': 'private'}]} + ), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}), + + dict(method='GET', + uri='{endpoint}/servers/test-id/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': []}) + ]) + + srv = self.cloud.get_openstack_vars(fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + flavor={u'id': u'1'}, + image={ + 'name': u'cirros-0.3.4-x86_64-uec', + u'id': u'f93d000b-7c29-4489-b375-3641a1758fe1'}, + addresses={u'test_pnztt_net': [{ + u'OS-EXT-IPS:type': u'fixed', + u'addr': PRIVATE_V4, + u'version': 4, + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42' + }]} + )) + + self.assertEqual(PRIVATE_V4, srv['private_v4']) + self.assert_calls() + + @mock.patch.object(openstack.OpenStackCloud, 'get_volumes') + @mock.patch.object(openstack.OpenStackCloud, 'get_image_name') + @mock.patch.object(openstack.OpenStackCloud, 'get_flavor_name') + def test_get_server_private_ip_no_fip( + self, + mock_get_flavor_name, mock_get_image_name, + mock_get_volumes): + self.cloud._floating_ip_source = None + + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [ + {'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False, + }, + {'id': 'private', + 'name': 'private'}]} + ), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}), + dict(method='GET', + uri='{endpoint}/servers/test-id/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': []}) + ]) + + srv = self.cloud.get_openstack_vars(fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + flavor={u'id': u'1'}, + image={ + 'name': u'cirros-0.3.4-x86_64-uec', + u'id': u'f93d000b-7c29-4489-b375-3641a1758fe1'}, + addresses={u'test_pnztt_net': [{ + u'OS-EXT-IPS:type': u'fixed', + u'addr': PRIVATE_V4, + u'version': 4, + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:ae:7d:42' + }]} + )) + + self.assertEqual(PRIVATE_V4, srv['private_v4']) + self.assert_calls() + + @mock.patch.object(openstack.OpenStackCloud, 'get_volumes') + @mock.patch.object(openstack.OpenStackCloud, 'get_image_name') + @mock.patch.object(openstack.OpenStackCloud, 'get_flavor_name') + def test_get_server_cloud_no_fips( + self, + mock_get_flavor_name, mock_get_image_name, + mock_get_volumes): + self.cloud._floating_ip_source = None + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [ + { + 'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False, + }, + { + 'id': 'private', + 'name': 'private'}]} + ), + 
dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}), + dict(method='GET', + uri='{endpoint}/servers/test-id/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': []}) + ]) + + srv = self.cloud.get_openstack_vars(fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + flavor={u'id': u'1'}, + image={ + 'name': u'cirros-0.3.4-x86_64-uec', + u'id': u'f93d000b-7c29-4489-b375-3641a1758fe1'}, + addresses={u'test_pnztt_net': [{ + u'addr': PRIVATE_V4, + u'version': 4, + }]} + )) + + self.assertEqual(PRIVATE_V4, srv['private_v4']) + self.assert_calls() + + @mock.patch.object(openstack.OpenStackCloud, 'has_service') + @mock.patch.object(openstack.OpenStackCloud, 'get_volumes') + @mock.patch.object(openstack.OpenStackCloud, 'get_image_name') + @mock.patch.object(openstack.OpenStackCloud, 'get_flavor_name') + def test_get_server_cloud_missing_fips( + self, + mock_get_flavor_name, mock_get_image_name, + mock_get_volumes, mock_has_service): + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + mock_has_service.return_value = True + + self.register_uris([ + dict(method='GET', + uri=('https://network.example.com/v2.0/ports.json?' + 'device_id=test-id'), + json={'ports': [{ + 'id': 'test_port_id', + 'mac_address': 'fa:16:3e:ae:7d:42', + 'device_id': 'test-id'}]} + ), + dict(method='GET', + uri=('https://network.example.com/v2.0/floatingips.json' + '?port_id=test_port_id'), + json={'floatingips': [{ + 'id': 'floating-ip-id', + 'port_id': 'test_port_id', + 'fixed_ip_address': PRIVATE_V4, + 'floating_ip_address': PUBLIC_V4, + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [ + { + 'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False, + }, + { + 'id': 'private', + 'name': 'private', + } + ]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}), + dict(method='GET', + uri='{endpoint}/servers/test-id/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': []}) + ]) + + srv = self.cloud.get_openstack_vars(fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + flavor={u'id': u'1'}, + image={ + 'name': u'cirros-0.3.4-x86_64-uec', + u'id': u'f93d000b-7c29-4489-b375-3641a1758fe1'}, + addresses={u'test_pnztt_net': [{ + u'addr': PRIVATE_V4, + u'version': 4, + 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', + }]} + )) + + self.assertEqual(PUBLIC_V4, srv['public_v4']) + self.assert_calls() + + @mock.patch.object(openstack.OpenStackCloud, 'get_volumes') + @mock.patch.object(openstack.OpenStackCloud, 'get_image_name') + @mock.patch.object(openstack.OpenStackCloud, 'get_flavor_name') + def test_get_server_cloud_rackspace_v6( + self, mock_get_flavor_name, mock_get_image_name, + mock_get_volumes): + self.cloud.cloud_config.config['has_network'] = False + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + + self.register_uris([ + dict(method='GET', + uri='{endpoint}/servers/test-id/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': []}) + ]) + + srv = 
self.cloud.get_openstack_vars(fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + flavor={u'id': u'1'}, + image={ + 'name': u'cirros-0.3.4-x86_64-uec', + u'id': u'f93d000b-7c29-4489-b375-3641a1758fe1'}, + addresses={ + 'private': [{ + 'addr': "10.223.160.141", + 'version': 4 + }], + 'public': [{ + 'addr': "104.130.246.91", + 'version': 4 + }, { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6 + }] + } + )) + + self.assertEqual("10.223.160.141", srv['private_v4']) + self.assertEqual("104.130.246.91", srv['public_v4']) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['public_v6']) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['interface_ip']) + self.assert_calls() + + @mock.patch.object(openstack.OpenStackCloud, 'get_volumes') + @mock.patch.object(openstack.OpenStackCloud, 'get_image_name') + @mock.patch.object(openstack.OpenStackCloud, 'get_flavor_name') + def test_get_server_cloud_osic_split( + self, mock_get_flavor_name, mock_get_image_name, + mock_get_volumes): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + self.cloud._external_ipv4_names = ['GATEWAY_NET'] + self.cloud._external_ipv6_names = ['GATEWAY_NET_V6'] + self.cloud._internal_ipv4_names = ['GATEWAY_NET_V6'] + self.cloud._internal_ipv6_names = [] + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': OSIC_NETWORKS}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': OSIC_SUBNETS}), + dict(method='GET', + uri='{endpoint}/servers/test-id/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': []}) + ]) + + srv = self.cloud.get_openstack_vars(fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + flavor={u'id': u'1'}, + image={ + 'name': u'cirros-0.3.4-x86_64-uec', + u'id': u'f93d000b-7c29-4489-b375-3641a1758fe1'}, + addresses={ + 'private': [{ + 'addr': "10.223.160.141", + 'version': 4 + }], + 'public': [{ + 'addr': "104.130.246.91", + 'version': 4 + }, { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6 + }] + } + )) + + self.assertEqual("10.223.160.141", srv['private_v4']) + self.assertEqual("104.130.246.91", srv['public_v4']) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['public_v6']) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['interface_ip']) + self.assert_calls() + + def test_get_server_external_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [{ + 'id': 'test-net-id', + 'name': 'test-net', + 'router:external': True + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}) + ]) + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'test-net': [{ + 'addr': PUBLIC_V4, + 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + self.assert_calls() + + def test_get_server_external_provider_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris([ + dict(method='GET', + 
uri='https://network.example.com/v2.0/networks.json', + json={'networks': [{ + 'id': 'test-net-id', + 'name': 'test-net', + 'provider:network_type': 'vlan', + 'provider:physical_network': 'vlan', + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}) + ]) + + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'test-net': [{ + 'addr': PUBLIC_V4, + 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + self.assert_calls() + + def test_get_server_internal_provider_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [{ + 'id': 'test-net-id', + 'name': 'test-net', + 'router:external': False, + 'provider:network_type': 'vxlan', + 'provider:physical_network': None, + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}) + ]) + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'test-net': [{ + 'addr': PRIVATE_V4, + 'version': 4}]}, + ) + self.assertIsNone( + meta.get_server_external_ipv4(cloud=self.cloud, server=srv)) + int_ip = meta.get_server_private_ip(cloud=self.cloud, server=srv) + + self.assertEqual(PRIVATE_V4, int_ip) + self.assert_calls() + + def test_get_server_external_none_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + json={'networks': [{ + 'id': 'test-net-id', + 'name': 'test-net', + 'router:external': False, + }]}), + dict(method='GET', + uri='https://network.example.com/v2.0/subnets.json', + json={'subnets': SUBNETS_WITH_NAT}) + ]) + + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'test-net': [{ + 'addr': PUBLIC_V4, + 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertIsNone(ip) + self.assert_calls() + + def test_get_server_external_ipv4_neutron_accessIPv4(self): + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE') + srv['accessIPv4'] = PUBLIC_V4 + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + + def test_get_server_external_ipv4_neutron_accessIPv6(self): + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE') + srv['accessIPv6'] = PUBLIC_V6 + ip = meta.get_server_external_ipv6(server=srv) + + self.assertEqual(PUBLIC_V6, ip) + + def test_get_server_external_ipv4_neutron_exception(self): + # Testing Clouds with a non working Neutron + self.register_uris([ + dict(method='GET', + uri='https://network.example.com/v2.0/networks.json', + status_code=404)]) + + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'public': [{'addr': PUBLIC_V4, 'version': 4}]} + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + self.assert_calls() + + def test_get_server_external_ipv4_nova_public(self): + # Testing Clouds w/o Neutron and a network named public + self.cloud.cloud_config.config['has_network'] = False + + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE', + addresses={'public': [{'addr': PUBLIC_V4, 
'version': 4}]})
+        ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv)
+
+        self.assertEqual(PUBLIC_V4, ip)
+
+    def test_get_server_external_ipv4_nova_none(self):
+        # Testing Clouds w/o Neutron or a globally routable IP
+        self.cloud.cloud_config.config['has_network'] = False
+
+        srv = fakes.make_fake_server(
+            server_id='test-id', name='test-name', status='ACTIVE',
+            addresses={'test-net': [{'addr': PRIVATE_V4}]})
+        ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv)
+
+        self.assertIsNone(ip)
+
+    def test_get_server_external_ipv6(self):
+        srv = fakes.make_fake_server(
+            server_id='test-id', name='test-name', status='ACTIVE',
+            addresses={
+                'test-net': [
+                    {'addr': PUBLIC_V4, 'version': 4},
+                    {'addr': PUBLIC_V6, 'version': 6}
+                ]
+            }
+        )
+        ip = meta.get_server_external_ipv6(srv)
+        self.assertEqual(PUBLIC_V6, ip)
+
+    def test_get_groups_from_server(self):
+        server_vars = {'flavor': 'test-flavor',
+                       'image': 'test-image',
+                       'az': 'test-az'}
+        self.assertEqual(
+            ['test-name',
+             'test-region',
+             'test-name_test-region',
+             'test-group',
+             'instance-test-id-0',
+             'meta-group_test-group',
+             'test-az',
+             'test-region_test-az',
+             'test-name_test-region_test-az'],
+            meta.get_groups_from_server(
+                FakeCloud(),
+                meta.obj_to_munch(standard_fake_server),
+                server_vars
+            )
+        )
+
+    def test_obj_list_to_munch(self):
+        """Test conversion of a list of objects to a list of dictionaries"""
+        class obj0(object):
+            value = 0
+
+        class obj1(object):
+            value = 1
+
+        obj_list = [obj0, obj1]
+        new_list = meta.obj_list_to_munch(obj_list)
+        self.assertEqual(new_list[0]['value'], 0)
+        self.assertEqual(new_list[1]['value'], 1)
+
+    @mock.patch.object(FakeCloud, 'list_server_security_groups')
+    def test_get_security_groups(self,
+                                 mock_list_server_security_groups):
+        '''This test verifies that calling get_hostvars_from_server
+        ultimately calls list_server_security_groups, and that the return
+        value from list_server_security_groups ends up in
+        server['security_groups'].'''
+        mock_list_server_security_groups.return_value = [
+            {'name': 'testgroup', 'id': '1'}]
+
+        server = meta.obj_to_munch(standard_fake_server)
+        hostvars = meta.get_hostvars_from_server(FakeCloud(), server)
+
+        mock_list_server_security_groups.assert_called_once_with(server)
+        self.assertEqual('testgroup',
+                         hostvars['security_groups'][0]['name'])
+
+    @mock.patch.object(openstack.cloud.meta, 'get_server_external_ipv6')
+    @mock.patch.object(openstack.cloud.meta, 'get_server_external_ipv4')
+    def test_basic_hostvars(
+            self, mock_get_server_external_ipv4,
+            mock_get_server_external_ipv6):
+        mock_get_server_external_ipv4.return_value = PUBLIC_V4
+        mock_get_server_external_ipv6.return_value = PUBLIC_V6
+
+        hostvars = meta.get_hostvars_from_server(
+            FakeCloud(), self.cloud._normalize_server(
+                meta.obj_to_munch(standard_fake_server)))
+        self.assertNotIn('links', hostvars)
+        self.assertEqual(PRIVATE_V4, hostvars['private_v4'])
+        self.assertEqual(PUBLIC_V4, hostvars['public_v4'])
+        self.assertEqual(PUBLIC_V6, hostvars['public_v6'])
+        self.assertEqual(PUBLIC_V6, hostvars['interface_ip'])
+        self.assertEqual('RegionOne', hostvars['region'])
+        self.assertEqual('_test_cloud_', hostvars['cloud'])
+        self.assertIn('location', hostvars)
+        self.assertEqual('_test_cloud_', hostvars['location']['cloud'])
+        self.assertEqual('RegionOne', hostvars['location']['region_name'])
+        self.assertEqual('admin', hostvars['location']['project']['name'])
+        self.assertEqual("test-image-name", hostvars['image']['name'])
+        self.assertEqual(
+            
standard_fake_server['image']['id'], hostvars['image']['id']) + self.assertNotIn('links', hostvars['image']) + self.assertEqual( + standard_fake_server['flavor']['id'], hostvars['flavor']['id']) + self.assertEqual("test-flavor-name", hostvars['flavor']['name']) + self.assertNotIn('links', hostvars['flavor']) + # test having volumes + # test volume exception + self.assertEqual([], hostvars['volumes']) + + @mock.patch.object(openstack.cloud.meta, 'get_server_external_ipv6') + @mock.patch.object(openstack.cloud.meta, 'get_server_external_ipv4') + def test_ipv4_hostvars( + self, mock_get_server_external_ipv4, + mock_get_server_external_ipv6): + mock_get_server_external_ipv4.return_value = PUBLIC_V4 + mock_get_server_external_ipv6.return_value = PUBLIC_V6 + + fake_cloud = FakeCloud() + fake_cloud.force_ipv4 = True + hostvars = meta.get_hostvars_from_server( + fake_cloud, meta.obj_to_munch(standard_fake_server)) + self.assertEqual(PUBLIC_V4, hostvars['interface_ip']) + + @mock.patch.object(openstack.cloud.meta, 'get_server_external_ipv4') + def test_private_interface_ip(self, mock_get_server_external_ipv4): + mock_get_server_external_ipv4.return_value = PUBLIC_V4 + + cloud = FakeCloud() + cloud.private = True + hostvars = meta.get_hostvars_from_server( + cloud, meta.obj_to_munch(standard_fake_server)) + self.assertEqual(PRIVATE_V4, hostvars['interface_ip']) + + @mock.patch.object(openstack.cloud.meta, 'get_server_external_ipv4') + def test_image_string(self, mock_get_server_external_ipv4): + mock_get_server_external_ipv4.return_value = PUBLIC_V4 + + server = standard_fake_server + server['image'] = 'fake-image-id' + hostvars = meta.get_hostvars_from_server( + FakeCloud(), meta.obj_to_munch(server)) + self.assertEqual('fake-image-id', hostvars['image']['id']) + + def test_az(self): + server = standard_fake_server + server['OS-EXT-AZ:availability_zone'] = 'az1' + + hostvars = self.cloud._normalize_server(meta.obj_to_munch(server)) + self.assertEqual('az1', hostvars['az']) + + def test_current_location(self): + self.assertEqual({ + 'cloud': '_test_cloud_', + 'project': { + 'id': mock.ANY, + 'name': 'admin', + 'domain_id': None, + 'domain_name': 'default' + }, + 'region_name': u'RegionOne', + 'zone': None}, + self.cloud.current_location) + + def test_current_project(self): + self.assertEqual({ + 'id': mock.ANY, + 'name': 'admin', + 'domain_id': None, + 'domain_name': 'default'}, + self.cloud.current_project) + + def test_has_volume(self): + mock_cloud = mock.MagicMock() + + fake_volume = fakes.FakeVolume( + id='volume1', + status='available', + name='Volume 1 Display Name', + attachments=[{'device': '/dev/sda0'}]) + fake_volume_dict = meta.obj_to_munch(fake_volume) + mock_cloud.get_volumes.return_value = [fake_volume_dict] + hostvars = meta.get_hostvars_from_server( + mock_cloud, meta.obj_to_munch(standard_fake_server)) + self.assertEqual('volume1', hostvars['volumes'][0]['id']) + self.assertEqual('/dev/sda0', hostvars['volumes'][0]['device']) + + def test_has_no_volume_service(self): + fake_cloud = FakeCloud() + fake_cloud.service_val = False + hostvars = meta.get_hostvars_from_server( + fake_cloud, meta.obj_to_munch(standard_fake_server)) + self.assertEqual([], hostvars['volumes']) + + def test_unknown_volume_exception(self): + mock_cloud = mock.MagicMock() + + class FakeException(Exception): + pass + + def side_effect(*args): + raise FakeException("No Volumes") + mock_cloud.get_volumes.side_effect = side_effect + self.assertRaises( + FakeException, + meta.get_hostvars_from_server, + mock_cloud, + 
meta.obj_to_munch(standard_fake_server)) + + def test_obj_to_munch(self): + cloud = FakeCloud() + cloud.subcloud = FakeCloud() + cloud_dict = meta.obj_to_munch(cloud) + self.assertEqual(FakeCloud.name, cloud_dict['name']) + self.assertNotIn('_unused', cloud_dict) + self.assertNotIn('get_flavor_name', cloud_dict) + self.assertNotIn('subcloud', cloud_dict) + self.assertTrue(hasattr(cloud_dict, 'name')) + self.assertEqual(cloud_dict.name, cloud_dict['name']) + + def test_obj_to_munch_subclass(self): + class FakeObjDict(dict): + additional = 1 + obj = FakeObjDict(foo='bar') + obj_dict = meta.obj_to_munch(obj) + self.assertIn('additional', obj_dict) + self.assertIn('foo', obj_dict) + self.assertEqual(obj_dict['additional'], 1) + self.assertEqual(obj_dict['foo'], 'bar') diff --git a/openstack/tests/unit/cloud/test_network.py b/openstack/tests/unit/cloud/test_network.py new file mode 100644 index 000000000..52d0449dc --- /dev/null +++ b/openstack/tests/unit/cloud/test_network.py @@ -0,0 +1,250 @@ +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import testtools + +import openstack.cloud +from openstack.tests.unit import base + + +class TestNetwork(base.RequestsMockTestCase): + + mock_new_network_rep = { + 'provider:physical_network': None, + 'ipv6_address_scope': None, + 'revision_number': 3, + 'port_security_enabled': True, + 'provider:network_type': 'local', + 'id': '881d1bb7-a663-44c0-8f9f-ee2765b74486', + 'router:external': False, + 'availability_zone_hints': [], + 'availability_zones': [], + 'provider:segmentation_id': None, + 'ipv4_address_scope': None, + 'shared': False, + 'project_id': '861808a93da0484ea1767967c4df8a23', + 'status': 'ACTIVE', + 'subnets': [], + 'description': '', + 'tags': [], + 'updated_at': '2017-04-22T19:22:53Z', + 'is_default': False, + 'qos_policy_id': None, + 'name': 'netname', + 'admin_state_up': True, + 'tenant_id': '861808a93da0484ea1767967c4df8a23', + 'created_at': '2017-04-22T19:22:53Z', + 'mtu': 0 + } + + def test_list_networks(self): + net1 = {'id': '1', 'name': 'net1'} + net2 = {'id': '2', 'name': 'net2'} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [net1, net2]}) + ]) + nets = self.cloud.list_networks() + self.assertEqual([net1, net2], nets) + self.assert_calls() + + def test_list_networks_filtered(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json'], + qs_elements=["name=test"]), + json={'networks': []}) + ]) + self.cloud.list_networks(filters={'name': 'test'}) + self.assert_calls() + + def test_create_network(self): + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'network': self.mock_new_network_rep}, + validate=dict( + json={'network': { + 'admin_state_up': True, + 'name': 'netname'}})) + ]) + network = self.cloud.create_network("netname") + 
self.assertEqual(self.mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_specific_tenant(self): + project_id = "project_id_value" + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep['project_id'] = project_id + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'network': mock_new_network_rep}, + validate=dict( + json={'network': { + 'admin_state_up': True, + 'name': 'netname', + 'tenant_id': project_id}})) + ]) + network = self.cloud.create_network("netname", project_id=project_id) + self.assertEqual(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_external(self): + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep['router:external'] = True + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'network': mock_new_network_rep}, + validate=dict( + json={'network': { + 'admin_state_up': True, + 'name': 'netname', + 'router:external': True}})) + ]) + network = self.cloud.create_network("netname", external=True) + self.assertEqual(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_provider(self): + provider_opts = {'physical_network': 'mynet', + 'network_type': 'vlan', + 'segmentation_id': 'vlan1'} + new_network_provider_opts = { + 'provider:physical_network': 'mynet', + 'provider:network_type': 'vlan', + 'provider:segmentation_id': 'vlan1' + } + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep.update(new_network_provider_opts) + expected_send_params = { + 'admin_state_up': True, + 'name': 'netname' + } + expected_send_params.update(new_network_provider_opts) + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'network': mock_new_network_rep}, + validate=dict( + json={'network': expected_send_params})) + ]) + network = self.cloud.create_network("netname", provider=provider_opts) + self.assertEqual(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_provider_ignored_value(self): + provider_opts = {'physical_network': 'mynet', + 'network_type': 'vlan', + 'segmentation_id': 'vlan1', + 'should_not_be_passed': 1} + new_network_provider_opts = { + 'provider:physical_network': 'mynet', + 'provider:network_type': 'vlan', + 'provider:segmentation_id': 'vlan1' + } + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep.update(new_network_provider_opts) + expected_send_params = { + 'admin_state_up': True, + 'name': 'netname' + } + expected_send_params.update(new_network_provider_opts) + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'network': mock_new_network_rep}, + validate=dict( + json={'network': expected_send_params})) + ]) + network = self.cloud.create_network("netname", provider=provider_opts) + self.assertEqual(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_provider_wrong_type(self): + provider_opts = "invalid" + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "Parameter 'provider' must be a dict" + ): + self.cloud.create_network("netname", provider=provider_opts) + + def test_delete_network(self): + network_id = "test-net-id" + network_name = "network" + network 
= {'id': network_id, 'name': network_name} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [network]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'networks', "%s.json" % network_id]), + json={}) + ]) + self.assertTrue(self.cloud.delete_network(network_name)) + self.assert_calls() + + def test_delete_network_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + self.assertFalse(self.cloud.delete_network('test-net')) + self.assert_calls() + + def test_delete_network_exception(self): + network_id = "test-net-id" + network_name = "network" + network = {'id': network_id, 'name': network_name} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [network]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'networks', "%s.json" % network_id]), + status_code=503) + ]) + self.assertRaises(openstack.OpenStackCloudException, + self.cloud.delete_network, network_name) + self.assert_calls() + + def test_get_network_by_id(self): + network_id = "test-net-id" + network_name = "network" + network = {'id': network_id, 'name': network_name} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'networks', "%s" % network_id]), + json={'network': network}) + ]) + self.assertTrue(self.cloud.get_network_by_id(network_id)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_normalize.py b/openstack/tests/unit/cloud/test_normalize.py new file mode 100644 index 000000000..653927477 --- /dev/null +++ b/openstack/tests/unit/cloud/test_normalize.py @@ -0,0 +1,1030 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
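+# The tests below feed raw nova, glance and cinder payloads through the +# _normalize_* helpers and compare the results against fully expanded +# expected dicts, once for the default cloud and once for the strict cloud.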
+ +import mock + +from openstack.tests.unit import base + +RAW_SERVER_DICT = { + 'HUMAN_ID': True, + 'NAME_ATTR': 'name', + 'OS-DCF:diskConfig': u'MANUAL', + 'OS-EXT-AZ:availability_zone': u'ca-ymq-2', + 'OS-EXT-STS:power_state': 1, + 'OS-EXT-STS:task_state': None, + 'OS-EXT-STS:vm_state': u'active', + 'OS-SRV-USG:launched_at': u'2015-08-01T19:52:02.000000', + 'OS-SRV-USG:terminated_at': None, + 'accessIPv4': u'', + 'accessIPv6': u'', + 'addresses': { + u'public': [{ + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:9f:46:3e', + u'OS-EXT-IPS:type': u'fixed', + u'addr': u'2604:e100:1:0:f816:3eff:fe9f:463e', + u'version': 6 + }, { + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:9f:46:3e', + u'OS-EXT-IPS:type': u'fixed', + u'addr': u'162.253.54.192', + u'version': 4}]}, + 'config_drive': u'True', + 'created': u'2015-08-01T19:52:16Z', + 'flavor': { + u'id': u'bbcb7eb5-5c8d-498f-9d7e-307c575d3566', + u'links': [{ + u'href': u'https://compute-ca-ymq-1.vexxhost.net/db9/flavors/bbc', + u'rel': u'bookmark'}]}, + 'hostId': u'bd37', + 'human_id': u'mordred-irc', + 'id': u'811c5197-dba7-4d3a-a3f6-68ca5328b9a7', + 'image': { + u'id': u'69c99b45-cd53-49de-afdc-f24789eb8f83', + u'links': [{ + u'href': u'https://compute-ca-ymq-1.vexxhost.net/db9/images/69c', + u'rel': u'bookmark'}]}, + 'key_name': u'mordred', + 'links': [{ + u'href': u'https://compute-ca-ymq-1.vexxhost.net/v2/db9/servers/811', + u'rel': u'self' + }, { + u'href': u'https://compute-ca-ymq-1.vexxhost.net/db9/servers/811', + u'rel': u'bookmark'}], + 'metadata': {u'group': u'irc', u'groups': u'irc,enabled'}, + 'name': u'mordred-irc', + 'networks': {u'public': [u'2604:e100:1:0:f816:3eff:fe9f:463e', + u'162.253.54.192']}, + 'os-extended-volumes:volumes_attached': [], + 'progress': 0, + 'request_ids': [], + 'security_groups': [{u'name': u'default'}], + 'status': u'ACTIVE', + 'tenant_id': u'db92b20496ae4fbda850a689ea9d563f', + 'updated': u'2016-10-15T15:49:29Z', + 'user_id': u'e9b21dc437d149858faee0898fb08e92'} + +RAW_GLANCE_IMAGE_DICT = { + u'auto_disk_config': u'False', + u'checksum': u'774f48af604ab1ec319093234c5c0019', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'container_format': u'ovf', + u'created_at': u'2015-02-15T22:58:45Z', + u'disk_format': u'vhd', + u'file': u'/v2/images/f2868d7c-63e1-4974-a64d-8670a86df21e/file', + u'id': u'f2868d7c-63e1-4974-a64d-8670a86df21e', + u'image_type': u'import', + u'min_disk': 20, + u'min_ram': 0, + u'name': u'Test Monty Ubuntu', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'owner': u'610275', + u'protected': False, + u'schema': u'/v2/schemas/image', + u'size': 323004185, + u'status': u'active', + u'tags': [], + u'updated_at': u'2015-02-15T23:04:34Z', + u'user_id': u'156284', + u'virtual_size': None, + u'visibility': u'private', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False'} + +RAW_NOVA_IMAGE_DICT = { + 'HUMAN_ID': True, + 'NAME_ATTR': 'name', + 'OS-DCF:diskConfig': u'MANUAL', + 'OS-EXT-IMG-SIZE:size': 323004185, + 'created': u'2015-02-15T22:58:45Z', + 'human_id': u'test-monty-ubuntu', + 'id': u'f2868d7c-63e1-4974-a64d-8670a86df21e', + 'links': [{ + u'href': u'https://example.com/v2/610275/images/f2868d7c', + u'rel': u'self' + }, { + u'href': 
u'https://example.com/610275/images/f2868d7c', + u'rel': u'bookmark' + }, { + u'href': u'https://example.com/images/f2868d7c', + u'rel': u'alternate', + u'type': u'application/vnd.openstack.image'}], + 'metadata': { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False'}, + 'minDisk': 20, + 'minRam': 0, + 'name': u'Test Monty Ubuntu', + 'progress': 100, + 'request_ids': [], + 'status': u'ACTIVE', + 'updated': u'2015-02-15T23:04:34Z'} + +RAW_FLAVOR_DICT = { + 'HUMAN_ID': True, + 'NAME_ATTR': 'name', + 'OS-FLV-EXT-DATA:ephemeral': 80, + 'OS-FLV-WITH-EXT-SPECS:extra_specs': { + u'class': u'performance1', + u'disk_io_index': u'40', + u'number_of_data_disks': u'1', + u'policy_class': u'performance_flavor', + u'resize_policy_class': u'performance_flavor'}, + 'disk': 40, + 'ephemeral': 80, + 'human_id': u'8-gb-performance', + 'id': u'performance1-8', + 'is_public': 'N/A', + 'links': [{ + u'href': u'https://example.com/v2/610275/flavors/performance1-8', + u'rel': u'self' + }, { + u'href': u'https://example.com/610275/flavors/performance1-8', + u'rel': u'bookmark'}], + 'name': u'8 GB Performance', + 'ram': 8192, + 'request_ids': [], + 'rxtx_factor': 1600.0, + 'swap': u'', + 'vcpus': 8} + + +# TODO(shade) Convert this to RequestsMockTestCase +class TestUtils(base.TestCase): + + def test_normalize_flavors(self): + raw_flavor = RAW_FLAVOR_DICT.copy() + expected = { + 'OS-FLV-EXT-DATA:ephemeral': 80, + 'OS-FLV-WITH-EXT-SPECS:extra_specs': { + u'class': u'performance1', + u'disk_io_index': u'40', + u'number_of_data_disks': u'1', + u'policy_class': u'performance_flavor', + u'resize_policy_class': u'performance_flavor'}, + 'disk': 40, + 'ephemeral': 80, + 'extra_specs': { + u'class': u'performance1', + u'disk_io_index': u'40', + u'number_of_data_disks': u'1', + u'policy_class': u'performance_flavor', + u'resize_policy_class': u'performance_flavor'}, + 'id': u'performance1-8', + 'is_disabled': False, + 'is_public': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': 'default', + 'id': mock.ANY, + 'name': 'admin'}, + 'region_name': u'RegionOne', + 'zone': None}, + 'name': u'8 GB Performance', + 'properties': { + 'OS-FLV-EXT-DATA:ephemeral': 80, + 'OS-FLV-WITH-EXT-SPECS:extra_specs': { + u'class': u'performance1', + u'disk_io_index': u'40', + u'number_of_data_disks': u'1', + u'policy_class': u'performance_flavor', + u'resize_policy_class': u'performance_flavor'}}, + 'ram': 8192, + 'rxtx_factor': 1600.0, + 'swap': 0, + 'vcpus': 8} + retval = self.cloud._normalize_flavor(raw_flavor) + self.assertEqual(expected, retval) + + def test_normalize_flavors_strict(self): + raw_flavor = RAW_FLAVOR_DICT.copy() + expected = { + 'disk': 40, + 'ephemeral': 80, + 'extra_specs': { + u'class': u'performance1', + u'disk_io_index': u'40', + u'number_of_data_disks': u'1', + u'policy_class': u'performance_flavor', + u'resize_policy_class': u'performance_flavor'}, + 'id': u'performance1-8', + 'is_disabled': False, + 'is_public': False, + 'location': { + 'cloud': '_test_cloud_', + 
'project': { + 'domain_id': None, + 'domain_name': 'default', + 'id': mock.ANY, + 'name': 'admin'}, + 'region_name': u'RegionOne', + 'zone': None}, + 'name': u'8 GB Performance', + 'properties': {}, + 'ram': 8192, + 'rxtx_factor': 1600.0, + 'swap': 0, + 'vcpus': 8} + retval = self.strict_cloud._normalize_flavor(raw_flavor) + self.assertEqual(expected, retval) + + def test_normalize_nova_images(self): + raw_image = RAW_NOVA_IMAGE_DICT.copy() + expected = { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False', + 'OS-DCF:diskConfig': u'MANUAL', + 'checksum': None, + 'container_format': None, + 'created': u'2015-02-15T22:58:45Z', + 'created_at': '2015-02-15T22:58:45Z', + 'direct_url': None, + 'disk_format': None, + 'file': None, + 'id': u'f2868d7c-63e1-4974-a64d-8670a86df21e', + 'is_protected': False, + 'is_public': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': 'default', + 'id': mock.ANY, + 'name': 'admin'}, + 'region_name': u'RegionOne', + 'zone': None}, + 'locations': [], + 'metadata': { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False', + 'OS-DCF:diskConfig': u'MANUAL', + 'progress': 100}, + 'minDisk': 20, + 'minRam': 0, + 'min_disk': 20, + 'min_ram': 0, + 'name': u'Test Monty Ubuntu', + 'owner': None, + 'progress': 100, + 'properties': { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False', + 'OS-DCF:diskConfig': u'MANUAL', + 'progress': 100}, + 'protected': False, + 'size': 323004185, + 'status': u'active', + 'tags': [], + 'updated': u'2015-02-15T23:04:34Z', + 'updated_at': u'2015-02-15T23:04:34Z', + 'virtual_size': 0, + 'visibility': 'private'} + retval = self.cloud._normalize_image(raw_image) + self.assertEqual(expected, retval) + + def test_normalize_nova_images_strict(self): + raw_image = RAW_NOVA_IMAGE_DICT.copy() + expected = { + 'checksum': None, + 'container_format': None, + 'created_at': '2015-02-15T22:58:45Z', + 'direct_url': None, + 'disk_format': None, + 'file': None, + 'id': u'f2868d7c-63e1-4974-a64d-8670a86df21e', + 
'is_protected': False, + 'is_public': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': 'default', + 'id': mock.ANY, + 'name': 'admin'}, + 'region_name': u'RegionOne', + 'zone': None}, + 'locations': [], + 'min_disk': 20, + 'min_ram': 0, + 'name': u'Test Monty Ubuntu', + 'owner': None, + 'properties': { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False', + 'OS-DCF:diskConfig': u'MANUAL', + 'progress': 100}, + 'size': 323004185, + 'status': u'active', + 'tags': [], + 'updated_at': u'2015-02-15T23:04:34Z', + 'virtual_size': 0, + 'visibility': 'private'} + retval = self.strict_cloud._normalize_image(raw_image) + self.assertEqual(sorted(expected.keys()), sorted(retval.keys())) + self.assertEqual(expected, retval) + + def test_normalize_glance_images(self): + raw_image = RAW_GLANCE_IMAGE_DICT.copy() + expected = { + u'auto_disk_config': u'False', + 'checksum': u'774f48af604ab1ec319093234c5c0019', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + 'container_format': u'ovf', + 'created': u'2015-02-15T22:58:45Z', + 'created_at': u'2015-02-15T22:58:45Z', + 'direct_url': None, + 'disk_format': u'vhd', + 'file': u'/v2/images/f2868d7c-63e1-4974-a64d-8670a86df21e/file', + 'id': u'f2868d7c-63e1-4974-a64d-8670a86df21e', + u'image_type': u'import', + 'is_protected': False, + 'is_public': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': None, + 'id': u'610275', + 'name': None}, + 'region_name': u'RegionOne', + 'zone': None}, + 'locations': [], + 'metadata': { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'schema': u'/v2/schemas/image', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False'}, + 'minDisk': 20, + 'min_disk': 20, + 'minRam': 0, + 'min_ram': 0, + 'name': u'Test Monty Ubuntu', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + 'owner': u'610275', + 'properties': { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + 
u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'schema': u'/v2/schemas/image', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False'}, + 'protected': False, + u'schema': u'/v2/schemas/image', + 'size': 323004185, + 'status': u'active', + 'tags': [], + 'updated': u'2015-02-15T23:04:34Z', + 'updated_at': u'2015-02-15T23:04:34Z', + u'user_id': u'156284', + 'virtual_size': 0, + 'visibility': u'private', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False'} + retval = self.cloud._normalize_image(raw_image) + self.assertEqual(expected, retval) + + def test_normalize_glance_images_strict(self): + raw_image = RAW_GLANCE_IMAGE_DICT.copy() + expected = { + 'checksum': u'774f48af604ab1ec319093234c5c0019', + 'container_format': u'ovf', + 'created_at': u'2015-02-15T22:58:45Z', + 'direct_url': None, + 'disk_format': u'vhd', + 'file': u'/v2/images/f2868d7c-63e1-4974-a64d-8670a86df21e/file', + 'id': u'f2868d7c-63e1-4974-a64d-8670a86df21e', + 'is_protected': False, + 'is_public': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': None, + 'id': u'610275', + 'name': None}, + 'region_name': u'RegionOne', + 'zone': None}, + 'locations': [], + 'min_disk': 20, + 'min_ram': 0, + 'name': u'Test Monty Ubuntu', + 'owner': u'610275', + 'properties': { + u'auto_disk_config': u'False', + u'com.rackspace__1__build_core': u'1', + u'com.rackspace__1__build_managed': u'1', + u'com.rackspace__1__build_rackconnect': u'1', + u'com.rackspace__1__options': u'0', + u'com.rackspace__1__source': u'import', + u'com.rackspace__1__visible_core': u'1', + u'com.rackspace__1__visible_managed': u'1', + u'com.rackspace__1__visible_rackconnect': u'1', + u'image_type': u'import', + u'org.openstack__1__architecture': u'x64', + u'os_type': u'linux', + u'schema': u'/v2/schemas/image', + u'user_id': u'156284', + u'vm_mode': u'hvm', + u'xenapi_use_agent': u'False'}, + 'size': 323004185, + 'status': u'active', + 'tags': [], + 'updated_at': u'2015-02-15T23:04:34Z', + 'virtual_size': 0, + 'visibility': 'private'} + retval = self.strict_cloud._normalize_image(raw_image) + self.assertEqual(sorted(expected.keys()), sorted(retval.keys())) + self.assertEqual(expected, retval) + + def test_normalize_servers_strict(self): + raw_server = RAW_SERVER_DICT.copy() + expected = { + 'accessIPv4': u'', + 'accessIPv6': u'', + 'addresses': { + u'public': [{ + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:9f:46:3e', + u'OS-EXT-IPS:type': u'fixed', + u'addr': u'2604:e100:1:0:f816:3eff:fe9f:463e', + u'version': 6 + }, { + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:9f:46:3e', + u'OS-EXT-IPS:type': u'fixed', + u'addr': u'162.253.54.192', + u'version': 4}]}, + 'adminPass': None, + 'created': u'2015-08-01T19:52:16Z', + 'disk_config': u'MANUAL', + 'flavor': {u'id': u'bbcb7eb5-5c8d-498f-9d7e-307c575d3566'}, + 'has_config_drive': True, + 'host_id': u'bd37', + 'id': u'811c5197-dba7-4d3a-a3f6-68ca5328b9a7', + 'image': {u'id': u'69c99b45-cd53-49de-afdc-f24789eb8f83'}, + 'interface_ip': u'', + 'key_name': u'mordred', + 'launched_at': u'2015-08-01T19:52:02.000000', + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': None, + 'id': u'db92b20496ae4fbda850a689ea9d563f', + 'name': None}, + 'region_name': u'RegionOne', + 'zone': u'ca-ymq-2'}, + 'metadata': {u'group': u'irc', u'groups': u'irc,enabled'}, + 'name': u'mordred-irc', + 'networks': { + u'public': [ + 
u'2604:e100:1:0:f816:3eff:fe9f:463e', + u'162.253.54.192']}, + 'power_state': 1, + 'private_v4': None, + 'progress': 0, + 'properties': {}, + 'public_v4': None, + 'public_v6': None, + 'security_groups': [{u'name': u'default'}], + 'status': u'ACTIVE', + 'task_state': None, + 'terminated_at': None, + 'updated': u'2016-10-15T15:49:29Z', + 'user_id': u'e9b21dc437d149858faee0898fb08e92', + 'vm_state': u'active', + 'volumes': []} + retval = self.strict_cloud._normalize_server(raw_server) + self.assertEqual(expected, retval) + + def test_normalize_servers_normal(self): + raw_server = RAW_SERVER_DICT.copy() + expected = { + 'OS-DCF:diskConfig': u'MANUAL', + 'OS-EXT-AZ:availability_zone': u'ca-ymq-2', + 'OS-EXT-STS:power_state': 1, + 'OS-EXT-STS:task_state': None, + 'OS-EXT-STS:vm_state': u'active', + 'OS-SRV-USG:launched_at': u'2015-08-01T19:52:02.000000', + 'OS-SRV-USG:terminated_at': None, + 'accessIPv4': u'', + 'accessIPv6': u'', + 'addresses': { + u'public': [{ + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:9f:46:3e', + u'OS-EXT-IPS:type': u'fixed', + u'addr': u'2604:e100:1:0:f816:3eff:fe9f:463e', + u'version': 6 + }, { + u'OS-EXT-IPS-MAC:mac_addr': u'fa:16:3e:9f:46:3e', + u'OS-EXT-IPS:type': u'fixed', + u'addr': u'162.253.54.192', + u'version': 4}]}, + 'adminPass': None, + 'az': u'ca-ymq-2', + 'cloud': '_test_cloud_', + 'config_drive': u'True', + 'created': u'2015-08-01T19:52:16Z', + 'disk_config': u'MANUAL', + 'flavor': {u'id': u'bbcb7eb5-5c8d-498f-9d7e-307c575d3566'}, + 'has_config_drive': True, + 'hostId': u'bd37', + 'host_id': u'bd37', + 'id': u'811c5197-dba7-4d3a-a3f6-68ca5328b9a7', + 'image': {u'id': u'69c99b45-cd53-49de-afdc-f24789eb8f83'}, + 'interface_ip': '', + 'key_name': u'mordred', + 'launched_at': u'2015-08-01T19:52:02.000000', + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': None, + 'id': u'db92b20496ae4fbda850a689ea9d563f', + 'name': None}, + 'region_name': u'RegionOne', + 'zone': u'ca-ymq-2'}, + 'metadata': {u'group': u'irc', u'groups': u'irc,enabled'}, + 'name': u'mordred-irc', + 'networks': { + u'public': [ + u'2604:e100:1:0:f816:3eff:fe9f:463e', + u'162.253.54.192']}, + 'os-extended-volumes:volumes_attached': [], + 'power_state': 1, + 'private_v4': None, + 'progress': 0, + 'project_id': u'db92b20496ae4fbda850a689ea9d563f', + 'properties': { + 'OS-DCF:diskConfig': u'MANUAL', + 'OS-EXT-AZ:availability_zone': u'ca-ymq-2', + 'OS-EXT-STS:power_state': 1, + 'OS-EXT-STS:task_state': None, + 'OS-EXT-STS:vm_state': u'active', + 'OS-SRV-USG:launched_at': u'2015-08-01T19:52:02.000000', + 'OS-SRV-USG:terminated_at': None, + 'os-extended-volumes:volumes_attached': []}, + 'public_v4': None, + 'public_v6': None, + 'region': u'RegionOne', + 'security_groups': [{u'name': u'default'}], + 'status': u'ACTIVE', + 'task_state': None, + 'tenant_id': u'db92b20496ae4fbda850a689ea9d563f', + 'terminated_at': None, + 'updated': u'2016-10-15T15:49:29Z', + 'user_id': u'e9b21dc437d149858faee0898fb08e92', + 'vm_state': u'active', + 'volumes': []} + retval = self.cloud._normalize_server(raw_server) + self.assertEqual(expected, retval) + + def test_normalize_secgroups_strict(self): + nova_secgroup = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group', + rules=[ + dict(id='123', from_port=80, to_port=81, ip_protocol='tcp', + ip_range={'cidr': '0.0.0.0/0'}, parent_group_id='xyz123') + ] + ) + + expected = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group', + properties={}, + location=dict( + 
region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id=mock.ANY, + domain_id=None, + name='admin'), + cloud='_test_cloud_'), + security_group_rules=[ + dict(id='123', direction='ingress', ethertype='IPv4', + port_range_min=80, port_range_max=81, protocol='tcp', + remote_ip_prefix='0.0.0.0/0', security_group_id='xyz123', + properties={}, + remote_group_id=None, + location=dict( + region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id=mock.ANY, + domain_id=None, + name='admin'), + cloud='_test_cloud_')) + ] + ) + + retval = self.strict_cloud._normalize_secgroup(nova_secgroup) + self.assertEqual(expected, retval) + + def test_normalize_secgroups(self): + nova_secgroup = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group', + rules=[ + dict(id='123', from_port=80, to_port=81, ip_protocol='tcp', + ip_range={'cidr': '0.0.0.0/0'}, parent_group_id='xyz123') + ] + ) + + expected = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group', + tenant_id='', + project_id='', + properties={}, + location=dict( + region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id=mock.ANY, + domain_id=None, + name='admin'), + cloud='_test_cloud_'), + security_group_rules=[ + dict(id='123', direction='ingress', ethertype='IPv4', + port_range_min=80, port_range_max=81, protocol='tcp', + remote_ip_prefix='0.0.0.0/0', security_group_id='xyz123', + properties={}, + tenant_id='', + project_id='', + remote_group_id=None, + location=dict( + region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id=mock.ANY, + domain_id=None, + name='admin'), + cloud='_test_cloud_')) + ] + ) + + retval = self.cloud._normalize_secgroup(nova_secgroup) + self.assertEqual(expected, retval) + + def test_normalize_secgroups_negone_port(self): + nova_secgroup = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group with -1 ports', + rules=[ + dict(id='123', from_port=-1, to_port=-1, ip_protocol='icmp', + ip_range={'cidr': '0.0.0.0/0'}, parent_group_id='xyz123') + ] + ) + + retval = self.cloud._normalize_secgroup(nova_secgroup) + self.assertIsNone(retval['security_group_rules'][0]['port_range_min']) + self.assertIsNone(retval['security_group_rules'][0]['port_range_max']) + + def test_normalize_secgroup_rules(self): + nova_rules = [ + dict(id='123', from_port=80, to_port=81, ip_protocol='tcp', + ip_range={'cidr': '0.0.0.0/0'}, parent_group_id='xyz123') + ] + expected = [ + dict(id='123', direction='ingress', ethertype='IPv4', + port_range_min=80, port_range_max=81, protocol='tcp', + remote_ip_prefix='0.0.0.0/0', security_group_id='xyz123', + tenant_id='', project_id='', remote_group_id=None, + properties={}, + location=dict( + region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id=mock.ANY, + domain_id=None, + name='admin'), + cloud='_test_cloud_')) + ] + retval = self.cloud._normalize_secgroup_rules(nova_rules) + self.assertEqual(expected, retval) + + def test_normalize_volumes_v1(self): + vol = dict( + id='55db9e89-9cb4-4202-af88-d8c4a174998e', + display_name='test', + display_description='description', + bootable=u'false', # unicode type + multiattach='true', # str type + status='in-use', + created_at='2015-08-27T09:49:58-05:00', + ) + expected = { + 'attachments': [], + 'availability_zone': None, + 'bootable': False, + 'can_multiattach': True, + 'consistencygroup_id': None, + 'created_at': vol['created_at'], + 'description': 
vol['display_description'], + 'display_description': vol['display_description'], + 'display_name': vol['display_name'], + 'encrypted': False, + 'host': None, + 'id': '55db9e89-9cb4-4202-af88-d8c4a174998e', + 'is_bootable': False, + 'is_encrypted': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': 'default', + 'id': mock.ANY, + 'name': 'admin'}, + 'region_name': u'RegionOne', + 'zone': None}, + 'metadata': {}, + 'migration_status': None, + 'multiattach': True, + 'name': vol['display_name'], + 'properties': {}, + 'replication_driver': None, + 'replication_extended_status': None, + 'replication_status': None, + 'size': 0, + 'snapshot_id': None, + 'source_volume_id': None, + 'status': vol['status'], + 'updated_at': None, + 'volume_type': None, + } + retval = self.cloud._normalize_volume(vol) + self.assertEqual(expected, retval) + + def test_normalize_volumes_v2(self): + vol = dict( + id='55db9e89-9cb4-4202-af88-d8c4a174998e', + name='test', + description='description', + bootable=False, + multiattach=True, + status='in-use', + created_at='2015-08-27T09:49:58-05:00', + availability_zone='my-zone', + ) + vol['os-vol-tenant-attr:tenant_id'] = 'my-project' + expected = { + 'attachments': [], + 'availability_zone': vol['availability_zone'], + 'bootable': False, + 'can_multiattach': True, + 'consistencygroup_id': None, + 'created_at': vol['created_at'], + 'description': vol['description'], + 'display_description': vol['description'], + 'display_name': vol['name'], + 'encrypted': False, + 'host': None, + 'id': '55db9e89-9cb4-4202-af88-d8c4a174998e', + 'is_bootable': False, + 'is_encrypted': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': None, + 'id': vol['os-vol-tenant-attr:tenant_id'], + 'name': None}, + 'region_name': u'RegionOne', + 'zone': vol['availability_zone']}, + 'metadata': {}, + 'migration_status': None, + 'multiattach': True, + 'name': vol['name'], + 'os-vol-tenant-attr:tenant_id': vol[ + 'os-vol-tenant-attr:tenant_id'], + 'properties': { + 'os-vol-tenant-attr:tenant_id': vol[ + 'os-vol-tenant-attr:tenant_id']}, + 'replication_driver': None, + 'replication_extended_status': None, + 'replication_status': None, + 'size': 0, + 'snapshot_id': None, + 'source_volume_id': None, + 'status': vol['status'], + 'updated_at': None, + 'volume_type': None, + } + retval = self.cloud._normalize_volume(vol) + self.assertEqual(expected, retval) + + def test_normalize_volumes_v1_strict(self): + vol = dict( + id='55db9e89-9cb4-4202-af88-d8c4a174998e', + display_name='test', + display_description='description', + bootable=u'false', # unicode type + multiattach='true', # str type + status='in-use', + created_at='2015-08-27T09:49:58-05:00', + ) + expected = { + 'attachments': [], + 'can_multiattach': True, + 'consistencygroup_id': None, + 'created_at': vol['created_at'], + 'description': vol['display_description'], + 'host': None, + 'id': '55db9e89-9cb4-4202-af88-d8c4a174998e', + 'is_bootable': False, + 'is_encrypted': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': 'default', + 'id': mock.ANY, + 'name': 'admin'}, + 'region_name': u'RegionOne', + 'zone': None}, + 'metadata': {}, + 'migration_status': None, + 'name': vol['display_name'], + 'properties': {}, + 'replication_driver': None, + 'replication_extended_status': None, + 'replication_status': None, + 'size': 0, + 'snapshot_id': None, + 'source_volume_id': None, + 'status': vol['status'], + 
'updated_at': None, + 'volume_type': None, + } + retval = self.strict_cloud._normalize_volume(vol) + self.assertEqual(expected, retval) + + def test_normalize_volumes_v2_strict(self): + vol = dict( + id='55db9e89-9cb4-4202-af88-d8c4a174998e', + name='test', + description='description', + bootable=False, + multiattach=True, + status='in-use', + created_at='2015-08-27T09:49:58-05:00', + availability_zone='my-zone', + ) + vol['os-vol-tenant-attr:tenant_id'] = 'my-project' + expected = { + 'attachments': [], + 'can_multiattach': True, + 'consistencygroup_id': None, + 'created_at': vol['created_at'], + 'description': vol['description'], + 'host': None, + 'id': '55db9e89-9cb4-4202-af88-d8c4a174998e', + 'is_bootable': False, + 'is_encrypted': False, + 'location': { + 'cloud': '_test_cloud_', + 'project': { + 'domain_id': None, + 'domain_name': None, + 'id': vol['os-vol-tenant-attr:tenant_id'], + 'name': None}, + 'region_name': u'RegionOne', + 'zone': vol['availability_zone']}, + 'metadata': {}, + 'migration_status': None, + 'name': vol['name'], + 'properties': {}, + 'replication_driver': None, + 'replication_extended_status': None, + 'replication_status': None, + 'size': 0, + 'snapshot_id': None, + 'source_volume_id': None, + 'status': vol['status'], + 'updated_at': None, + 'volume_type': None, + } + retval = self.strict_cloud._normalize_volume(vol) + self.assertEqual(expected, retval) diff --git a/openstack/tests/unit/cloud/test_object.py b/openstack/tests/unit/cloud/test_object.py new file mode 100644 index 000000000..f63ddd6a4 --- /dev/null +++ b/openstack/tests/unit/cloud/test_object.py @@ -0,0 +1,897 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
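+# The object-store tests below register explicit requests_mock URIs through +# base.RequestsMockTestCase and then assert both the return values and the +# exact sequence of HTTP calls the cloud layer makes against swift.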
+ +import tempfile + +import testtools + +import openstack.cloud +import openstack.cloud.openstackcloud as oc_oc +from openstack.cloud import exc +from openstack.tests.unit import base + + +class BaseTestObject(base.RequestsMockTestCase): + + def setUp(self): + super(BaseTestObject, self).setUp() + + self.container = self.getUniqueString() + self.object = self.getUniqueString() + self.endpoint = self.cloud._object_store_client.get_endpoint() + self.container_endpoint = '{endpoint}/{container}'.format( + endpoint=self.endpoint, container=self.container) + self.object_endpoint = '{endpoint}/{object}'.format( + endpoint=self.container_endpoint, object=self.object) + + +class TestObject(BaseTestObject): + + def test_create_container(self): + """Test creating a (private) container""" + self.register_uris([ + dict(method='HEAD', uri=self.container_endpoint, status_code=404), + dict(method='PUT', uri=self.container_endpoint, + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }), + dict(method='HEAD', uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}) + ]) + + self.cloud.create_container(self.container) + self.assert_calls() + + def test_create_container_public(self): + """Test creating a public container""" + self.register_uris([ + dict(method='HEAD', uri=self.container_endpoint, + status_code=404), + dict(method='PUT', uri=self.container_endpoint, + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }), + dict(method='POST', uri=self.container_endpoint, + status_code=201, + validate=dict( + headers={ + 'x-container-read': + oc_oc.OBJECT_CONTAINER_ACLS[ + 'public']})), + dict(method='HEAD', uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}) + ]) + + self.cloud.create_container(self.container, public=True) + self.assert_calls() + + def test_create_container_exists(self): + """Test creating a container that exists.""" + self.register_uris([ + dict(method='HEAD', uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}) + ]) + + container = self.cloud.create_container(self.container) + + self.assert_calls() + self.assertIsNotNone(container) + + def test_delete_container(self): + self.register_uris([ + dict(method='DELETE', uri=self.container_endpoint)]) + + self.assertTrue(self.cloud.delete_container(self.container)) + self.assert_calls() + + def test_delete_container_404(self): + """No exception when deleting a container that does not exist""" + 
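+ # A 404 on DELETE means the container is already gone, so + # delete_container reports False rather than raising.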
self.register_uris([ + dict(method='DELETE', uri=self.container_endpoint, + status_code=404)]) + + self.assertFalse(self.cloud.delete_container(self.container)) + self.assert_calls() + + def test_delete_container_error(self): + """Non-404 swift error re-raised as OSCE""" + # 409 happens if the container is not empty + self.register_uris([ + dict(method='DELETE', uri=self.container_endpoint, + status_code=409)]) + self.assertRaises( + openstack.OpenStackCloudException, + self.cloud.delete_container, self.container) + self.assert_calls() + + def test_update_container(self): + headers = { + 'x-container-read': + oc_oc.OBJECT_CONTAINER_ACLS['public']} + self.register_uris([ + dict(method='POST', uri=self.container_endpoint, + status_code=204, + validate=dict(headers=headers))]) + + self.cloud.update_container(self.container, headers) + self.assert_calls() + + def test_update_container_error(self): + """Swift error re-raised as OSCE""" + # This test is of questionable value - the swift API docs do not + # declare error codes (other than 404 for the container) for this + # method, and I cannot make a synthetic failure to validate a real + # error code. So we're really just testing the shade adapter error + # raising logic here, rather than anything specific to swift. + self.register_uris([ + dict(method='POST', uri=self.container_endpoint, + status_code=409)]) + self.assertRaises( + openstack.OpenStackCloudException, + self.cloud.update_container, self.container, dict(foo='bar')) + self.assert_calls() + + def test_set_container_access_public(self): + self.register_uris([ + dict(method='POST', uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={ + 'x-container-read': + oc_oc.OBJECT_CONTAINER_ACLS[ + 'public']}))]) + + self.cloud.set_container_access(self.container, 'public') + + self.assert_calls() + + def test_set_container_access_private(self): + self.register_uris([ + dict(method='POST', uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={ + 'x-container-read': + oc_oc.OBJECT_CONTAINER_ACLS[ + 'private']}))]) + + self.cloud.set_container_access(self.container, 'private') + + self.assert_calls() + + def test_set_container_access_invalid(self): + self.assertRaises( + openstack.OpenStackCloudException, + self.cloud.set_container_access, self.container, 'invalid') + + def test_get_container_access(self): + self.register_uris([ + dict(method='HEAD', uri=self.container_endpoint, + headers={ + 'x-container-read': + str(oc_oc.OBJECT_CONTAINER_ACLS[ + 'public'])})]) + access = self.cloud.get_container_access(self.container) + self.assertEqual('public', access) + + def test_get_container_invalid(self): + self.register_uris([ + dict(method='HEAD', uri=self.container_endpoint, + headers={'x-container-read': 'invalid'})]) + + with testtools.ExpectedException( + exc.OpenStackCloudException, + "Could not determine container access for ACL: invalid" + ): + self.cloud.get_container_access(self.container) + + def test_get_container_access_not_found(self): + self.register_uris([ + dict(method='HEAD', uri=self.container_endpoint, + status_code=404)]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + "Container not found: %s" % self.container + ): + self.cloud.get_container_access(self.container) + + def test_list_containers(self): + endpoint = '{endpoint}/?format=json'.format( + endpoint=self.endpoint) + containers = [ + {u'count': 0, u'bytes': 0, u'name': self.container}] + + self.register_uris([dict(method='GET', uri=endpoint, complete_qs=True, 
+ json=containers)]) + + ret = self.cloud.list_containers() + + self.assert_calls() + self.assertEqual(containers, ret) + + def test_list_containers_exception(self): + endpoint = '{endpoint}/?format=json'.format( + endpoint=self.endpoint) + self.register_uris([dict(method='GET', uri=endpoint, complete_qs=True, + status_code=416)]) + + self.assertRaises( + exc.OpenStackCloudException, self.cloud.list_containers) + self.assert_calls() + + def test_list_objects(self): + endpoint = '{endpoint}?format=json'.format( + endpoint=self.container_endpoint) + + objects = [{ + u'bytes': 20304400896, + u'last_modified': u'2016-12-15T13:34:13.650090', + u'hash': u'daaf9ed2106d09bba96cf193d866445e', + u'name': self.object, + u'content_type': u'application/octet-stream'}] + + self.register_uris([dict(method='GET', uri=endpoint, complete_qs=True, + json=objects)]) + + ret = self.cloud.list_objects(self.container) + + self.assert_calls() + self.assertEqual(objects, ret) + + def test_list_objects_exception(self): + endpoint = '{endpoint}?format=json'.format( + endpoint=self.container_endpoint) + self.register_uris([dict(method='GET', uri=endpoint, complete_qs=True, + status_code=416)]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.list_objects, self.container) + self.assert_calls() + + def test_delete_object(self): + self.register_uris([ + dict(method='HEAD', uri=self.object_endpoint, + headers={'X-Object-Meta': 'foo'}), + dict(method='DELETE', uri=self.object_endpoint, status_code=204)]) + + self.assertTrue(self.cloud.delete_object(self.container, self.object)) + + self.assert_calls() + + def test_delete_object_not_found(self): + self.register_uris([dict(method='HEAD', uri=self.object_endpoint, + status_code=404)]) + + self.assertFalse(self.cloud.delete_object(self.container, self.object)) + + self.assert_calls() + + def test_get_object(self): + headers = { + 'Content-Length': '20304400896', + 'Content-Type': 'application/octet-stream', + 'Accept-Ranges': 'bytes', + 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', + 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', + 'X-Timestamp': '1481808853.65009', + 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', + 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', + 'X-Static-Large-Object': 'True', + 'X-Object-Meta-Mtime': '1481513709.168512', + } + response_headers = {k.lower(): v for k, v in headers.items()} + text = 'test body' + self.register_uris([ + dict(method='GET', uri=self.object_endpoint, + headers={ + 'Content-Length': '20304400896', + 'Content-Type': 'application/octet-stream', + 'Accept-Ranges': 'bytes', + 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', + 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', + 'X-Timestamp': '1481808853.65009', + 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', + 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', + 'X-Static-Large-Object': 'True', + 'X-Object-Meta-Mtime': '1481513709.168512', + }, + text='test body')]) + + resp = self.cloud.get_object(self.container, self.object) + + self.assert_calls() + + self.assertEqual((response_headers, text), resp) + + def test_get_object_not_found(self): + self.register_uris([dict(method='GET', + uri=self.object_endpoint, status_code=404)]) + + self.assertIsNone(self.cloud.get_object(self.container, self.object)) + + self.assert_calls() + + def test_get_object_exception(self): + self.register_uris([dict(method='GET', uri=self.object_endpoint, + status_code=416)]) + + self.assertRaises( + openstack.OpenStackCloudException, + self.cloud.get_object, + self.container, self.object) 
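+ # A 416 is not treated as 'not found', so it is re-raised as an + # OpenStackCloudException rather than returning None.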
+ + self.assert_calls() + + def test_get_object_segment_size_below_min(self): + # Register directly because we make multiple calls. The number + # of calls we make isn't interesting - what we do with the return + # values is. Don't run assert_calls for the same reason. + self.register_uris([ + dict(method='GET', uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': 1000}, + slo={'min_segment_size': 500}), + headers={'Content-Type': 'application/json'})]) + self.assertEqual(500, self.cloud.get_object_segment_size(400)) + self.assertEqual(900, self.cloud.get_object_segment_size(900)) + self.assertEqual(1000, self.cloud.get_object_segment_size(1000)) + self.assertEqual(1000, self.cloud.get_object_segment_size(1100)) + + def test_get_object_segment_size_http_404(self): + self.register_uris([ + dict(method='GET', uri='https://object-store.example.com/info', + status_code=404, reason='Not Found')]) + self.assertEqual(oc_oc.DEFAULT_OBJECT_SEGMENT_SIZE, + self.cloud.get_object_segment_size(None)) + self.assert_calls() + + def test_get_object_segment_size_http_412(self): + self.register_uris([ + dict(method='GET', uri='https://object-store.example.com/info', + status_code=412, reason='Precondition failed')]) + self.assertEqual( + oc_oc.DEFAULT_OBJECT_SEGMENT_SIZE, + self.cloud.get_object_segment_size(None)) + self.assert_calls() + + +class TestObjectUploads(BaseTestObject): + + def setUp(self): + super(TestObjectUploads, self).setUp() + + self.content = self.getUniqueString().encode('latin-1') + self.object_file = tempfile.NamedTemporaryFile(delete=False) + self.object_file.write(self.content) + self.object_file.close() + (self.md5, self.sha256) = self.cloud._get_file_hashes( + self.object_file.name) + self.endpoint = self.cloud._object_store_client.get_endpoint() + + def test_create_object(self): + + self.register_uris([ + dict(method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': 1000}, + slo={'min_segment_size': 500})), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, container=self.container), + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, container=self.container), + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}), + dict(method='HEAD', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, container=self.container, + object=self.object), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, + container=self.container, object=self.object), + status_code=201, + validate=dict( + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + })) + ]) + + self.cloud.create_object( + container=self.container, name=self.object, + filename=self.object_file.name) + + self.assert_calls() + + def test_create_dynamic_large_object(self): + + max_file_size = 2
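+ # A 2-byte max_file_size forces the content to be split into many + # small segments, exercising the dynamic large object (DLO) path.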
min_file_size = 1 + + uris_to_mock = [ + dict(method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size})), + + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container, ), + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container), + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}), + dict(method='HEAD', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, container=self.container, + object=self.object), + status_code=404) + ] + + uris_to_mock.extend( + [dict(method='PUT', + uri='{endpoint}/{container}/{object}/{index:0>6}'.format( + endpoint=self.endpoint, + container=self.container, + object=self.object, + index=index), + status_code=201) + for index, offset in enumerate( + range(0, len(self.content), max_file_size))] + ) + + uris_to_mock.append( + dict(method='PUT', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, + container=self.container, object=self.object), + status_code=201, + validate=dict( + headers={ + 'x-object-manifest': '{container}/{object}'.format( + container=self.container, object=self.object), + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }))) + self.register_uris(uris_to_mock) + self.cloud.create_object( + container=self.container, name=self.object, + filename=self.object_file.name, use_slo=False) + + # After call 6, order become indeterminate because of thread pool + self.assert_calls(stop_after=6) + + for key, value in self.calls[-1]['headers'].items(): + self.assertEqual( + value, self.adapter.request_history[-1].headers[key], + 'header mismatch in manifest call') + + def test_create_static_large_object(self): + + max_file_size = 25 + min_file_size = 1 + + uris_to_mock = [ + dict(method='GET', uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size})), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container, ), + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container), + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}), + 
dict(method='HEAD', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, + container=self.container, object=self.object), + status_code=404) + ] + + uris_to_mock.extend([ + dict(method='PUT', + uri='{endpoint}/{container}/{object}/{index:0>6}'.format( + endpoint=self.endpoint, + container=self.container, + object=self.object, + index=index), + status_code=201, + headers=dict(Etag='etag{index}'.format(index=index))) + for index, offset in enumerate( + range(0, len(self.content), max_file_size)) + ]) + + uris_to_mock.append( + dict(method='PUT', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, + container=self.container, object=self.object), + status_code=201, + validate=dict( + params={ + 'multipart-manifest', 'put' + }, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }))) + self.register_uris(uris_to_mock) + + self.cloud.create_object( + container=self.container, name=self.object, + filename=self.object_file.name, use_slo=True) + + # After call 6, order become indeterminate because of thread pool + self.assert_calls(stop_after=6) + + for key, value in self.calls[-1]['headers'].items(): + self.assertEqual( + value, self.adapter.request_history[-1].headers[key], + 'header mismatch in manifest call') + + base_object = '/{container}/{object}'.format( + endpoint=self.endpoint, + container=self.container, + object=self.object) + + self.assertEqual([ + { + 'path': "{base_object}/000000".format( + base_object=base_object), + 'size_bytes': 25, + 'etag': 'etag0', + }, + { + 'path': "{base_object}/000001".format( + base_object=base_object), + 'size_bytes': 25, + 'etag': 'etag1', + }, + { + 'path': "{base_object}/000002".format( + base_object=base_object), + 'size_bytes': 25, + 'etag': 'etag2', + }, + { + 'path': "{base_object}/000003".format( + base_object=base_object), + 'size_bytes': len(self.object) - 75, + 'etag': 'etag3', + }, + ], self.adapter.request_history[-1].json()) + + def test_object_segment_retry_failure(self): + + max_file_size = 25 + min_file_size = 1 + + self.register_uris([ + dict(method='GET', uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size})), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container, ), + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }), + dict(method='HEAD', + uri='{endpoint}/{container}'.format( + endpoint=self.endpoint, + container=self.container), + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8'}), + dict(method='HEAD', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, + container=self.container, object=self.object), + status_code=404), + dict(method='PUT', + uri='{endpoint}/{container}/{object}/000000'.format( + endpoint=self.endpoint, + container=self.container, + object=self.object), + status_code=201), + dict(method='PUT', + 
+                 uri='{endpoint}/{container}/{object}/000001'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 status_code=201),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}/000002'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 status_code=201),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}/000003'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 status_code=501),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}'.format(
+                     endpoint=self.endpoint,
+                     container=self.container, object=self.object),
+                 status_code=201)
+        ])
+
+        self.assertRaises(
+            exc.OpenStackCloudException,
+            self.cloud.create_object,
+            container=self.container, name=self.object,
+            filename=self.object_file.name, use_slo=True)
+
+        # After call 6, the order becomes indeterminate because the segment
+        # uploads run in a thread pool.
+        self.assert_calls(stop_after=6)
+
+    def test_object_segment_retries(self):
+
+        max_file_size = 25
+        min_file_size = 1
+
+        self.register_uris([
+            dict(method='GET', uri='https://object-store.example.com/info',
+                 json=dict(
+                     swift={'max_file_size': max_file_size},
+                     slo={'min_segment_size': min_file_size})),
+            dict(method='HEAD',
+                 uri='{endpoint}/{container}'.format(
+                     endpoint=self.endpoint,
+                     container=self.container),
+                 status_code=404),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}'.format(
+                     endpoint=self.endpoint,
+                     container=self.container),
+                 status_code=201,
+                 headers={
+                     'Date': 'Fri, 16 Dec 2016 18:21:20 GMT',
+                     'Content-Length': '0',
+                     'Content-Type': 'text/html; charset=UTF-8',
+                 }),
+            dict(method='HEAD',
+                 uri='{endpoint}/{container}'.format(
+                     endpoint=self.endpoint,
+                     container=self.container),
+                 headers={
+                     'Content-Length': '0',
+                     'X-Container-Object-Count': '0',
+                     'Accept-Ranges': 'bytes',
+                     'X-Storage-Policy': 'Policy-0',
+                     'Date': 'Fri, 16 Dec 2016 18:29:05 GMT',
+                     'X-Timestamp': '1481912480.41664',
+                     'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1',
+                     'X-Container-Bytes-Used': '0',
+                     'Content-Type': 'text/plain; charset=utf-8'}),
+            dict(method='HEAD',
+                 uri='{endpoint}/{container}/{object}'.format(
+                     endpoint=self.endpoint,
+                     container=self.container, object=self.object),
+                 status_code=404),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}/000000'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 headers={'etag': 'etag0'},
+                 status_code=201),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}/000001'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 headers={'etag': 'etag1'},
+                 status_code=201),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}/000002'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 headers={'etag': 'etag2'},
+                 status_code=201),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}/000003'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 status_code=501),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}/000003'.format(
+                     endpoint=self.endpoint,
+                     container=self.container,
+                     object=self.object),
+                 status_code=201,
+                 headers={'etag': 'etag3'}),
+            dict(method='PUT',
+                 uri='{endpoint}/{container}/{object}'.format(
+                     endpoint=self.endpoint,
+                     container=self.container, object=self.object),
+                 status_code=201,
+                 validate=dict(
+                     params={
+                         'multipart-manifest', 'put'
+                     },
+                     headers={
+                         'x-object-meta-x-sdk-md5': self.md5,
+                         'x-object-meta-x-sdk-sha256': self.sha256,
+                     })))
+        ])
+
+        self.cloud.create_object(
+            container=self.container, name=self.object,
+            filename=self.object_file.name, use_slo=True)
+
+        # After call 6, the order becomes indeterminate because the segment
+        # uploads run in a thread pool.
+        self.assert_calls(stop_after=6)
+
+        for key, value in self.calls[-1]['headers'].items():
+            self.assertEqual(
+                value, self.adapter.request_history[-1].headers[key],
+                'header mismatch in manifest call')
+
+        base_object = '/{container}/{object}'.format(
+            container=self.container,
+            object=self.object)
+
+        self.assertEqual([
+            {
+                'path': "{base_object}/000000".format(
+                    base_object=base_object),
+                'size_bytes': 25,
+                'etag': 'etag0',
+            },
+            {
+                'path': "{base_object}/000001".format(
+                    base_object=base_object),
+                'size_bytes': 25,
+                'etag': 'etag1',
+            },
+            {
+                'path': "{base_object}/000002".format(
+                    base_object=base_object),
+                'size_bytes': 25,
+                'etag': 'etag2',
+            },
+            {
+                'path': "{base_object}/000003".format(
+                    base_object=base_object),
+                'size_bytes': len(self.object) - 75,
+                'etag': 'etag3',
+            },
+        ], self.adapter.request_history[-1].json())
diff --git a/openstack/tests/unit/cloud/test_operator_noauth.py b/openstack/tests/unit/cloud/test_operator_noauth.py
new file mode 100644
index 000000000..c4eefa3dd
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_operator_noauth.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(shade) Port this content back in from the shade repo, as these tests
+# don't have references to ironic_client.
+
+from openstack.tests.unit import base
+
+
+class TestShadeOperatorNoAuth(base.RequestsMockTestCase):
+    pass
diff --git a/openstack/tests/unit/cloud/test_port.py b/openstack/tests/unit/cloud/test_port.py
new file mode 100644
index 000000000..6cc953240
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_port.py
@@ -0,0 +1,363 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +""" +test_port +---------------------------------- + +Test port resource (managed by neutron) +""" + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests.unit import base + + +class TestPort(base.RequestsMockTestCase): + mock_neutron_port_create_rep = { + 'port': { + 'status': 'DOWN', + 'binding:host_id': '', + 'name': 'test-port-name', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': 'test-net-id', + 'tenant_id': 'test-tenant-id', + 'binding:vif_details': {}, + 'binding:vnic_type': 'normal', + 'binding:vif_type': 'unbound', + 'device_owner': '', + 'mac_address': '50:1c:0d:e4:f0:0d', + 'binding:profile': {}, + 'fixed_ips': [ + { + 'subnet_id': 'test-subnet-id', + 'ip_address': '29.29.29.29' + } + ], + 'id': 'test-port-id', + 'security_groups': [], + 'device_id': '' + } + } + + mock_neutron_port_update_rep = { + 'port': { + 'status': 'DOWN', + 'binding:host_id': '', + 'name': 'test-port-name-updated', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': 'test-net-id', + 'tenant_id': 'test-tenant-id', + 'binding:vif_details': {}, + 'binding:vnic_type': 'normal', + 'binding:vif_type': 'unbound', + 'device_owner': '', + 'mac_address': '50:1c:0d:e4:f0:0d', + 'binding:profile': {}, + 'fixed_ips': [ + { + 'subnet_id': 'test-subnet-id', + 'ip_address': '29.29.29.29' + } + ], + 'id': 'test-port-id', + 'security_groups': [], + 'device_id': '' + } + } + + mock_neutron_port_list_rep = { + 'ports': [ + { + 'status': 'ACTIVE', + 'binding:host_id': 'devstack', + 'name': 'first-port', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3', + 'tenant_id': '', + 'extra_dhcp_opts': [], + 'binding:vif_details': { + 'port_filter': True, + 'ovs_hybrid_plug': True + }, + 'binding:vif_type': 'ovs', + 'device_owner': 'network:router_gateway', + 'mac_address': 'fa:16:3e:58:42:ed', + 'binding:profile': {}, + 'binding:vnic_type': 'normal', + 'fixed_ips': [ + { + 'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062', + 'ip_address': '172.24.4.2' + } + ], + 'id': 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', + 'security_groups': [], + 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824' + }, + { + 'status': 'ACTIVE', + 'binding:host_id': 'devstack', + 'name': '', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': 'f27aa545-cbdd-4907-b0c6-c9e8b039dcc2', + 'tenant_id': 'd397de8a63f341818f198abb0966f6f3', + 'extra_dhcp_opts': [], + 'binding:vif_details': { + 'port_filter': True, + 'ovs_hybrid_plug': True + }, + 'binding:vif_type': 'ovs', + 'device_owner': 'network:router_interface', + 'mac_address': 'fa:16:3e:bb:3c:e4', + 'binding:profile': {}, + 'binding:vnic_type': 'normal', + 'fixed_ips': [ + { + 'subnet_id': '288bf4a1-51ba-43b6-9d0a-520e9005db17', + 'ip_address': '10.0.0.1' + } + ], + 'id': 'f71a6703-d6de-4be1-a91a-a570ede1d159', + 'security_groups': [], + 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824' + } + ] + } + + def test_create_port(self): + self.register_uris([ + dict(method="POST", + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + json=self.mock_neutron_port_create_rep, + validate=dict( + json={'port': { + 'network_id': 'test-net-id', + 'name': 'test-port-name', + 'admin_state_up': True}})) + ]) + port = self.cloud.create_port( + network_id='test-net-id', name='test-port-name', + admin_state_up=True) + self.assertEqual(self.mock_neutron_port_create_rep['port'], port) + self.assert_calls() + + def test_create_port_parameters(self): + 
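# NOTE: the Italian keyword arguments used below ('nome',
+        # 'stato_amministrativo_porta') are deliberately invalid; the
+        # cloud layer should reject unknown kwargs with a TypeError.
+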
"""Test that we detect invalid arguments passed to create_port""" + self.assertRaises( + TypeError, self.cloud.create_port, + network_id='test-net-id', nome='test-port-name', + stato_amministrativo_porta=True) + + def test_create_port_exception(self): + self.register_uris([ + dict(method="POST", + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + status_code=500, + validate=dict( + json={'port': { + 'network_id': 'test-net-id', + 'name': 'test-port-name', + 'admin_state_up': True}})) + ]) + self.assertRaises( + OpenStackCloudException, self.cloud.create_port, + network_id='test-net-id', name='test-port-name', + admin_state_up=True) + self.assert_calls() + + def test_update_port(self): + port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + json=self.mock_neutron_port_list_rep), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'ports', '%s.json' % port_id]), + json=self.mock_neutron_port_update_rep, + validate=dict( + json={'port': {'name': 'test-port-name-updated'}})) + ]) + port = self.cloud.update_port( + name_or_id=port_id, name='test-port-name-updated') + + self.assertEqual(self.mock_neutron_port_update_rep['port'], port) + self.assert_calls() + + def test_update_port_parameters(self): + """Test that we detect invalid arguments passed to update_port""" + self.assertRaises( + TypeError, self.cloud.update_port, + name_or_id='test-port-id', nome='test-port-name-updated') + + def test_update_port_exception(self): + port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + json=self.mock_neutron_port_list_rep), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'ports', '%s.json' % port_id]), + status_code=500, + validate=dict( + json={'port': {'name': 'test-port-name-updated'}})) + ]) + self.assertRaises( + OpenStackCloudException, self.cloud.update_port, + name_or_id='d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', + name='test-port-name-updated') + self.assert_calls() + + def test_list_ports(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + json=self.mock_neutron_port_list_rep) + ]) + ports = self.cloud.list_ports() + self.assertItemsEqual(self.mock_neutron_port_list_rep['ports'], ports) + self.assert_calls() + + def test_list_ports_exception(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + status_code=500) + ]) + self.assertRaises(OpenStackCloudException, self.cloud.list_ports) + + def test_search_ports_by_id(self): + port_id = 'f71a6703-d6de-4be1-a91a-a570ede1d159' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + json=self.mock_neutron_port_list_rep) + ]) + ports = self.cloud.search_ports(name_or_id=port_id) + + self.assertEqual(1, len(ports)) + self.assertEqual('fa:16:3e:bb:3c:e4', ports[0]['mac_address']) + self.assert_calls() + + def test_search_ports_by_name(self): + port_name = "first-port" + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json']), + json=self.mock_neutron_port_list_rep) + ]) + ports = self.cloud.search_ports(name_or_id=port_name) + + 
+        self.assertEqual(1, len(ports))
+        self.assertEqual('fa:16:3e:58:42:ed', ports[0]['mac_address'])
+        self.assert_calls()
+
+    def test_search_ports_not_found(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'ports.json']),
+                 json=self.mock_neutron_port_list_rep)
+        ])
+        ports = self.cloud.search_ports(name_or_id='non-existent')
+        self.assertEqual(0, len(ports))
+        self.assert_calls()
+
+    def test_delete_port(self):
+        port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b'
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'ports.json']),
+                 json=self.mock_neutron_port_list_rep),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'ports', '%s.json' % port_id]),
+                 json={})
+        ])
+
+        self.assertTrue(self.cloud.delete_port(name_or_id='first-port'))
+
+    def test_delete_port_not_found(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'ports.json']),
+                 json=self.mock_neutron_port_list_rep)
+        ])
+        self.assertFalse(self.cloud.delete_port(name_or_id='non-existent'))
+        self.assert_calls()
+
+    def test_delete_port_multiple_found(self):
+        port_name = "port-name"
+        port1 = dict(id='123', name=port_name)
+        port2 = dict(id='456', name=port_name)
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'ports.json']),
+                 json={'ports': [port1, port2]})
+        ])
+        self.assertRaises(OpenStackCloudException,
+                          self.cloud.delete_port, port_name)
+        self.assert_calls()
+
+    def test_delete_port_multiple_using_id(self):
+        port_name = "port-name"
+        port1 = dict(id='123', name=port_name)
+        port2 = dict(id='456', name=port_name)
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'ports.json']),
+                 json={'ports': [port1, port2]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'ports', '%s.json' % port1['id']]),
+                 json={})
+        ])
+        self.assertTrue(self.cloud.delete_port(name_or_id=port1['id']))
+        self.assert_calls()
+
+    def test_get_port_by_id(self):
+        fake_port = dict(id='123', name='456')
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0',
+                                                  'ports',
+                                                  fake_port['id']]),
+                 json={'port': fake_port})
+        ])
+        r = self.cloud.get_port_by_id(fake_port['id'])
+        self.assertIsNotNone(r)
+        self.assertDictEqual(fake_port, r)
+        self.assert_calls()
diff --git a/openstack/tests/unit/cloud/test_project.py b/openstack/tests/unit/cloud/test_project.py
new file mode 100644
index 000000000..737967966
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_project.py
@@ -0,0 +1,262 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
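+
+# NOTE: these tests exercise both Keystone v3 (the default fixture) and
+# v2; the *_v2 variants opt in explicitly via self.use_keystone_v2().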
+
+import testtools
+from testtools import matchers
+
+import openstack.cloud
+import openstack.cloud._utils
+from openstack.tests.unit import base
+
+
+class TestProject(base.RequestsMockTestCase):
+
+    def get_mock_url(self, service_type='identity', interface='admin',
+                     resource=None, append=None, base_url_append=None,
+                     v3=True):
+        if v3 and resource is None:
+            resource = 'projects'
+        elif not v3 and resource is None:
+            resource = 'tenants'
+        if base_url_append is None and v3:
+            base_url_append = 'v3'
+        return super(TestProject, self).get_mock_url(
+            service_type=service_type, interface=interface, resource=resource,
+            append=append, base_url_append=base_url_append)
+
+    def test_create_project_v2(self):
+        self.use_keystone_v2()
+        project_data = self._get_project_data(v3=False)
+        self.register_uris([
+            dict(method='POST', uri=self.get_mock_url(v3=False),
+                 status_code=200, json=project_data.json_response,
+                 validate=dict(json=project_data.json_request))
+        ])
+        project = self.op_cloud.create_project(
+            name=project_data.project_name,
+            description=project_data.description)
+        self.assertThat(project.id, matchers.Equals(project_data.project_id))
+        self.assertThat(
+            project.name, matchers.Equals(project_data.project_name))
+        self.assert_calls()
+
+    def test_create_project_v3(self):
+        project_data = self._get_project_data(
+            description=self.getUniqueString('projectDesc'))
+        reference_req = project_data.json_request.copy()
+        reference_req['project']['enabled'] = True
+        self.register_uris([
+            dict(method='POST',
+                 uri=self.get_mock_url(),
+                 status_code=200,
+                 json=project_data.json_response,
+                 validate=dict(json=reference_req))
+        ])
+        project = self.op_cloud.create_project(
+            name=project_data.project_name,
+            description=project_data.description,
+            domain_id=project_data.domain_id)
+        self.assertThat(project.id, matchers.Equals(project_data.project_id))
+        self.assertThat(
+            project.name, matchers.Equals(project_data.project_name))
+        self.assertThat(
+            project.description, matchers.Equals(project_data.description))
+        self.assertThat(
+            project.domain_id, matchers.Equals(project_data.domain_id))
+        self.assert_calls()
+
+    def test_create_project_v3_no_domain(self):
+        with testtools.ExpectedException(
+                openstack.OpenStackCloudException,
+                "User or project creation requires an explicit"
+                " domain_id argument."
+ ): + self.op_cloud.create_project(name='foo', description='bar') + + def test_delete_project_v2(self): + self.use_keystone_v2() + project_data = self._get_project_data(v3=False) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(v3=False), + status_code=200, + json={'tenants': [project_data.json_response['tenant']]}), + dict(method='DELETE', + uri=self.get_mock_url( + v3=False, append=[project_data.project_id]), + status_code=204) + ]) + self.op_cloud.delete_project(project_data.project_id) + self.assert_calls() + + def test_delete_project_v3(self): + project_data = self._get_project_data(v3=False) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'projects': [project_data.json_response['tenant']]}), + dict(method='DELETE', + uri=self.get_mock_url(append=[project_data.project_id]), + status_code=204) + ]) + self.op_cloud.delete_project(project_data.project_id) + self.assert_calls() + + def test_update_project_not_found(self): + project_data = self._get_project_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'projects': []}) + ]) + # NOTE(notmorgan): This test (and shade) does not represent a case + # where the project is in the project list but a 404 is raised when + # the PATCH is issued. This is a bug in shade and should be fixed, + # shade will raise an attribute error instead of the proper + # project not found exception. + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "Project %s not found." % project_data.project_id + ): + self.op_cloud.update_project(project_data.project_id) + self.assert_calls() + + def test_update_project_v2(self): + self.use_keystone_v2() + project_data = self._get_project_data( + v3=False, + description=self.getUniqueString('projectDesc')) + # remove elements that are not updated in this test. 
+ project_data.json_request['tenant'].pop('name') + project_data.json_request['tenant'].pop('enabled') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(v3=False), + status_code=200, + json={'tenants': [project_data.json_response['tenant']]}), + dict(method='POST', + uri=self.get_mock_url( + v3=False, append=[project_data.project_id]), + status_code=200, + json=project_data.json_response, + validate=dict(json=project_data.json_request)) + ]) + project = self.op_cloud.update_project( + project_data.project_id, + description=project_data.description) + self.assertThat(project.id, matchers.Equals(project_data.project_id)) + self.assertThat( + project.name, matchers.Equals(project_data.project_name)) + self.assertThat( + project.description, matchers.Equals(project_data.description)) + self.assert_calls() + + def test_update_project_v3(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc')) + reference_req = project_data.json_request.copy() + # Remove elements not actually sent in the update + reference_req['project'].pop('domain_id') + reference_req['project'].pop('name') + reference_req['project'].pop('enabled') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource=('projects?domain_id=%s' % + project_data.domain_id)), + status_code=200, + json={'projects': [project_data.json_response['project']]}), + dict(method='PATCH', + uri=self.get_mock_url(append=[project_data.project_id]), + status_code=200, json=project_data.json_response, + validate=dict(json=reference_req)) + ]) + project = self.op_cloud.update_project( + project_data.project_id, + description=project_data.description, + domain_id=project_data.domain_id) + self.assertThat(project.id, matchers.Equals(project_data.project_id)) + self.assertThat( + project.name, matchers.Equals(project_data.project_name)) + self.assertThat( + project.description, matchers.Equals(project_data.description)) + self.assert_calls() + + def test_list_projects_v3(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc')) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource=('projects?domain_id=%s' % + project_data.domain_id)), + status_code=200, + json={'projects': [project_data.json_response['project']]}) + ]) + projects = self.op_cloud.list_projects(project_data.domain_id) + self.assertThat(len(projects), matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id)) + self.assert_calls() + + def test_list_projects_v3_kwarg(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc')) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource=('projects?domain_id=%s' % + project_data.domain_id)), + status_code=200, + json={'projects': [project_data.json_response['project']]}) + ]) + projects = self.op_cloud.list_projects( + domain_id=project_data.domain_id) + self.assertThat(len(projects), matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id)) + self.assert_calls() + + def test_list_projects_search_compat(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc')) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'projects': [project_data.json_response['project']]}) + ]) + projects = self.op_cloud.search_projects(project_data.project_id) + self.assertThat(len(projects), 
matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id)) + self.assert_calls() + + def test_list_projects_search_compat_v3(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc')) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + resource=('projects?domain_id=%s' % + project_data.domain_id)), + status_code=200, + json={'projects': [project_data.json_response['project']]}) + ]) + projects = self.op_cloud.search_projects( + domain_id=project_data.domain_id) + self.assertThat(len(projects), matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py b/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py new file mode 100644 index 000000000..1e5955a39 --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py @@ -0,0 +1,397 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestQosBandwidthLimitRule(base.RequestsMockTestCase): + + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' + rule_max_kbps = 1000 + rule_max_burst = 100 + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False + } + + mock_rule = { + 'id': rule_id, + 'max_kbps': rule_max_kbps, + 'max_burst_kbps': rule_max_burst, + 'direction': 'egress' + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension." 
+ } + + qos_bw_limit_direction_extension = { + "updated": "2017-04-10T10:00:00-00:00", + "name": "Direction for QoS bandwidth limit rule", + "links": [], + "alias": "qos-bw-limit-direction", + "description": ("Allow to configure QoS bandwidth limit rule with " + "specific direction: ingress or egress") + } + + enabled_neutron_extensions = [qos_extension, + qos_bw_limit_direction_extension] + + def test_get_qos_bandwidth_limit_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'bandwidth_limit_rules', + '%s.json' % self.rule_id]), + json={'bandwidth_limit_rule': self.mock_rule}) + ]) + r = self.cloud.get_qos_bandwidth_limit_rule(self.policy_name, + self.rule_id) + self.assertDictEqual(self.mock_rule, r) + self.assert_calls() + + def test_get_qos_bandwidth_limit_rule_no_qos_policy_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': []}) + ]) + self.assertRaises( + exc.OpenStackCloudResourceNotFound, + self.cloud.get_qos_bandwidth_limit_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_get_qos_bandwidth_limit_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.get_qos_bandwidth_limit_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_create_qos_bandwidth_limit_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'bandwidth_limit_rules']), + json={'bandwidth_limit_rule': self.mock_rule}) + ]) + rule = self.cloud.create_qos_bandwidth_limit_rule( + self.policy_name, max_kbps=self.rule_max_kbps) + self.assertDictEqual(self.mock_rule, rule) + self.assert_calls() + + def test_create_qos_bandwidth_limit_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + 
json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.create_qos_bandwidth_limit_rule, self.policy_name, + max_kbps=100) + self.assert_calls() + + def test_create_qos_bandwidth_limit_rule_no_qos_direction_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'bandwidth_limit_rules']), + json={'bandwidth_limit_rule': self.mock_rule}) + ]) + rule = self.cloud.create_qos_bandwidth_limit_rule( + self.policy_name, max_kbps=self.rule_max_kbps, direction="ingress") + self.assertDictEqual(self.mock_rule, rule) + self.assert_calls() + + def test_update_qos_bandwidth_limit_rule(self): + expected_rule = copy.copy(self.mock_rule) + expected_rule['max_kbps'] = self.rule_max_kbps + 100 + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'bandwidth_limit_rules', + '%s.json' % self.rule_id]), + json={'bandwidth_limit_rule': self.mock_rule}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'bandwidth_limit_rules', + '%s.json' % self.rule_id]), + json={'bandwidth_limit_rule': expected_rule}, + validate=dict( + json={'bandwidth_limit_rule': { + 'max_kbps': self.rule_max_kbps + 100}})) + ]) + rule = self.cloud.update_qos_bandwidth_limit_rule( + self.policy_id, self.rule_id, max_kbps=self.rule_max_kbps + 100) + self.assertDictEqual(expected_rule, rule) + self.assert_calls() + + def test_update_qos_bandwidth_limit_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.update_qos_bandwidth_limit_rule, + self.policy_id, self.rule_id, max_kbps=2000) + 
self.assert_calls()
+
+    def test_update_qos_bandwidth_limit_rule_no_qos_direction_extension(self):
+        expected_rule = copy.copy(self.mock_rule)
+        expected_rule['max_kbps'] = self.rule_max_kbps + 100
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': [self.qos_extension]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': [self.qos_extension]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies.json']),
+                 json={'policies': [self.mock_policy]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': [self.qos_extension]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': [self.qos_extension]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': [self.qos_extension]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies.json']),
+                 json={'policies': [self.mock_policy]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies', self.policy_id,
+                             'bandwidth_limit_rules',
+                             '%s.json' % self.rule_id]),
+                 json={'bandwidth_limit_rule': self.mock_rule}),
+            dict(method='PUT',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies', self.policy_id,
+                             'bandwidth_limit_rules',
+                             '%s.json' % self.rule_id]),
+                 json={'bandwidth_limit_rule': expected_rule},
+                 validate=dict(
+                     json={'bandwidth_limit_rule': {
+                         'max_kbps': self.rule_max_kbps + 100}}))
+        ])
+        rule = self.cloud.update_qos_bandwidth_limit_rule(
+            self.policy_id, self.rule_id, max_kbps=self.rule_max_kbps + 100,
+            direction="ingress")
+        # Even though an attempt was made to change the direction to
+        # 'ingress', the direction should come back unchanged in the
+        # returned rule.
+        self.assertDictEqual(expected_rule, rule)
+        self.assert_calls()
+
+    def test_delete_qos_bandwidth_limit_rule(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': self.enabled_neutron_extensions}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': self.enabled_neutron_extensions}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies.json']),
+                 json={'policies': [self.mock_policy]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies', self.policy_id,
+                             'bandwidth_limit_rules',
+                             '%s.json' % self.rule_id]),
+                 json={})
+        ])
+        self.assertTrue(
+            self.cloud.delete_qos_bandwidth_limit_rule(
+                self.policy_name, self.rule_id))
+        self.assert_calls()
+
+    def test_delete_qos_bandwidth_limit_rule_no_qos_extension(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': []})
+        ])
+        self.assertRaises(
+            exc.OpenStackCloudException,
+            self.cloud.delete_qos_bandwidth_limit_rule,
+            self.policy_name, self.rule_id)
+        self.assert_calls()
+
+    def test_delete_qos_bandwidth_limit_rule_not_found(self):
+        self.register_uris([
+            dict(method='GET',
+
uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'bandwidth_limit_rules', + '%s.json' % self.rule_id]), + status_code=404) + ]) + self.assertFalse( + self.cloud.delete_qos_bandwidth_limit_rule( + self.policy_name, self.rule_id)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py b/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py new file mode 100644 index 000000000..b8a2158a1 --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py @@ -0,0 +1,294 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestQosDscpMarkingRule(base.RequestsMockTestCase): + + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' + rule_dscp_mark = 32 + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False + } + + mock_rule = { + 'id': rule_id, + 'dscp_mark': rule_dscp_mark, + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension." 
+ } + + enabled_neutron_extensions = [qos_extension] + + def test_get_qos_dscp_marking_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'dscp_marking_rules', + '%s.json' % self.rule_id]), + json={'dscp_marking_rule': self.mock_rule}) + ]) + r = self.cloud.get_qos_dscp_marking_rule(self.policy_name, + self.rule_id) + self.assertDictEqual(self.mock_rule, r) + self.assert_calls() + + def test_get_qos_dscp_marking_rule_no_qos_policy_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': []}) + ]) + self.assertRaises( + exc.OpenStackCloudResourceNotFound, + self.cloud.get_qos_dscp_marking_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_get_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.get_qos_dscp_marking_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_create_qos_dscp_marking_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'dscp_marking_rules']), + json={'dscp_marking_rule': self.mock_rule}) + ]) + rule = self.cloud.create_qos_dscp_marking_rule( + self.policy_name, dscp_mark=self.rule_dscp_mark) + self.assertDictEqual(self.mock_rule, rule) + self.assert_calls() + + def test_create_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.create_qos_dscp_marking_rule, self.policy_name, + dscp_mark=16) + self.assert_calls() + + def test_update_qos_dscp_marking_rule(self): + new_dscp_mark_value = 16 + expected_rule = copy.copy(self.mock_rule) + expected_rule['dscp_mark'] = new_dscp_mark_value + self.register_uris([ + 
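# The extension and policy lookups are mocked twice below: once for
+            # resolving the rule before the update, and once for the update
+            # call itself.
+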
dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'dscp_marking_rules', + '%s.json' % self.rule_id]), + json={'dscp_marking_rule': self.mock_rule}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'dscp_marking_rules', + '%s.json' % self.rule_id]), + json={'dscp_marking_rule': expected_rule}, + validate=dict( + json={'dscp_marking_rule': { + 'dscp_mark': new_dscp_mark_value}})) + ]) + rule = self.cloud.update_qos_dscp_marking_rule( + self.policy_id, self.rule_id, dscp_mark=new_dscp_mark_value) + self.assertDictEqual(expected_rule, rule) + self.assert_calls() + + def test_update_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.update_qos_dscp_marking_rule, + self.policy_id, self.rule_id, dscp_mark=8) + self.assert_calls() + + def test_delete_qos_dscp_marking_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'dscp_marking_rules', + '%s.json' % self.rule_id]), + json={}) + ]) + self.assertTrue( + self.cloud.delete_qos_dscp_marking_rule( + self.policy_name, self.rule_id)) + self.assert_calls() + + def test_delete_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.delete_qos_dscp_marking_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_delete_qos_dscp_marking_rule_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), 
+ dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'dscp_marking_rules', + '%s.json' % self.rule_id]), + status_code=404) + ]) + self.assertFalse( + self.cloud.delete_qos_dscp_marking_rule( + self.policy_name, self.rule_id)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py b/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py new file mode 100644 index 000000000..71de9e922 --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py @@ -0,0 +1,294 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestQosMinimumBandwidthRule(base.RequestsMockTestCase): + + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' + rule_min_kbps = 1000 + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False + } + + mock_rule = { + 'id': rule_id, + 'min_kbps': rule_min_kbps, + 'direction': 'egress' + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension." 
+ } + + enabled_neutron_extensions = [qos_extension] + + def test_get_qos_minimum_bandwidth_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'minimum_bandwidth_rules', + '%s.json' % self.rule_id]), + json={'minimum_bandwidth_rule': self.mock_rule}) + ]) + r = self.cloud.get_qos_minimum_bandwidth_rule(self.policy_name, + self.rule_id) + self.assertDictEqual(self.mock_rule, r) + self.assert_calls() + + def test_get_qos_minimum_bandwidth_rule_no_qos_policy_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': []}) + ]) + self.assertRaises( + exc.OpenStackCloudResourceNotFound, + self.cloud.get_qos_minimum_bandwidth_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_get_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.get_qos_minimum_bandwidth_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_create_qos_minimum_bandwidth_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'minimum_bandwidth_rules']), + json={'minimum_bandwidth_rule': self.mock_rule}) + ]) + rule = self.cloud.create_qos_minimum_bandwidth_rule( + self.policy_name, min_kbps=self.rule_min_kbps) + self.assertDictEqual(self.mock_rule, rule) + self.assert_calls() + + def test_create_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.create_qos_minimum_bandwidth_rule, self.policy_name, + min_kbps=100) + self.assert_calls() + + def test_update_qos_minimum_bandwidth_rule(self): + expected_rule = copy.copy(self.mock_rule) + expected_rule['min_kbps'] = 
self.rule_min_kbps + 100 + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'minimum_bandwidth_rules', + '%s.json' % self.rule_id]), + json={'minimum_bandwidth_rule': self.mock_rule}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'minimum_bandwidth_rules', + '%s.json' % self.rule_id]), + json={'minimum_bandwidth_rule': expected_rule}, + validate=dict( + json={'minimum_bandwidth_rule': { + 'min_kbps': self.rule_min_kbps + 100}})) + ]) + rule = self.cloud.update_qos_minimum_bandwidth_rule( + self.policy_id, self.rule_id, min_kbps=self.rule_min_kbps + 100) + self.assertDictEqual(expected_rule, rule) + self.assert_calls() + + def test_update_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.update_qos_minimum_bandwidth_rule, + self.policy_id, self.rule_id, min_kbps=2000) + self.assert_calls() + + def test_delete_qos_minimum_bandwidth_rule(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'minimum_bandwidth_rules', + '%s.json' % self.rule_id]), + json={}) + ]) + self.assertTrue( + self.cloud.delete_qos_minimum_bandwidth_rule( + self.policy_name, self.rule_id)) + self.assert_calls() + + def test_delete_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.delete_qos_minimum_bandwidth_rule, + self.policy_name, self.rule_id) + self.assert_calls() + + def test_delete_qos_minimum_bandwidth_rule_not_found(self): + self.register_uris([ + dict(method='GET', + 
uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', self.policy_id, + 'minimum_bandwidth_rules', + '%s.json' % self.rule_id]), + status_code=404) + ]) + self.assertFalse( + self.cloud.delete_qos_minimum_bandwidth_rule( + self.policy_name, self.rule_id)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_policy.py b/openstack/tests/unit/cloud/test_qos_policy.py new file mode 100644 index 000000000..38dbd27fa --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_policy.py @@ -0,0 +1,322 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestQosPolicy(base.RequestsMockTestCase): + + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension." 
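+        # NOTE: this fixture mirrors a single entry from Neutron's
+        # GET /v2.0/extensions listing; the tests below key off the 'qos'
+        # alias being present (and 'qos-default' for default-policy calls).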
+    }
+
+    qos_default_extension = {
+        "updated": "2017-04-06T10:00:00-00:00",
+        "name": "QoS default policy",
+        "links": [],
+        "alias": "qos-default",
+        "description": "Expose the QoS default policy per project"
+    }
+
+    enabled_neutron_extensions = [qos_extension, qos_default_extension]
+
+    def test_get_qos_policy(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': self.enabled_neutron_extensions}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies.json']),
+                 json={'policies': [self.mock_policy]})
+        ])
+        r = self.cloud.get_qos_policy(self.policy_name)
+        self.assertIsNotNone(r)
+        self.assertDictEqual(self.mock_policy, r)
+        self.assert_calls()
+
+    def test_get_qos_policy_no_qos_extension(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': []})
+        ])
+        self.assertRaises(
+            exc.OpenStackCloudException,
+            self.cloud.get_qos_policy, self.policy_name)
+        self.assert_calls()
+
+    def test_create_qos_policy(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': self.enabled_neutron_extensions}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies.json']),
+                 json={'policy': self.mock_policy})
+        ])
+        policy = self.cloud.create_qos_policy(
+            name=self.policy_name, project_id=self.project_id)
+        self.assertDictEqual(self.mock_policy, policy)
+        self.assert_calls()
+
+    def test_create_qos_policy_no_qos_extension(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': []})
+        ])
+        self.assertRaises(
+            exc.OpenStackCloudException,
+            self.cloud.create_qos_policy, name=self.policy_name)
+        self.assert_calls()
+
+    def test_create_qos_policy_no_qos_default_extension(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': [self.qos_extension]}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': [self.qos_extension]}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies.json']),
+                 json={'policy': self.mock_policy},
+                 validate=dict(
+                     json={'policy': {
+                         'name': self.policy_name,
+                         'project_id': self.project_id}}))
+        ])
+        policy = self.cloud.create_qos_policy(
+            name=self.policy_name, project_id=self.project_id, default=True)
+        self.assertDictEqual(self.mock_policy, policy)
+        self.assert_calls()
+
+    def test_delete_qos_policy(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': self.enabled_neutron_extensions}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'extensions.json']),
+                 json={'extensions': self.enabled_neutron_extensions}),
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies.json']),
+                 json={'policies': [self.mock_policy]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'qos', 'policies',
+                             '%s.json' % self.policy_id]),
+                 json={})
+        
]) + self.assertTrue(self.cloud.delete_qos_policy(self.policy_name)) + self.assert_calls() + + def test_delete_qos_policy_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.delete_qos_policy, self.policy_name) + self.assert_calls() + + def test_delete_qos_policy_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': []}) + ]) + self.assertFalse(self.cloud.delete_qos_policy('goofy')) + self.assert_calls() + + def test_delete_qos_policy_multiple_found(self): + policy1 = dict(id='123', name=self.policy_name) + policy2 = dict(id='456', name=self.policy_name) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [policy1, policy2]}) + ]) + self.assertRaises(exc.OpenStackCloudException, + self.cloud.delete_qos_policy, + self.policy_name) + self.assert_calls() + + def test_delete_qos_policy_multiple_using_id(self): + policy1 = self.mock_policy + policy2 = dict(id='456', name=self.policy_name) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [policy1, policy2]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', + '%s.json' % self.policy_id]), + json={}) + ]) + self.assertTrue(self.cloud.delete_qos_policy(policy1['id'])) + self.assert_calls() + + def test_update_qos_policy(self): + expected_policy = copy.copy(self.mock_policy) + expected_policy['name'] = 'goofy' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': self.enabled_neutron_extensions}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', + '%s.json' % self.policy_id]), + json={'policy': expected_policy}, + validate=dict( + json={'policy': 
{'name': 'goofy'}})) + ]) + policy = self.cloud.update_qos_policy( + self.policy_id, name='goofy') + self.assertDictEqual(expected_policy, policy) + self.assert_calls() + + def test_update_qos_policy_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.update_qos_policy, self.policy_id, name="goofy") + self.assert_calls() + + def test_update_qos_policy_no_qos_default_extension(self): + expected_policy = copy.copy(self.mock_policy) + expected_policy['name'] = 'goofy' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies.json']), + json={'policies': [self.mock_policy]}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'policies', + '%s.json' % self.policy_id]), + json={'policy': expected_policy}, + validate=dict( + json={'policy': {'name': "goofy"}})) + ]) + policy = self.cloud.update_qos_policy( + self.policy_id, name='goofy', default=True) + self.assertDictEqual(expected_policy, policy) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_rule_type.py b/openstack/tests/unit/cloud/test_qos_rule_type.py new file mode 100644 index 000000000..d52c6a1ec --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_rule_type.py @@ -0,0 +1,149 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestQosRuleType(base.RequestsMockTestCase): + + rule_type_name = "bandwidth_limit" + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension." 
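+        # NOTE: only the 'alias' value matters to these tests; rule-type
+        # detail lookups additionally require the 'qos-rule-type-details'
+        # alias defined below.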
+ } + qos_rule_type_details_extension = { + "updated": "2017-06-22T10:00:00-00:00", + "name": "Details of QoS rule types", + "links": [], + "alias": "qos-rule-type-details", + "description": ("Expose details about QoS rule types supported by " + "loaded backend drivers") + } + + mock_rule_type_bandwidth_limit = { + 'type': 'bandwidth_limit' + } + mock_rule_type_dscp_marking = { + 'type': 'dscp_marking' + } + mock_rule_types = [ + mock_rule_type_bandwidth_limit, mock_rule_type_dscp_marking] + + mock_rule_type_details = { + 'drivers': [{ + 'name': 'linuxbridge', + 'supported_parameters': [{ + 'parameter_values': {'start': 0, 'end': 2147483647}, + 'parameter_type': 'range', + 'parameter_name': u'max_kbps' + }, { + 'parameter_values': ['ingress', 'egress'], + 'parameter_type': 'choices', + 'parameter_name': u'direction' + }, { + 'parameter_values': {'start': 0, 'end': 2147483647}, + 'parameter_type': 'range', + 'parameter_name': 'max_burst_kbps' + }] + }], + 'type': rule_type_name + } + + def test_list_qos_rule_types(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'rule-types.json']), + json={'rule_types': self.mock_rule_types}) + ]) + rule_types = self.cloud.list_qos_rule_types() + self.assertEqual(self.mock_rule_types, rule_types) + self.assert_calls() + + def test_list_qos_rule_types_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises(exc.OpenStackCloudException, + self.cloud.list_qos_rule_types) + self.assert_calls() + + def test_get_qos_rule_type_details(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [ + self.qos_extension, + self.qos_rule_type_details_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [ + self.qos_extension, + self.qos_rule_type_details_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'qos', 'rule-types', + '%s.json' % self.rule_type_name]), + json={'rule_type': self.mock_rule_type_details}) + ]) + self.assertEqual( + self.mock_rule_type_details, + self.cloud.get_qos_rule_type_details(self.rule_type_name) + ) + self.assert_calls() + + def test_get_qos_rule_type_details_no_qos_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': []}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.get_qos_rule_type_details, self.rule_type_name) + self.assert_calls() + + def test_get_qos_rule_type_details_no_qos_details_extension(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json={'extensions': [self.qos_extension]}) + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.get_qos_rule_type_details, self.rule_type_name) + self.assert_calls() diff --git 
a/openstack/tests/unit/cloud/test_quotas.py b/openstack/tests/unit/cloud/test_quotas.py new file mode 100644 index 000000000..0494372fe --- /dev/null +++ b/openstack/tests/unit/cloud/test_quotas.py @@ -0,0 +1,255 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.cloud import exc +from openstack.tests.unit import base + +fake_quota_set = { + "cores": 20, + "fixed_ips": -1, + "floating_ips": 10, + "injected_file_content_bytes": 10240, + "injected_file_path_bytes": 255, + "injected_files": 5, + "instances": 10, + "key_pairs": 100, + "metadata_items": 128, + "ram": 51200, + "security_group_rules": 20, + "security_groups": 45, + "server_groups": 10, + "server_group_members": 10 +} + + +class TestQuotas(base.RequestsMockTestCase): + def setUp(self, cloud_config_fixture='clouds.yaml'): + super(TestQuotas, self).setUp( + cloud_config_fixture=cloud_config_fixture) + + def test_update_quotas(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + + self.register_uris([ + dict(method='PUT', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-quota-sets', project.project_id]), + json={'quota_set': fake_quota_set}, + validate=dict( + json={ + 'quota_set': { + 'cores': 1, + 'force': True + }})), + ]) + + self.op_cloud.set_compute_quotas(project.project_id, cores=1) + + self.assert_calls() + + def test_update_quotas_bad_request(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + + self.register_uris([ + dict(method='PUT', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-quota-sets', project.project_id]), + status_code=400), + ]) + + self.assertRaises(exc.OpenStackCloudException, + self.op_cloud.set_compute_quotas, project.project_id) + + self.assert_calls() + + def test_get_quotas(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-quota-sets', project.project_id]), + json={'quota_set': fake_quota_set}), + ]) + + self.op_cloud.get_compute_quotas(project.project_id) + + self.assert_calls() + + def test_delete_quotas(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-quota-sets', project.project_id])), + ]) + + self.op_cloud.delete_compute_quotas(project.project_id) + + self.assert_calls() + + def test_cinder_update_quotas(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='PUT', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['os-quota-sets', project.project_id]), + json=dict(quota_set={'volumes': 1}), + validate=dict( + json={'quota_set': { + 'volumes': 1, + 'tenant_id': project.project_id}}))]) + self.op_cloud.set_volume_quotas(project.project_id, volumes=1) + self.assert_calls() + + def test_cinder_get_quotas(self): + project = 
self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['os-quota-sets', project.project_id]), + json=dict(quota_set={'snapshots': 10, 'volumes': 20}))]) + self.op_cloud.get_volume_quotas(project.project_id) + self.assert_calls() + + def test_cinder_delete_quotas(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['os-quota-sets', project.project_id]))]) + self.op_cloud.delete_volume_quotas(project.project_id) + self.assert_calls() + + def test_neutron_update_quotas(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'quotas', + '%s.json' % project.project_id]), + json={}, + validate=dict( + json={'quota': {'network': 1}})) + ]) + self.op_cloud.set_network_quotas(project.project_id, network=1) + self.assert_calls() + + def test_neutron_get_quotas(self): + quota = { + 'subnet': 100, + 'network': 100, + 'floatingip': 50, + 'subnetpool': -1, + 'security_group_rule': 100, + 'security_group': 10, + 'router': 10, + 'rbac_policy': 10, + 'port': 500 + } + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'quotas', + '%s.json' % project.project_id]), + json={'quota': quota}) + ]) + received_quota = self.op_cloud.get_network_quotas(project.project_id) + self.assertDictEqual(quota, received_quota) + self.assert_calls() + + def test_neutron_get_quotas_details(self): + quota_details = { + 'subnet': { + 'limit': 100, + 'used': 7, + 'reserved': 0}, + 'network': { + 'limit': 100, + 'used': 6, + 'reserved': 0}, + 'floatingip': { + 'limit': 50, + 'used': 0, + 'reserved': 0}, + 'subnetpool': { + 'limit': -1, + 'used': 2, + 'reserved': 0}, + 'security_group_rule': { + 'limit': 100, + 'used': 4, + 'reserved': 0}, + 'security_group': { + 'limit': 10, + 'used': 1, + 'reserved': 0}, + 'router': { + 'limit': 10, + 'used': 2, + 'reserved': 0}, + 'rbac_policy': { + 'limit': 10, + 'used': 2, + 'reserved': 0}, + 'port': { + 'limit': 500, + 'used': 7, + 'reserved': 0} + } + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'quotas', + '%s/details.json' % project.project_id]), + json={'quota': quota_details}) + ]) + received_quota_details = self.op_cloud.get_network_quotas( + project.project_id, details=True) + self.assertDictEqual(quota_details, received_quota_details) + self.assert_calls() + + def test_neutron_delete_quotas(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'quotas', + '%s.json' % project.project_id]), + json={}) + ]) + self.op_cloud.delete_network_quotas(project.project_id) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_rebuild_server.py b/openstack/tests/unit/cloud/test_rebuild_server.py new file mode 100644 index 000000000..764639467 --- /dev/null +++ b/openstack/tests/unit/cloud/test_rebuild_server.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- + +# Licensed 
under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_rebuild_server +---------------------------------- + +Tests for the `rebuild_server` command. +""" + +import uuid + +from openstack.cloud import exc +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestRebuildServer(base.RequestsMockTestCase): + + def setUp(self): + super(TestRebuildServer, self).setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name) + self.rebuild_server = fakes.make_fake_server( + self.server_id, self.server_name, 'REBUILD') + self.error_server = fakes.make_fake_server( + self.server_id, self.server_name, 'ERROR') + + def test_rebuild_server_rebuild_exception(self): + """ + Test that an exception in the rebuild raises an exception in + rebuild_server. + """ + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.server_id, 'action']), + status_code=400, + validate=dict( + json={ + 'rebuild': { + 'imageRef': 'a', + 'adminPass': 'b'}})), + ]) + + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.rebuild_server, + self.fake_server['id'], "a", "b") + + self.assert_calls() + + def test_rebuild_server_server_error(self): + """ + Test that a server error while waiting for the server to rebuild + raises an exception in rebuild_server. + """ + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.server_id, 'action']), + json={'server': self.rebuild_server}, + validate=dict( + json={ + 'rebuild': { + 'imageRef': 'a'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.error_server]}), + ]) + self.assertRaises( + exc.OpenStackCloudException, + self.cloud.rebuild_server, self.fake_server['id'], "a", wait=True) + + self.assert_calls() + + def test_rebuild_server_timeout(self): + """ + Test that a timeout while waiting for the server to rebuild raises an + exception in rebuild_server. + """ + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.server_id, 'action']), + json={'server': self.rebuild_server}, + validate=dict( + json={ + 'rebuild': { + 'imageRef': 'a'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.rebuild_server]}), + ]) + self.assertRaises( + exc.OpenStackCloudTimeout, + self.cloud.rebuild_server, + self.fake_server['id'], "a", wait=True, timeout=0.001) + + self.assert_calls(do_count=False) + + def test_rebuild_server_no_wait(self): + """ + Test that rebuild_server with no wait and no exception in the + rebuild call returns the server instance. 
+ """ + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.server_id, 'action']), + json={'server': self.rebuild_server}, + validate=dict( + json={ + 'rebuild': { + 'imageRef': 'a'}})), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + self.assertEqual( + self.rebuild_server['status'], + self.cloud.rebuild_server(self.fake_server['id'], "a")['status']) + + self.assert_calls() + + def test_rebuild_server_with_admin_pass_no_wait(self): + """ + Test that a server with an admin_pass passed returns the password + """ + password = self.getUniqueString('password') + rebuild_server = self.rebuild_server.copy() + rebuild_server['adminPass'] = password + + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.server_id, 'action']), + json={'server': rebuild_server}, + validate=dict( + json={ + 'rebuild': { + 'imageRef': 'a', + 'adminPass': password}})), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + self.assertEqual( + password, + self.cloud.rebuild_server( + self.fake_server['id'], 'a', + admin_pass=password)['adminPass']) + + self.assert_calls() + + def test_rebuild_server_with_admin_pass_wait(self): + """ + Test that a server with an admin_pass passed returns the password + """ + password = self.getUniqueString('password') + rebuild_server = self.rebuild_server.copy() + rebuild_server['adminPass'] = password + + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.server_id, 'action']), + json={'server': rebuild_server}, + validate=dict( + json={ + 'rebuild': { + 'imageRef': 'a', + 'adminPass': password}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.rebuild_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + + self.assertEqual( + password, + self.cloud.rebuild_server( + self.fake_server['id'], 'a', + admin_pass=password, wait=True)['adminPass']) + + self.assert_calls() + + def test_rebuild_server_wait(self): + """ + Test that rebuild_server with a wait returns the server instance when + its status changes to "ACTIVE". 
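+
+        The mocked GETs return the server in REBUILD state first and then
+        in ACTIVE state, which is the transition the wait loop polls for.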
+ """ + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.server_id, 'action']), + json={'server': self.rebuild_server}, + validate=dict( + json={ + 'rebuild': { + 'imageRef': 'a'}})), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.rebuild_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + self.assertEqual( + 'ACTIVE', + self.cloud.rebuild_server( + self.fake_server['id'], 'a', wait=True)['status']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_recordset.py b/openstack/tests/unit/cloud/test_recordset.py new file mode 100644 index 000000000..26b27d47d --- /dev/null +++ b/openstack/tests/unit/cloud/test_recordset.py @@ -0,0 +1,202 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import copy +import testtools + +import openstack.cloud +from openstack.tests.unit import base + + +zone = { + 'id': '1', + 'name': 'example.net.', + 'type': 'PRIMARY', + 'email': 'test@example.net', + 'description': 'Example zone', + 'ttl': 3600, +} + +recordset = { + 'name': 'www.example.net.', + 'type': 'A', + 'description': 'Example zone', + 'ttl': 3600, + 'records': ['192.168.1.1'] +} +recordset_zone = '1' + +new_recordset = copy.copy(recordset) +new_recordset['id'] = '1' +new_recordset['zone'] = recordset_zone + + +class TestRecordset(base.RequestsMockTestCase): + + def setUp(self): + super(TestRecordset, self).setUp() + self.use_designate() + + def test_create_recordset(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [zone], + "links": {}, + "metadata": { + 'total_count': 1}}), + dict(method='POST', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', zone['id'], 'recordsets']), + json=new_recordset, + validate=dict(json=recordset)), + ]) + rs = self.cloud.create_recordset( + zone=recordset_zone, + name=recordset['name'], + recordset_type=recordset['type'], + records=recordset['records'], + description=recordset['description'], + ttl=recordset['ttl']) + self.assertEqual(new_recordset, rs) + self.assert_calls() + + def test_create_recordset_exception(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [zone], + "links": {}, + "metadata": { + 'total_count': 1}}), + dict(method='POST', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', zone['id'], 'recordsets']), + status_code=500, + validate=dict(json={ + 'name': 'www2.example.net.', + 'records': ['192.168.1.2'], + 'type': 'A'})), + ]) + with testtools.ExpectedException( + openstack.cloud.exc.OpenStackCloudHTTPError, + "Error creating recordset 
www2.example.net." + ): + self.cloud.create_recordset('1', 'www2.example.net.', + 'a', ['192.168.1.2']) + self.assert_calls() + + def test_update_recordset(self): + new_ttl = 7200 + expected_recordset = { + 'name': recordset['name'], + 'records': recordset['records'], + 'type': recordset['type'] + } + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [zone], + "links": {}, + "metadata": { + 'total_count': 1}}), + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', zone['id'], + 'recordsets', new_recordset['id']]), + json=new_recordset), + dict(method='PUT', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', zone['id'], + 'recordsets', new_recordset['id']]), + json=expected_recordset, + validate=dict(json={'ttl': new_ttl})) + ]) + updated_rs = self.cloud.update_recordset('1', '1', ttl=new_ttl) + self.assertEqual(expected_recordset, updated_rs) + self.assert_calls() + + def test_delete_recordset(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [zone], + "links": {}, + "metadata": { + 'total_count': 1}}), + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', zone['id'], + 'recordsets', new_recordset['id']]), + json=new_recordset), + dict(method='DELETE', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', zone['id'], + 'recordsets', new_recordset['id']]), + json={}) + ]) + self.assertTrue(self.cloud.delete_recordset('1', '1')) + self.assert_calls() + + def test_get_recordset_by_id(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', '1', 'recordsets', '1']), + json=new_recordset), + ]) + recordset = self.cloud.get_recordset('1', '1') + self.assertEqual(recordset['id'], '1') + self.assert_calls() + + def test_get_recordset_by_name(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', '1', 'recordsets', + new_recordset['name']]), + json=new_recordset), + ]) + recordset = self.cloud.get_recordset('1', new_recordset['name']) + self.assertEqual(new_recordset['name'], recordset['name']) + self.assert_calls() + + def test_get_recordset_not_found_returns_false(self): + recordset_name = "www.nonexistingrecord.net." + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', + append=['v2', 'zones', '1', 'recordsets', + recordset_name]), + json=[]) + ]) + recordset = self.cloud.get_recordset('1', recordset_name) + self.assertFalse(recordset) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_role_assignment.py b/openstack/tests/unit/cloud/test_role_assignment.py new file mode 100644 index 000000000..5ab23d2fc --- /dev/null +++ b/openstack/tests/unit/cloud/test_role_assignment.py @@ -0,0 +1,2901 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
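+
+# Rough sketch of the Keystone v3 traffic these tests register (layout
+# inferred from the mocks below, shown here only for orientation):
+#
+#   GET /v3/role_assignments?user.id=<uid>&scope.project.id=<pid>&role.id=<rid>
+#   PUT /v3/projects/<pid>/users/<uid>/roles/<rid>      (grant)
+#
+# grant_role resolves role/user/project names to IDs first, which is why
+# each scenario registers the corresponding list calls before the
+# assignment query itself.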
+
+from openstack.cloud import exc
+from openstack.tests.unit import base
+import testtools
+from testtools import matchers
+
+
+class TestRoleAssignment(base.RequestsMockTestCase):
+
+    def _build_role_assignment_response(self, role_id, scope_type, scope_id,
+                                        entity_type, entity_id):
+        self.assertThat(['group', 'user'], matchers.Contains(entity_type))
+        self.assertThat(['project', 'domain'], matchers.Contains(scope_type))
+        # NOTE(notmorgan): Links are thrown out by shade, but we construct them
+        # for correctness.
+        link_str = ('https://identity.example.com/identity/v3/{scope_t}s'
+                    '/{scopeid}/{entity_t}s/{entityid}/roles/{roleid}')
+        return [{
+            'links': {'assignment': link_str.format(
+                scope_t=scope_type, scopeid=scope_id, entity_t=entity_type,
+                entityid=entity_id, roleid=role_id)},
+            'role': {'id': role_id},
+            'scope': {scope_type: {'id': scope_id}},
+            entity_type: {'id': entity_id}
+        }]
+
+    def setUp(self, cloud_config_fixture='clouds.yaml'):
+        super(TestRoleAssignment, self).setUp(cloud_config_fixture)
+        self.role_data = self._get_role_data()
+        self.domain_data = self._get_domain_data()
+        self.user_data = self._get_user_data(
+            domain_id=self.domain_data.domain_id)
+        self.project_data = self._get_project_data(
+            domain_id=self.domain_data.domain_id)
+        self.project_data_v2 = self._get_project_data(
+            project_name=self.project_data.project_name,
+            project_id=self.project_data.project_id,
+            v3=False)
+        self.group_data = self._get_group_data(
+            domain_id=self.domain_data.domain_id)
+
+        self.user_project_assignment = self._build_role_assignment_response(
+            role_id=self.role_data.role_id, scope_type='project',
+            scope_id=self.project_data.project_id, entity_type='user',
+            entity_id=self.user_data.user_id)
+
+        self.group_project_assignment = self._build_role_assignment_response(
+            role_id=self.role_data.role_id, scope_type='project',
+            scope_id=self.project_data.project_id, entity_type='group',
+            entity_id=self.group_data.group_id)
+
+        self.user_domain_assignment = self._build_role_assignment_response(
+            role_id=self.role_data.role_id, scope_type='domain',
+            scope_id=self.domain_data.domain_id, entity_type='user',
+            entity_id=self.user_data.user_id)
+
+        self.group_domain_assignment = self._build_role_assignment_response(
+            role_id=self.role_data.role_id, scope_type='domain',
+            scope_id=self.domain_data.domain_id, entity_type='group',
+            entity_id=self.group_data.group_id)
+
+        # Cleanup of instances to ensure garbage collection/no leaking memory
+        # in tests.
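+        # (addCleanup callbacks run LIFO after the test finishes; delattr
+        # drops the fixture attributes outright instead of leaving rebound
+        # references behind.)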
+ self.addCleanup(delattr, self, 'role_data') + self.addCleanup(delattr, self, 'user_data') + self.addCleanup(delattr, self, 'domain_data') + self.addCleanup(delattr, self, 'group_data') + self.addCleanup(delattr, self, 'project_data') + self.addCleanup(delattr, self, 'project_data_v2') + self.addCleanup(delattr, self, 'user_project_assignment') + self.addCleanup(delattr, self, 'group_project_assignment') + self.addCleanup(delattr, self, 'user_domain_assignment') + self.addCleanup(delattr, self, 'group_domain_assignment') + + def get_mock_url(self, service_type='identity', interface='admin', + resource='role_assignments', append=None, + base_url_append='v3', qs_elements=None): + return super(TestRoleAssignment, self).get_mock_url( + service_type, interface, resource, append, base_url_append, + qs_elements) + + def test_grant_role_user_v2(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + status_code=201, + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id]), + status_code=201) + ]) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_grant_role_user_project_v2(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, 
+ json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id]), + status_code=201), + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id]), + status_code=201), + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id]), + status_code=201, + ), + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', 
self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id]), + status_code=201) + ]) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data_v2.project_id)) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data_v2.project_id)) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_id, + user=self.user_data.name, + project=self.project_data_v2.project_id)) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_id, + user=self.user_data.user_id, + project=self.project_data_v2.project_id)) + self.assert_calls() + + def test_grant_role_user_project_v2_exists(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + ]) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data_v2.project_id)) + self.assert_calls() + + def test_grant_role_user_project(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[self.project_data.project_id, 'users', + self.user_data.user_id, 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + 
uri=self.get_mock_url( + resource='projects', + append=[self.project_data.project_id, 'users', + self.user_data.user_id, 'roles', + self.role_data.role_id]), + status_code=204), + ]) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_grant_role_user_project_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + ]) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_id, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_grant_role_group_project(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, 
+ json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[self.project_data.project_id, 'groups', + self.group_data.group_id, 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[self.project_data.project_id, 'groups', + self.group_data.group_id, 'roles', + self.role_data.role_id]), + status_code=204), + ]) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + project=self.project_data.project_id)) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_grant_role_group_project_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + ]) + self.assertFalse(self.op_cloud.grant_role( + 
self.role_data.role_name, + group=self.group_data.group_name, + project=self.project_data.project_id)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_grant_role_user_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + 
uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id]), + status_code=204), + ]) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id)) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_id)) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_name)) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_name)) + self.assert_calls() + + def test_grant_role_user_domain_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + 
status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'user.id=%s' % self.user_data.user_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + ]) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_id)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_name)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_name)) + self.assert_calls() + + def test_grant_role_group_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': 
[self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id]), + status_code=204), + ]) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_id)) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_id)) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name)) + self.assertTrue(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_name)) + self.assert_calls() + + def test_grant_role_group_domain_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + 
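+                 # The existing domain-scoped grant below makes each call return False.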
json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'role.id=%s' % self.role_data.role_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'group.id=%s' % self.group_data.group_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + ]) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + 
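+            # Every name/id combination below hits the same pre-existing grant.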
domain=self.domain_data.domain_id)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_id)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name)) + self.assertFalse(self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_name)) + self.assert_calls() + + def test_revoke_role_user_v2(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url( + base_url_append=None, + resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, 'users', + self.user_data.user_id, 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='DELETE', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles', 'OS-KSADM', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url( + base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url( + base_url_append=None, + resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, 'users', + self.user_data.user_id, 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='DELETE', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles', 'OS-KSADM', + self.role_data.role_id]), + status_code=204), + ]) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_revoke_role_user_project_v2(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + 
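+                     # Keystone v2 resolves the project from the tenant list.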
self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}) + ]) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_id, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_id, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_revoke_role_user_project_v2_exists(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + 
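+            # The tenant's role list below contains the role, so a DELETE follows.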
dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='DELETE', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles', 'OS-KSADM', + self.role_data.role_id]), + status_code=204), + ]) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assert_calls() + + def test_revoke_role_user_project(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + ]) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_revoke_role_user_project_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + 
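+                     # A matching assignment is returned, so revoke_role proceeds to the DELETE.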
self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='projects', + append=[self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='projects', + append=[self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id])), + ]) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_id, + user=self.user_data.user_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_revoke_role_group_project(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + ]) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + 
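+            # No assignment matched, so there is nothing to revoke.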
project=self.project_data.project_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_revoke_role_group_project_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='projects', + append=[self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='projects'), + status_code=200, + json={'projects': [ + self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='projects', + append=[self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id])), + ]) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + project=self.project_data.project_id)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + project=self.project_data.project_id)) + self.assert_calls() + + def test_revoke_role_user_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 
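+                     # The assignment query is scoped to a domain rather than a project.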
'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + ]) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_name)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_name)) + self.assert_calls() + + def test_revoke_role_user_domain_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + 
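+            # The query below finds an existing assignment, so each revoke issues a DELETE.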
dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.domain.id=%s' % 
self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id])), + ]) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_name)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_name)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_id)) + self.assert_calls() + + def test_revoke_role_group_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + 
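+                 # Fourth and final attempt (group id plus domain id); no round finds an assignment.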
json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': []}), + ]) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_name)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_id)) + self.assertFalse(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_id)) + self.assert_calls() + + def test_revoke_role_group_domain_exists(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + 
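+                 # DELETE /domains/{id}/groups/{id}/roles/{id} drops the grant.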
append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={'role_assignments': + self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id])), + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'group.id=%s' % self.group_data.group_id, + 'scope.domain.id=%s' % self.domain_data.domain_id, + 'role.id=%s' % self.role_data.role_id]), + status_code=200, + complete_qs=True, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id])), + ]) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_name)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_id)) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_id)) + self.assert_calls() + + def test_grant_no_role(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': []}) + ]) + + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Role {0} not found'.format(self.role_data.role_name) + ): + self.op_cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name) + self.assert_calls() + + def test_revoke_no_role(self): + self.register_uris([ + dict(method='GET', + 
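+                 # The empty role list triggers the "Role ... not found" exception below.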
uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': []}) + ]) + + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Role {0} not found'.format(self.role_data.role_name) + ): + self.op_cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name) + self.assert_calls() + + def test_grant_no_user_or_group_specified(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}) + ]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Must specify either a user or a group' + ): + self.op_cloud.grant_role(self.role_data.role_name) + self.assert_calls() + + def test_revoke_no_user_or_group_specified(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}) + ]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Must specify either a user or a group' + ): + self.op_cloud.revoke_role(self.role_data.role_name) + self.assert_calls() + + def test_grant_no_user_or_group(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': []}) + ]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Must specify either a user or a group' + ): + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name) + self.assert_calls() + + def test_revoke_no_user_or_group(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': []}) + ]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Must specify either a user or a group' + ): + self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name) + self.assert_calls() + + def test_grant_both_user_and_group(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + ]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Specify either a group or a user, not both' + ): + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + group=self.group_data.group_name) + self.assert_calls() + + def test_revoke_both_user_and_group(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(resource='groups'), + status_code=200, + json={'groups': [self.group_data.json_response['group']]}), + ]) + with 
testtools.ExpectedException( + exc.OpenStackCloudException, + 'Specify either a group or a user, not both' + ): + self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + group=self.group_data.group_name) + self.assert_calls() + + def test_grant_both_project_and_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource=('projects?domain_id=%s' % + self.domain_data.domain_id)), + status_code=200, + json={'projects': + [self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}), + dict(method='PUT', + uri=self.get_mock_url(resource='projects', + append=[self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id]), + status_code=204) + ]) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + domain=self.domain_data.domain_name)) + self.assert_calls() + + def test_revoke_both_project_and_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=[self.domain_data.domain_name]), + status_code=200, + json=self.domain_data.json_response), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource=('projects?domain_id=%s' % + self.domain_data.domain_id)), + status_code=200, + json={'projects': + [self.project_data.json_response['project']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + 'user.id=%s' % self.user_data.user_id, + 'scope.project.id=%s' % self.project_data.project_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='user', + entity_id=self.user_data.user_id)}), + dict(method='DELETE', + uri=self.get_mock_url(resource='projects', + append=[self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id]), + status_code=204) + ]) + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + domain=self.domain_data.domain_name)) + self.assert_calls() + + def test_grant_no_project_or_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), 
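+            # Role and user resolve, but no project or domain scope is given, so this raises.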
+ dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=['user.id=%s' % self.user_data.user_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={'role_assignments': []}) + ]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Must specify either a domain or project' + ): + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name) + self.assert_calls() + + def test_revoke_no_project_or_domain(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=['user.id=%s' % self.user_data.user_id, + 'role.id=%s' % self.role_data.role_id]), + complete_qs=True, + status_code=200, + json={ + 'role_assignments': self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='user', + entity_id=self.user_data.user_id)}) + ]) + with testtools.ExpectedException( + exc.OpenStackCloudException, + 'Must specify either a domain or project' + ): + self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name) + self.assert_calls() + + def test_grant_bad_domain_exception(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=['baddomain']), + status_code=404, + text='Could not find domain: baddomain') + ]) + with testtools.ExpectedException( + exc.OpenStackCloudURINotFound, + 'Failed to get domain baddomain' + ): + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain='baddomain') + self.assert_calls() + + def test_revoke_bad_domain_exception(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(resource='domains', + append=['baddomain']), + status_code=404, + text='Could not find domain: baddomain') + ]) + with testtools.ExpectedException( + exc.OpenStackCloudURINotFound, + 'Failed to get domain baddomain' + ): + self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain='baddomain') + self.assert_calls() + + def test_grant_role_user_project_v2_wait(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + 
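+                 # Pre-grant check: the user has no roles on the tenant yet; wait=True polls this URL.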
resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id]), + status_code=201), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + ]) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + wait=True)) + self.assert_calls() + + def test_grant_role_user_project_v2_wait_exception(self): + self.use_keystone_v2() + + with testtools.ExpectedException( + exc.OpenStackCloudTimeout, + 'Timeout waiting for role to be granted' + ): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[ + self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + dict(method='PUT', + uri=self.get_mock_url( + base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', self.user_data.user_id, 'roles', + 'OS-KSADM', self.role_data.role_id]), + status_code=201), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[ + self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + ]) + self.assertTrue( + self.op_cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + wait=True, timeout=0.01)) + self.assert_calls(do_count=False) + + def test_revoke_role_user_project_v2_wait(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='DELETE', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles', 
'OS-KSADM', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': []}), + ]) + self.assertTrue( + self.op_cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + wait=True)) + self.assert_calls(do_count=False) + + def test_revoke_role_user_project_v2_wait_exception(self): + self.use_keystone_v2() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(base_url_append='OS-KSADM', + resource='roles'), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, resource='users'), + status_code=200, + json={'users': [self.user_data.json_response['user']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants'), + status_code=200, + json={ + 'tenants': [ + self.project_data_v2.json_response['tenant']]}), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + dict(method='DELETE', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles', 'OS-KSADM', + self.role_data.role_id]), + status_code=204), + dict(method='GET', + uri=self.get_mock_url(base_url_append=None, + resource='tenants', + append=[self.project_data_v2.project_id, + 'users', + self.user_data.user_id, + 'roles']), + status_code=200, + json={'roles': [self.role_data.json_response['role']]}), + ]) + with testtools.ExpectedException( + exc.OpenStackCloudTimeout, + 'Timeout waiting for role to be revoked' + ): + self.assertTrue(self.op_cloud.revoke_role( + self.role_data.role_name, user=self.user_data.name, + project=self.project_data.project_id, wait=True, timeout=0.01)) + self.assert_calls(do_count=False) diff --git a/openstack/tests/unit/cloud/test_router.py b/openstack/tests/unit/cloud/test_router.py new file mode 100644 index 000000000..71af86824 --- /dev/null +++ b/openstack/tests/unit/cloud/test_router.py @@ -0,0 +1,341 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
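+
+# NOTE: All tests in this module follow the RequestsMockTestCase pattern
+# used across this patch: every HTTP call the cloud layer should make is
+# declared up front with register_uris(), and assert_calls() then verifies
+# that exactly those requests were issued, in order. As a rough sketch
+# (the router name below is illustrative, not part of the fixture data):
+#
+#     self.register_uris([
+#         dict(method='GET',
+#              uri=self.get_mock_url(
+#                  'network', 'public', append=['v2.0', 'routers.json']),
+#              json={'routers': []}),
+#     ])
+#     self.assertIsNone(self.cloud.get_router('no-such-router'))
+#     self.assert_calls()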
+ +import copy + +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestRouter(base.RequestsMockTestCase): + + router_name = 'goofy' + router_id = '57076620-dcfb-42ed-8ad6-79ccb4a79ed2' + subnet_id = '1f1696eb-7f47-47f6-835c-4889bff88604' + + mock_router_rep = { + 'admin_state_up': True, + 'availability_zone_hints': [], + 'availability_zones': [], + 'description': u'', + 'distributed': False, + 'external_gateway_info': None, + 'flavor_id': None, + 'ha': False, + 'id': router_id, + 'name': router_name, + 'project_id': u'861808a93da0484ea1767967c4df8a23', + 'routes': [], + 'status': u'ACTIVE', + 'tenant_id': u'861808a93da0484ea1767967c4df8a23' + } + + mock_router_interface_rep = { + 'network_id': '53aee281-b06d-47fc-9e1a-37f045182b8e', + 'subnet_id': '1f1696eb-7f47-47f6-835c-4889bff88604', + 'tenant_id': '861808a93da0484ea1767967c4df8a23', + 'subnet_ids': [subnet_id], + 'port_id': '23999891-78b3-4a6b-818d-d1b713f67848', + 'id': '57076620-dcfb-42ed-8ad6-79ccb4a79ed2', + 'request_ids': ['req-f1b0b1b4-ae51-4ef9-b371-0cc3c3402cf7'] + } + + def test_get_router(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'routers': [self.mock_router_rep]}) + ]) + r = self.cloud.get_router(self.router_name) + self.assertIsNotNone(r) + self.assertDictEqual(self.mock_router_rep, r) + self.assert_calls() + + def test_get_router_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'routers': []}) + ]) + r = self.cloud.get_router('mickey') + self.assertIsNone(r) + self.assert_calls() + + def test_create_router(self): + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'router': self.mock_router_rep}, + validate=dict( + json={'router': { + 'name': self.router_name, + 'admin_state_up': True}})) + ]) + new_router = self.cloud.create_router(name=self.router_name, + admin_state_up=True) + self.assertDictEqual(self.mock_router_rep, new_router) + self.assert_calls() + + def test_create_router_specific_tenant(self): + new_router_tenant_id = "project_id_value" + mock_router_rep = copy.copy(self.mock_router_rep) + mock_router_rep['tenant_id'] = new_router_tenant_id + mock_router_rep['project_id'] = new_router_tenant_id + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'router': mock_router_rep}, + validate=dict( + json={'router': { + 'name': self.router_name, + 'admin_state_up': True, + 'tenant_id': new_router_tenant_id}})) + ]) + + self.cloud.create_router(self.router_name, + project_id=new_router_tenant_id) + self.assert_calls() + + def test_create_router_with_enable_snat_True(self): + """Do not send enable_snat when same as neutron default.""" + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'router': self.mock_router_rep}, + validate=dict( + json={'router': { + 'name': self.router_name, + 'admin_state_up': True}})) + ]) + self.cloud.create_router( + name=self.router_name, admin_state_up=True, enable_snat=True) + self.assert_calls() + + def test_create_router_with_enable_snat_False(self): + """Send enable_snat when it is False.""" + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), 
+ json={'router': self.mock_router_rep}, + validate=dict( + json={'router': { + 'name': self.router_name, + 'external_gateway_info': {'enable_snat': False}, + 'admin_state_up': True}})) + ]) + self.cloud.create_router( + name=self.router_name, admin_state_up=True, enable_snat=False) + self.assert_calls() + + def test_add_router_interface(self): + self.register_uris([ + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'routers', self.router_id, + 'add_router_interface.json']), + json={'port': self.mock_router_interface_rep}, + validate=dict( + json={'subnet_id': self.subnet_id})) + ]) + self.cloud.add_router_interface( + {'id': self.router_id}, subnet_id=self.subnet_id) + self.assert_calls() + + def test_remove_router_interface(self): + self.register_uris([ + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'routers', self.router_id, + 'remove_router_interface.json']), + json={'port': self.mock_router_interface_rep}, + validate=dict( + json={'subnet_id': self.subnet_id})) + ]) + self.cloud.remove_router_interface( + {'id': self.router_id}, subnet_id=self.subnet_id) + self.assert_calls() + + def test_remove_router_interface_missing_argument(self): + self.assertRaises(ValueError, self.cloud.remove_router_interface, + {'id': '123'}) + + def test_update_router(self): + new_router_name = "mickey" + expected_router_rep = copy.copy(self.mock_router_rep) + expected_router_rep['name'] = new_router_name + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'routers': [self.mock_router_rep]}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'routers', '%s.json' % self.router_id]), + json={'router': expected_router_rep}, + validate=dict( + json={'router': { + 'name': new_router_name}})) + ]) + new_router = self.cloud.update_router( + self.router_id, name=new_router_name) + self.assertDictEqual(expected_router_rep, new_router) + self.assert_calls() + + def test_delete_router(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'routers': [self.mock_router_rep]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'routers', '%s.json' % self.router_id]), + json={}) + ]) + self.assertTrue(self.cloud.delete_router(self.router_name)) + self.assert_calls() + + def test_delete_router_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'routers': []}), + ]) + self.assertFalse(self.cloud.delete_router(self.router_name)) + self.assert_calls() + + def test_delete_router_multiple_found(self): + router1 = dict(id='123', name='mickey') + router2 = dict(id='456', name='mickey') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'routers': [router1, router2]}), + ]) + self.assertRaises(exc.OpenStackCloudException, + self.cloud.delete_router, + 'mickey') + self.assert_calls() + + def test_delete_router_multiple_using_id(self): + router1 = dict(id='123', name='mickey') + router2 = dict(id='456', name='mickey') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers.json']), + json={'routers': [router1, router2]}), + dict(method='DELETE', + uri=self.get_mock_url( + 
'network', 'public', + append=['v2.0', 'routers', '123.json']), + json={}) + ]) + self.assertTrue(self.cloud.delete_router("123")) + self.assert_calls() + + def _test_list_router_interfaces(self, router, interface_type, + expected_result=None): + internal_port = { + 'id': 'internal_port_id', + 'fixed_ips': [{ + 'subnet_id': 'internal_subnet_id', + 'ip_address': "10.0.0.1" + }], + 'device_id': self.router_id, + 'device_owner': 'network:router_interface' + } + external_port = { + 'id': 'external_port_id', + 'fixed_ips': [{ + 'subnet_id': 'external_subnet_id', + 'ip_address': "1.2.3.4" + }], + 'device_id': self.router_id, + 'device_owner': 'network:router_gateway' + } + if expected_result is None: + if interface_type == "internal": + expected_result = [internal_port] + elif interface_type == "external": + expected_result = [external_port] + else: + expected_result = [internal_port, external_port] + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json'], + qs_elements=["device_id=%s" % self.router_id, + "device_owner=network:router_interface"]), + json={'ports': [internal_port]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports.json'], + qs_elements=["device_id=%s" % self.router_id, + "device_owner=network:router_gateway"]), + json={'ports': [external_port]}) + ]) + ret = self.cloud.list_router_interfaces(router, interface_type) + self.assertEqual(expected_result, ret) + self.assert_calls() + + def test_list_router_interfaces_all(self): + router = { + 'id': self.router_id, + 'external_gateway_info': { + 'external_fixed_ips': [{ + 'subnet_id': 'external_subnet_id', + 'ip_address': '1.2.3.4'}] + } + } + self._test_list_router_interfaces(router, + interface_type=None) + + def test_list_router_interfaces_internal(self): + router = { + 'id': self.router_id, + 'external_gateway_info': { + 'external_fixed_ips': [{ + 'subnet_id': 'external_subnet_id', + 'ip_address': '1.2.3.4'}] + } + } + self._test_list_router_interfaces(router, + interface_type="internal") + + def test_list_router_interfaces_external(self): + router = { + 'id': self.router_id, + 'external_gateway_info': { + 'external_fixed_ips': [{ + 'subnet_id': 'external_subnet_id', + 'ip_address': '1.2.3.4'}] + } + } + self._test_list_router_interfaces(router, + interface_type="external") diff --git a/openstack/tests/unit/cloud/test_security_groups.py b/openstack/tests/unit/cloud/test_security_groups.py new file mode 100644 index 000000000..e49a3424a --- /dev/null +++ b/openstack/tests/unit/cloud/test_security_groups.py @@ -0,0 +1,784 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
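+
+# NOTE: These tests exercise both security group backends. Each test picks
+# a backend by setting cloud.secgroup_source to 'neutron' or 'nova' (and,
+# for the nova cases, faking has_service() so neutron appears absent); a
+# source of None means the feature is unavailable. A minimal sketch, with
+# illustrative values:
+#
+#     self.cloud.secgroup_source = 'nova'  # or 'neutron', or None
+#     self.has_neutron = False             # consumed by fake_has_service
+#     # With secgroup_source=None the calls raise
+#     # OpenStackCloudUnavailableFeature instead of hitting any endpoint.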
+ + +import copy + +import openstack.cloud +from openstack.tests.unit import base +from openstack.tests import fakes + +# TODO(mordred): Move id and name to using a getUniqueString() value + +neutron_grp_dict = fakes.make_fake_neutron_security_group( + id='1', + name='neutron-sec-group', + description='Test Neutron security group', + rules=[ + dict(id='1', port_range_min=80, port_range_max=81, + protocol='tcp', remote_ip_prefix='0.0.0.0/0') + ] +) + + +nova_grp_dict = fakes.make_fake_nova_security_group( + id='2', + name='nova-sec-group', + description='Test Nova security group #1', + rules=[ + fakes.make_fake_nova_security_group_rule( + id='2', from_port=8000, to_port=8001, ip_protocol='tcp', + cidr='0.0.0.0/0'), + ] +) + + +class TestSecurityGroups(base.RequestsMockTestCase): + + def setUp(self): + super(TestSecurityGroups, self).setUp() + self.has_neutron = True + + def fake_has_service(*args, **kwargs): + return self.has_neutron + self.cloud.has_service = fake_has_service + + def test_list_security_groups_neutron(self): + project_id = 42 + self.cloud.secgroup_source = 'neutron' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json'], + qs_elements=["project_id=%s" % project_id]), + json={'security_groups': [neutron_grp_dict]}) + ]) + self.cloud.list_security_groups(filters={'project_id': project_id}) + self.assert_calls() + + def test_list_security_groups_nova(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups?project_id=42'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': []}), + ]) + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + self.cloud.list_security_groups(filters={'project_id': 42}) + + self.assert_calls() + + def test_list_security_groups_none(self): + + self.cloud.secgroup_source = None + self.has_neutron = False + self.assertRaises(openstack.OpenStackCloudUnavailableFeature, + self.cloud.list_security_groups) + + def test_delete_security_group_neutron(self): + sg_id = neutron_grp_dict['id'] + self.cloud.secgroup_source = 'neutron' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups', '%s.json' % sg_id]), + json={}) + ]) + self.assertTrue(self.cloud.delete_security_group('1')) + self.assert_calls() + + def test_delete_security_group_nova(self): + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + nova_return = [nova_grp_dict] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': nova_return}), + dict(method='DELETE', + uri='{endpoint}/os-security-groups/2'.format( + endpoint=fakes.COMPUTE_ENDPOINT)), + ]) + self.cloud.delete_security_group('2') + self.assert_calls() + + def test_delete_security_group_neutron_not_found(self): + self.cloud.secgroup_source = 'neutron' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}) + ]) + self.assertFalse(self.cloud.delete_security_group('10')) + self.assert_calls() + + def test_delete_security_group_nova_not_found(self): + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + nova_return = 
[nova_grp_dict] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': nova_return}), + ]) + self.assertFalse(self.cloud.delete_security_group('doesNotExist')) + + def test_delete_security_group_none(self): + self.cloud.secgroup_source = None + self.assertRaises(openstack.OpenStackCloudUnavailableFeature, + self.cloud.delete_security_group, + 'doesNotExist') + + def test_create_security_group_neutron(self): + self.cloud.secgroup_source = 'neutron' + group_name = self.getUniqueString() + group_desc = self.getUniqueString('description') + new_group = fakes.make_fake_neutron_security_group( + id='2', + name=group_name, + description=group_desc, + rules=[]) + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_group': new_group}, + validate=dict( + json={'security_group': { + 'name': group_name, + 'description': group_desc + }})) + ]) + + r = self.cloud.create_security_group(group_name, group_desc) + self.assertEqual(group_name, r['name']) + self.assertEqual(group_desc, r['description']) + + self.assert_calls() + + def test_create_security_group_neutron_specific_tenant(self): + self.cloud.secgroup_source = 'neutron' + project_id = "861808a93da0484ea1767967c4df8a23" + group_name = self.getUniqueString() + group_desc = 'security group from' \ + ' test_create_security_group_neutron_specific_tenant' + new_group = fakes.make_fake_neutron_security_group( + id='2', + name=group_name, + description=group_desc, + project_id=project_id, + rules=[]) + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_group': new_group}, + validate=dict( + json={'security_group': { + 'name': group_name, + 'description': group_desc, + 'tenant_id': project_id + }})) + ]) + + r = self.cloud.create_security_group( + group_name, + group_desc, + project_id + ) + self.assertEqual(group_name, r['name']) + self.assertEqual(group_desc, r['description']) + self.assertEqual(project_id, r['tenant_id']) + + self.assert_calls() + + def test_create_security_group_nova(self): + group_name = self.getUniqueString() + self.has_neutron = False + group_desc = self.getUniqueString('description') + new_group = fakes.make_fake_nova_security_group( + id='2', + name=group_name, + description=group_desc, + rules=[]) + self.register_uris([ + dict(method='POST', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_group': new_group}, + validate=dict(json={ + 'security_group': { + 'name': group_name, + 'description': group_desc + }})), + ]) + + self.cloud.secgroup_source = 'nova' + r = self.cloud.create_security_group(group_name, group_desc) + self.assertEqual(group_name, r['name']) + self.assertEqual(group_desc, r['description']) + + self.assert_calls() + + def test_create_security_group_none(self): + self.cloud.secgroup_source = None + self.has_neutron = False + self.assertRaises(openstack.OpenStackCloudUnavailableFeature, + self.cloud.create_security_group, + '', '') + + def test_update_security_group_neutron(self): + self.cloud.secgroup_source = 'neutron' + new_name = self.getUniqueString() + sg_id = neutron_grp_dict['id'] + update_return = neutron_grp_dict.copy() + update_return['name'] = new_name + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 
'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups', '%s.json' % sg_id]), + json={'security_group': update_return}, + validate=dict(json={ + 'security_group': {'name': new_name}})) + ]) + r = self.cloud.update_security_group(sg_id, name=new_name) + self.assertEqual(r['name'], new_name) + self.assert_calls() + + def test_update_security_group_nova(self): + self.has_neutron = False + new_name = self.getUniqueString() + self.cloud.secgroup_source = 'nova' + nova_return = [nova_grp_dict] + update_return = nova_grp_dict.copy() + update_return['name'] = new_name + + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': nova_return}), + dict(method='PUT', + uri='{endpoint}/os-security-groups/2'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_group': update_return}), + ]) + + r = self.cloud.update_security_group( + nova_grp_dict['id'], name=new_name) + self.assertEqual(r['name'], new_name) + self.assert_calls() + + def test_update_security_group_bad_kwarg(self): + self.assertRaises(TypeError, + self.cloud.update_security_group, + 'doesNotExist', bad_arg='') + + def test_create_security_group_rule_neutron(self): + self.cloud.secgroup_source = 'neutron' + args = dict( + port_range_min=-1, + port_range_max=40000, + protocol='tcp', + remote_ip_prefix='0.0.0.0/0', + remote_group_id='456', + direction='egress', + ethertype='IPv6' + ) + expected_args = copy.copy(args) + # For neutron, -1 port should be converted to None + expected_args['port_range_min'] = None + expected_args['security_group_id'] = neutron_grp_dict['id'] + + expected_new_rule = copy.copy(expected_args) + expected_new_rule['id'] = '1234' + expected_new_rule['tenant_id'] = '' + expected_new_rule['project_id'] = expected_new_rule['tenant_id'] + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-group-rules.json']), + json={'security_group_rule': expected_new_rule}, + validate=dict(json={ + 'security_group_rule': expected_args})) + ]) + new_rule = self.cloud.create_security_group_rule( + secgroup_name_or_id=neutron_grp_dict['id'], **args) + # NOTE(slaweq): don't check location and properties in new rule + new_rule.pop("location") + new_rule.pop("properties") + self.assertEqual(expected_new_rule, new_rule) + self.assert_calls() + + def test_create_security_group_rule_neutron_specific_tenant(self): + self.cloud.secgroup_source = 'neutron' + args = dict( + port_range_min=-1, + port_range_max=40000, + protocol='tcp', + remote_ip_prefix='0.0.0.0/0', + remote_group_id='456', + direction='egress', + ethertype='IPv6', + project_id='861808a93da0484ea1767967c4df8a23' + ) + expected_args = copy.copy(args) + # For neutron, -1 port should be converted to None + expected_args['port_range_min'] = None + expected_args['security_group_id'] = neutron_grp_dict['id'] + expected_args['tenant_id'] = expected_args['project_id'] + expected_args.pop('project_id') + + expected_new_rule = copy.copy(expected_args) + expected_new_rule['id'] = '1234' + expected_new_rule['project_id'] = expected_new_rule['tenant_id'] + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', 
+ append=['v2.0', 'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-group-rules.json']), + json={'security_group_rule': expected_new_rule}, + validate=dict(json={ + 'security_group_rule': expected_args})) + ]) + new_rule = self.cloud.create_security_group_rule( + secgroup_name_or_id=neutron_grp_dict['id'], ** args) + # NOTE(slaweq): don't check location and properties in new rule + new_rule.pop("location") + new_rule.pop("properties") + self.assertEqual(expected_new_rule, new_rule) + self.assert_calls() + + def test_create_security_group_rule_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + nova_return = [nova_grp_dict] + + new_rule = fakes.make_fake_nova_security_group_rule( + id='xyz', from_port=1, to_port=2000, ip_protocol='tcp', + cidr='1.2.3.4/32') + + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': nova_return}), + dict(method='POST', + uri='{endpoint}/os-security-group-rules'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_group_rule': new_rule}, + validate=dict(json={ + "security_group_rule": { + "from_port": 1, + "ip_protocol": "tcp", + "to_port": 2000, + "parent_group_id": "2", + "cidr": "1.2.3.4/32", + "group_id": "123"}})), + ]) + + self.cloud.create_security_group_rule( + '2', port_range_min=1, port_range_max=2000, protocol='tcp', + remote_ip_prefix='1.2.3.4/32', remote_group_id='123') + + self.assert_calls() + + def test_create_security_group_rule_nova_no_ports(self): + + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + new_rule = fakes.make_fake_nova_security_group_rule( + id='xyz', from_port=1, to_port=65535, ip_protocol='tcp', + cidr='1.2.3.4/32') + + nova_return = [nova_grp_dict] + + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': nova_return}), + dict(method='POST', + uri='{endpoint}/os-security-group-rules'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_group_rule': new_rule}, + validate=dict(json={ + "security_group_rule": { + "from_port": 1, + "ip_protocol": "tcp", + "to_port": 65535, + "parent_group_id": "2", + "cidr": "1.2.3.4/32", + "group_id": "123"}})), + ]) + + self.cloud.create_security_group_rule( + '2', protocol='tcp', + remote_ip_prefix='1.2.3.4/32', remote_group_id='123') + + self.assert_calls() + + def test_create_security_group_rule_none(self): + self.has_neutron = False + self.cloud.secgroup_source = None + self.assertRaises(openstack.OpenStackCloudUnavailableFeature, + self.cloud.create_security_group_rule, + '') + + def test_delete_security_group_rule_neutron(self): + rule_id = "xyz" + self.cloud.secgroup_source = 'neutron' + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-group-rules', + '%s.json' % rule_id]), + json={}) + ]) + self.assertTrue(self.cloud.delete_security_group_rule(rule_id)) + self.assert_calls() + + def test_delete_security_group_rule_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris([ + dict(method='DELETE', + uri='{endpoint}/os-security-group-rules/xyz'.format( + endpoint=fakes.COMPUTE_ENDPOINT)), + ]) + r = self.cloud.delete_security_group_rule('xyz') + self.assertTrue(r) + self.assert_calls() + + def 
test_delete_security_group_rule_none(self): + self.has_neutron = False + self.cloud.secgroup_source = None + self.assertRaises(openstack.OpenStackCloudUnavailableFeature, + self.cloud.delete_security_group_rule, + '') + + def test_delete_security_group_rule_not_found(self): + rule_id = "doesNotExist" + self.cloud.secgroup_source = 'neutron' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}) + ]) + self.assertFalse(self.cloud.delete_security_group(rule_id)) + self.assert_calls() + + def test_delete_security_group_rule_not_found_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': [nova_grp_dict]}), + ]) + r = self.cloud.delete_security_group('doesNotExist') + self.assertFalse(r) + + self.assert_calls() + + def test_nova_egress_security_group_rule(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': [nova_grp_dict]}), + ]) + self.assertRaises(openstack.OpenStackCloudException, + self.cloud.create_security_group_rule, + secgroup_name_or_id='nova-sec-group', + direction='egress') + + self.assert_calls() + + def test_list_server_security_groups_nova(self): + self.has_neutron = False + + server = dict(id='server_id') + + self.register_uris([ + dict( + method='GET', + uri='{endpoint}/servers/{id}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + id='server_id'), + json={'security_groups': [nova_grp_dict]}), + ]) + groups = self.cloud.list_server_security_groups(server) + self.assertIn('location', groups[0]) + self.assertEqual( + groups[0]['security_group_rules'][0]['remote_ip_prefix'], + nova_grp_dict['rules'][0]['ip_range']['cidr']) + + self.assert_calls() + + def test_list_server_security_groups_bad_source(self): + self.has_neutron = False + self.cloud.secgroup_source = 'invalid' + server = dict(id='server_id') + ret = self.cloud.list_server_security_groups(server) + self.assertEqual([], ret) + + def test_add_security_group_to_server_nova(self): + + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + self.register_uris([ + dict( + method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + id='server_id'), + json={'security_groups': [nova_grp_dict]}), + dict( + method='POST', + uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), + validate=dict( + json={'addSecurityGroup': {'name': 'nova-sec-group'}}), + status_code=202, + ), + ]) + + ret = self.cloud.add_server_security_groups( + dict(id='1234'), 'nova-sec-group') + + self.assertTrue(ret) + + self.assert_calls() + + def test_add_security_group_to_server_neutron(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use neutron for secgroup list and return an existing fake + self.cloud.secgroup_source = 'neutron' + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', 'detail']), + json={'servers': [fake_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_groups': 
[neutron_grp_dict]}), + dict(method='POST', + uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), + validate=dict( + json={'addSecurityGroup': {'name': 'neutron-sec-group'}}), + status_code=202), + ]) + + self.assertTrue(self.cloud.add_server_security_groups( + 'server-name', 'neutron-sec-group')) + self.assert_calls() + + def test_remove_security_group_from_server_nova(self): + + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + self.register_uris([ + dict( + method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': [nova_grp_dict]}), + dict( + method='POST', + uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), + validate=dict( + json={'removeSecurityGroup': {'name': 'nova-sec-group'}}), + ), + ]) + + ret = self.cloud.remove_server_security_groups( + dict(id='1234'), 'nova-sec-group') + self.assertTrue(ret) + + self.assert_calls() + + def test_remove_security_group_from_server_neutron(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use neutron for secgroup list and return an existing fake + self.cloud.secgroup_source = 'neutron' + + validate = {'removeSecurityGroup': {'name': 'neutron-sec-group'}} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', 'detail']), + json={'servers': [fake_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}), + dict(method='POST', + uri='%s/servers/%s/action' % (fakes.COMPUTE_ENDPOINT, '1234'), + validate=dict(json=validate)), + ]) + + self.assertTrue(self.cloud.remove_server_security_groups( + 'server-name', 'neutron-sec-group')) + self.assert_calls() + + def test_add_bad_security_group_to_server_nova(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use nova for secgroup list and return an existing fake + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris([ + dict( + method='GET', + uri='{endpoint}/servers/detail'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'servers': [fake_server]}), + dict( + method='GET', + uri='{endpoint}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'security_groups': [nova_grp_dict]}), + ]) + + ret = self.cloud.add_server_security_groups('server-name', + 'unknown-sec-group') + self.assertFalse(ret) + + self.assert_calls() + + def test_add_bad_security_group_to_server_neutron(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use neutron for secgroup list and return an existing fake + self.cloud.secgroup_source = 'neutron' + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', 'detail']), + json={'servers': [fake_server]}), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'security-groups.json']), + json={'security_groups': [neutron_grp_dict]}) + ]) + self.assertFalse(self.cloud.add_server_security_groups( + 'server-name', 'unknown-sec-group')) + self.assert_calls() + + def test_add_security_group_to_bad_server(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 
'server-name', 'ACTIVE') + + self.register_uris([ + dict( + method='GET', + uri='{endpoint}/servers/detail'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={'servers': [fake_server]}), + ]) + + ret = self.cloud.add_server_security_groups('unknown-server-name', + 'nova-sec-group') + self.assertFalse(ret) + + self.assert_calls() + + def test_get_security_group_by_id_neutron(self): + self.cloud.secgroup_source = 'neutron' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', + 'security-groups', + neutron_grp_dict['id']]), + json={'security_group': neutron_grp_dict}) + ]) + ret_sg = self.cloud.get_security_group_by_id(neutron_grp_dict['id']) + self.assertEqual(neutron_grp_dict['id'], ret_sg['id']) + self.assertEqual(neutron_grp_dict['name'], ret_sg['name']) + self.assertEqual(neutron_grp_dict['description'], + ret_sg['description']) + self.assert_calls() + + def test_get_security_group_by_id_nova(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/os-security-groups/{id}'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + id=nova_grp_dict['id']), + json={'security_group': nova_grp_dict}), + ]) + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + ret_sg = self.cloud.get_security_group_by_id(nova_grp_dict['id']) + self.assertEqual(nova_grp_dict['id'], ret_sg['id']) + self.assertEqual(nova_grp_dict['name'], ret_sg['name']) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_server_console.py b/openstack/tests/unit/cloud/test_server_console.py new file mode 100644 index 000000000..7b7cd2b65 --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_console.py @@ -0,0 +1,81 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
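+
+# NOTE: Console output is fetched via the Nova server-action API: a POST
+# to /servers/{server_id}/action with an 'os-getConsoleOutput' body. The
+# tests below register that call; roughly, the expected request looks
+# like this (endpoint and id are illustrative):
+#
+#     POST {compute_endpoint}/servers/{server_id}/action
+#     {"os-getConsoleOutput": {"length": null}}
+#
+# A 400 response is treated as "this server has no console" and the call
+# returns an empty string.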
+ + +import uuid + +from openstack.tests.unit import base +from openstack.tests import fakes + + +class TestServerConsole(base.RequestsMockTestCase): + + def setUp(self): + super(TestServerConsole, self).setUp() + + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.server = fakes.make_fake_server( + server_id=self.server_id, name=self.server_name) + self.output = self.getUniqueString('output') + + def test_get_server_console_dict(self): + self.register_uris([ + dict(method='POST', + uri='{endpoint}/servers/{id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + id=self.server_id), + json={"output": self.output}, + validate=dict( + json={'os-getConsoleOutput': {'length': None}})) + ]) + + self.assertEqual( + self.output, self.cloud.get_server_console(self.server)) + self.assert_calls() + + def test_get_server_console_name_or_id(self): + + self.register_uris([ + dict(method='GET', + uri='{endpoint}/servers/detail'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json={"servers": [self.server]}), + dict(method='POST', + uri='{endpoint}/servers/{id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + id=self.server_id), + json={"output": self.output}, + validate=dict( + json={'os-getConsoleOutput': {'length': None}})) + ]) + + self.assertEqual( + self.output, self.cloud.get_server_console(self.server['id'])) + + self.assert_calls() + + def test_get_server_console_no_console(self): + + self.register_uris([ + dict(method='POST', + uri='{endpoint}/servers/{id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, + id=self.server_id), + status_code=400, + validate=dict( + json={'os-getConsoleOutput': {'length': None}})) + ]) + + self.assertEqual('', self.cloud.get_server_console(self.server)) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_server_delete_metadata.py b/openstack/tests/unit/cloud/test_server_delete_metadata.py new file mode 100644 index 000000000..d6c4cebd9 --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_delete_metadata.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_server_delete_metadata +---------------------------------- + +Tests for the `delete_server_metadata` command. +""" + +import uuid + +from openstack.cloud.exc import OpenStackCloudURINotFound +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestServerDeleteMetadata(base.RequestsMockTestCase): + + def setUp(self): + super(TestServerDeleteMetadata, self).setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name) + + def test_server_delete_metadata_with_exception(self): + """ + Test that a missing metadata throws an exception. 
+ """ + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.fake_server['id'], + 'metadata', 'key']), + status_code=404), + ]) + + self.assertRaises( + OpenStackCloudURINotFound, self.cloud.delete_server_metadata, + self.server_name, ['key']) + + self.assert_calls() + + def test_server_delete_metadata(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.fake_server['id'], + 'metadata', 'key']), + status_code=200), + ]) + + self.cloud.delete_server_metadata(self.server_id, ['key']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_server_group.py b/openstack/tests/unit/cloud/test_server_group.py new file mode 100644 index 000000000..3bef1c639 --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_group.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import uuid + +from openstack.tests.unit import base +from openstack.tests import fakes + + +class TestServerGroup(base.RequestsMockTestCase): + + def setUp(self): + super(TestServerGroup, self).setUp() + self.group_id = uuid.uuid4().hex + self.group_name = self.getUniqueString('server-group') + self.policies = ['affinity'] + self.fake_group = fakes.make_fake_server_group( + self.group_id, self.group_name, self.policies) + + def test_create_server_group(self): + + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-server-groups']), + json={'server_group': self.fake_group}, + validate=dict( + json={'server_group': { + 'name': self.group_name, + 'policies': self.policies, + }})), + ]) + + self.cloud.create_server_group(name=self.group_name, + policies=self.policies) + + self.assert_calls() + + def test_delete_server_group(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-server-groups']), + json={'server_groups': [self.fake_group]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-server-groups', self.group_id]), + json={'server_groups': [self.fake_group]}), + ]) + self.assertTrue(self.cloud.delete_server_group(self.group_name)) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_server_set_metadata.py b/openstack/tests/unit/cloud/test_server_set_metadata.py new file mode 100644 index 000000000..f0e21d2a1 --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_set_metadata.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_server_set_metadata +---------------------------------- + +Tests for the `set_server_metadata` command. +""" + +import uuid + +from openstack.cloud.exc import OpenStackCloudBadRequest +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestServerSetMetadata(base.RequestsMockTestCase): + + def setUp(self): + super(TestServerSetMetadata, self).setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name) + + def test_server_set_metadata_with_exception(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.fake_server['id'], + 'metadata']), + validate=dict(json={'metadata': {'meta': 'data'}}), + json={}, + status_code=400), + ]) + + self.assertRaises( + OpenStackCloudBadRequest, self.cloud.set_server_metadata, + self.server_name, {'meta': 'data'}) + + self.assert_calls() + + def test_server_set_metadata(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', self.fake_server['id'], 'metadata']), + validate=dict(json={'metadata': {'meta': 'data'}}), + status_code=200), + ]) + + self.cloud.set_server_metadata(self.server_id, {'meta': 'data'}) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_services.py b/openstack/tests/unit/cloud/test_services.py new file mode 100644 index 000000000..cfbb261d0 --- /dev/null +++ b/openstack/tests/unit/cloud/test_services.py @@ -0,0 +1,283 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_cloud_services +---------------------------------- + +Tests Keystone services commands. 
+""" + +from openstack import OpenStackCloudException +from openstack.cloud.exc import OpenStackCloudUnavailableFeature +from openstack.tests.unit import base +from testtools import matchers + + +class CloudServices(base.RequestsMockTestCase): + + def setUp(self, cloud_config_fixture='clouds.yaml'): + super(CloudServices, self).setUp(cloud_config_fixture) + + def get_mock_url(self, service_type='identity', interface='admin', + resource='services', append=None, base_url_append='v3'): + + return super(CloudServices, self).get_mock_url( + service_type, interface, resource, append, base_url_append) + + def test_create_service_v2(self): + self.use_keystone_v2() + service_data = self._get_service_data(name='a service', type='network', + description='A test service') + reference_req = service_data.json_request.copy() + reference_req.pop('enabled') + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url(base_url_append='OS-KSADM'), + status_code=200, + json=service_data.json_response_v2, + validate=dict(json={'OS-KSADM:service': reference_req})) + ]) + + service = self.op_cloud.create_service( + name=service_data.service_name, + service_type=service_data.service_type, + description=service_data.description) + self.assertThat(service.name, + matchers.Equals(service_data.service_name)) + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + self.assertThat(service.description, + matchers.Equals(service_data.description)) + self.assertThat(service.type, + matchers.Equals(service_data.service_type)) + self.assert_calls() + + def test_create_service_v3(self): + service_data = self._get_service_data(name='a service', type='network', + description='A test service') + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url(), + status_code=200, + json=service_data.json_response_v3, + validate=dict(json={'service': service_data.json_request})) + ]) + + service = self.op_cloud.create_service( + name=service_data.service_name, + service_type=service_data.service_type, + description=service_data.description) + self.assertThat(service.name, + matchers.Equals(service_data.service_name)) + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + self.assertThat(service.description, + matchers.Equals(service_data.description)) + self.assertThat(service.type, + matchers.Equals(service_data.service_type)) + self.assert_calls() + + def test_update_service_v2(self): + self.use_keystone_v2() + # NOTE(SamYaple): Update service only works with v3 api + self.assertRaises(OpenStackCloudUnavailableFeature, + self.op_cloud.update_service, + 'service_id', name='new name') + + def test_update_service_v3(self): + service_data = self._get_service_data(name='a service', type='network', + description='A test service') + request = service_data.json_request.copy() + request['enabled'] = False + resp = service_data.json_response_v3.copy() + resp['enabled'] = False + request.pop('description') + request.pop('name') + request.pop('type') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [resp['service']]}), + dict(method='PATCH', + uri=self.get_mock_url(append=[service_data.service_id]), + status_code=200, + json=resp, + validate=dict(json={'service': request})) + ]) + + service = self.op_cloud.update_service(service_data.service_id, + enabled=False) + self.assertThat(service.name, + matchers.Equals(service_data.service_name)) + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + 
self.assertThat(service.description, + matchers.Equals(service_data.description)) + self.assertThat(service.type, + matchers.Equals(service_data.service_type)) + self.assert_calls() + + def test_list_services(self): + service_data = self._get_service_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [service_data.json_response_v3['service']]}) + ]) + services = self.op_cloud.list_services() + self.assertThat(len(services), matchers.Equals(1)) + self.assertThat(services[0].id, + matchers.Equals(service_data.service_id)) + self.assertThat(services[0].name, + matchers.Equals(service_data.service_name)) + self.assertThat(services[0].type, + matchers.Equals(service_data.service_type)) + self.assert_calls() + + def test_get_service(self): + service_data = self._get_service_data() + service2_data = self._get_service_data() + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service']]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service']]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service']]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=400), + ]) + + # Search by id + service = self.op_cloud.get_service(name_or_id=service_data.service_id) + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + + # Search by name + service = self.op_cloud.get_service( + name_or_id=service_data.service_name) + # test we are getting exactly 1 element + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + + # Not found + service = self.op_cloud.get_service(name_or_id='INVALID SERVICE') + self.assertIs(None, service) + + # Multiple matches + # test we are getting an Exception + self.assertRaises(OpenStackCloudException, self.op_cloud.get_service, + name_or_id=None, filters={'type': 'type2'}) + self.assert_calls() + + def test_search_services(self): + service_data = self._get_service_data() + service2_data = self._get_service_data(type=service_data.service_type) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service']]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service']]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service']]}), + dict(method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service']]}), + ]) + + # Search by id + services = self.op_cloud.search_services( + name_or_id=service_data.service_id) + # test we are getting exactly 1 element + self.assertThat(len(services), matchers.Equals(1)) + self.assertThat(services[0].id, + matchers.Equals(service_data.service_id)) + + # Search by name + services = self.op_cloud.search_services( + name_or_id=service_data.service_name) + # test we are getting exactly 1 element + 
self.assertThat(len(services), matchers.Equals(1))
+        self.assertThat(services[0].name,
+                        matchers.Equals(service_data.service_name))
+
+        # Not found
+        services = self.op_cloud.search_services(name_or_id='!INVALID!')
+        self.assertThat(len(services), matchers.Equals(0))
+
+        # Multiple matches
+        services = self.op_cloud.search_services(
+            filters={'type': service_data.service_type})
+        # test we are getting exactly 2 elements
+        self.assertThat(len(services), matchers.Equals(2))
+        self.assertThat(services[0].id,
+                        matchers.Equals(service_data.service_id))
+        self.assertThat(services[1].id,
+                        matchers.Equals(service2_data.service_id))
+        self.assert_calls()
+
+    def test_delete_service(self):
+        service_data = self._get_service_data()
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(),
+                 status_code=200,
+                 json={'services': [
+                     service_data.json_response_v3['service']]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(append=[service_data.service_id]),
+                 status_code=204),
+            dict(method='GET',
+                 uri=self.get_mock_url(),
+                 status_code=200,
+                 json={'services': [
+                     service_data.json_response_v3['service']]}),
+            dict(method='DELETE',
+                 uri=self.get_mock_url(append=[service_data.service_id]),
+                 status_code=204)
+        ])
+
+        # Delete by name
+        self.op_cloud.delete_service(name_or_id=service_data.service_name)
+
+        # Delete by id
+        self.op_cloud.delete_service(service_data.service_id)
+
+        self.assert_calls()
diff --git a/openstack/tests/unit/cloud/test_shade.py b/openstack/tests/unit/cloud/test_shade.py
new file mode 100644
index 000000000..32bfe6914
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_shade.py
@@ -0,0 +1,436 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import uuid
+
+import testtools
+
+import openstack.cloud
+from openstack.cloud import _utils
+from openstack.cloud import exc
+from openstack.tests import fakes
+from openstack.tests.unit import base
+
+
+RANGE_DATA = [
+    dict(id=1, key1=1, key2=5),
+    dict(id=2, key1=1, key2=20),
+    dict(id=3, key1=2, key2=10),
+    dict(id=4, key1=2, key2=30),
+    dict(id=5, key1=3, key2=40),
+    dict(id=6, key1=3, key2=40),
+]
+
+
+class TestShade(base.RequestsMockTestCase):
+
+    def setUp(self):
+        # This set of tests is not testing neutron; it's testing
+        # rebuilding servers, but we do several network calls in service
+        # of a NORMAL rebuild to find the default_network. Putting
+        # in all of the neutron mocks for that would make the tests harder
+        # to read. SO - we're going to mock neutron into the off position
+        # and then turn it back on in the few tests that specifically do.
+        # Maybe we should reorg these into two classes - one with neutron
+        # mocked out - and one with it not mocked out.
+        super(TestShade, self).setUp()
+        self.has_neutron = False
+
+        def fake_has_service(*args, **kwargs):
+            return self.has_neutron
+
+        self.cloud.has_service = fake_has_service
+
+    def test_openstack_cloud(self):
+        self.assertIsInstance(self.cloud, openstack.OpenStackCloud)
+
+    @mock.patch.object(openstack.OpenStackCloud, 'search_images')
+    def test_get_images(self, mock_search):
+        image1 = dict(id='123', name='mickey')
+        mock_search.return_value = [image1]
+        r = self.cloud.get_image('mickey')
+        self.assertIsNotNone(r)
+        self.assertDictEqual(image1, r)
+
+    @mock.patch.object(openstack.OpenStackCloud, 'search_images')
+    def test_get_image_not_found(self, mock_search):
+        mock_search.return_value = []
+        r = self.cloud.get_image('doesNotExist')
+        self.assertIsNone(r)
+
+    def test_get_server(self):
+        server1 = fakes.make_fake_server('123', 'mickey')
+        server2 = fakes.make_fake_server('345', 'mouse')
+
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': [server1, server2]}),
+        ])
+
+        r = self.cloud.get_server('mickey')
+        self.assertIsNotNone(r)
+        self.assertEqual(server1['name'], r['name'])
+
+        self.assert_calls()
+
+    def test_get_server_not_found(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': []}),
+        ])
+
+        r = self.cloud.get_server('doesNotExist')
+        self.assertIsNone(r)
+
+        self.assert_calls()
+
+    def test_list_servers_exception(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 status_code=400)
+        ])
+
+        self.assertRaises(exc.OpenStackCloudException,
+                          self.cloud.list_servers)
+
+        self.assert_calls()
+
+    def test__neutron_exceptions_resource_not_found(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 status_code=404)
+        ])
+        self.assertRaises(exc.OpenStackCloudResourceNotFound,
+                          self.cloud.list_networks)
+        self.assert_calls()
+
+    def test__neutron_exceptions_url_not_found(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 status_code=404)
+        ])
+        self.assertRaises(exc.OpenStackCloudURINotFound,
+                          self.cloud.list_networks)
+        self.assert_calls()
+
+    def test_list_servers(self):
+        server_id = str(uuid.uuid4())
+        server_name = self.getUniqueString('name')
+        fake_server = fakes.make_fake_server(server_id, server_name)
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail']),
+                 json={'servers': [fake_server]}),
+        ])
+
+        r = self.cloud.list_servers()
+
+        self.assertEqual(1, len(r))
+        self.assertEqual(server_name, r[0]['name'])
+
+        self.assert_calls()
+
+    def test_list_servers_all_projects(self):
+        '''This test verifies that when list_servers is called with
+        `all_projects=True`, it passes `all_tenants=True` to nova.'''
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'compute', 'public', append=['servers', 'detail'],
+                     qs_elements=['all_tenants=True']),
+                 complete_qs=True,
+                 json={'servers': []}),
+        ])
+
+        self.cloud.list_servers(all_projects=True)
+
+        self.assert_calls()
+
+    def test_iterate_timeout_bad_wait(self):
+        with testtools.ExpectedException(
+
exc.OpenStackCloudException, + "Wait value must be an int or float value."): + for count in _utils._iterate_timeout( + 1, "test_iterate_timeout_bad_wait", wait="timeishard"): + pass + + @mock.patch('time.sleep') + def test_iterate_timeout_str_wait(self, mock_sleep): + iter = _utils._iterate_timeout( + 10, "test_iterate_timeout_str_wait", wait="1.6") + next(iter) + next(iter) + mock_sleep.assert_called_with(1.6) + + @mock.patch('time.sleep') + def test_iterate_timeout_int_wait(self, mock_sleep): + iter = _utils._iterate_timeout( + 10, "test_iterate_timeout_int_wait", wait=1) + next(iter) + next(iter) + mock_sleep.assert_called_with(1.0) + + @mock.patch('time.sleep') + def test_iterate_timeout_timeout(self, mock_sleep): + message = "timeout test" + with testtools.ExpectedException( + exc.OpenStackCloudTimeout, + message): + for count in _utils._iterate_timeout(0.1, message, wait=1): + pass + mock_sleep.assert_called_with(1.0) + + def test__nova_extensions(self): + body = [ + { + "updated": "2014-12-03T00:00:00Z", + "name": "Multinic", + "links": [], + "namespace": "http://openstack.org/compute/ext/fake_xml", + "alias": "NMN", + "description": "Multiple network support." + }, + { + "updated": "2014-12-03T00:00:00Z", + "name": "DiskConfig", + "links": [], + "namespace": "http://openstack.org/compute/ext/fake_xml", + "alias": "OS-DCF", + "description": "Disk Management Extension." + }, + ] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/extensions'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json=dict(extensions=body)) + ]) + extensions = self.cloud._nova_extensions() + self.assertEqual(set(['NMN', 'OS-DCF']), extensions) + + self.assert_calls() + + def test__nova_extensions_fails(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/extensions'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + status_code=404), + ]) + with testtools.ExpectedException( + exc.OpenStackCloudURINotFound, + "Error fetching extension list for nova" + ): + self.cloud._nova_extensions() + + self.assert_calls() + + def test__has_nova_extension(self): + body = [ + { + "updated": "2014-12-03T00:00:00Z", + "name": "Multinic", + "links": [], + "namespace": "http://openstack.org/compute/ext/fake_xml", + "alias": "NMN", + "description": "Multiple network support." + }, + { + "updated": "2014-12-03T00:00:00Z", + "name": "DiskConfig", + "links": [], + "namespace": "http://openstack.org/compute/ext/fake_xml", + "alias": "OS-DCF", + "description": "Disk Management Extension." + }, + ] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/extensions'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json=dict(extensions=body)) + ]) + self.assertTrue(self.cloud._has_nova_extension('NMN')) + + self.assert_calls() + + def test__has_nova_extension_missing(self): + body = [ + { + "updated": "2014-12-03T00:00:00Z", + "name": "Multinic", + "links": [], + "namespace": "http://openstack.org/compute/ext/fake_xml", + "alias": "NMN", + "description": "Multiple network support." + }, + { + "updated": "2014-12-03T00:00:00Z", + "name": "DiskConfig", + "links": [], + "namespace": "http://openstack.org/compute/ext/fake_xml", + "alias": "OS-DCF", + "description": "Disk Management Extension." 
+ }, + ] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/extensions'.format( + endpoint=fakes.COMPUTE_ENDPOINT), + json=dict(extensions=body)) + ]) + self.assertFalse(self.cloud._has_nova_extension('invalid')) + + self.assert_calls() + + def test__neutron_extensions(self): + body = [ + { + "updated": "2014-06-1T10:00:00-00:00", + "name": "Distributed Virtual Router", + "links": [], + "alias": "dvr", + "description": + "Enables configuration of Distributed Virtual Routers." + }, + { + "updated": "2013-07-23T10:00:00-00:00", + "name": "Allowed Address Pairs", + "links": [], + "alias": "allowed-address-pairs", + "description": "Provides allowed address pairs" + }, + ] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json=dict(extensions=body)) + ]) + extensions = self.cloud._neutron_extensions() + self.assertEqual(set(['dvr', 'allowed-address-pairs']), extensions) + + self.assert_calls() + + def test__neutron_extensions_fails(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + status_code=404) + ]) + with testtools.ExpectedException( + exc.OpenStackCloudURINotFound, + "Error fetching extension list for neutron" + ): + self.cloud._neutron_extensions() + + self.assert_calls() + + def test__has_neutron_extension(self): + body = [ + { + "updated": "2014-06-1T10:00:00-00:00", + "name": "Distributed Virtual Router", + "links": [], + "alias": "dvr", + "description": + "Enables configuration of Distributed Virtual Routers." + }, + { + "updated": "2013-07-23T10:00:00-00:00", + "name": "Allowed Address Pairs", + "links": [], + "alias": "allowed-address-pairs", + "description": "Provides allowed address pairs" + }, + ] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json=dict(extensions=body)) + ]) + self.assertTrue(self.cloud._has_neutron_extension('dvr')) + self.assert_calls() + + def test__has_neutron_extension_missing(self): + body = [ + { + "updated": "2014-06-1T10:00:00-00:00", + "name": "Distributed Virtual Router", + "links": [], + "alias": "dvr", + "description": + "Enables configuration of Distributed Virtual Routers." 
+ }, + { + "updated": "2013-07-23T10:00:00-00:00", + "name": "Allowed Address Pairs", + "links": [], + "alias": "allowed-address-pairs", + "description": "Provides allowed address pairs" + }, + ] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions.json']), + json=dict(extensions=body)) + ]) + self.assertFalse(self.cloud._has_neutron_extension('invalid')) + self.assert_calls() + + def test_range_search(self): + filters = {"key1": "min", "key2": "20"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(1, len(retval)) + self.assertEqual([RANGE_DATA[1]], retval) + + def test_range_search_2(self): + filters = {"key1": "<=2", "key2": ">10"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual([RANGE_DATA[1], RANGE_DATA[3]], retval) + + def test_range_search_3(self): + filters = {"key1": "2", "key2": "min"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(0, len(retval)) + + def test_range_search_4(self): + filters = {"key1": "max", "key2": "min"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(0, len(retval)) + + def test_range_search_5(self): + filters = {"key1": "min", "key2": "min"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(1, len(retval)) + self.assertEqual([RANGE_DATA[0]], retval) diff --git a/openstack/tests/unit/cloud/test_shade_operator.py b/openstack/tests/unit/cloud/test_shade_operator.py new file mode 100644 index 000000000..a31bb57e7 --- /dev/null +++ b/openstack/tests/unit/cloud/test_shade_operator.py @@ -0,0 +1,20 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# TODO(shade) Port this content back in from shade repo as tests don't have +# references to ironic_client. + +from openstack.tests.unit import base + + +class TestShadeOperator(base.RequestsMockTestCase): + pass diff --git a/openstack/tests/unit/cloud/test_stack.py b/openstack/tests/unit/cloud/test_stack.py new file mode 100644 index 000000000..a9e928536 --- /dev/null +++ b/openstack/tests/unit/cloud/test_stack.py @@ -0,0 +1,559 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
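+
+# NOTE: The tests below follow the requests-mock pattern used throughout
+# this suite: every HTTP call the code under test is expected to make is
+# registered up front with register_uris(), and assert_calls() verifies
+# that exactly those calls were made, in order. The heat API resolves a
+# stack name via a 302 redirect to the canonical /stacks/{name}/{id}
+# URL, which is why most mocks below register the redirect first. A
+# minimal illustrative sketch (not one of the tests in this module):
+#
+#     self.register_uris([
+#         dict(method='GET',
+#              uri='{endpoint}/stacks'.format(
+#                  endpoint=fakes.ORCHESTRATION_ENDPOINT),
+#              json={'stacks': []}),
+#     ])
+#     self.assertEqual([], self.cloud.list_stacks())
+#     self.assert_calls()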
+ + +import tempfile +import testtools + +import openstack.cloud +from openstack.cloud import meta +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestStack(base.RequestsMockTestCase): + + def setUp(self): + super(TestStack, self).setUp() + self.stack_id = self.getUniqueString('id') + self.stack_name = self.getUniqueString('name') + self.stack = fakes.make_fake_stack(self.stack_id, self.stack_name) + + def test_list_stacks(self): + fake_stacks = [ + self.stack, + fakes.make_fake_stack( + self.getUniqueString('id'), + self.getUniqueString('name')) + ] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + json={"stacks": fake_stacks}), + ]) + stacks = self.cloud.list_stacks() + self.assertEqual( + [f.toDict() for f in self.cloud._normalize_stacks(fake_stacks)], + [f.toDict() for f in stacks]) + + self.assert_calls() + + def test_list_stacks_exception(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + status_code=404) + ]) + with testtools.ExpectedException( + openstack.OpenStackCloudURINotFound): + self.cloud.list_stacks() + self.assert_calls() + + def test_search_stacks(self): + fake_stacks = [ + self.stack, + fakes.make_fake_stack( + self.getUniqueString('id'), + self.getUniqueString('name')) + ] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + json={"stacks": fake_stacks}), + ]) + stacks = self.cloud.search_stacks() + self.assertEqual( + self.cloud._normalize_stacks(meta.obj_list_to_munch(fake_stacks)), + stacks) + self.assert_calls() + + def test_search_stacks_filters(self): + fake_stacks = [ + self.stack, + fakes.make_fake_stack( + self.getUniqueString('id'), + self.getUniqueString('name'), + status='CREATE_FAILED') + ] + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + json={"stacks": fake_stacks}), + ]) + filters = {'status': 'FAILED'} + stacks = self.cloud.search_stacks(filters=filters) + self.assertEqual( + self.cloud._normalize_stacks( + meta.obj_list_to_munch(fake_stacks[1:])), + stacks) + self.assert_calls() + + def test_search_stacks_exception(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + status_code=404) + ]) + with testtools.ExpectedException( + openstack.OpenStackCloudURINotFound): + self.cloud.search_stacks() + + def test_delete_stack(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict(method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + dict(method='DELETE', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id)), + ]) + self.assertTrue(self.cloud.delete_stack(self.stack_name)) + self.assert_calls() + + def test_delete_stack_not_found(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks/stack_name'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + status_code=404), + ]) + 
self.assertFalse(self.cloud.delete_stack('stack_name')) + self.assert_calls() + + def test_delete_stack_exception(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict(method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + dict(method='DELETE', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id), + status_code=400, + reason="ouch"), + ]) + with testtools.ExpectedException( + openstack.OpenStackCloudBadRequest): + self.cloud.delete_stack(self.stack_id) + self.assert_calls() + + def test_delete_stack_wait(self): + marker_event = fakes.make_fake_stack_event( + self.stack_id, self.stack_name, status='CREATE_COMPLETE') + marker_qs = 'marker={e_id}&sort_dir=asc'.format( + e_id=marker_event['id']) + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict(method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + dict(method='GET', + uri='{endpoint}/stacks/{id}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, + qs='limit=1&sort_dir=desc'), + complete_qs=True, + json={"events": [marker_event]}), + dict(method='DELETE', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id)), + dict(method='GET', + uri='{endpoint}/stacks/{id}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, + qs=marker_qs), + complete_qs=True, + json={"events": [ + fakes.make_fake_stack_event( + self.stack_id, self.stack_name, + status='DELETE_COMPLETE'), + ]}), + dict(method='GET', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + status_code=404), + ]) + + self.assertTrue(self.cloud.delete_stack(self.stack_id, wait=True)) + self.assert_calls() + + def test_delete_stack_wait_failed(self): + failed_stack = self.stack.copy() + failed_stack['stack_status'] = 'DELETE_FAILED' + marker_event = fakes.make_fake_stack_event( + self.stack_id, self.stack_name, status='CREATE_COMPLETE') + marker_qs = 'marker={e_id}&sort_dir=asc'.format( + e_id=marker_event['id']) + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict(method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + dict(method='GET', + uri='{endpoint}/stacks/{id}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, + qs='limit=1&sort_dir=desc'), + complete_qs=True, + 
json={"events": [marker_event]}), + dict(method='DELETE', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id)), + dict(method='GET', + uri='{endpoint}/stacks/{id}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, + qs=marker_qs), + complete_qs=True, + json={"events": [ + fakes.make_fake_stack_event( + self.stack_id, self.stack_name, + status='DELETE_COMPLETE'), + ]}), + dict(method='GET', + uri='{endpoint}/stacks/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict(method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": failed_stack}), + ]) + + with testtools.ExpectedException( + openstack.OpenStackCloudException): + self.cloud.delete_stack(self.stack_id, wait=True) + + self.assert_calls() + + def test_create_stack(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + self.register_uris([ + dict( + method='POST', uri='{endpoint}/stacks'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + json={"stack": self.stack}, + validate=dict( + json={ + 'disable_rollback': False, + 'environment': {}, + 'files': {}, + 'parameters': {}, + 'stack_name': self.stack_name, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60} + )), + dict( + method='GET', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict( + method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + ]) + + self.cloud.create_stack( + self.stack_name, + template_file=test_template.name + ) + + self.assert_calls() + + def test_create_stack_wait(self): + + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + + self.register_uris([ + dict( + method='POST', uri='{endpoint}/stacks'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT), + json={"stack": self.stack}, + validate=dict( + json={ + 'disable_rollback': False, + 'environment': {}, + 'files': {}, + 'parameters': {}, + 'stack_name': self.stack_name, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60} + )), + dict( + method='GET', + uri='{endpoint}/stacks/{name}/events?sort_dir=asc'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + json={"events": [ + fakes.make_fake_stack_event( + self.stack_id, self.stack_name, + status='CREATE_COMPLETE', + resource_name='name'), + ]}), + dict( + method='GET', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict( + method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, 
name=self.stack_name), + json={"stack": self.stack}), + ]) + self.cloud.create_stack( + self.stack_name, + template_file=test_template.name, + wait=True) + + self.assert_calls() + + def test_update_stack(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + + self.register_uris([ + dict( + method='PUT', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + validate=dict( + json={ + 'disable_rollback': False, + 'environment': {}, + 'files': {}, + 'parameters': {}, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60})), + dict( + method='GET', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict( + method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + ]) + self.cloud.update_stack( + self.stack_name, + template_file=test_template.name) + + self.assert_calls() + + def test_update_stack_wait(self): + marker_event = fakes.make_fake_stack_event( + self.stack_id, self.stack_name, status='CREATE_COMPLETE', + resource_name='name') + marker_qs = 'marker={e_id}&sort_dir=asc'.format( + e_id=marker_event['id']) + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + + self.register_uris([ + dict( + method='GET', + uri='{endpoint}/stacks/{name}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name, + qs='limit=1&sort_dir=desc'), + json={"events": [marker_event]}), + dict( + method='PUT', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + validate=dict( + json={ + 'disable_rollback': False, + 'environment': {}, + 'files': {}, + 'parameters': {}, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60})), + dict( + method='GET', + uri='{endpoint}/stacks/{name}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name, + qs=marker_qs), + json={"events": [ + fakes.make_fake_stack_event( + self.stack_id, self.stack_name, + status='UPDATE_COMPLETE', + resource_name='name'), + ]}), + dict( + method='GET', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict( + method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + ]) + self.cloud.update_stack( + self.stack_name, + template_file=test_template.name, + wait=True) + + self.assert_calls() + + def test_get_stack(self): + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict(method='GET', + 
uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": self.stack}), + ]) + + res = self.cloud.get_stack(self.stack_name) + self.assertIsNotNone(res) + self.assertEqual(self.stack['stack_name'], res['stack_name']) + self.assertEqual(self.stack['stack_name'], res['name']) + self.assertEqual(self.stack['stack_status'], res['stack_status']) + self.assertEqual('COMPLETE', res['status']) + + self.assert_calls() + + def test_get_stack_in_progress(self): + in_progress = self.stack.copy() + in_progress['stack_status'] = 'CREATE_IN_PROGRESS' + self.register_uris([ + dict(method='GET', + uri='{endpoint}/stacks/{name}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name), + status_code=302, + headers=dict( + location='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name))), + dict(method='GET', + uri='{endpoint}/stacks/{name}/{id}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, name=self.stack_name), + json={"stack": in_progress}), + ]) + + res = self.cloud.get_stack(self.stack_name) + self.assertIsNotNone(res) + self.assertEqual(in_progress['stack_name'], res['stack_name']) + self.assertEqual(in_progress['stack_name'], res['name']) + self.assertEqual(in_progress['stack_status'], res['stack_status']) + self.assertEqual('CREATE', res['action']) + self.assertEqual('IN_PROGRESS', res['status']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_subnet.py b/openstack/tests/unit/cloud/test_subnet.py new file mode 100644 index 000000000..83551513c --- /dev/null +++ b/openstack/tests/unit/cloud/test_subnet.py @@ -0,0 +1,388 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
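+
+# NOTE: The name-or-id lookups exercised here GET the full subnets.json
+# listing and filter client-side, which is why the "not found" and
+# "multiple matches" cases below still register exactly one listing
+# call. A minimal illustrative sketch (not one of the tests in this
+# module):
+#
+#     self.register_uris([
+#         dict(method='GET',
+#              uri=self.get_mock_url(
+#                  'network', 'public', append=['v2.0', 'subnets.json']),
+#              json={'subnets': []}),
+#     ])
+#     self.assertFalse(self.cloud.delete_subnet('no-such-subnet'))
+#     self.assert_calls()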
+ +import copy +import testtools + +from openstack.cloud import exc +from openstack.tests.unit import base + + +class TestSubnet(base.RequestsMockTestCase): + + network_name = 'network_name' + subnet_name = 'subnet_name' + subnet_id = '1f1696eb-7f47-47f6-835c-4889bff88604' + subnet_cidr = '192.168.199.0/24' + + mock_network_rep = { + 'id': '881d1bb7-a663-44c0-8f9f-ee2765b74486', + 'name': network_name, + } + + mock_subnet_rep = { + 'allocation_pools': [{ + 'start': u'192.168.199.2', + 'end': u'192.168.199.254' + }], + 'cidr': subnet_cidr, + 'created_at': '2017-04-24T20:22:23Z', + 'description': '', + 'dns_nameservers': [], + 'enable_dhcp': False, + 'gateway_ip': '192.168.199.1', + 'host_routes': [], + 'id': subnet_id, + 'ip_version': 4, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': None, + 'name': subnet_name, + 'network_id': mock_network_rep['id'], + 'project_id': '861808a93da0484ea1767967c4df8a23', + 'revision_number': 2, + 'service_types': [], + 'subnetpool_id': None, + 'tags': [] + } + + def test_get_subnet(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': [self.mock_subnet_rep]}) + ]) + r = self.cloud.get_subnet(self.subnet_name) + self.assertIsNotNone(r) + self.assertDictEqual(self.mock_subnet_rep, r) + self.assert_calls() + + def test_get_subnet_by_id(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', + 'subnets', + self.subnet_id]), + json={'subnet': self.mock_subnet_rep}) + ]) + r = self.cloud.get_subnet_by_id(self.subnet_id) + self.assertIsNotNone(r) + self.assertDictEqual(self.mock_subnet_rep, r) + self.assert_calls() + + def test_create_subnet(self): + pool = [{'start': '192.168.199.2', 'end': '192.168.199.254'}] + dns = ['8.8.8.8'] + routes = [{"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}] + mock_subnet_rep = copy.copy(self.mock_subnet_rep) + mock_subnet_rep['allocation_pools'] = pool + mock_subnet_rep['dns_nameservers'] = dns + mock_subnet_rep['host_routes'] = routes + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [self.mock_network_rep]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnet': mock_subnet_rep}, + validate=dict( + json={'subnet': { + 'cidr': self.subnet_cidr, + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id'], + 'allocation_pools': pool, + 'dns_nameservers': dns, + 'host_routes': routes}})) + ]) + subnet = self.cloud.create_subnet(self.network_name, self.subnet_cidr, + allocation_pools=pool, + dns_nameservers=dns, + host_routes=routes) + self.assertDictEqual(mock_subnet_rep, subnet) + self.assert_calls() + + def test_create_subnet_string_ip_version(self): + '''Allow ip_version as a string''' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [self.mock_network_rep]}), + dict(method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnet': self.mock_subnet_rep}, + validate=dict( + json={'subnet': { + 'cidr': self.subnet_cidr, + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id']}})) + ]) + subnet = self.cloud.create_subnet( + self.network_name, self.subnet_cidr, ip_version='4') + 
self.assertDictEqual(self.mock_subnet_rep, subnet)
+        self.assert_calls()
+
+    def test_create_subnet_bad_ip_version(self):
+        '''String ip_versions must be convertible to int'''
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': [self.mock_network_rep]})
+        ])
+        with testtools.ExpectedException(
+                exc.OpenStackCloudException,
+                "ip_version must be an integer"
+        ):
+            self.cloud.create_subnet(
+                self.network_name, self.subnet_cidr, ip_version='4x')
+        self.assert_calls()
+
+    def test_create_subnet_without_gateway_ip(self):
+        pool = [{'start': '192.168.199.2', 'end': '192.168.199.254'}]
+        dns = ['8.8.8.8']
+        mock_subnet_rep = copy.copy(self.mock_subnet_rep)
+        mock_subnet_rep['allocation_pools'] = pool
+        mock_subnet_rep['dns_nameservers'] = dns
+        mock_subnet_rep['gateway_ip'] = None
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': [self.mock_network_rep]}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'subnets.json']),
+                 json={'subnet': mock_subnet_rep},
+                 validate=dict(
+                     json={'subnet': {
+                         'cidr': self.subnet_cidr,
+                         'enable_dhcp': False,
+                         'ip_version': 4,
+                         'network_id': self.mock_network_rep['id'],
+                         'allocation_pools': pool,
+                         'gateway_ip': None,
+                         'dns_nameservers': dns}}))
+        ])
+        subnet = self.cloud.create_subnet(self.network_name, self.subnet_cidr,
+                                          allocation_pools=pool,
+                                          dns_nameservers=dns,
+                                          disable_gateway_ip=True)
+        self.assertDictEqual(mock_subnet_rep, subnet)
+        self.assert_calls()
+
+    def test_create_subnet_with_gateway_ip(self):
+        pool = [{'start': '192.168.199.8', 'end': '192.168.199.254'}]
+        gateway = '192.168.199.2'
+        dns = ['8.8.8.8']
+        mock_subnet_rep = copy.copy(self.mock_subnet_rep)
+        mock_subnet_rep['allocation_pools'] = pool
+        mock_subnet_rep['dns_nameservers'] = dns
+        mock_subnet_rep['gateway_ip'] = gateway
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': [self.mock_network_rep]}),
+            dict(method='POST',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'subnets.json']),
+                 json={'subnet': mock_subnet_rep},
+                 validate=dict(
+                     json={'subnet': {
+                         'cidr': self.subnet_cidr,
+                         'enable_dhcp': False,
+                         'ip_version': 4,
+                         'network_id': self.mock_network_rep['id'],
+                         'allocation_pools': pool,
+                         'gateway_ip': gateway,
+                         'dns_nameservers': dns}}))
+        ])
+        subnet = self.cloud.create_subnet(self.network_name, self.subnet_cidr,
+                                          allocation_pools=pool,
+                                          dns_nameservers=dns,
+                                          gateway_ip=gateway)
+        self.assertDictEqual(mock_subnet_rep, subnet)
+        self.assert_calls()
+
+    def test_create_subnet_conflict_gw_ops(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': [self.mock_network_rep]})
+        ])
+        gateway = '192.168.200.3'
+        self.assertRaises(exc.OpenStackCloudException,
+                          self.cloud.create_subnet, 'kooky',
+                          self.subnet_cidr, gateway_ip=gateway,
+                          disable_gateway_ip=True)
+        self.assert_calls()
+
+    def test_create_subnet_bad_network(self):
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'networks.json']),
+                 json={'networks': [self.mock_network_rep]})
+        ])
+        self.assertRaises(exc.OpenStackCloudException,
+                          self.cloud.create_subnet,
+                          'duck', self.subnet_cidr)
+        self.assert_calls()
+
+    def 
test_create_subnet_non_unique_network(self): + net1 = dict(id='123', name=self.network_name) + net2 = dict(id='456', name=self.network_name) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': [net1, net2]}) + ]) + self.assertRaises(exc.OpenStackCloudException, + self.cloud.create_subnet, + self.network_name, self.subnet_cidr) + self.assert_calls() + + def test_delete_subnet(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': [self.mock_subnet_rep]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'subnets', '%s.json' % self.subnet_id]), + json={}) + ]) + self.assertTrue(self.cloud.delete_subnet(self.subnet_name)) + self.assert_calls() + + def test_delete_subnet_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': []}) + ]) + self.assertFalse(self.cloud.delete_subnet('goofy')) + self.assert_calls() + + def test_delete_subnet_multiple_found(self): + subnet1 = dict(id='123', name=self.subnet_name) + subnet2 = dict(id='456', name=self.subnet_name) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': [subnet1, subnet2]}) + ]) + self.assertRaises(exc.OpenStackCloudException, + self.cloud.delete_subnet, + self.subnet_name) + self.assert_calls() + + def test_delete_subnet_multiple_using_id(self): + subnet1 = dict(id='123', name=self.subnet_name) + subnet2 = dict(id='456', name=self.subnet_name) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': [subnet1, subnet2]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'subnets', '%s.json' % subnet1['id']]), + json={}) + ]) + self.assertTrue(self.cloud.delete_subnet(subnet1['id'])) + self.assert_calls() + + def test_update_subnet(self): + expected_subnet = copy.copy(self.mock_subnet_rep) + expected_subnet['name'] = 'goofy' + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': [self.mock_subnet_rep]}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'subnets', '%s.json' % self.subnet_id]), + json={'subnet': expected_subnet}, + validate=dict( + json={'subnet': {'name': 'goofy'}})) + ]) + subnet = self.cloud.update_subnet(self.subnet_id, subnet_name='goofy') + self.assertDictEqual(expected_subnet, subnet) + self.assert_calls() + + def test_update_subnet_gateway_ip(self): + expected_subnet = copy.copy(self.mock_subnet_rep) + gateway = '192.168.199.3' + expected_subnet['gateway_ip'] = gateway + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets.json']), + json={'subnets': [self.mock_subnet_rep]}), + dict(method='PUT', + uri=self.get_mock_url( + 'network', 'public', + append=['v2.0', 'subnets', '%s.json' % self.subnet_id]), + json={'subnet': expected_subnet}, + validate=dict( + json={'subnet': {'gateway_ip': gateway}})) + ]) + subnet = self.cloud.update_subnet(self.subnet_id, gateway_ip=gateway) + self.assertDictEqual(expected_subnet, subnet) + self.assert_calls() + + def 
test_update_subnet_disable_gateway_ip(self):
+        expected_subnet = copy.copy(self.mock_subnet_rep)
+        expected_subnet['gateway_ip'] = None
+        self.register_uris([
+            dict(method='GET',
+                 uri=self.get_mock_url(
+                     'network', 'public', append=['v2.0', 'subnets.json']),
+                 json={'subnets': [self.mock_subnet_rep]}),
+            dict(method='PUT',
+                 uri=self.get_mock_url(
+                     'network', 'public',
+                     append=['v2.0', 'subnets', '%s.json' % self.subnet_id]),
+                 json={'subnet': expected_subnet},
+                 validate=dict(
+                     json={'subnet': {'gateway_ip': None}}))
+        ])
+        subnet = self.cloud.update_subnet(self.subnet_id,
+                                          disable_gateway_ip=True)
+        self.assertDictEqual(expected_subnet, subnet)
+        self.assert_calls()
+
+    def test_update_subnet_conflict_gw_ops(self):
+        self.assertRaises(exc.OpenStackCloudException,
+                          self.cloud.update_subnet,
+                          self.subnet_id, gateway_ip="192.168.199.3",
+                          disable_gateway_ip=True)
diff --git a/openstack/tests/unit/cloud/test_task_manager.py b/openstack/tests/unit/cloud/test_task_manager.py
new file mode 100644
index 000000000..cad3da4a7
--- /dev/null
+++ b/openstack/tests/unit/cloud/test_task_manager.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import concurrent.futures
+import mock
+
+from openstack.cloud import task_manager
+from openstack.tests.unit import base
+
+
+class TestException(Exception):
+    pass
+
+
+class TaskTest(task_manager.Task):
+    def main(self, client):
+        raise TestException("This is a test exception")
+
+
+class TaskTestGenerator(task_manager.Task):
+    def main(self, client):
+        yield 1
+
+
+class TaskTestInt(task_manager.Task):
+    def main(self, client):
+        return int(1)
+
+
+class TaskTestFloat(task_manager.Task):
+    def main(self, client):
+        return float(2.0)
+
+
+class TaskTestStr(task_manager.Task):
+    def main(self, client):
+        return "test"
+
+
+class TaskTestBool(task_manager.Task):
+    def main(self, client):
+        return True
+
+
+class TaskTestSet(task_manager.Task):
+    def main(self, client):
+        return set([1, 2])
+
+
+class TaskTestAsync(task_manager.Task):
+    def __init__(self):
+        super(TaskTestAsync, self).__init__()
+        self.run_async = True
+
+    def main(self, client):
+        pass
+
+
+class TestTaskManager(base.TestCase):
+
+    def setUp(self):
+        super(TestTaskManager, self).setUp()
+        self.manager = task_manager.TaskManager(name='test', client=self)
+
+    def test_wait_re_raise(self):
+        """Test that exceptions thrown in a Task are re-raised correctly
+
+        This test is aimed at six.reraise(), called in Task::wait().
+        Specifically, we test that we get the same behaviour with all the
+        configured interpreters (e.g. py27, py34, pypy, ...)
+ """ + self.assertRaises(TestException, self.manager.submit_task, TaskTest()) + + def test_dont_munchify_int(self): + ret = self.manager.submit_task(TaskTestInt()) + self.assertIsInstance(ret, int) + + def test_dont_munchify_float(self): + ret = self.manager.submit_task(TaskTestFloat()) + self.assertIsInstance(ret, float) + + def test_dont_munchify_str(self): + ret = self.manager.submit_task(TaskTestStr()) + self.assertIsInstance(ret, str) + + def test_dont_munchify_bool(self): + ret = self.manager.submit_task(TaskTestBool()) + self.assertIsInstance(ret, bool) + + def test_dont_munchify_set(self): + ret = self.manager.submit_task(TaskTestSet()) + self.assertIsInstance(ret, set) + + @mock.patch.object(concurrent.futures.ThreadPoolExecutor, 'submit') + def test_async(self, mock_submit): + self.manager.submit_task(TaskTestAsync()) + self.assertTrue(mock_submit.called) diff --git a/openstack/tests/unit/cloud/test_update_server.py b/openstack/tests/unit/cloud/test_update_server.py new file mode 100644 index 000000000..674525490 --- /dev/null +++ b/openstack/tests/unit/cloud/test_update_server.py @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_update_server +---------------------------------- + +Tests for the `update_server` command. +""" + +import uuid + +from openstack.cloud.exc import OpenStackCloudException +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestUpdateServer(base.RequestsMockTestCase): + + def setUp(self): + super(TestUpdateServer, self).setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.updated_server_name = self.getUniqueString('name2') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name) + + def test_update_server_with_update_exception(self): + """ + Test that an exception in the update raises an exception in + update_server. 
+ """ + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id]), + status_code=400, + validate=dict( + json={'server': {'name': self.updated_server_name}})), + ]) + self.assertRaises( + OpenStackCloudException, self.cloud.update_server, + self.server_name, name=self.updated_server_name) + + self.assert_calls() + + def test_update_server_name(self): + """ + Test that update_server updates the name without raising any exception + """ + fake_update_server = fakes.make_fake_server( + self.server_id, self.updated_server_name) + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail']), + json={'servers': [self.fake_server]}), + dict(method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id]), + json={'server': fake_update_server}, + validate=dict( + json={'server': {'name': self.updated_server_name}})), + dict(method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks.json']), + json={'networks': []}), + ]) + self.assertEqual( + self.updated_server_name, + self.cloud.update_server( + self.server_name, name=self.updated_server_name)['name']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_usage.py b/openstack/tests/unit/cloud/test_usage.py new file mode 100644 index 000000000..a4f4d5f49 --- /dev/null +++ b/openstack/tests/unit/cloud/test_usage.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
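+
+# NOTE: get_compute_usage() maps to a single GET on nova's
+# os-simple-tenant-usage resource, with the window boundaries passed as
+# ISO 8601 'start' and 'end' query parameters; the qs_elements below pin
+# that query string down. Roughly (illustrative only):
+#
+#     start = end = datetime.datetime.now()
+#     self.op_cloud.get_compute_usage(project.project_id, start, end)
+#     # -> GET .../os-simple-tenant-usage/<project_id>
+#     #        ?start=<start.isoformat()>&end=<end.isoformat()>
+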
+import datetime +import uuid + +from openstack.tests.unit import base + + +class TestUsage(base.RequestsMockTestCase): + + def test_get_usage(self): + project = self.mock_for_keystone_projects(project_count=1, + list_get=True)[0] + start = end = datetime.datetime.now() + + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'compute', 'public', + append=['os-simple-tenant-usage', project.project_id], + qs_elements=[ + 'start={now}'.format(now=start.isoformat()), + 'end={now}'.format(now=end.isoformat()), + ]), + json={"tenant_usage": { + "server_usages": [ + { + "ended_at": None, + "flavor": "m1.tiny", + "hours": 1.0, + "instance_id": uuid.uuid4().hex, + "local_gb": 1, + "memory_mb": 512, + "name": "instance-2", + "started_at": "2012-10-08T20:10:44.541277", + "state": "active", + "tenant_id": "6f70656e737461636b20342065766572", + "uptime": 3600, + "vcpus": 1 + } + ], + "start": "2012-10-08T20:10:44.587336", + "stop": "2012-10-08T21:10:44.587336", + "tenant_id": "6f70656e737461636b20342065766572", + "total_hours": 1.0, + "total_local_gb_usage": 1.0, + "total_memory_mb_usage": 512.0, + "total_vcpus_usage": 1.0 + }}) + ]) + + self.op_cloud.get_compute_usage(project.project_id, start, end) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_users.py b/openstack/tests/unit/cloud/test_users.py new file mode 100644 index 000000000..a4c8a466f --- /dev/null +++ b/openstack/tests/unit/cloud/test_users.py @@ -0,0 +1,223 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
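+
+# NOTE: User operations are exercised against both identity API
+# versions. Tests default to keystone v3, where _get_keystone_mock_url()
+# appends a 'v3' base URL segment; calling self.use_keystone_v2() first
+# switches the mocked catalog so the same helper builds v2-style URLs,
+# e.g. (illustrative only):
+#
+#     self.use_keystone_v2()
+#     uri = self._get_keystone_mock_url(resource='users', v3=False)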
+ +import uuid + +import testtools + +import openstack.cloud +from openstack.tests.unit import base + + +class TestUsers(base.RequestsMockTestCase): + + def _get_keystone_mock_url(self, resource, append=None, v3=True): + base_url_append = None + if v3: + base_url_append = 'v3' + return self.get_mock_url( + service_type='identity', interface='admin', resource=resource, + append=append, base_url_append=base_url_append) + + def _get_user_list(self, user_data): + uri = self._get_keystone_mock_url(resource='users') + return { + 'users': [ + user_data.json_response['user'], + ], + 'links': { + 'self': uri, + 'previous': None, + 'next': None, + } + } + + def test_create_user_v2(self): + self.use_keystone_v2() + + user_data = self._get_user_data() + + self.register_uris([ + dict(method='POST', + uri=self._get_keystone_mock_url(resource='users', v3=False), + status_code=200, + json=user_data.json_response, + validate=dict(json=user_data.json_request)), + ]) + + user = self.op_cloud.create_user( + name=user_data.name, email=user_data.email, + password=user_data.password) + + self.assertEqual(user_data.name, user.name) + self.assertEqual(user_data.email, user.email) + self.assertEqual(user_data.user_id, user.id) + self.assert_calls() + + def test_create_user_v3(self): + user_data = self._get_user_data( + domain_id=uuid.uuid4().hex, + description=self.getUniqueString('description')) + + self.register_uris([ + dict(method='POST', + uri=self._get_keystone_mock_url(resource='users'), + status_code=200, json=user_data.json_response, + validate=dict(json=user_data.json_request)), + ]) + + user = self.op_cloud.create_user( + name=user_data.name, email=user_data.email, + password=user_data.password, + description=user_data.description, + domain_id=user_data.domain_id) + + self.assertEqual(user_data.name, user.name) + self.assertEqual(user_data.email, user.email) + self.assertEqual(user_data.description, user.description) + self.assertEqual(user_data.user_id, user.id) + self.assert_calls() + + def test_update_user_password_v2(self): + self.use_keystone_v2() + + user_data = self._get_user_data(email='test@example.com') + mock_user_resource_uri = self._get_keystone_mock_url( + resource='users', append=[user_data.user_id], v3=False) + mock_users_uri = self._get_keystone_mock_url( + resource='users', v3=False) + + self.register_uris([ + # GET list to find user id + # PUT user with password update + # PUT empty update (password change is different than update) + # but is always chained together [keystoneclient oddity] + dict(method='GET', uri=mock_users_uri, status_code=200, + json=self._get_user_list(user_data)), + dict(method='PUT', + uri=self._get_keystone_mock_url( + resource='users', v3=False, + append=[user_data.user_id, 'OS-KSADM', 'password']), + status_code=200, json=user_data.json_response, + validate=dict( + json={'user': {'password': user_data.password}})), + dict(method='PUT', uri=mock_user_resource_uri, status_code=200, + json=user_data.json_response, + validate=dict(json={'user': {}}))]) + + user = self.op_cloud.update_user( + user_data.user_id, password=user_data.password) + self.assertEqual(user_data.name, user.name) + self.assertEqual(user_data.email, user.email) + self.assert_calls() + + def test_create_user_v3_no_domain(self): + user_data = self._get_user_data(domain_id=uuid.uuid4().hex, + email='test@example.com') + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "User or project creation requires an explicit" + " domain_id argument." 
+ ): + self.op_cloud.create_user( + name=user_data.name, email=user_data.email, + password=user_data.password) + + def test_delete_user(self): + user_data = self._get_user_data(domain_id=uuid.uuid4().hex) + user_resource_uri = self._get_keystone_mock_url( + resource='users', append=[user_data.user_id]) + + self.register_uris([ + dict(method='GET', + uri=self._get_keystone_mock_url(resource='users'), + status_code=200, + json=self._get_user_list(user_data)), + dict(method='GET', uri=user_resource_uri, status_code=200, + json=user_data.json_response), + dict(method='DELETE', uri=user_resource_uri, status_code=204)]) + + self.op_cloud.delete_user(user_data.name) + self.assert_calls() + + def test_delete_user_not_found(self): + self.register_uris([ + dict(method='GET', + uri=self._get_keystone_mock_url(resource='users'), + status_code=200, json={'users': []})]) + self.assertFalse(self.op_cloud.delete_user(self.getUniqueString())) + + def test_add_user_to_group(self): + user_data = self._get_user_data() + group_data = self._get_group_data() + + self.register_uris([ + dict(method='GET', + uri=self._get_keystone_mock_url(resource='users'), + status_code=200, + json=self._get_user_list(user_data)), + dict(method='GET', + uri=self._get_keystone_mock_url(resource='groups'), + status_code=200, + json={'groups': [group_data.json_response['group']]}), + dict(method='PUT', + uri=self._get_keystone_mock_url( + resource='groups', + append=[group_data.group_id, 'users', user_data.user_id]), + status_code=200)]) + self.op_cloud.add_user_to_group(user_data.user_id, group_data.group_id) + self.assert_calls() + + def test_is_user_in_group(self): + user_data = self._get_user_data() + group_data = self._get_group_data() + + self.register_uris([ + dict(method='GET', + uri=self._get_keystone_mock_url(resource='users'), + status_code=200, + json=self._get_user_list(user_data)), + dict(method='GET', + uri=self._get_keystone_mock_url(resource='groups'), + status_code=200, + json={'groups': [group_data.json_response['group']]}), + dict(method='HEAD', + uri=self._get_keystone_mock_url( + resource='groups', + append=[group_data.group_id, 'users', user_data.user_id]), + status_code=204)]) + + self.assertTrue(self.op_cloud.is_user_in_group( + user_data.user_id, group_data.group_id)) + self.assert_calls() + + def test_remove_user_from_group(self): + user_data = self._get_user_data() + group_data = self._get_group_data() + + self.register_uris([ + dict(method='GET', + uri=self._get_keystone_mock_url(resource='users'), + json=self._get_user_list(user_data)), + dict(method='GET', + uri=self._get_keystone_mock_url(resource='groups'), + status_code=200, + json={'groups': [group_data.json_response['group']]}), + dict(method='DELETE', + uri=self._get_keystone_mock_url( + resource='groups', + append=[group_data.group_id, 'users', user_data.user_id]), + status_code=204)]) + + self.op_cloud.remove_user_from_group(user_data.user_id, + group_data.group_id) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_volume.py b/openstack/tests/unit/cloud/test_volume.py new file mode 100644 index 000000000..d08069b45 --- /dev/null +++ b/openstack/tests/unit/cloud/test_volume.py @@ -0,0 +1,450 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import testtools + +import openstack.cloud +from openstack.cloud import meta +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestVolume(base.RequestsMockTestCase): + + def test_attach_volume(self): + server = dict(id='server001') + vol = {'id': 'volume001', 'status': 'available', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + rattach = {'server_id': server['id'], 'device': 'device001', + 'volumeId': volume['id'], 'id': 'attachmentId'} + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments']), + json={'volumeAttachment': rattach}, + validate=dict(json={ + 'volumeAttachment': { + 'volumeId': vol['id']}}) + )]) + ret = self.cloud.attach_volume(server, volume, wait=False) + self.assertEqual(rattach, ret) + self.assert_calls() + + def test_attach_volume_exception(self): + server = dict(id='server001') + vol = {'id': 'volume001', 'status': 'available', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments']), + status_code=404, + validate=dict(json={ + 'volumeAttachment': { + 'volumeId': vol['id']}}) + )]) + with testtools.ExpectedException( + openstack.OpenStackCloudURINotFound, + "Error attaching volume %s to server %s" % ( + volume['id'], server['id']) + ): + self.cloud.attach_volume(server, volume, wait=False) + self.assert_calls() + + def test_attach_volume_wait(self): + server = dict(id='server001') + vol = {'id': 'volume001', 'status': 'available', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['attachments'] = [{'server_id': server['id'], + 'device': 'device001'}] + vol['status'] = 'attached' + attached_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + rattach = {'server_id': server['id'], 'device': 'device001', + 'volumeId': volume['id'], 'id': 'attachmentId'} + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments']), + json={'volumeAttachment': rattach}, + validate=dict(json={ + 'volumeAttachment': { + 'volumeId': vol['id']}})), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [volume]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [attached_volume]})]) + # defaults to wait=True + ret = self.cloud.attach_volume(server, volume) + self.assertEqual(rattach, ret) + self.assert_calls() + + def test_attach_volume_wait_error(self): + server = dict(id='server001') + vol = {'id': 'volume001', 'status': 'available', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['status'] = 'error' + errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + rattach = {'server_id': server['id'], 'device': 
'device001', + 'volumeId': volume['id'], 'id': 'attachmentId'} + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments']), + json={'volumeAttachment': rattach}, + validate=dict(json={ + 'volumeAttachment': { + 'volumeId': vol['id']}})), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [errored_volume]})]) + + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "Error in attaching volume %s" % errored_volume['id'] + ): + self.cloud.attach_volume(server, volume) + self.assert_calls() + + def test_attach_volume_not_available(self): + server = dict(id='server001') + volume = dict(id='volume001', status='error', attachments=[]) + + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "Volume %s is not available. Status is '%s'" % ( + volume['id'], volume['status']) + ): + self.cloud.attach_volume(server, volume) + self.assertEqual(0, len(self.adapter.request_history)) + + def test_attach_volume_already_attached(self): + device_id = 'device001' + server = dict(id='server001') + volume = dict(id='volume001', + attachments=[ + {'server_id': 'server001', 'device': device_id} + ]) + + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "Volume %s already attached to server %s on device %s" % ( + volume['id'], server['id'], device_id) + ): + self.cloud.attach_volume(server, volume) + self.assertEqual(0, len(self.adapter.request_history)) + + def test_detach_volume(self): + server = dict(id='server001') + volume = dict(id='volume001', + attachments=[ + {'server_id': 'server001', 'device': 'device001'} + ]) + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments', volume['id']]))]) + self.cloud.detach_volume(server, volume, wait=False) + self.assert_calls() + + def test_detach_volume_exception(self): + server = dict(id='server001') + volume = dict(id='volume001', + attachments=[ + {'server_id': 'server001', 'device': 'device001'} + ]) + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments', volume['id']]), + status_code=404)]) + with testtools.ExpectedException( + openstack.OpenStackCloudURINotFound, + "Error detaching volume %s from server %s" % ( + volume['id'], server['id']) + ): + self.cloud.detach_volume(server, volume, wait=False) + self.assert_calls() + + def test_detach_volume_wait(self): + server = dict(id='server001') + attachments = [{'server_id': 'server001', 'device': 'device001'}] + vol = {'id': 'volume001', 'status': 'attached', 'name': '', + 'attachments': attachments} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['status'] = 'available' + vol['attachments'] = [] + avail_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments', volume.id])), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [avail_volume]})]) + self.cloud.detach_volume(server, volume) + self.assert_calls() + + def test_detach_volume_wait_error(self): + server = dict(id='server001') + attachments = [{'server_id': 'server001', 'device': 'device001'}] + vol = {'id': 
'volume001', 'status': 'attached', 'name': '', + 'attachments': attachments} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['status'] = 'error' + vol['attachments'] = [] + errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris([ + dict(method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', + append=['servers', server['id'], + 'os-volume_attachments', volume.id])), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [errored_volume]})]) + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "Error in detaching volume %s" % errored_volume['id'] + ): + self.cloud.detach_volume(server, volume) + self.assert_calls() + + def test_delete_volume_deletes(self): + vol = {'id': 'volume001', 'status': 'attached', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [volume]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', volume.id])), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': []})]) + self.assertTrue(self.cloud.delete_volume(volume['id'])) + self.assert_calls() + + def test_delete_volume_gone_away(self): + vol = {'id': 'volume001', 'status': 'attached', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [volume]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', volume.id]), + status_code=404)]) + self.assertFalse(self.cloud.delete_volume(volume['id'])) + self.assert_calls() + + def test_delete_volume_force(self): + vol = {'id': 'volume001', 'status': 'attached', + 'name': '', 'attachments': []} + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': [volume]}), + dict(method='POST', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', volume.id, 'action']), + json={'os-force_delete': None}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['volumes', 'detail']), + json={'volumes': []})]) + self.assertTrue(self.cloud.delete_volume(volume['id'], force=True)) + self.assert_calls() + + def test_list_volumes_with_pagination(self): + vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) + vol2 = meta.obj_to_munch(fakes.FakeVolume('02', 'available', 'vol2')) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail']), + json={ + 'volumes': [vol1], + 'volumes_links': [ + {'href': self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + 'rel': 'next'}]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + json={ + 'volumes': [vol2], + 'volumes_links': [ + {'href': self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=02']), + 'rel': 'next'}]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + 
append=['volumes', 'detail'], + qs_elements=['marker=02']), + json={'volumes': []})]) + self.assertEqual( + [self.cloud._normalize_volume(vol1), + self.cloud._normalize_volume(vol2)], + self.cloud.list_volumes()) + self.assert_calls() + + def test_list_volumes_with_pagination_next_link_fails_once(self): + vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) + vol2 = meta.obj_to_munch(fakes.FakeVolume('02', 'available', 'vol2')) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail']), + json={ + 'volumes': [vol1], + 'volumes_links': [ + {'href': self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + 'rel': 'next'}]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + status_code=404), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail']), + json={ + 'volumes': [vol1], + 'volumes_links': [ + {'href': self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + 'rel': 'next'}]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + json={ + 'volumes': [vol2], + 'volumes_links': [ + {'href': self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=02']), + 'rel': 'next'}]}), + + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=02']), + json={'volumes': []})]) + self.assertEqual( + [self.cloud._normalize_volume(vol1), + self.cloud._normalize_volume(vol2)], + self.cloud.list_volumes()) + self.assert_calls() + + def test_list_volumes_with_pagination_next_link_fails_all_attempts(self): + vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) + uris = [] + attempts = 5 + for i in range(attempts): + uris.extend([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail']), + json={ + 'volumes': [vol1], + 'volumes_links': [ + {'href': self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + 'rel': 'next'}]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', 'detail'], + qs_elements=['marker=01']), + status_code=404)]) + self.register_uris(uris) + # Check that found volumes are returned even if pagination didn't + # complete because call to get next link 404'ed for all the allowed + # attempts + self.assertEqual( + [self.cloud._normalize_volume(vol1)], + self.cloud.list_volumes()) + self.assert_calls() + + def test_get_volume_by_id(self): + vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['volumes', '01']), + json={'volume': vol1} + ) + ]) + self.assertEqual( + self.cloud._normalize_volume(vol1), + self.cloud.get_volume_by_id('01')) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_volume_access.py b/openstack/tests/unit/cloud/test_volume_access.py new file mode 100644 index 000000000..8124f3e59 --- /dev/null +++ b/openstack/tests/unit/cloud/test_volume_access.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in 
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import testtools + +import openstack.cloud +from openstack.tests.unit import base + + +class TestVolumeAccess(base.RequestsMockTestCase): + def test_list_volume_types(self): + volume_type = dict( + id='voltype01', description='volume type description', + name='name', is_public=False) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]})]) + self.assertTrue(self.cloud.list_volume_types()) + self.assert_calls() + + def test_get_volume_type(self): + volume_type = dict( + id='voltype01', description='volume type description', name='name', + is_public=False) + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]})]) + volume_type_got = self.cloud.get_volume_type(volume_type['name']) + self.assertEqual(volume_type_got.id, volume_type['id']) + + def test_get_volume_type_access(self): + volume_type = dict( + id='voltype01', description='volume type description', name='name', + is_public=False) + volume_type_access = [ + dict(volume_type_id='voltype01', name='name', project_id='prj01'), + dict(volume_type_id='voltype01', name='name', project_id='prj02') + ] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types', volume_type['id'], + 'os-volume-type-access']), + json={'volume_type_access': volume_type_access})]) + self.assertEqual( + len(self.op_cloud.get_volume_type_access(volume_type['name'])), 2) + self.assert_calls() + + def test_remove_volume_type_access(self): + volume_type = dict( + id='voltype01', description='volume type description', name='name', + is_public=False) + project_001 = dict(volume_type_id='voltype01', name='name', + project_id='prj01') + project_002 = dict(volume_type_id='voltype01', name='name', + project_id='prj02') + volume_type_access = [project_001, project_002] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types', volume_type['id'], + 'os-volume-type-access']), + json={'volume_type_access': volume_type_access}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], qs_elements=['is_public=None']), + json={'volume_types': [volume_type]}), + dict(method='POST', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types', volume_type['id'], 'action']), + json={'removeProjectAccess': { + 'project': project_001['project_id']}}, + validate=dict( + json={'removeProjectAccess': { + 'project': project_001['project_id']}})), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + 
append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types', volume_type['id'], + 'os-volume-type-access']), + json={'volume_type_access': [project_001]})]) + self.assertEqual( + len(self.op_cloud.get_volume_type_access( + volume_type['name'])), 2) + self.op_cloud.remove_volume_type_access( + volume_type['name'], project_001['project_id']) + self.assertEqual( + len(self.op_cloud.get_volume_type_access(volume_type['name'])), 1) + self.assert_calls() + + def test_add_volume_type_access(self): + volume_type = dict( + id='voltype01', description='volume type description', name='name', + is_public=False) + project_001 = dict(volume_type_id='voltype01', name='name', + project_id='prj01') + project_002 = dict(volume_type_id='voltype01', name='name', + project_id='prj02') + volume_type_access = [project_001, project_002] + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]}), + dict(method='POST', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types', volume_type['id'], 'action']), + json={'addProjectAccess': { + 'project': project_002['project_id']}}, + validate=dict( + json={'addProjectAccess': { + 'project': project_002['project_id']}})), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types', volume_type['id'], + 'os-volume-type-access']), + json={'volume_type_access': volume_type_access})]) + self.op_cloud.add_volume_type_access( + volume_type['name'], project_002['project_id']) + self.assertEqual( + len(self.op_cloud.get_volume_type_access(volume_type['name'])), 2) + self.assert_calls() + + def test_add_volume_type_access_missing(self): + volume_type = dict( + id='voltype01', description='volume type description', name='name', + is_public=False) + project_001 = dict(volume_type_id='voltype01', name='name', + project_id='prj01') + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['types'], + qs_elements=['is_public=None']), + json={'volume_types': [volume_type]})]) + with testtools.ExpectedException( + openstack.OpenStackCloudException, + "VolumeType not found: MISSING"): + self.op_cloud.add_volume_type_access( + "MISSING", project_001['project_id']) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_volume_backups.py b/openstack/tests/unit/cloud/test_volume_backups.py new file mode 100644 index 000000000..6b6392f15 --- /dev/null +++ b/openstack/tests/unit/cloud/test_volume_backups.py @@ -0,0 +1,123 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.cloud import meta +from openstack.tests.unit import base + + +class TestVolumeBackups(base.RequestsMockTestCase): + def test_search_volume_backups(self): + name = 'Volume1' + vol1 = {'name': name, 'availability_zone': 'az1'} + vol2 = {'name': name, 'availability_zone': 'az1'} + vol3 = {'name': 'Volume2', 'availability_zone': 'az2'} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['backups', 'detail']), + json={"backups": [vol1, vol2, vol3]})]) + result = self.cloud.search_volume_backups( + name, {'availability_zone': 'az1'}) + self.assertEqual(len(result), 2) + self.assertEqual( + meta.obj_list_to_munch([vol1, vol2]), + result) + self.assert_calls() + + def test_get_volume_backup(self): + name = 'Volume1' + vol1 = {'name': name, 'availability_zone': 'az1'} + vol2 = {'name': name, 'availability_zone': 'az2'} + vol3 = {'name': 'Volume2', 'availability_zone': 'az1'} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['backups', 'detail']), + json={"backups": [vol1, vol2, vol3]})]) + result = self.cloud.get_volume_backup( + name, {'availability_zone': 'az1'}) + result = meta.obj_to_munch(result) + self.assertEqual( + meta.obj_to_munch(vol1), + result) + self.assert_calls() + + def test_list_volume_backups(self): + backup = {'id': '6ff16bdf-44d5-4bf9-b0f3-687549c76414', + 'status': 'available'} + search_opts = {'status': 'available'} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', append=['backups', 'detail'], + qs_elements=['='.join(i) for i in search_opts.items()]), + json={"backups": [backup]})]) + result = self.cloud.list_volume_backups(True, search_opts) + self.assertEqual(len(result), 1) + self.assertEqual( + meta.obj_list_to_munch([backup]), + result) + self.assert_calls() + + def test_delete_volume_backup_wait(self): + backup_id = '6ff16bdf-44d5-4bf9-b0f3-687549c76414' + backup = {'id': backup_id} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', 'detail']), + json={"backups": [backup]}), + dict(method='DELETE', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', backup_id])), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', 'detail']), + json={"backups": [backup]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', 'detail']), + json={"backups": []})]) + self.cloud.delete_volume_backup(backup_id, False, True, 1) + self.assert_calls() + + def test_delete_volume_backup_force(self): + backup_id = '6ff16bdf-44d5-4bf9-b0f3-687549c76414' + backup = {'id': backup_id} + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', 'detail']), + json={"backups": [backup]}), + dict(method='POST', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', backup_id, 'action']), + json={'os-force_delete': {}}, + validate=dict(json={u'os-force_delete': None})), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', 'detail']), + json={"backups": [backup]}), + dict(method='GET', + uri=self.get_mock_url( + 'volumev2', 'public', + append=['backups', 'detail']), + json={"backups": []}) + ]) + self.cloud.delete_volume_backup(backup_id, True, True, 1) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_zone.py b/openstack/tests/unit/cloud/test_zone.py new file mode 100644 
index 000000000..958d3faaa --- /dev/null +++ b/openstack/tests/unit/cloud/test_zone.py @@ -0,0 +1,156 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import testtools + +import openstack.cloud +from openstack.tests.unit import base + + +zone_dict = { + 'name': 'example.net.', + 'type': 'PRIMARY', + 'email': 'test@example.net', + 'description': 'Example zone', + 'ttl': 3600, +} + +new_zone_dict = copy.copy(zone_dict) +new_zone_dict['id'] = '1' + + +class TestZone(base.RequestsMockTestCase): + + def setUp(self): + super(TestZone, self).setUp() + self.use_designate() + + def test_create_zone(self): + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json=new_zone_dict, + validate=dict( + json=zone_dict)) + ]) + z = self.cloud.create_zone( + name=zone_dict['name'], + zone_type=zone_dict['type'], + email=zone_dict['email'], + description=zone_dict['description'], + ttl=zone_dict['ttl'], + masters=None) + self.assertEqual(new_zone_dict, z) + self.assert_calls() + + def test_create_zone_exception(self): + self.register_uris([ + dict(method='POST', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + status_code=500) + ]) + with testtools.ExpectedException( + openstack.cloud.exc.OpenStackCloudHTTPError, + "Unable to create zone example.net." 
+ ): + self.cloud.create_zone('example.net.') + self.assert_calls() + + def test_update_zone(self): + new_ttl = 7200 + updated_zone = copy.copy(new_zone_dict) + updated_zone['ttl'] = new_ttl + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [new_zone_dict], + "links": {}, + "metadata": { + 'total_count': 1}}), + dict(method='PATCH', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones', '1']), + json=updated_zone, + validate=dict( + json={"ttl": new_ttl})) + ]) + z = self.cloud.update_zone('1', ttl=new_ttl) + self.assertEqual(updated_zone, z) + self.assert_calls() + + def test_delete_zone(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [new_zone_dict], + "links": {}, + "metadata": { + 'total_count': 1}}), + dict(method='DELETE', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones', '1']), + json=new_zone_dict) + ]) + self.assertTrue(self.cloud.delete_zone('1')) + self.assert_calls() + + def test_get_zone_by_id(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [new_zone_dict], + "links": {}, + "metadata": { + 'total_count': 1}}) + ]) + zone = self.cloud.get_zone('1') + self.assertEqual(zone['id'], '1') + self.assert_calls() + + def test_get_zone_by_name(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [new_zone_dict], + "links": {}, + "metadata": { + 'total_count': 1}}) + ]) + zone = self.cloud.get_zone('example.net.') + self.assertEqual(zone['name'], 'example.net.') + self.assert_calls() + + def test_get_zone_not_found_returns_false(self): + self.register_uris([ + dict(method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones']), + json={ + "zones": [], + "links": {}, + "metadata": { + 'total_count': 1}}) + ]) + zone = self.cloud.get_zone('nonexistingzone.net.') + self.assertFalse(zone) + self.assert_calls() diff --git a/openstack/tests/unit/config/__init__.py b/openstack/tests/unit/config/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/openstack/tests/unit/config/base.py b/openstack/tests/unit/config/base.py new file mode 100644 index 000000000..60ca63d12 --- /dev/null +++ b/openstack/tests/unit/config/base.py @@ -0,0 +1,244 @@ +# -*- coding: utf-8 -*- + +# Copyright 2010-2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
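+
+# This module holds the shared fixtures for the config unit tests: canned
+# vendor, user, secure and minimal configuration dicts, plus a TestCase
+# that serializes them to temporary YAML files and scrubs all OS_*
+# variables from the environment so the local shell cannot skew results.
+# Subclasses point the config loader at the generated files, along these
+# lines (illustrative sketch only):
+#
+#     c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+#                                vendor_files=[self.vendor_yaml])
+#     cc = c.get_one_cloud('_test-cloud_')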
+ +# TODO(shade) Shift to using new combined base unit test class +import copy +import os +import tempfile + +from openstack.config import cloud_config + +import extras +import fixtures +from oslotest import base +import yaml + + +VENDOR_CONF = { + 'public-clouds': { + '_test_cloud_in_our_cloud': { + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testotheruser', + 'project_name': 'testproject', + }, + }, + } +} +USER_CONF = { + 'cache': { + 'max_age': '1', + 'expiration': { + 'server': 5, + 'image': '7', + }, + }, + 'client': { + 'force_ipv4': True, + }, + 'clouds': { + '_test-cloud_': { + 'profile': '_test_cloud_in_our_cloud', + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + }, + 'region_name': 'test-region', + }, + '_test_cloud_no_vendor': { + 'profile': '_test_non_existant_cloud', + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'project_name': 'testproject', + }, + 'region-name': 'test-region', + }, + '_test-cloud-int-project_': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'domain_id': 'awesome-domain', + 'project_id': 12345, + 'auth_url': 'http://example.com/v2', + }, + 'region_name': 'test-region', + }, + '_test-cloud-domain-id_': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project_id': 12345, + 'auth_url': 'http://example.com/v2', + 'domain_id': '6789', + 'project_domain_id': '123456789', + }, + 'region_name': 'test-region', + }, + '_test-cloud-networks_': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project_id': 12345, + 'auth_url': 'http://example.com/v2', + 'domain_id': '6789', + 'project_domain_id': '123456789', + }, + 'networks': [{ + 'name': 'a-public', + 'routes_externally': True, + }, { + 'name': 'another-public', + 'routes_externally': True, + 'default_interface': True, + }, { + 'name': 'a-private', + 'routes_externally': False, + }, { + 'name': 'another-private', + 'routes_externally': False, + 'nat_destination': True, + }, { + 'name': 'split-default', + 'routes_externally': True, + 'routes_ipv4_externally': False, + }, { + 'name': 'split-no-default', + 'routes_ipv6_externally': False, + 'routes_ipv4_externally': True, + }], + 'region_name': 'test-region', + }, + '_test_cloud_regions': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project-id': 'testproject', + 'auth_url': 'http://example.com/v2', + }, + 'regions': [ + { + 'name': 'region1', + 'values': { + 'external_network': 'region1-network', + } + }, + { + 'name': 'region2', + 'values': { + 'external_network': 'my-network', + } + } + ], + }, + '_test_cloud_hyphenated': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project-id': '12345', + 'auth_url': 'http://example.com/v2', + }, + 'region_name': 'test-region', + }, + '_test-cloud_no_region': { + 'profile': '_test_cloud_in_our_cloud', + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + }, + }, + '_test-cloud-domain-scoped_': { + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + 'domain-id': '12345', + }, + }, + }, + 'ansible': { + 'expand-hostvars': False, + 'use_hostnames': True, + }, +} +SECURE_CONF = { + 'clouds': { + '_test_cloud_no_vendor': { + 'auth': { + 'password': 'testpass', + }, + } + } +} +NO_CONF = { + 'cache': {'max_age': 1}, +} + + +def _write_yaml(obj): + # Assume NestedTempfile so we don't have to cleanup + with 
tempfile.NamedTemporaryFile(delete=False) as obj_yaml:
+        obj_yaml.write(yaml.safe_dump(obj).encode('utf-8'))
+    return obj_yaml.name
+
+
+class TestCase(base.BaseTestCase):
+
+    """Test case base class for all unit tests."""
+
+    def setUp(self):
+        super(TestCase, self).setUp()
+
+        self.useFixture(fixtures.NestedTempfile())
+        conf = copy.deepcopy(USER_CONF)
+        tdir = self.useFixture(fixtures.TempDir())
+        conf['cache']['path'] = tdir.path
+        self.cloud_yaml = _write_yaml(conf)
+        self.secure_yaml = _write_yaml(SECURE_CONF)
+        self.vendor_yaml = _write_yaml(VENDOR_CONF)
+        self.no_yaml = _write_yaml(NO_CONF)
+        self.useFixture(fixtures.MonkeyPatch(
+            'openstack.__version__', '1.2.3'))
+
+        # Isolate the test runs from the environment
+        # Do this as two loops because a dict can't be modified while
+        # it is being iterated over
+        keys_to_isolate = []
+        for env in os.environ.keys():
+            if env.startswith('OS_'):
+                keys_to_isolate.append(env)
+        for env in keys_to_isolate:
+            self.useFixture(fixtures.EnvironmentVariable(env))
+
+    def _assert_cloud_details(self, cc):
+        self.assertIsInstance(cc, cloud_config.CloudConfig)
+        self.assertTrue(extras.safe_hasattr(cc, 'auth'))
+        self.assertIsInstance(cc.auth, dict)
+        self.assertIsNone(cc.cloud)
+        self.assertIn('username', cc.auth)
+        self.assertEqual('testuser', cc.auth['username'])
+        self.assertEqual('testpass', cc.auth['password'])
+        self.assertFalse(cc.config['image_api_use_tasks'])
+        self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth)
+        if 'project_name' in cc.auth:
+            self.assertEqual('testproject', cc.auth['project_name'])
+        elif 'project_id' in cc.auth:
+            self.assertEqual('testproject', cc.auth['project_id'])
+        self.assertEqual(cc.get_cache_expiration_time(), 1)
+        self.assertEqual(cc.get_cache_resource_expiration('server'), 5.0)
+        self.assertEqual(cc.get_cache_resource_expiration('image'), 7.0)
diff --git a/openstack/tests/unit/config/test_cloud_config.py b/openstack/tests/unit/config/test_cloud_config.py
new file mode 100644
index 000000000..2b477ef80
--- /dev/null
+++ b/openstack/tests/unit/config/test_cloud_config.py
@@ -0,0 +1,578 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
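+
+# CloudConfig exposes its config dict through attribute access: a plain
+# key is returned directly, an 'os_'-prefixed name falls back to the bare
+# key (so with {'c': 3, 'os_c': 4}, cc.os_c returns 3), and an unknown
+# attribute returns None rather than raising. fake_config_dict below is
+# shaped to exercise each of those paths, roughly (illustrative only):
+#
+#     cc = cloud_config.CloudConfig("test1", "region-al", {'a': 1})
+#     assert cc.a == 1
+#     assert cc.x is None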
+ +import copy + +from keystoneauth1 import exceptions as ksa_exceptions +from keystoneauth1 import session as ksa_session +import mock + +from openstack.config import cloud_config +from openstack.config import defaults +from openstack.config import exceptions +from openstack.tests.unit.config import base + + +fake_config_dict = {'a': 1, 'os_b': 2, 'c': 3, 'os_c': 4} +fake_services_dict = { + 'compute_api_version': '2', + 'compute_endpoint_override': 'http://compute.example.com', + 'compute_region_name': 'region-bl', + 'telemetry_endpoint': 'http://telemetry.example.com', + 'interface': 'public', + 'image_service_type': 'mage', + 'identity_interface': 'admin', + 'identity_service_name': 'locks', + 'volume_api_version': '1', + 'auth': {'password': 'hunter2', 'username': 'AzureDiamond'}, +} + + +class TestCloudConfig(base.TestCase): + + def test_arbitrary_attributes(self): + cc = cloud_config.CloudConfig("test1", "region-al", fake_config_dict) + self.assertEqual("test1", cc.name) + self.assertEqual("region-al", cc.region) + + # Look up straight value + self.assertEqual(1, cc.a) + + # Look up prefixed attribute, fail - returns None + self.assertIsNone(cc.os_b) + + # Look up straight value, then prefixed value + self.assertEqual(3, cc.c) + self.assertEqual(3, cc.os_c) + + # Lookup mystery attribute + self.assertIsNone(cc.x) + + # Test default ipv6 + self.assertFalse(cc.force_ipv4) + + def test_iteration(self): + cc = cloud_config.CloudConfig("test1", "region-al", fake_config_dict) + self.assertTrue('a' in cc) + self.assertFalse('x' in cc) + + def test_equality(self): + cc1 = cloud_config.CloudConfig("test1", "region-al", fake_config_dict) + cc2 = cloud_config.CloudConfig("test1", "region-al", fake_config_dict) + self.assertEqual(cc1, cc2) + + def test_inequality(self): + cc1 = cloud_config.CloudConfig("test1", "region-al", fake_config_dict) + + cc2 = cloud_config.CloudConfig("test2", "region-al", fake_config_dict) + self.assertNotEqual(cc1, cc2) + + cc2 = cloud_config.CloudConfig("test1", "region-xx", fake_config_dict) + self.assertNotEqual(cc1, cc2) + + cc2 = cloud_config.CloudConfig("test1", "region-al", {}) + self.assertNotEqual(cc1, cc2) + + def test_verify(self): + config_dict = copy.deepcopy(fake_config_dict) + config_dict['cacert'] = None + + config_dict['verify'] = False + cc = cloud_config.CloudConfig("test1", "region-xx", config_dict) + (verify, cert) = cc.get_requests_verify_args() + self.assertFalse(verify) + + config_dict['verify'] = True + cc = cloud_config.CloudConfig("test1", "region-xx", config_dict) + (verify, cert) = cc.get_requests_verify_args() + self.assertTrue(verify) + + def test_verify_cacert(self): + config_dict = copy.deepcopy(fake_config_dict) + config_dict['cacert'] = "certfile" + + config_dict['verify'] = False + cc = cloud_config.CloudConfig("test1", "region-xx", config_dict) + (verify, cert) = cc.get_requests_verify_args() + self.assertFalse(verify) + + config_dict['verify'] = True + cc = cloud_config.CloudConfig("test1", "region-xx", config_dict) + (verify, cert) = cc.get_requests_verify_args() + self.assertEqual("certfile", verify) + + def test_cert_with_key(self): + config_dict = copy.deepcopy(fake_config_dict) + config_dict['cacert'] = None + config_dict['verify'] = False + + config_dict['cert'] = 'cert' + config_dict['key'] = 'key' + + cc = cloud_config.CloudConfig("test1", "region-xx", config_dict) + (verify, cert) = cc.get_requests_verify_args() + self.assertEqual(("cert", "key"), cert) + + def test_ipv6(self): + cc = cloud_config.CloudConfig( + "test1", 
"region-al", fake_config_dict, force_ipv4=True) + self.assertTrue(cc.force_ipv4) + + def test_getters(self): + cc = cloud_config.CloudConfig("test1", "region-al", fake_services_dict) + + self.assertEqual(['compute', 'identity', 'image', 'volume'], + sorted(cc.get_services())) + self.assertEqual({'password': 'hunter2', 'username': 'AzureDiamond'}, + cc.get_auth_args()) + self.assertEqual('public', cc.get_interface()) + self.assertEqual('public', cc.get_interface('compute')) + self.assertEqual('admin', cc.get_interface('identity')) + self.assertEqual('region-al', cc.get_region_name()) + self.assertEqual('region-al', cc.get_region_name('image')) + self.assertEqual('region-bl', cc.get_region_name('compute')) + self.assertIsNone(cc.get_api_version('image')) + self.assertEqual('2', cc.get_api_version('compute')) + self.assertEqual('mage', cc.get_service_type('image')) + self.assertEqual('compute', cc.get_service_type('compute')) + self.assertEqual('1', cc.get_api_version('volume')) + self.assertEqual('volume', cc.get_service_type('volume')) + self.assertEqual('http://compute.example.com', + cc.get_endpoint('compute')) + self.assertIsNone(cc.get_endpoint('image')) + self.assertIsNone(cc.get_service_name('compute')) + self.assertEqual('locks', cc.get_service_name('identity')) + + def test_volume_override(self): + cc = cloud_config.CloudConfig("test1", "region-al", fake_services_dict) + cc.config['volume_api_version'] = '2' + self.assertEqual('volumev2', cc.get_service_type('volume')) + + def test_volume_override_v3(self): + cc = cloud_config.CloudConfig("test1", "region-al", fake_services_dict) + cc.config['volume_api_version'] = '3' + self.assertEqual('volumev3', cc.get_service_type('volume')) + + def test_workflow_override_v2(self): + cc = cloud_config.CloudConfig("test1", "region-al", fake_services_dict) + cc.config['workflow_api_version'] = '2' + self.assertEqual('workflowv2', cc.get_service_type('workflow')) + + def test_get_session_no_auth(self): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig("test1", "region-al", config_dict) + self.assertRaises( + exceptions.OpenStackConfigException, + cc.get_session) + + @mock.patch.object(ksa_session, 'Session') + def test_get_session(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + fake_session = mock.Mock() + fake_session.additional_user_agent = [] + mock_session.return_value = fake_session + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_session() + mock_session.assert_called_with( + auth=mock.ANY, + verify=True, cert=None, timeout=None) + self.assertEqual( + fake_session.additional_user_agent, + [('openstacksdk', '1.2.3')]) + + @mock.patch.object(ksa_session, 'Session') + def test_get_session_with_app_name(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + fake_session = mock.Mock() + fake_session.additional_user_agent = [] + fake_session.app_name = None + fake_session.app_version = None + mock_session.return_value = fake_session + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock(), + app_name="test_app", app_version="test_version") + cc.get_session() + mock_session.assert_called_with( + auth=mock.ANY, + verify=True, cert=None, timeout=None) + self.assertEqual(fake_session.app_name, "test_app") + self.assertEqual(fake_session.app_version, "test_version") + self.assertEqual( + 
fake_session.additional_user_agent, + [('openstacksdk', '1.2.3')]) + + @mock.patch.object(ksa_session, 'Session') + def test_get_session_with_timeout(self, mock_session): + fake_session = mock.Mock() + fake_session.additional_user_agent = [] + mock_session.return_value = fake_session + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + config_dict['api_timeout'] = 9 + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_session() + mock_session.assert_called_with( + auth=mock.ANY, + verify=True, cert=None, timeout=9) + self.assertEqual( + fake_session.additional_user_agent, + [('openstacksdk', '1.2.3')]) + + @mock.patch.object(ksa_session, 'Session') + def test_override_session_endpoint_override(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + self.assertEqual( + cc.get_session_endpoint('compute'), + fake_services_dict['compute_endpoint_override']) + + @mock.patch.object(ksa_session, 'Session') + def test_override_session_endpoint(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + self.assertEqual( + cc.get_session_endpoint('telemetry'), + fake_services_dict['telemetry_endpoint']) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session') + def test_session_endpoint(self, mock_get_session): + mock_session = mock.Mock() + mock_get_session.return_value = mock_session + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_session_endpoint('orchestration') + mock_session.get_endpoint.assert_called_with( + interface='public', + service_name=None, + region_name='region-al', + service_type='orchestration') + + @mock.patch.object(cloud_config.CloudConfig, 'get_session') + def test_session_endpoint_not_found(self, mock_get_session): + exc_to_raise = ksa_exceptions.catalog.EndpointNotFound + mock_get_session.return_value.get_endpoint.side_effect = exc_to_raise + cc = cloud_config.CloudConfig( + "test1", "region-al", {}, auth_plugin=mock.Mock()) + self.assertIsNone(cc.get_session_endpoint('notfound')) + + @mock.patch.object(cloud_config.CloudConfig, 'get_api_version') + @mock.patch.object(cloud_config.CloudConfig, 'get_auth_args') + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_object_store_password( + self, + mock_get_session_endpoint, + mock_get_auth_args, + mock_get_api_version): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://swift.example.com' + mock_get_api_version.return_value = '3' + mock_get_auth_args.return_value = dict( + username='testuser', + password='testpassword', + project_name='testproject', + auth_url='http://example.com', + ) + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('object-store', mock_client) + mock_client.assert_called_with( + session=mock.ANY, + os_options={ + 'region_name': 'region-al', + 'service_type': 'object-store', + 'object_storage_url': None, + 'endpoint_type': 'public', + }) + + @mock.patch.object(cloud_config.CloudConfig, 
'get_auth_args') + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_object_store_password_v2( + self, mock_get_session_endpoint, mock_get_auth_args): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://swift.example.com' + mock_get_auth_args.return_value = dict( + username='testuser', + password='testpassword', + project_name='testproject', + auth_url='http://example.com', + ) + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('object-store', mock_client) + mock_client.assert_called_with( + session=mock.ANY, + os_options={ + 'region_name': 'region-al', + 'service_type': 'object-store', + 'object_storage_url': None, + 'endpoint_type': 'public', + }) + + @mock.patch.object(cloud_config.CloudConfig, 'get_auth_args') + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_object_store( + self, mock_get_session_endpoint, mock_get_auth_args): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + mock_get_auth_args.return_value = {} + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('object-store', mock_client) + mock_client.assert_called_with( + session=mock.ANY, + os_options={ + 'region_name': 'region-al', + 'service_type': 'object-store', + 'object_storage_url': None, + 'endpoint_type': 'public', + }) + + @mock.patch.object(cloud_config.CloudConfig, 'get_auth_args') + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_object_store_timeout( + self, mock_get_session_endpoint, mock_get_auth_args): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + mock_get_auth_args.return_value = {} + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + config_dict['api_timeout'] = 9 + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('object-store', mock_client) + mock_client.assert_called_with( + session=mock.ANY, + os_options={ + 'region_name': 'region-al', + 'service_type': 'object-store', + 'object_storage_url': None, + 'endpoint_type': 'public', + }) + + @mock.patch.object(cloud_config.CloudConfig, 'get_auth_args') + def test_legacy_client_object_store_endpoint( + self, mock_get_auth_args): + mock_client = mock.Mock() + mock_get_auth_args.return_value = {} + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + config_dict['object_store_endpoint'] = 'http://example.com/swift' + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('object-store', mock_client) + mock_client.assert_called_with( + session=mock.ANY, + os_options={ + 'region_name': 'region-al', + 'service_type': 'object-store', + 'object_storage_url': 'http://example.com/swift', + 'endpoint_type': 'public', + }) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_image(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = 
cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('image', mock_client) + mock_client.assert_called_with( + version=2.0, + service_name=None, + endpoint_override='http://example.com', + region_name='region-al', + interface='public', + session=mock.ANY, + # Not a typo - the config dict above overrides this + service_type='mage' + ) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_image_override(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + config_dict['image_endpoint_override'] = 'http://example.com/override' + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('image', mock_client) + mock_client.assert_called_with( + version=2.0, + service_name=None, + endpoint_override='http://example.com/override', + region_name='region-al', + interface='public', + session=mock.ANY, + # Not a typo - the config dict above overrides this + service_type='mage' + ) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_image_versioned(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + # v2 endpoint was passed, 1 requested in config, endpoint wins + config_dict['image_api_version'] = '1' + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('image', mock_client) + mock_client.assert_called_with( + version=2.0, + service_name=None, + endpoint_override='http://example.com', + region_name='region-al', + interface='public', + session=mock.ANY, + # Not a typo - the config dict above overrides this + service_type='mage' + ) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_image_unversioned(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + # Versionless endpoint, config wins + config_dict['image_api_version'] = '1' + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('image', mock_client) + mock_client.assert_called_with( + version='1', + service_name=None, + endpoint_override='http://example.com', + region_name='region-al', + interface='public', + session=mock.ANY, + # Not a typo - the config dict above overrides this + service_type='mage' + ) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_image_argument(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v3' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + # Versionless endpoint, config wins + config_dict['image_api_version'] = '6' + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('image', mock_client, version='beef') + mock_client.assert_called_with( + version='beef', + service_name=None, + endpoint_override='http://example.com', + region_name='region-al', + 
interface='public', + session=mock.ANY, + # Not a typo - the config dict above overrides this + service_type='mage' + ) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_network(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('network', mock_client) + mock_client.assert_called_with( + api_version='2.0', + endpoint_type='public', + endpoint_override=None, + region_name='region-al', + service_type='network', + session=mock.ANY, + service_name=None) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_compute(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('compute', mock_client) + mock_client.assert_called_with( + version='2', + endpoint_type='public', + endpoint_override='http://compute.example.com', + region_name='region-al', + service_type='compute', + session=mock.ANY, + service_name=None) + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_identity(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com/v2' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('identity', mock_client) + mock_client.assert_called_with( + version='2.0', + endpoint='http://example.com/v2', + endpoint_type='admin', + endpoint_override=None, + region_name='region-al', + service_type='identity', + session=mock.ANY, + service_name='locks') + + @mock.patch.object(cloud_config.CloudConfig, 'get_session_endpoint') + def test_legacy_client_identity_v3(self, mock_get_session_endpoint): + mock_client = mock.Mock() + mock_get_session_endpoint.return_value = 'http://example.com' + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + config_dict['identity_api_version'] = '3' + cc = cloud_config.CloudConfig( + "test1", "region-al", config_dict, auth_plugin=mock.Mock()) + cc.get_legacy_client('identity', mock_client) + mock_client.assert_called_with( + version='3', + endpoint='http://example.com', + interface='admin', + endpoint_override=None, + region_name='region-al', + service_type='identity', + session=mock.ANY, + service_name='locks') diff --git a/openstack/tests/unit/config/test_config.py b/openstack/tests/unit/config/test_config.py new file mode 100644 index 000000000..f87b18e25 --- /dev/null +++ b/openstack/tests/unit/config/test_config.py @@ -0,0 +1,1021 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import copy
+import os
+
+import extras
+import fixtures
+import testtools
+import yaml
+
+from openstack import config
+from openstack.config import cloud_config
+from openstack.config import defaults
+from openstack.config import exceptions
+from openstack.config import loader
+from openstack.tests.unit.config import base
+
+
+def prompt_for_password(prompt=None):
+    """Fake prompt function that just returns a constant string"""
+    return 'promptpass'
+
+
+class TestConfig(base.TestCase):
+
+    def test_get_all_clouds(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        clouds = c.get_all_clouds()
+        # We add one by hand because the regions cloud is going to exist
+        # twice since it has two regions in it
+        user_clouds = [
+            cloud for cloud in base.USER_CONF['clouds'].keys()
+        ] + ['_test_cloud_regions']
+        configured_clouds = [cloud.name for cloud in clouds]
+        self.assertItemsEqual(user_clouds, configured_clouds)
+
+    def test_get_one_cloud(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = c.get_one_cloud(validate=False)
+        self.assertIsInstance(cloud, cloud_config.CloudConfig)
+        self.assertEqual(cloud.name, '')
+
+    def test_get_one_cloud_auth_defaults(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml])
+        cc = c.get_one_cloud(cloud='_test-cloud_', auth={'username': 'user'})
+        self.assertEqual('user', cc.auth['username'])
+        self.assertEqual(
+            defaults._defaults['auth_type'],
+            cc.auth_type,
+        )
+        self.assertEqual(
+            defaults._defaults['identity_api_version'],
+            cc.identity_api_version,
+        )
+
+    def test_get_one_cloud_auth_override_defaults(self):
+        default_options = {'compute_api_version': '4'}
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   override_defaults=default_options)
+        cc = c.get_one_cloud(cloud='_test-cloud_', auth={'username': 'user'})
+        self.assertEqual('user', cc.auth['username'])
+        self.assertEqual('4', cc.compute_api_version)
+        self.assertEqual(
+            defaults._defaults['identity_api_version'],
+            cc.identity_api_version,
+        )
+
+    def test_get_one_cloud_with_config_files(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.secure_yaml])
+        self.assertIsInstance(c.cloud_config, dict)
+        self.assertIn('cache', c.cloud_config)
+        self.assertIsInstance(c.cloud_config['cache'], dict)
+        self.assertIn('max_age', c.cloud_config['cache'])
+        self.assertIn('path', c.cloud_config['cache'])
+        cc = c.get_one_cloud('_test-cloud_')
+        self._assert_cloud_details(cc)
+        cc = c.get_one_cloud('_test_cloud_no_vendor')
+        self._assert_cloud_details(cc)
+
+    def test_get_one_cloud_with_int_project_id(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('_test-cloud-int-project_')
+        self.assertEqual('12345', cc.auth['project_id'])
+
+    def test_get_one_cloud_with_domain_id(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
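+        # per-user and per-project domain ids should stay distinct; no
+        # bare domain_id should leak into the auth dict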
+        cc = c.get_one_cloud('_test-cloud-domain-id_')
+        self.assertEqual('6789', cc.auth['user_domain_id'])
+        self.assertEqual('123456789', cc.auth['project_domain_id'])
+        self.assertNotIn('domain_id', cc.auth)
+        self.assertNotIn('domain-id', cc.auth)
+        self.assertNotIn('domain_id', cc)
+
+    def test_get_one_cloud_domain_scoped(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('_test-cloud-domain-scoped_')
+        self.assertEqual('12345', cc.auth['domain_id'])
+        self.assertNotIn('user_domain_id', cc.auth)
+        self.assertNotIn('project_domain_id', cc.auth)
+
+    def test_get_one_cloud_infer_user_domain(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('_test-cloud-int-project_')
+        self.assertEqual('awesome-domain', cc.auth['user_domain_id'])
+        self.assertEqual('awesome-domain', cc.auth['project_domain_id'])
+        self.assertNotIn('domain_id', cc.auth)
+        self.assertNotIn('domain_id', cc)
+
+    def test_get_one_cloud_with_hyphenated_project_id(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('_test_cloud_hyphenated')
+        self.assertEqual('12345', cc.auth['project_id'])
+
+    def test_get_one_cloud_with_hyphenated_kwargs(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        args = {
+            'auth': {
+                'username': 'testuser',
+                'password': 'testpass',
+                'project-id': '12345',
+                'auth-url': 'http://example.com/v2',
+            },
+            'region_name': 'test-region',
+        }
+        cc = c.get_one_cloud(**args)
+        self.assertEqual('http://example.com/v2', cc.auth['auth_url'])
+
+    def test_no_environ(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        self.assertRaises(
+            exceptions.OpenStackConfigException, c.get_one_cloud, 'envvars')
+
+    def test_fallthrough(self):
+        c = config.OpenStackConfig(config_files=[self.no_yaml],
+                                   vendor_files=[self.no_yaml],
+                                   secure_files=[self.no_yaml])
+        for k in os.environ.keys():
+            if k.startswith('OS_'):
+                self.useFixture(fixtures.EnvironmentVariable(k))
+        c.get_one_cloud(cloud='defaults', validate=False)
+
+    def test_prefer_ipv6_true(self):
+        c = config.OpenStackConfig(config_files=[self.no_yaml],
+                                   vendor_files=[self.no_yaml],
+                                   secure_files=[self.no_yaml])
+        cc = c.get_one_cloud(cloud='defaults', validate=False)
+        self.assertTrue(cc.prefer_ipv6)
+
+    def test_prefer_ipv6_false(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud(cloud='_test-cloud_')
+        self.assertFalse(cc.prefer_ipv6)
+
+    def test_force_ipv4_true(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud(cloud='_test-cloud_')
+        self.assertTrue(cc.force_ipv4)
+
+    def test_force_ipv4_false(self):
+        c = config.OpenStackConfig(config_files=[self.no_yaml],
+                                   vendor_files=[self.no_yaml],
+                                   secure_files=[self.no_yaml])
+        cc = c.get_one_cloud(cloud='defaults', validate=False)
+        self.assertFalse(cc.force_ipv4)
+
+    def test_get_one_cloud_auth_merge(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml])
+        cc = c.get_one_cloud(cloud='_test-cloud_', auth={'username': 'user'})
+        self.assertEqual('user', cc.auth['username'])
+        self.assertEqual('testpass', cc.auth['password'])
+
+    def test_get_one_cloud_networks(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('_test-cloud-networks_')
+        self.assertEqual(
+            ['a-public', 'another-public', 'split-default'],
+            cc.get_external_networks())
+        self.assertEqual(
+            ['a-private', 'another-private', 'split-no-default'],
+            cc.get_internal_networks())
+        self.assertEqual('another-private', cc.get_nat_destination())
+        self.assertEqual('another-public', cc.get_default_network())
+        self.assertEqual(
+            ['a-public', 'another-public', 'split-no-default'],
+            cc.get_external_ipv4_networks())
+        self.assertEqual(
+            ['a-public', 'another-public', 'split-default'],
+            cc.get_external_ipv6_networks())
+
+    def test_get_one_cloud_no_networks(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('_test-cloud-domain-scoped_')
+        self.assertEqual([], cc.get_external_networks())
+        self.assertEqual([], cc.get_internal_networks())
+        self.assertIsNone(cc.get_nat_destination())
+        self.assertIsNone(cc.get_default_network())
+
+    def test_only_secure_yaml(self):
+        c = config.OpenStackConfig(config_files=['nonexistent'],
+                                   vendor_files=['nonexistent'],
+                                   secure_files=[self.secure_yaml])
+        cc = c.get_one_cloud(cloud='_test_cloud_no_vendor', validate=False)
+        self.assertEqual('testpass', cc.auth['password'])
+
+    def test_get_cloud_names(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   secure_files=[self.no_yaml])
+        self.assertEqual(
+            ['_test-cloud-domain-id_',
+             '_test-cloud-domain-scoped_',
+             '_test-cloud-int-project_',
+             '_test-cloud-networks_',
+             '_test-cloud_',
+             '_test-cloud_no_region',
+             '_test_cloud_hyphenated',
+             '_test_cloud_no_vendor',
+             '_test_cloud_regions',
+             ],
+            sorted(c.get_cloud_names()))
+        c = config.OpenStackConfig(config_files=[self.no_yaml],
+                                   vendor_files=[self.no_yaml],
+                                   secure_files=[self.no_yaml])
+        for k in os.environ.keys():
+            if k.startswith('OS_'):
+                self.useFixture(fixtures.EnvironmentVariable(k))
+        c.get_one_cloud(cloud='defaults', validate=False)
+        self.assertEqual(['defaults'], sorted(c.get_cloud_names()))
+
+    def test_set_one_cloud_creates_file(self):
+        config_dir = fixtures.TempDir()
+        self.useFixture(config_dir)
+        config_path = os.path.join(config_dir.path, 'clouds.yaml')
+        config.OpenStackConfig.set_one_cloud(config_path, '_test_cloud_')
+        self.assertTrue(os.path.isfile(config_path))
+        with open(config_path) as fh:
+            self.assertEqual({'clouds': {'_test_cloud_': {}}},
+                             yaml.safe_load(fh))
+
+    def test_set_one_cloud_updates_cloud(self):
+        new_config = {
+            'cloud': 'new_cloud',
+            'auth': {
+                'password': 'newpass'
+            }
+        }
+
+        resulting_cloud_config = {
+            'auth': {
+                'password': 'newpass',
+                'username': 'testuser',
+                'auth_url': 'http://example.com/v2',
+            },
+            'cloud': 'new_cloud',
+            'profile': '_test_cloud_in_our_cloud',
+            'region_name': 'test-region'
+        }
+        resulting_config = copy.deepcopy(base.USER_CONF)
+        resulting_config['clouds']['_test-cloud_'] = resulting_cloud_config
+        config.OpenStackConfig.set_one_cloud(self.cloud_yaml, '_test-cloud_',
+                                             new_config)
+        with open(self.cloud_yaml) as fh:
+            written_config = yaml.safe_load(fh)
+            # We write a cache config for testing
+            written_config['cache'].pop('path', None)
+            self.assertEqual(written_config, resulting_config)
+
+    def test_get_region_no_region_default(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        region = c._get_region(cloud='_test-cloud_no_region')
+        self.assertEqual(region, {'name': '', 'values': {}})
+
+    def test_get_region_no_region(self):
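+        # an explicit region_name is honored even when the cloud itself
+        # defines no regions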
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        region = c._get_region(cloud='_test-cloud_no_region',
+                               region_name='override-region')
+        self.assertEqual(region, {'name': 'override-region', 'values': {}})
+
+    def test_get_region_region_is_none(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        region = c._get_region(cloud='_test-cloud_no_region', region_name=None)
+        self.assertEqual(region, {'name': '', 'values': {}})
+
+    def test_get_region_region_set(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        region = c._get_region(cloud='_test-cloud_', region_name='test-region')
+        self.assertEqual(region, {'name': 'test-region', 'values': {}})
+
+    def test_get_region_many_regions_default(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        region = c._get_region(cloud='_test_cloud_regions',
+                               region_name='')
+        self.assertEqual(region, {'name': 'region1', 'values':
+                         {'external_network': 'region1-network'}})
+
+    def test_get_region_many_regions(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        region = c._get_region(cloud='_test_cloud_regions',
+                               region_name='region2')
+        self.assertEqual(region, {'name': 'region2', 'values':
+                         {'external_network': 'my-network'}})
+
+    def test_get_region_invalid_region(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        self.assertRaises(
+            exceptions.OpenStackConfigException, c._get_region,
+            cloud='_test_cloud_regions', region_name='invalid-region')
+
+    def test_get_region_no_cloud(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.no_yaml])
+        region = c._get_region(region_name='no-cloud-region')
+        self.assertEqual(region, {'name': 'no-cloud-region', 'values': {}})
+
+
+class TestConfigArgparse(base.TestCase):
+
+    def setUp(self):
+        super(TestConfigArgparse, self).setUp()
+
+        self.args = dict(
+            auth_url='http://example.com/v2',
+            username='user',
+            password='password',
+            project_name='project',
+            region_name='region2',
+            snack_type='cookie',
+            os_auth_token='no-good-things',
+        )
+
+        self.options = argparse.Namespace(**self.args)
+
+    def test_get_one_cloud_bad_region_argparse(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        self.assertRaises(
+            exceptions.OpenStackConfigException, c.get_one_cloud,
+            cloud='_test-cloud_', argparse=self.options)
+
+    def test_get_one_cloud_argparse(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(
+            cloud='_test_cloud_regions', argparse=self.options, validate=False)
+        self.assertEqual(cc.region_name, 'region2')
+        self.assertEqual(cc.snack_type, 'cookie')
+
+    def test_get_one_cloud_precedence(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        kwargs = {
+            'auth': {
+                'username': 'testuser',
+                'password': 'authpass',
+                'project-id': 'testproject',
+                'auth_url': 'http://example.com/v2',
+            },
+            'region_name': 'kwarg_region',
+            'password': 'ansible_password',
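+            # a top-level password kwarg must not override auth['password']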
+            'arbitrary': 'value',
+        }
+
+        args = dict(
+            auth_url='http://example.com/v2',
+            username='user',
+            password='argpass',
+            project_name='project',
+            region_name='region2',
+            snack_type='cookie',
+        )
+
+        options = argparse.Namespace(**args)
+        cc = c.get_one_cloud(
+            argparse=options, **kwargs)
+        self.assertEqual(cc.region_name, 'region2')
+        self.assertEqual(cc.auth['password'], 'authpass')
+        self.assertEqual(cc.snack_type, 'cookie')
+
+    def test_get_one_cloud_precedence_osc(self):
+        c = config.OpenStackConfig(
+            config_files=[self.cloud_yaml],
+            vendor_files=[self.vendor_yaml],
+        )
+
+        kwargs = {
+            'auth': {
+                'username': 'testuser',
+                'password': 'authpass',
+                'project-id': 'testproject',
+                'auth_url': 'http://example.com/v2',
+            },
+            'region_name': 'kwarg_region',
+            'password': 'ansible_password',
+            'arbitrary': 'value',
+        }
+
+        args = dict(
+            auth_url='http://example.com/v2',
+            username='user',
+            password='argpass',
+            project_name='project',
+            region_name='region2',
+            snack_type='cookie',
+        )
+
+        options = argparse.Namespace(**args)
+        cc = c.get_one_cloud_osc(
+            argparse=options,
+            **kwargs
+        )
+        self.assertEqual(cc.region_name, 'region2')
+        self.assertEqual(cc.auth['password'], 'argpass')
+        self.assertEqual(cc.snack_type, 'cookie')
+
+    def test_get_one_cloud_precedence_no_argparse(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        kwargs = {
+            'auth': {
+                'username': 'testuser',
+                'password': 'authpass',
+                'project-id': 'testproject',
+                'auth_url': 'http://example.com/v2',
+            },
+            'region_name': 'kwarg_region',
+            'password': 'ansible_password',
+            'arbitrary': 'value',
+        }
+
+        cc = c.get_one_cloud(**kwargs)
+        self.assertEqual(cc.region_name, 'kwarg_region')
+        self.assertEqual(cc.auth['password'], 'authpass')
+        self.assertIsNone(cc.password)
+
+    def test_get_one_cloud_just_argparse(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(argparse=self.options, validate=False)
+        self.assertIsNone(cc.cloud)
+        self.assertEqual(cc.region_name, 'region2')
+        self.assertEqual(cc.snack_type, 'cookie')
+
+    def test_get_one_cloud_just_kwargs(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(validate=False, **self.args)
+        self.assertIsNone(cc.cloud)
+        self.assertEqual(cc.region_name, 'region2')
+        self.assertEqual(cc.snack_type, 'cookie')
+
+    def test_get_one_cloud_dash_kwargs(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        args = {
+            'auth-url': 'http://example.com/v2',
+            'username': 'user',
+            'password': 'password',
+            'project_name': 'project',
+            'region_name': 'other-test-region',
+            'snack_type': 'cookie',
+        }
+        cc = c.get_one_cloud(**args)
+        self.assertIsNone(cc.cloud)
+        self.assertEqual(cc.region_name, 'other-test-region')
+        self.assertEqual(cc.snack_type, 'cookie')
+
+    def test_get_one_cloud_no_argparse(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(cloud='_test-cloud_', argparse=None)
+        self._assert_cloud_details(cc)
+        self.assertEqual(cc.region_name, 'test-region')
+        self.assertIsNone(cc.snack_type)
+
+    def test_get_one_cloud_no_argparse_regions(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(cloud='_test_cloud_regions', argparse=None)
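+        # no region was requested, so the first region defined for the
+        # cloud (region1) is expected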
+        self._assert_cloud_details(cc)
+        self.assertEqual(cc.region_name, 'region1')
+        self.assertIsNone(cc.snack_type)
+
+    def test_get_one_cloud_bad_region(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        self.assertRaises(
+            exceptions.OpenStackConfigException,
+            c.get_one_cloud,
+            cloud='_test_cloud_regions', region_name='bad')
+
+    def test_get_one_cloud_bad_region_no_regions(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        self.assertRaises(
+            exceptions.OpenStackConfigException,
+            c.get_one_cloud,
+            cloud='_test-cloud_', region_name='bad_region')
+
+    def test_get_one_cloud_no_argparse_region2(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(
+            cloud='_test_cloud_regions', region_name='region2', argparse=None)
+        self._assert_cloud_details(cc)
+        self.assertEqual(cc.region_name, 'region2')
+        self.assertIsNone(cc.snack_type)
+
+    def test_get_one_cloud_network(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(
+            cloud='_test_cloud_regions', region_name='region1', argparse=None)
+        self._assert_cloud_details(cc)
+        self.assertEqual(cc.region_name, 'region1')
+        self.assertEqual('region1-network', cc.config['external_network'])
+
+    def test_get_one_cloud_per_region_network(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(
+            cloud='_test_cloud_regions', region_name='region2', argparse=None)
+        self._assert_cloud_details(cc)
+        self.assertEqual(cc.region_name, 'region2')
+        self.assertEqual('my-network', cc.config['external_network'])
+
+    def test_get_one_cloud_no_yaml_no_cloud(self):
+        c = config.OpenStackConfig(load_yaml_config=False)
+
+        self.assertRaises(
+            exceptions.OpenStackConfigException,
+            c.get_one_cloud,
+            cloud='_test_cloud_regions', region_name='region2', argparse=None)
+
+    def test_get_one_cloud_no_yaml(self):
+        c = config.OpenStackConfig(load_yaml_config=False)
+
+        cc = c.get_one_cloud(
+            region_name='region2', argparse=None,
+            **base.USER_CONF['clouds']['_test_cloud_regions'])
+        # Not using assert_cloud_details because of cache settings which
+        # are not present without the file
+        self.assertIsInstance(cc, cloud_config.CloudConfig)
+        self.assertTrue(extras.safe_hasattr(cc, 'auth'))
+        self.assertIsInstance(cc.auth, dict)
+        self.assertIsNone(cc.cloud)
+        self.assertIn('username', cc.auth)
+        self.assertEqual('testuser', cc.auth['username'])
+        self.assertEqual('testpass', cc.auth['password'])
+        self.assertFalse(cc.config['image_api_use_tasks'])
+        self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth)
+        if 'project_name' in cc.auth:
+            self.assertEqual('testproject', cc.auth['project_name'])
+        elif 'project_id' in cc.auth:
+            self.assertEqual('testproject', cc.auth['project_id'])
+        self.assertEqual(cc.region_name, 'region2')
+
+    def test_fix_env_args(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        env_args = {'os-compute-api-version': 1}
+        fixed_args = c._fix_args(env_args)
+
+        self.assertDictEqual({'compute_api_version': 1}, fixed_args)
+
+    def test_extra_config(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        defaults = {'use_hostnames': False, 'other-value': 'something'}
+        ansible_options = c.get_extra_config('ansible', defaults)
+
+        # This should show that the default for use_hostnames above is
+        # overridden by the value in the config file defined in base.py
+        # It should also show that other-value key is normalized and passed
+        # through even though there is no corresponding value in the config
+        # file, and that expand-hostvars key is normalized and the value
+        # from the config comes through even though there is no default.
+        self.assertDictEqual(
+            {
+                'expand_hostvars': False,
+                'use_hostnames': True,
+                'other_value': 'something',
+            },
+            ansible_options)
+
+    def test_register_argparse_cloud(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        parser = argparse.ArgumentParser()
+        c.register_argparse_arguments(parser, [])
+        opts, _remain = parser.parse_known_args(['--os-cloud', 'foo'])
+        self.assertEqual(opts.os_cloud, 'foo')
+
+    def test_env_argparse_precedence(self):
+        self.useFixture(fixtures.EnvironmentVariable(
+            'OS_TENANT_NAME', 'tenants-are-bad'))
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        cc = c.get_one_cloud(
+            cloud='envvars', argparse=self.options, validate=False)
+        self.assertEqual(cc.auth['project_name'], 'project')
+
+    def test_argparse_default_no_token(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        parser = argparse.ArgumentParser()
+        c.register_argparse_arguments(parser, [])
+        # novaclient will add this
+        parser.add_argument('--os-auth-token')
+        opts, _remain = parser.parse_known_args()
+        cc = c.get_one_cloud(
+            cloud='_test_cloud_regions', argparse=opts)
+        self.assertEqual(cc.config['auth_type'], 'password')
+        self.assertNotIn('token', cc.config['auth'])
+
+    def test_argparse_token(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+
+        parser = argparse.ArgumentParser()
+        c.register_argparse_arguments(parser, [])
+        # novaclient will add this
+        parser.add_argument('--os-auth-token')
+        opts, _remain = parser.parse_known_args(
+            ['--os-auth-token', 'very-bad-things',
+             '--os-auth-type', 'token'])
+        cc = c.get_one_cloud(argparse=opts, validate=False)
+        self.assertEqual(cc.config['auth_type'], 'token')
+        self.assertEqual(cc.config['auth']['token'], 'very-bad-things')
+
+    def test_argparse_underscores(self):
+        c = config.OpenStackConfig(config_files=[self.no_yaml],
+                                   vendor_files=[self.no_yaml],
+                                   secure_files=[self.no_yaml])
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--os_username')
+        argv = [
+            '--os_username', 'user', '--os_password', 'pass',
+            '--os-auth-url', 'auth-url', '--os-project-name', 'project']
+        c.register_argparse_arguments(parser, argv=argv)
+        opts, _remain = parser.parse_known_args(argv)
+        cc = c.get_one_cloud(argparse=opts)
+        self.assertEqual(cc.config['auth']['username'], 'user')
+        self.assertEqual(cc.config['auth']['password'], 'pass')
+        self.assertEqual(cc.config['auth']['auth_url'], 'auth-url')
+
+    def test_argparse_action_append_no_underscore(self):
+        c = config.OpenStackConfig(config_files=[self.no_yaml],
+                                   vendor_files=[self.no_yaml],
+                                   secure_files=[self.no_yaml])
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--foo', action='append')
+        argv = ['--foo', '1', '--foo', '2']
+        c.register_argparse_arguments(parser, argv=argv)
+        opts, _remain = parser.parse_known_args(argv)
+        self.assertEqual(opts.foo, ['1', '2'])
+
+    def test_argparse_underscores_duplicate(self):
+        c = config.OpenStackConfig(config_files=[self.no_yaml],
+                                   vendor_files=[self.no_yaml],
+                                   secure_files=[self.no_yaml])
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--os_username')
+        argv = [
+            '--os_username', 'user', '--os_password', 'pass',
+            '--os-username', 'user1', '--os-password', 'pass1',
+            '--os-auth-url', 'auth-url', '--os-project-name', 'project']
+        self.assertRaises(
+            exceptions.OpenStackConfigException,
+            c.register_argparse_arguments,
+            parser=parser, argv=argv)
+
+    def test_register_argparse_bad_plugin(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        parser = argparse.ArgumentParser()
+        self.assertRaises(
+            exceptions.OpenStackConfigException,
+            c.register_argparse_arguments,
+            parser, ['--os-auth-type', 'foo'])
+
+    def test_register_argparse_not_password(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        parser = argparse.ArgumentParser()
+        args = [
+            '--os-auth-type', 'v3token',
+            '--os-token', 'some-secret',
+        ]
+        c.register_argparse_arguments(parser, args)
+        opts, _remain = parser.parse_known_args(args)
+        self.assertEqual(opts.os_token, 'some-secret')
+
+    def test_register_argparse_password(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        parser = argparse.ArgumentParser()
+        args = [
+            '--os-password', 'some-secret',
+        ]
+        c.register_argparse_arguments(parser, args)
+        opts, _remain = parser.parse_known_args(args)
+        self.assertEqual(opts.os_password, 'some-secret')
+        with testtools.ExpectedException(AttributeError):
+            opts.os_token
+
+    def test_register_argparse_service_type(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        parser = argparse.ArgumentParser()
+        args = [
+            '--os-service-type', 'network',
+            '--os-endpoint-type', 'admin',
+            '--http-timeout', '20',
+        ]
+        c.register_argparse_arguments(parser, args)
+        opts, _remain = parser.parse_known_args(args)
+        self.assertEqual(opts.os_service_type, 'network')
+        self.assertEqual(opts.os_endpoint_type, 'admin')
+        self.assertEqual(opts.http_timeout, '20')
+        with testtools.ExpectedException(AttributeError):
+            opts.os_network_service_type
+        cloud = c.get_one_cloud(argparse=opts, validate=False)
+        self.assertEqual(cloud.config['service_type'], 'network')
+        self.assertEqual(cloud.config['interface'], 'admin')
+        self.assertEqual(cloud.config['api_timeout'], '20')
+        self.assertNotIn('http_timeout', cloud.config)
+
+    def test_register_argparse_network_service_type(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        parser = argparse.ArgumentParser()
+        args = [
+            '--os-endpoint-type', 'admin',
+            '--network-api-version', '4',
+        ]
+        c.register_argparse_arguments(parser, args, ['network'])
+        opts, _remain = parser.parse_known_args(args)
+        self.assertEqual(opts.os_service_type, 'network')
+        self.assertEqual(opts.os_endpoint_type, 'admin')
+        self.assertIsNone(opts.os_network_service_type)
+        self.assertIsNone(opts.os_network_api_version)
+        self.assertEqual(opts.network_api_version, '4')
+        cloud = c.get_one_cloud(argparse=opts, validate=False)
+        self.assertEqual(cloud.config['service_type'], 'network')
+        self.assertEqual(cloud.config['interface'], 'admin')
+        self.assertEqual(cloud.config['network_api_version'], '4')
+        self.assertNotIn('http_timeout', cloud.config)
+
+    def test_register_argparse_network_service_types(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        parser = argparse.ArgumentParser()
+        args = [
+            '--os-compute-service-name', 'cloudServers',
+            '--os-network-service-type', 'badtype',
+            '--os-endpoint-type', 'admin',
+            '--network-api-version', '4',
+        ]
+        c.register_argparse_arguments(
+            parser, args, ['compute', 'network', 'volume'])
+        opts, _remain = parser.parse_known_args(args)
+        self.assertEqual(opts.os_network_service_type, 'badtype')
+        self.assertIsNone(opts.os_compute_service_type)
+        self.assertIsNone(opts.os_volume_service_type)
+        self.assertEqual(opts.os_service_type, 'compute')
+        self.assertEqual(opts.os_compute_service_name, 'cloudServers')
+        self.assertEqual(opts.os_endpoint_type, 'admin')
+        self.assertIsNone(opts.os_network_api_version)
+        self.assertEqual(opts.network_api_version, '4')
+        cloud = c.get_one_cloud(argparse=opts, validate=False)
+        self.assertEqual(cloud.config['service_type'], 'compute')
+        self.assertEqual(cloud.config['network_service_type'], 'badtype')
+        self.assertEqual(cloud.config['interface'], 'admin')
+        self.assertEqual(cloud.config['network_api_version'], '4')
+        self.assertNotIn('volume_service_type', cloud.config)
+        self.assertNotIn('http_timeout', cloud.config)
+
+
+class TestConfigPrompt(base.TestCase):
+
+    def setUp(self):
+        super(TestConfigPrompt, self).setUp()
+
+        self.args = dict(
+            auth_url='http://example.com/v2',
+            username='user',
+            project_name='project',
+            # region_name='region2',
+            auth_type='password',
+        )
+
+        self.options = argparse.Namespace(**self.args)
+
+    def test_get_one_cloud_prompt(self):
+        c = config.OpenStackConfig(
+            config_files=[self.cloud_yaml],
+            vendor_files=[self.vendor_yaml],
+            pw_func=prompt_for_password,
+        )
+
+        # This needs a cloud definition without a password.
+        # If this starts failing unexpectedly check that the cloud_yaml
+        # and/or vendor_yaml do not have a password in the selected cloud.
+        cc = c.get_one_cloud(
+            cloud='_test_cloud_no_vendor',
+            argparse=self.options,
+        )
+        self.assertEqual('promptpass', cc.auth['password'])
+
+
+class TestConfigDefault(base.TestCase):
+
+    def setUp(self):
+        super(TestConfigDefault, self).setUp()
+
+        # Reset defaults after each test so that other tests are
+        # not affected by any changes.
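+        # (loader.set_default mutates shared state at module scope.)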
+        self.addCleanup(self._reset_defaults)
+
+    def _reset_defaults(self):
+        defaults._defaults = None
+
+    def test_set_no_default(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud(cloud='_test-cloud_', argparse=None)
+        self._assert_cloud_details(cc)
+        self.assertEqual('password', cc.auth_type)
+
+    def test_set_default_before_init(self):
+        loader.set_default('identity_api_version', '4')
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud(cloud='_test-cloud_', argparse=None)
+        self.assertEqual('4', cc.identity_api_version)
+
+
+class TestBackwardsCompatibility(base.TestCase):
+
+    def test_set_no_default(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = {
+            'identity_endpoint_type': 'admin',
+            'compute_endpoint_type': 'private',
+            'endpoint_type': 'public',
+            'auth_type': 'v3password',
+        }
+        result = c._fix_backwards_interface(cloud)
+        expected = {
+            'identity_interface': 'admin',
+            'compute_interface': 'private',
+            'interface': 'public',
+            'auth_type': 'v3password',
+        }
+        self.assertDictEqual(expected, result)
+
+    def test_project_v2password(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = {
+            'auth_type': 'v2password',
+            'auth': {
+                'project-name': 'my_project_name',
+                'project-id': 'my_project_id'
+            }
+        }
+        result = c._fix_backwards_project(cloud)
+        expected = {
+            'auth_type': 'v2password',
+            'auth': {
+                'tenant_name': 'my_project_name',
+                'tenant_id': 'my_project_id'
+            }
+        }
+        self.assertEqual(expected, result)
+
+    def test_project_password(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = {
+            'auth_type': 'password',
+            'auth': {
+                'project-name': 'my_project_name',
+                'project-id': 'my_project_id'
+            }
+        }
+        result = c._fix_backwards_project(cloud)
+        expected = {
+            'auth_type': 'password',
+            'auth': {
+                'project_name': 'my_project_name',
+                'project_id': 'my_project_id'
+            }
+        }
+        self.assertEqual(expected, result)
+
+    def test_backwards_network_fail(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = {
+            'external_network': 'public',
+            'networks': [
+                {'name': 'private', 'routes_externally': False},
+            ]
+        }
+        self.assertRaises(
+            exceptions.OpenStackConfigException,
+            c._fix_backwards_networks, cloud)
+
+    def test_backwards_network(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = {
+            'external_network': 'public',
+            'internal_network': 'private',
+        }
+        result = c._fix_backwards_networks(cloud)
+        expected = {
+            'external_network': 'public',
+            'internal_network': 'private',
+            'networks': [
+                {'name': 'public', 'routes_externally': True,
+                 'nat_destination': False, 'default_interface': True},
+                {'name': 'private', 'routes_externally': False,
+                 'nat_destination': True, 'default_interface': False},
+            ]
+        }
+        self.assertEqual(expected, result)
+
+    def test_normalize_network(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = {
+            'networks': [
+                {'name': 'private'}
+            ]
+        }
+        result = c._fix_backwards_networks(cloud)
+        expected = {
+            'networks': [
+                {'name': 'private', 'routes_externally': False,
+                 'nat_destination': False, 'default_interface': False,
+                 'routes_ipv4_externally': False,
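+                 # unspecified boolean attributes normalize to False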
+                 'routes_ipv6_externally': False},
+            ]
+        }
+        self.assertEqual(expected, result)
+
+    def test_single_default_interface(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cloud = {
+            'networks': [
+                {'name': 'blue', 'default_interface': True},
+                {'name': 'purple', 'default_interface': True},
+            ]
+        }
+        self.assertRaises(
+            exceptions.OpenStackConfigException,
+            c._fix_backwards_networks, cloud)
diff --git a/openstack/tests/unit/config/test_environ.py b/openstack/tests/unit/config/test_environ.py
new file mode 100644
index 000000000..521d72ca2
--- /dev/null
+++ b/openstack/tests/unit/config/test_environ.py
@@ -0,0 +1,185 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from openstack import config
+from openstack.config import cloud_config
+from openstack.config import exceptions
+from openstack.tests.unit.config import base
+
+import fixtures
+
+
+class TestEnviron(base.TestCase):
+
+    def setUp(self):
+        super(TestEnviron, self).setUp()
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_AUTH_URL', 'https://example.com'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_USERNAME', 'testuser'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_PASSWORD', 'testpass'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'testproject'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_PROJECT_ID', 'testnova'))
+
+    def test_get_one_cloud(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        self.assertIsInstance(c.get_one_cloud(), cloud_config.CloudConfig)
+
+    def test_no_fallthrough(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        self.assertRaises(
+            exceptions.OpenStackConfigException, c.get_one_cloud, 'openstack')
+
+    def test_envvar_name_override(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_CLOUD_NAME', 'override'))
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('override')
+        self._assert_cloud_details(cc)
+
+    def test_envvar_prefer_ipv6_override(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_PREFER_IPV6', 'false'))
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.secure_yaml])
+        cc = c.get_one_cloud('_test-cloud_')
+        self.assertFalse(cc.prefer_ipv6)
+
+    def test_environ_exists(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.secure_yaml])
+        cc = c.get_one_cloud('envvars')
+        self._assert_cloud_details(cc)
+        self.assertNotIn('auth_url', cc.config)
+        self.assertIn('auth_url', cc.config['auth'])
+        self.assertNotIn('project_id', cc.config['auth'])
+        self.assertNotIn('auth_url', cc.config)
+        cc = c.get_one_cloud('_test-cloud_')
+        self._assert_cloud_details(cc)
+        cc = c.get_one_cloud('_test_cloud_no_vendor')
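+        # a cloud without a vendor profile should still resolve cleanly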
+        self._assert_cloud_details(cc)
+
+    def test_environ_prefix(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   envvar_prefix='NOVA_',
+                                   secure_files=[self.secure_yaml])
+        cc = c.get_one_cloud('envvars')
+        self._assert_cloud_details(cc)
+        self.assertNotIn('auth_url', cc.config)
+        self.assertIn('auth_url', cc.config['auth'])
+        self.assertIn('project_id', cc.config['auth'])
+        self.assertNotIn('auth_url', cc.config)
+        cc = c.get_one_cloud('_test-cloud_')
+        self._assert_cloud_details(cc)
+        cc = c.get_one_cloud('_test_cloud_no_vendor')
+        self._assert_cloud_details(cc)
+
+    def test_get_one_cloud_with_config_files(self):
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   secure_files=[self.secure_yaml])
+        self.assertIsInstance(c.cloud_config, dict)
+        self.assertIn('cache', c.cloud_config)
+        self.assertIsInstance(c.cloud_config['cache'], dict)
+        self.assertIn('max_age', c.cloud_config['cache'])
+        self.assertIn('path', c.cloud_config['cache'])
+        cc = c.get_one_cloud('_test-cloud_')
+        self._assert_cloud_details(cc)
+        cc = c.get_one_cloud('_test_cloud_no_vendor')
+        self._assert_cloud_details(cc)
+
+    def test_config_file_override(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable(
+                'OS_CLIENT_CONFIG_FILE', self.cloud_yaml))
+        c = config.OpenStackConfig(config_files=[],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('_test-cloud_')
+        self._assert_cloud_details(cc)
+
+
+class TestEnvvars(base.TestCase):
+
+    def test_no_envvars(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        self.assertRaises(
+            exceptions.OpenStackConfigException, c.get_one_cloud, 'envvars')
+
+    def test_test_envvars(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_STDERR_CAPTURE', 'True'))
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        self.assertRaises(
+            exceptions.OpenStackConfigException, c.get_one_cloud, 'envvars')
+
+    def test_incomplete_envvars(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
+        config.OpenStackConfig(config_files=[self.cloud_yaml],
+                               vendor_files=[self.vendor_yaml])
+        # This is broken due to an issue that's fixed in a subsequent patch
+        # commenting it out in this patch to keep the patch size reasonable
+        # self.assertRaises(
+        #     keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions,
+        #     c.get_one_cloud, 'envvars')
+
+    def test_have_envvars(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_AUTH_URL', 'http://example.com'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_PASSWORD', 'password'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'project'))
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml])
+        cc = c.get_one_cloud('envvars')
+        self.assertEqual(cc.config['auth']['username'], 'user')
+
+    def test_old_envvars(self):
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
+        self.useFixture(
+            fixtures.EnvironmentVariable(
+                'NOVA_AUTH_URL', 'http://example.com'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_PASSWORD', 'password'))
+        self.useFixture(
+            fixtures.EnvironmentVariable('NOVA_PROJECT_NAME', 'project'))
+        c = config.OpenStackConfig(config_files=[self.cloud_yaml],
+                                   vendor_files=[self.vendor_yaml],
+                                   envvar_prefix='NOVA_')
+        cc = c.get_one_cloud('envvars')
+        self.assertEqual(cc.config['auth']['username'], 'nova')
diff --git a/openstack/tests/unit/config/test_init.py b/openstack/tests/unit/config/test_init.py
new file mode 100644
index 000000000..4f1af0c27
--- /dev/null
+++ b/openstack/tests/unit/config/test_init.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+
+import openstack.config
+from openstack.tests.unit.config import base
+
+
+class TestInit(base.TestCase):
+    def test_get_config_without_arg_parser(self):
+        cloud_config = openstack.config.get_config(
+            options=None, validate=False)
+        self.assertIsInstance(
+            cloud_config,
+            openstack.config.cloud_config.CloudConfig
+        )
+
+    def test_get_config_with_arg_parser(self):
+        cloud_config = openstack.config.get_config(
+            options=argparse.ArgumentParser(),
+            validate=False)
+        self.assertIsInstance(
+            cloud_config,
+            openstack.config.cloud_config.CloudConfig
+        )
diff --git a/openstack/tests/unit/config/test_json.py b/openstack/tests/unit/config/test_json.py
new file mode 100644
index 000000000..d41c509ca
--- /dev/null
+++ b/openstack/tests/unit/config/test_json.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
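+
+# These tests validate the defaults.json and vendor profile files shipped
+# with the config code against their JSON schemas.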
+
+import glob
+import json
+import os
+
+import jsonschema
+from testtools import content
+
+from openstack.config import defaults
+from openstack.tests.unit.config import base
+
+
+class TestConfig(base.TestCase):
+
+    def json_diagnostics(self, exc_info):
+        self.addDetail('filename', content.text_content(self.filename))
+        for error in sorted(self.validator.iter_errors(self.json_data)):
+            self.addDetail('jsonschema', content.text_content(str(error)))
+
+    def test_defaults_valid_json(self):
+        _schema_path = os.path.join(
+            os.path.dirname(os.path.realpath(defaults.__file__)),
+            'schema.json')
+        schema = json.load(open(_schema_path, 'r'))
+        self.validator = jsonschema.Draft4Validator(schema)
+        self.addOnException(self.json_diagnostics)
+
+        self.filename = os.path.join(
+            os.path.dirname(os.path.realpath(defaults.__file__)),
+            'defaults.json')
+        self.json_data = json.load(open(self.filename, 'r'))
+
+        self.assertTrue(self.validator.is_valid(self.json_data))
+
+    def test_vendors_valid_json(self):
+        _schema_path = os.path.join(
+            os.path.dirname(os.path.realpath(defaults.__file__)),
+            'vendor-schema.json')
+        schema = json.load(open(_schema_path, 'r'))
+        self.validator = jsonschema.Draft4Validator(schema)
+        self.addOnException(self.json_diagnostics)
+
+        _vendors_path = os.path.join(
+            os.path.dirname(os.path.realpath(defaults.__file__)),
+            'vendors')
+        for self.filename in glob.glob(os.path.join(_vendors_path, '*.json')):
+            self.json_data = json.load(open(self.filename, 'r'))
+
+            self.assertTrue(self.validator.is_valid(self.json_data))
diff --git a/openstack/tests/unit/fixtures/baremetal.json b/openstack/tests/unit/fixtures/baremetal.json
new file mode 100644
index 000000000..fa0a9e7a7
--- /dev/null
+++ b/openstack/tests/unit/fixtures/baremetal.json
@@ -0,0 +1,30 @@
+{
+    "default_version": {
+        "id": "v1",
+        "links": [
+            {
+                "href": "https://bare-metal.example.com/v1/",
+                "rel": "self"
+            }
+        ],
+        "min_version": "1.1",
+        "status": "CURRENT",
+        "version": "1.33"
+    },
+    "description": "Ironic is an OpenStack project which aims to provision baremetal machines.",
+    "name": "OpenStack Ironic API",
+    "versions": [
+        {
+            "id": "v1",
+            "links": [
+                {
+                    "href": "https://bare-metal.example.com/v1/",
+                    "rel": "self"
+                }
+            ],
+            "min_version": "1.1",
+            "status": "CURRENT",
+            "version": "1.33"
+        }
+    ]
+}
diff --git a/openstack/tests/unit/fixtures/catalog-v2.json b/openstack/tests/unit/fixtures/catalog-v2.json
new file mode 100644
index 000000000..aa8744355
--- /dev/null
+++ b/openstack/tests/unit/fixtures/catalog-v2.json
@@ -0,0 +1,149 @@
+{
+    "access": {
+        "token": {
+            "issued_at": "2016-04-14T10:09:58.014014Z",
+            "expires": "9999-12-31T23:59:59Z",
+            "id": "7fa3037ae2fe48ada8c626a51dc01ffd",
+            "tenant": {
+                "enabled": true,
+                "description": "Bootstrap project for initializing the cloud.",
+                "name": "admin",
+                "id": "1c36b64c840a42cd9e9b931a369337f0"
+            },
+            "audit_ids": [
+                "FgG3Q8T3Sh21r_7HyjHP8A"
+            ]
+        },
+        "serviceCatalog": [
+            {
+                "endpoints_links": [],
+                "endpoints": [
+                    {
+                        "adminURL": "https://compute.example.com/v2.1/1c36b64c840a42cd9e9b931a369337f0",
+                        "region": "RegionOne",
+                        "publicURL": "https://compute.example.com/v2.1/1c36b64c840a42cd9e9b931a369337f0",
+                        "internalURL": "https://compute.example.com/v2.1/1c36b64c840a42cd9e9b931a369337f0",
+                        "id": "32466f357f3545248c47471ca51b0d3a"
+                    }
+                ],
+                "type": "compute",
+                "name": "nova"
+            },
+            {
+                "endpoints_links": [],
+                "endpoints": [
+                    {
+                        "adminURL": "https://volume.example.com/v2/1c36b64c840a42cd9e9b931a369337f0",
+                        "region": "RegionOne",
+                        "publicURL": "https://volume.example.com/v2/1c36b64c840a42cd9e9b931a369337f0",
"https://volume.example.com/v2/1c36b64c840a42cd9e9b931a369337f0", + "internalURL": "https://volume.example.com/v2/1c36b64c840a42cd9e9b931a369337f0", + "id": "1e875ca2225b408bbf3520a1b8e1a537" + } + ], + "type": "volumev2", + "name": "cinderv2" + }, + { + "endpoints_links": [], + "endpoints": [ + { + "adminURL": "https://image.example.com", + "region": "RegionOne", + "publicURL": "https://image.example.com", + "internalURL": "https://image.example.com", + "id": "5a64de3c4a614d8d8f8d1ba3dee5f45f" + } + ], + "type": "image", + "name": "glance" + }, + { + "endpoints_links": [], + "endpoints": [ + { + "adminURL": "https://volume.example.com/v1/1c36b64c840a42cd9e9b931a369337f0", + "region": "RegionOne", + "publicURL": "https://volume.example.com/v1/1c36b64c840a42cd9e9b931a369337f0", + "internalURL": "https://volume.example.com/v1/1c36b64c840a42cd9e9b931a369337f0", + "id": "3d15fdfc7d424f3c8923324417e1a3d1" + } + ], + "type": "volume", + "name": "cinder" + }, + { + "endpoints_links": [], + "endpoints": [ + { + "adminURL": "https://identity.example.com/v2.0", + "region": "RegionOne", + "publicURL": "https://identity.example.com/v2.0", + "internalURL": "https://identity.example.com/v2.0", + "id": "4deb4d0504a044a395d4480741ba628c" + } + ], + "type": "identity", + "name": "keystone" + }, + { + "endpoints_links": [], + "endpoints": [ + { + "adminURL": "https://network.example.com", + "region": "RegionOne", + "publicURL": "https://network.example.com", + "internalURL": "https://network.example.com", + "id": "4deb4d0504a044a395d4480741ba628d" + } + ], + "type": "network", + "name": "neutron" + }, + { + "endpoints_links": [], + "endpoints": [ + { + "adminURL": "https://object-store.example.com/v1/1c36b64c840a42cd9e9b931a369337f0", + "region": "RegionOne", + "publicURL": "https://object-store.example.com/v1/1c36b64c840a42cd9e9b931a369337f0", + "internalURL": "https://object-store.example.com/v1/1c36b64c840a42cd9e9b931a369337f0", + "id": "4deb4d0504a044a395d4480741ba628c" + } + ], + "type": "object-store", + "name": "swift" + }, + { + "endpoints_links": [], + "endpoints": [ + { + "adminURL": "https://dns.example.com", + "region": "RegionOne", + "publicURL": "https://dns.example.com", + "internalURL": "https://dns.example.com", + "id": "652f0612744042bfbb8a8bb2c777a16d" + } + ], + "type": "dns", + "name": "designate" + } + ], + "user": { + "username": "dummy", + "roles_links": [], + "id": "71675f719c3343e8ac441cc28f396474", + "roles": [ + { + "name": "admin" + } + ], + "name": "admin" + }, + "metadata": { + "is_admin": 0, + "roles": [ + "6d813db50b6e4a1ababdbbb5a83c7de5" + ] + } + } +} diff --git a/openstack/tests/unit/fixtures/catalog-v3.json b/openstack/tests/unit/fixtures/catalog-v3.json new file mode 100644 index 000000000..a08d3ab00 --- /dev/null +++ b/openstack/tests/unit/fixtures/catalog-v3.json @@ -0,0 +1,185 @@ +{ + "token": { + "audit_ids": [ + "Rvn7eHkiSeOwucBIPaKdYA" + ], + "catalog": [ + { + "endpoints": [ + { + "id": "32466f357f3545248c47471ca51b0d3a", + "interface": "public", + "region": "RegionOne", + "url": "https://compute.example.com/v2.1/" + } + ], + "name": "nova", + "type": "compute" + }, + { + "endpoints": [ + { + "id": "1e875ca2225b408bbf3520a1b8e1a537", + "interface": "public", + "region": "RegionOne", + "url": "https://volume.example.com/v2/1c36b64c840a42cd9e9b931a369337f0" + } + ], + "name": "cinderv2", + "type": "volumev2" + }, + { + "endpoints": [ + { + "id": "5a64de3c4a614d8d8f8d1ba3dee5f45f", + "interface": "public", + "region": "RegionOne", + "url": 
"https://image.example.com" + } + ], + "name": "glance", + "type": "image" + }, + { + "endpoints": [ + { + "id": "3d15fdfc7d424f3c8923324417e1a3d1", + "interface": "public", + "region": "RegionOne", + "url": "https://volume.example.com/v1/1c36b64c840a42cd9e9b931a369337f0" + } + ], + "name": "cinder", + "type": "volume" + }, + { + "endpoints": [ + { + "id": "4deb4d0504a044a395d4480741ba628c", + "interface": "public", + "region": "RegionOne", + "url": "https://identity.example.com" + }, + { + "id": "012322eeedcd459edabb4933021112bc", + "interface": "admin", + "region": "RegionOne", + "url": "https://identity.example.com" + } + ], + "endpoints_links": [], + "name": "keystone", + "type": "identity" + }, + { + "endpoints": [ + { + "id": "4deb4d0504a044a395d4480741ba628d", + "interface": "public", + "region": "RegionOne", + "url": "https://network.example.com" + } + ], + "endpoints_links": [], + "name": "neutron", + "type": "network" + }, + { + "endpoints": [ + { + "id": "4deb4d0504a044a395d4480741ba628e", + "interface": "public", + "region": "RegionOne", + "url": "https://container-infra.example.com/v1" + } + ], + "endpoints_links": [], + "name": "magnum", + "type": "container-infra" + }, + { + "endpoints": [ + { + "id": "4deb4d0504a044a395d4480741ba628c", + "interface": "public", + "region": "RegionOne", + "url": "https://object-store.example.com/v1/1c36b64c840a42cd9e9b931a369337f0" + } + ], + "endpoints_links": [], + "name": "swift", + "type": "object-store" + }, + { + "endpoints": [ + { + "id": "652f0612744042bfbb8a8bb2c777a16d", + "interface": "public", + "region": "RegionOne", + "url": "https://bare-metal.example.com/" + } + ], + "endpoints_links": [], + "name": "ironic", + "type": "baremetal" + }, + { + "endpoints": [ + { + "id": "4deb4d0504a044a395d4480741ba628c", + "interface": "public", + "region": "RegionOne", + "url": "https://orchestration.example.com/v1/1c36b64c840a42cd9e9b931a369337f0" + } + ], + "endpoints_links": [], + "name": "heat", + "type": "orchestration" + }, + { + "endpoints": [ + { + "id": "10c76ffd2b744a67950ed1365190d352", + "interface": "public", + "region": "RegionOne", + "url": "https://dns.example.com" + } + ], + "endpoints_links": [], + "name": "designate", + "type": "dns" + } + ], + "expires_at": "9999-12-31T23:59:59Z", + "issued_at": "2016-12-17T14:25:05.000000Z", + "methods": [ + "password" + ], + "project": { + "domain": { + "id": "default", + "name": "default" + }, + "id": "1c36b64c840a42cd9e9b931a369337f0", + "name": "Default Project" + }, + "roles": [ + { + "id": "9fe2ff9ee4384b1894a90878d3e92bab", + "name": "_member_" + }, + { + "id": "37071fc082e14c2284c32a2761f71c63", + "name": "swiftoperator" + } + ], + "user": { + "domain": { + "id": "default", + "name": "default" + }, + "id": "c17534835f8f42bf98fc367e0bf35e09", + "name": "mordred" + } + } +} diff --git a/openstack/tests/unit/fixtures/clouds/clouds.yaml b/openstack/tests/unit/fixtures/clouds/clouds.yaml new file mode 100644 index 000000000..ebd4cc0ba --- /dev/null +++ b/openstack/tests/unit/fixtures/clouds/clouds.yaml @@ -0,0 +1,26 @@ +clouds: + _test_cloud_: + auth: + auth_url: https://identity.example.com + password: password + project_name: admin + username: admin + user_domain_name: default + project_domain_name: default + region_name: RegionOne + _test_cloud_v2_: + auth: + auth_url: https://identity.example.com + password: password + project_name: admin + username: admin + identity_api_version: '2.0' + region_name: RegionOne + _bogus_test_: + auth_type: bogus + auth: + auth_url: 
+      username: _test_user_
+      password: _test_pass_
+      project_name: _test_project_
+    region_name: _test_region_
diff --git a/openstack/tests/unit/fixtures/clouds/clouds_cache.yaml b/openstack/tests/unit/fixtures/clouds/clouds_cache.yaml
new file mode 100644
index 000000000..eb01d37ec
--- /dev/null
+++ b/openstack/tests/unit/fixtures/clouds/clouds_cache.yaml
@@ -0,0 +1,31 @@
+cache:
+  max_age: 90
+  class: dogpile.cache.memory
+  expiration:
+    server: 1
+clouds:
+  _test_cloud_:
+    auth:
+      auth_url: https://identity.example.com
+      password: password
+      project_name: admin
+      username: admin
+      user_domain_name: default
+      project_domain_name: default
+    region_name: RegionOne
+  _test_cloud_v2_:
+    auth:
+      auth_url: https://identity.example.com
+      password: password
+      project_name: admin
+      username: admin
+    identity_api_version: '2.0'
+    region_name: RegionOne
+  _bogus_test_:
+    auth_type: bogus
+    auth:
+      auth_url: http://identity.example.com/v2.0
+      username: _test_user_
+      password: _test_pass_
+      project_name: _test_project_
+    region_name: _test_region_
diff --git a/openstack/tests/unit/fixtures/discovery.json b/openstack/tests/unit/fixtures/discovery.json
new file mode 100644
index 000000000..9162ecc9d
--- /dev/null
+++ b/openstack/tests/unit/fixtures/discovery.json
@@ -0,0 +1,45 @@
+{
+    "versions": {
+        "values": [
+            {
+                "status": "stable",
+                "updated": "2016-04-04T00:00:00Z",
+                "media-types": [
+                    {
+                        "base": "application/json",
+                        "type": "application/vnd.openstack.identity-v3+json"
+                    }
+                ],
+                "id": "v3.6",
+                "links": [
+                    {
+                        "href": "https://identity.example.com/v3/",
+                        "rel": "self"
+                    }
+                ]
+            },
+            {
+                "status": "stable",
+                "updated": "2014-04-17T00:00:00Z",
+                "media-types": [
+                    {
+                        "base": "application/json",
+                        "type": "application/vnd.openstack.identity-v2.0+json"
+                    }
+                ],
+                "id": "v2.0",
+                "links": [
+                    {
+                        "href": "https://identity.example.com/v2.0/",
+                        "rel": "self"
+                    },
+                    {
+                        "href": "http://docs.openstack.org/",
+                        "type": "text/html",
+                        "rel": "describedby"
+                    }
+                ]
+            }
+        ]
+    }
+}
diff --git a/openstack/tests/unit/fixtures/dns.json b/openstack/tests/unit/fixtures/dns.json
new file mode 100644
index 000000000..1fc8e86bd
--- /dev/null
+++ b/openstack/tests/unit/fixtures/dns.json
@@ -0,0 +1,24 @@
+{
+    "versions": {
+        "values": [{
+            "id": "v1",
+            "links": [
+                {
+                    "href": "https://dns.example.com/v1",
+                    "rel": "self"
+                }
+            ],
+            "status": "DEPRECATED"
+        }, {
+            "id": "v2",
+            "links": [
+                {
+                    "href": "https://dns.example.com/v2",
+                    "rel": "self"
+                }
+            ],
+            "status": "CURRENT"
+        }]
+    }
+}
+
diff --git a/openstack/tests/unit/fixtures/image-version-broken.json b/openstack/tests/unit/fixtures/image-version-broken.json
new file mode 100644
index 000000000..a130ca403
--- /dev/null
+++ b/openstack/tests/unit/fixtures/image-version-broken.json
@@ -0,0 +1,64 @@
+{
+    "versions": [
+        {
+            "status": "CURRENT",
+            "id": "v2.3",
+            "links": [
+                {
+                    "href": "http://localhost/v2/",
+                    "rel": "self"
+                }
+            ]
+        },
+        {
+            "status": "SUPPORTED",
+            "id": "v2.2",
+            "links": [
+                {
+                    "href": "http://localhost/v2/",
+                    "rel": "self"
+                }
+            ]
+        },
+        {
+            "status": "SUPPORTED",
+            "id": "v2.1",
+            "links": [
+                {
+                    "href": "http://localhost/v2/",
+                    "rel": "self"
+                }
+            ]
+        },
+        {
+            "status": "SUPPORTED",
+            "id": "v2.0",
+            "links": [
+                {
+                    "href": "http://localhost/v2/",
+                    "rel": "self"
+                }
+            ]
+        },
+        {
+            "status": "SUPPORTED",
+            "id": "v1.1",
+            "links": [
+                {
+                    "href": "http://localhost/v1/",
+                    "rel": "self"
+                }
+            ]
+        },
+        {
+            "status": "SUPPORTED",
+            "id": "v1.0",
+            "links": [
+                {
+                    "href": "http://localhost/v1/",
+ "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/image-version-v1.json b/openstack/tests/unit/fixtures/image-version-v1.json new file mode 100644 index 000000000..60b0a3bd3 --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version-v1.json @@ -0,0 +1,24 @@ +{ + "versions": [ + { + "status": "CURRENT", + "id": "v1.1", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.0", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/image-version-v2.json b/openstack/tests/unit/fixtures/image-version-v2.json new file mode 100644 index 000000000..399a53aa9 --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version-v2.json @@ -0,0 +1,44 @@ +{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.3", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.2", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.1", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.0", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/image-version.json b/openstack/tests/unit/fixtures/image-version.json new file mode 100644 index 000000000..bd688ee3b --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version.json @@ -0,0 +1,64 @@ +{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.3", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.2", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.1", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.0", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.1", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.0", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/test_connection.py b/openstack/tests/unit/test_connection.py index 229dbb728..500c067a2 100644 --- a/openstack/tests/unit/test_connection.py +++ b/openstack/tests/unit/test_connection.py @@ -15,7 +15,7 @@ import os import fixtures from keystoneauth1 import session as ksa_session import mock -import os_client_config +import openstack.config from openstack import connection from openstack import exceptions @@ -171,7 +171,7 @@ class TestConnection(base.TestCase): def test_from_config_given_data(self): self._prepare_test_config() - data = os_client_config.OpenStackConfig().get_one_cloud("sample") + data = openstack.config.OpenStackConfig().get_one_cloud("sample") sot = connection.from_config(cloud_config=data) diff --git a/openstack/utils.py b/openstack/utils.py index 1864ecc57..5aef9ba5f 100644 --- a/openstack/utils.py +++ b/openstack/utils.py @@ -37,6 +37,9 @@ def deprecated(deprecated_in=None, removed_in=None, partial = functools.partial(deprecation.deprecated, current_version=version.__version__) + # TODO(shade) shade's tags break these - so 
hard override them for now. + # We'll want a patch fixing this before we cut any releases. + removed_in = '2.0.0' return partial(deprecated_in=deprecated_in, removed_in=removed_in, details=details) diff --git a/releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml b/releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml new file mode 100644 index 000000000..2d157a3c8 --- /dev/null +++ b/releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml @@ -0,0 +1,4 @@ +--- +features: + - All get and search functions can now take a jmespath expression in their + filters parameter. diff --git a/releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml b/releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml new file mode 100644 index 000000000..12f289f8b --- /dev/null +++ b/releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add a list_flavor_access method to list all + the projects/tenants allowed to access a given flavor. diff --git a/releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml b/releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml new file mode 100644 index 000000000..a3e76872e --- /dev/null +++ b/releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml @@ -0,0 +1,6 @@ +--- +features: + - Added a get_server_console method to fetch the console + log from a server. On clouds that do not expose this + feature, a debug line will be logged and an empty + string will be returned. diff --git a/releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml b/releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml new file mode 100644 index 000000000..98c320b26 --- /dev/null +++ b/releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml @@ -0,0 +1,9 @@ +--- +features: + - Added flag "show_all" to list_images. The Glance v2 + behavior of only showing shared images if they have been + accepted by the user can be confusing, and the only way + to change it is to use search_images(filters=dict(member_status='all')) + which isn't terribly obvious. "show_all=True" will set + that flag, as well as disable the filtering of images + in "deleted" state. diff --git a/releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml b/releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml new file mode 100644 index 000000000..98dd190bf --- /dev/null +++ b/releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add a description parameter to create_user, available on Keystone v3 diff --git a/releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml b/releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml new file mode 100644 index 000000000..0d464961b --- /dev/null +++ b/releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for Designate recordsets resources, with the + usual methods (search/list/get/create/update/delete). diff --git a/releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml b/releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml new file mode 100644 index 000000000..f5253af0f --- /dev/null +++ b/releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for Designate zones resources, with the + usual methods (search/list/get/create/update/delete).
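The jmespath filters feature above is easiest to see in a short sketch. This is a minimal illustration, assuming a shade-style cloud object built from a hypothetical clouds.yaml entry named 'mycloud'; a dict filter keeps the existing key/value matching, while a string is evaluated as a jmespath expression:

.. code-block:: python

    import shade

    cloud = shade.openstack_cloud(cloud='mycloud')

    # Dict filters keep the existing key/value matching behavior.
    active = cloud.search_images(filters={'status': 'active'})

    # A string filter is now evaluated as a jmespath expression
    # against the list of image dicts.
    ubuntu = cloud.search_images(filters="[?contains(name, 'ubuntu')]")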
diff --git a/releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml b/releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml new file mode 100644 index 000000000..6a6ff37a1 --- /dev/null +++ b/releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for host aggregates and host aggregate + membership. diff --git a/releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml b/releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml new file mode 100644 index 000000000..21dbed6f1 --- /dev/null +++ b/releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml @@ -0,0 +1,7 @@ +--- +features: + - Add support for Magnum baymodels, with the + usual methods (search/list/get/create/update/delete). Due to the upcoming + rename in Magnum from baymodel to cluster_template, the shade + functionality uses the term cluster_template. However, baymodel aliases + are provided for each API call. diff --git a/releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml b/releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml new file mode 100644 index 000000000..3a32e3dde --- /dev/null +++ b/releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for listing Magnum services. diff --git a/releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml b/releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml new file mode 100644 index 000000000..e90384134 --- /dev/null +++ b/releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Adds support to create and delete server groups. diff --git a/releasenotes/notes/add_update_server-8761059d6de7e68b.yaml b/releasenotes/notes/add_update_server-8761059d6de7e68b.yaml new file mode 100644 index 000000000..5bbe898d4 --- /dev/null +++ b/releasenotes/notes/add_update_server-8761059d6de7e68b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add an update_server method to update the name or description of a server. diff --git a/releasenotes/notes/add_update_service-28e590a7a7524053.yaml b/releasenotes/notes/add_update_service-28e590a7a7524053.yaml new file mode 100644 index 000000000..ff3e7befa --- /dev/null +++ b/releasenotes/notes/add_update_service-28e590a7a7524053.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add the ability to update keystone service information. This feature is + not available on keystone v2.0. The new function, update_service(), allows + the user to update the description, name, type, and enabled + status of a service. diff --git a/releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml b/releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml new file mode 100644 index 000000000..cc98f8c9f --- /dev/null +++ b/releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Cluster Templates now have a data model and normalization. + As a result, the detail parameter is now ignored + and detailed records are always returned. diff --git a/releasenotes/notes/bug-2001080-de52ead3c5466792.yaml b/releasenotes/notes/bug-2001080-de52ead3c5466792.yaml new file mode 100644 index 000000000..2b8b3c319 --- /dev/null +++ b/releasenotes/notes/bug-2001080-de52ead3c5466792.yaml @@ -0,0 +1,10 @@ +--- +prelude: > + Fixed a bug where a project was always enabled upon update, unless + ``enabled=False`` was passed explicitly.
+fixes: + - | + [`bug 2001080 `_] + Project update will only update the enabled field of projects when + ``enabled=True`` or ``enabled=False`` is passed explicitly. The previous + behavior had ``enabled=True`` as the default. diff --git a/releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml b/releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml new file mode 100644 index 000000000..4ac0b61af --- /dev/null +++ b/releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Fixed caching the volume list when volumes are in + use. diff --git a/releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml b/releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml new file mode 100644 index 000000000..e7b98afe3 --- /dev/null +++ b/releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml @@ -0,0 +1,22 @@ +--- +prelude: > + Swiftclient instantiation now provides authentication + information so that long-lived swiftclient objects can + reauthenticate if necessary. This should be a temporary + situation until swiftclient supports keystoneauth + sessions, at which point os-client-config will instantiate + swiftclient with a keystoneauth session. +features: + - Swiftclient instantiation now provides authentication + information so that long-lived swiftclient objects can + reauthenticate if necessary. + - Add support for explicit v2password auth type. + - Add SSL support to VEXXHOST vendor profile. + - Add zetta.io cloud vendor profile. +fixes: + - Fix bug where project_domain_{name,id} was set even + if project_{name,id} was not set. +other: + - HPCloud vendor profile removed due to cloud shutdown. + - RunAbove vendor profile removed due to migration to + OVH. diff --git a/releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml b/releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml new file mode 100644 index 000000000..19db8ebef --- /dev/null +++ b/releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The ``attach_volume`` method now always returns a ``volume_attachment`` + object. Previously, ``attach_volume`` would return a ``volume`` object if + it was called with ``wait=True`` and a ``volume_attachment`` object + otherwise. + diff --git a/releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml b/releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml new file mode 100644 index 000000000..380b653f4 --- /dev/null +++ b/releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Add support for Cinder volume backup resources, with the + usual methods (search/list/get/create/delete). diff --git a/releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml b/releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml new file mode 100644 index 000000000..0847ee667 --- /dev/null +++ b/releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fixed the volume normalization function when used with cinder v2. diff --git a/releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml b/releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml new file mode 100644 index 000000000..b447ed0a4 --- /dev/null +++ b/releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add a field to vendor cloud profiles to indicate + active, deprecated and shutdown status.
A message to + the user is triggered when attempting to use a cloud + with either deprecated or shutdown status. diff --git a/releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml b/releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml new file mode 100644 index 000000000..6e170359c --- /dev/null +++ b/releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OperatorCloud.get_compute_quotas(), OperatorCloud.set_compute_quotas() and OperatorCloud.delete_compute_quotas(), to manage nova quotas for projects and users \ No newline at end of file diff --git a/releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml b/releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml new file mode 100644 index 000000000..7ca6b37f5 --- /dev/null +++ b/releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml @@ -0,0 +1,9 @@ +--- +features: + - get_compute_usage now has a default value for the start + parameter of 2010-07-06. That was the date the OpenStack + project started. It's completely impossible for someone + to have Nova usage data that goes back further in time. + Also, both the start and end date parameters now + accept strings, which will be parsed, with timezones + properly converted to UTC, which is what Nova expects. diff --git a/releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml b/releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml new file mode 100644 index 000000000..4bb1e9013 --- /dev/null +++ b/releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml @@ -0,0 +1,4 @@ +--- +features: + - Adds the ability to add a config setting to clouds.yaml to + disable fetching extra_specs from flavors. diff --git a/releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml b/releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml new file mode 100644 index 000000000..35bb8c0b6 --- /dev/null +++ b/releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - The create_stack() call was fixed to call the correct iterator + method and to return the updated stack object when waiting. diff --git a/releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml b/releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml new file mode 100644 index 000000000..9f9bd5474 --- /dev/null +++ b/releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - The create_server() API call would not use the supplied 'network' + parameter if the 'nics' parameter was also supplied, even when it was + an empty list. It now uses 'network' if 'nics' is not supplied or if + it is an empty list. diff --git a/releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml b/releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml new file mode 100644 index 000000000..2f6d018a6 --- /dev/null +++ b/releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - The returned data from a create_service() call was not being normalized. diff --git a/releasenotes/notes/data-model-cf50d86982646370.yaml b/releasenotes/notes/data-model-cf50d86982646370.yaml new file mode 100644 index 000000000..66a814aae --- /dev/null +++ b/releasenotes/notes/data-model-cf50d86982646370.yaml @@ -0,0 +1,8 @@ +--- +features: + - Explicit data model contracts are now defined for + Flavors, Images, Security Groups, Security Group Rules, + and Servers. + - Resources with data model contracts are now being returned with + a 'location' attribute.
The location carries cloud name, region + name and information about the project that owns the resource. diff --git a/releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml b/releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml new file mode 100644 index 000000000..00ce4998d --- /dev/null +++ b/releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml @@ -0,0 +1,18 @@ +--- +fixes: + - Delete swift objects uploaded in service of uploading images + at the time that the corresponding image is deleted. On some clouds, + image uploads are accomplished by uploading the image to swift and + then running a task-import. As shade does this action on behalf of the + user, it is not reasonable to assume that the user would then be aware + of or manage the swift objects shade created, which led to an ongoing + leak of swift objects. + - Upload swift Large Objects as Static Large Objects by default. Shade + automatically uploads objects as Large Objects when they are over a + segment_size threshold. It had been doing this as Dynamic Large Objects, + which sound great, but which have the downside of not deleting their + sub-segments when the primary object is deleted. Since nothing in the + shade interface exposes that the object was segmented, the user would not + know they would also need to find and delete the segments. Instead, we + now upload as Static Large Objects which behave as expected and delete + segments when the object is deleted. diff --git a/releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml b/releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml new file mode 100644 index 000000000..381bcb99a --- /dev/null +++ b/releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - The delete_object() method was not returning True/False + as other delete methods do. It is now consistent with + the other delete APIs. diff --git a/releasenotes/notes/delete_project-399f9b3107014dde.yaml b/releasenotes/notes/delete_project-399f9b3107014dde.yaml new file mode 100644 index 000000000..e4cf39fb9 --- /dev/null +++ b/releasenotes/notes/delete_project-399f9b3107014dde.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - The delete_project() API now conforms to our standard of returning True + when the delete succeeds, or False when the project was not found. It + would previously raise an exception if the project was not found. diff --git a/releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml b/releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml new file mode 100644 index 000000000..6d58e43c1 --- /dev/null +++ b/releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added name_or_id parameter to domain operations, allowing + an admin to update/delete/get by domain name. diff --git a/releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml b/releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml new file mode 100644 index 000000000..70e28e7b1 --- /dev/null +++ b/releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml @@ -0,0 +1,8 @@ +--- +features: + - Added support for dual stack networks where the IPv4 subnet and the + IPv6 subnet have opposite public/private qualities. It is now possible + to add configuration to clouds.yaml that will indicate that a network + is public for v6 and private for v4, which is otherwise very difficult + to correctly infer while setting server attributes like private_v4, + public_v4 and public_v6.
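The True/False delete semantics described in the delete_object() and delete_project() notes above look like this in practice. A minimal sketch, assuming a hypothetical clouds.yaml entry named 'mycloud' and hypothetical resource names:

.. code-block:: python

    import shade

    cloud = shade.openstack_cloud(cloud='mycloud')

    # True when the object was deleted, False when it was not found.
    if not cloud.delete_object('backups', 'db.dump'):
        print('object was already gone')

    # delete_project() follows the same convention on an operator cloud.
    operator = shade.operator_cloud(cloud='mycloud')
    if not operator.delete_project('old-project'):
        print('project not found, nothing deleted')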
diff --git a/releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml b/releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml new file mode 100644 index 000000000..2db7bc947 --- /dev/null +++ b/releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add a new method, 'endpoint_for', which will return the + raw endpoint for a given service from the current catalog. diff --git a/releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml b/releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml new file mode 100644 index 000000000..e474e0266 --- /dev/null +++ b/releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - delete_image used to fail with an AttributeError if an invalid image + name or id was passed, rather than returning False, which was the + intent. This is worthy of note because it's a behavior change, but the + previous behavior was a bug. diff --git a/releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml b/releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml new file mode 100644 index 000000000..e0a3f6c83 --- /dev/null +++ b/releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OpenStackCloud.set_server_metadata() and OpenStackCloud.delete_server_metadata(), to manage metadata of existing nova compute instances. diff --git a/releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml b/releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml new file mode 100644 index 000000000..2f98ebbda --- /dev/null +++ b/releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - When creating a new server, the timeout was not being passed through to + floating IP creation, which could also time out. diff --git a/releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml b/releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml new file mode 100644 index 000000000..80d09fb83 --- /dev/null +++ b/releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml @@ -0,0 +1,7 @@ +--- +issues: + - Fixed a regression when using the latest os-client-config with + the keystoneauth from stable/newton. Although this isn't a + super common combination, the added feature that broke the + interaction is really not worthy of the incompatibility, so + a workaround was added. diff --git a/releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml b/releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml new file mode 100644 index 000000000..cd08b87cf --- /dev/null +++ b/releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml @@ -0,0 +1,6 @@ +--- +issues: + - Fixed an issue where nodepool could cause config_drive + to be passed explicitly as None, which was getting directly + passed through to the JSON. Also fix the same logic for key_name + and scheduler_hints while we're in there. diff --git a/releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml b/releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml new file mode 100644 index 000000000..7d8199dee --- /dev/null +++ b/releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml @@ -0,0 +1,6 @@ +--- +issues: + - Fixed the logic in delete_ips and added regression + tests to cover it. The old logic was incorrectly looking + for floating ips using port syntax. It was also not + swallowing errors when it should.
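A quick sketch of the endpoint_for and server metadata calls noted above. The cloud name, server name and metadata keys are hypothetical, and keyword handling beyond the defaults is not shown:

.. code-block:: python

    import shade

    cloud = shade.openstack_cloud(cloud='mycloud')

    # Raw endpoint for a service, straight from the current catalog.
    compute_url = cloud.endpoint_for('compute')

    # Manage metadata on an existing server by name or id.
    cloud.set_server_metadata('my-server', {'role': 'web'})
    cloud.delete_server_metadata('my-server', ['role'])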
diff --git a/releasenotes/notes/fix-list-networks-a592725df64c306e.yaml b/releasenotes/notes/fix-list-networks-a592725df64c306e.yaml new file mode 100644 index 000000000..eecc255e6 --- /dev/null +++ b/releasenotes/notes/fix-list-networks-a592725df64c306e.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fix for list_networks() ignoring any filters. diff --git a/releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml b/releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml new file mode 100644 index 000000000..94a2ab857 --- /dev/null +++ b/releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Added missing dependency on the futures library for python 2. + The dependency was missed in testing due to it having been listed + in test-requirements already. diff --git a/releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml b/releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml new file mode 100644 index 000000000..d681f93ca --- /dev/null +++ b/releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml @@ -0,0 +1,4 @@ +--- +issues: + - Images in the cloud with a string property named "properties" + caused image normalization to bomb. diff --git a/releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml b/releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml new file mode 100644 index 000000000..66a5f33c9 --- /dev/null +++ b/releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - Fixed an issue where shade could report a floating IP being attached + to a server erroneously due to only matching on fixed ip. Changed the + lookup to match on port ids. This adds an API call in the case where + the workaround is needed because of a bug in the cloud, but in most + cases it should make no difference. diff --git a/releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml b/releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml new file mode 100644 index 000000000..060461d09 --- /dev/null +++ b/releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fix for update_domain() where 'name' was not updatable. diff --git a/releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml b/releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml new file mode 100644 index 000000000..bc0f768bd --- /dev/null +++ b/releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - Fixed magnum service_type. shade was using it as 'container' + but the correct type is 'container-infra'. It's possible that on + old clouds with magnum, shade may now do the wrong thing. If that + occurs, please file a bug. diff --git a/releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml b/releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml new file mode 100644 index 000000000..9a7ba7de1 --- /dev/null +++ b/releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml @@ -0,0 +1,7 @@ +--- +features: + - Flavors will always contain an 'extra_specs' attribute. Client cruft, + such as 'links', 'HUMAN_ID', etc. has been removed. +fixes: + - Setting and unsetting flavor extra specs now works. This had + been broken since the 1.2.0 release.
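Setting and unsetting flavor extra specs, as fixed above, is an operator-level operation. A minimal sketch, with a hypothetical cloud name, flavor name and spec key:

.. code-block:: python

    import shade

    operator = shade.operator_cloud(cloud='mycloud')

    flavor = operator.get_flavor('m1.small')
    operator.set_flavor_specs(flavor['id'], {'hw:cpu_policy': 'dedicated'})
    operator.unset_flavor_specs(flavor['id'], ['hw:cpu_policy'])

    # Flavors now always carry an 'extra_specs' attribute.
    print(operator.get_flavor('m1.small')['extra_specs'])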
diff --git a/releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml b/releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml new file mode 100644 index 000000000..dcdccd249 --- /dev/null +++ b/releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml @@ -0,0 +1,5 @@ +--- +features: + - name_or_id parameters to search/get methods now support + filename-like globbing. This means search_servers('nb0*') + will return all servers whose names start with 'nb0'. diff --git a/releasenotes/notes/get-limits-c383c512f8e01873.yaml b/releasenotes/notes/get-limits-c383c512f8e01873.yaml new file mode 100644 index 000000000..58ed1e100 --- /dev/null +++ b/releasenotes/notes/get-limits-c383c512f8e01873.yaml @@ -0,0 +1,3 @@ +--- +features: + - Allow retrieving the limits of a specific project diff --git a/releasenotes/notes/get-usage-72d249ff790d1b8f.yaml b/releasenotes/notes/get-usage-72d249ff790d1b8f.yaml new file mode 100644 index 000000000..4b447f4d4 --- /dev/null +++ b/releasenotes/notes/get-usage-72d249ff790d1b8f.yaml @@ -0,0 +1,3 @@ +--- +features: + - Allow retrieving the usage of a specific project diff --git a/releasenotes/notes/get_object_api-968483adb016bce1.yaml b/releasenotes/notes/get_object_api-968483adb016bce1.yaml new file mode 100644 index 000000000..bc830d57c --- /dev/null +++ b/releasenotes/notes/get_object_api-968483adb016bce1.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added a new API call, OpenStackCloud.get_object(), to download objects from swift. diff --git a/releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml b/releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml new file mode 100644 index 000000000..3b134fcb5 --- /dev/null +++ b/releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml @@ -0,0 +1,4 @@ +--- +issues: + - Fixed an issue where glance image list pagination was being ignored, + leading to truncated image lists. diff --git a/releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml b/releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml new file mode 100644 index 000000000..9776030ca --- /dev/null +++ b/releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add granting roles to, and revoking roles from, groups and users diff --git a/releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml b/releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml new file mode 100644 index 000000000..654812104 --- /dev/null +++ b/releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml @@ -0,0 +1,9 @@ +--- +features: + - The image and flavor parameters for create_server + now accept name in addition to id and dict. If given + as a name or id, shade will do a get_image or a + get_flavor to find the matching image or flavor. + If you already have an id, are not using any caching, + and the extra lookup is annoying, passing the id in + as "dict(id='my-id')" will avoid the lookup. diff --git a/releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml b/releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml new file mode 100644 index 000000000..6461f5edf --- /dev/null +++ b/releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added the ability to create an image from a volume.
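The globbing and name-based image/flavor lookups described above combine naturally. A sketch with hypothetical cloud, image and flavor names:

.. code-block:: python

    import shade

    cloud = shade.openstack_cloud(cloud='mycloud')

    # Filename-like globbing in name_or_id.
    nodepool_servers = cloud.search_servers('nb0*')

    # image may be a name; passing flavor as dict(id=...) skips the
    # extra get_flavor lookup when the id is already known.
    server = cloud.create_server(
        name='example',
        image='ubuntu-16.04',
        flavor=dict(id='42'),
        wait=True)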
diff --git a/releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml b/releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml new file mode 100644 index 000000000..f3f35f480 --- /dev/null +++ b/releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml @@ -0,0 +1,9 @@ +--- +features: + - If a cloud does not have a neutron service, it is now + assumed that Nova will be the source of security groups. + To handle clouds that have nova-network and do not have + the security group extension, setting secgroup_source to + None will prevent shade from attempting to use security + groups at all. If the cloud has neutron but it is not a + functional source of security groups, set secgroup_source to nova. diff --git a/releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml b/releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml new file mode 100644 index 000000000..62e36277d --- /dev/null +++ b/releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for passing an Ironic microversion to the ironicclient + constructor in get_legacy_client. diff --git a/releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml b/releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml new file mode 100644 index 000000000..4d0fd1a1f --- /dev/null +++ b/releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - shade will now only generate file hashes for glance + images if both hashes are empty. If only one is given, + the other will be treated as an empty string. diff --git a/releasenotes/notes/list-az-names-a38c277d1192471b.yaml b/releasenotes/notes/list-az-names-a38c277d1192471b.yaml new file mode 100644 index 000000000..7b492716d --- /dev/null +++ b/releasenotes/notes/list-az-names-a38c277d1192471b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added list_availability_zone_names API call. diff --git a/releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml b/releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml new file mode 100644 index 000000000..df0d96b3d --- /dev/null +++ b/releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml @@ -0,0 +1,3 @@ +--- +features: + - Implement list_role_assignments for keystone v2, using roles_for_user. diff --git a/releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml b/releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml new file mode 100644 index 000000000..c993d2d81 --- /dev/null +++ b/releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add 'all_projects' parameter to list_servers and + search_servers, which will tell Nova to return servers for all projects + rather than just for the current project. This is only available to + cloud admins. diff --git a/releasenotes/notes/load-yaml-3177efca78e5c67a.yaml b/releasenotes/notes/load-yaml-3177efca78e5c67a.yaml new file mode 100644 index 000000000..2438f83a4 --- /dev/null +++ b/releasenotes/notes/load-yaml-3177efca78e5c67a.yaml @@ -0,0 +1,7 @@ +--- +features: + - Added a flag, 'load_yaml_config', that defaults to True. + If set to false, no clouds.yaml files will be loaded. This + is beneficial if os-client-config is to be used inside of + a service where end-user clouds.yaml files would make things + more confusing.
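The 'load_yaml_config' flag described just above is passed to the OpenStackConfig constructor. A minimal sketch; at the point of this merge the os_client_config import path still works, with openstack.config as its new home:

.. code-block:: python

    import os_client_config

    # Skip clouds.yaml files entirely, e.g. inside a service where
    # end-user config files would be confusing.
    config = os_client_config.OpenStackConfig(load_yaml_config=False)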
diff --git a/releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml b/releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml new file mode 100644 index 000000000..6c81b7756 --- /dev/null +++ b/releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml @@ -0,0 +1,5 @@ +--- +other: + - The contents of x-openstack-request-id are no longer + added to the objects returned. Instead, they are logged to + a logger named 'openstack.cloud.request_ids'. diff --git a/releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml b/releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml new file mode 100644 index 000000000..570e4dcca --- /dev/null +++ b/releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - Refactor ``OpenStackConfig._fix_backward_madness()`` into + ``OpenStackConfig.magic_fixes()``, which allows subclasses + to inject more fixup magic into the flow during + ``get_one_cloud()`` processing. diff --git a/releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml b/releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml new file mode 100644 index 000000000..8e34e5198 --- /dev/null +++ b/releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - Renamed session_client to make_rest_client. session_client + will continue to be supported for backwards compatibility. diff --git a/releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml b/releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml new file mode 100644 index 000000000..eaa718307 --- /dev/null +++ b/releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml @@ -0,0 +1,5 @@ +--- +features: + - create_object() now has a "metadata" parameter that can be used to create + an object with metadata from each key and value pair in that dictionary. + - Add an update_object() function that updates the metadata of a swift object. diff --git a/releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml b/releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml new file mode 100644 index 000000000..f7718aabb --- /dev/null +++ b/releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml @@ -0,0 +1,4 @@ +--- +other: + - The shade and os-client-config libraries have been + merged into python-openstacksdk. diff --git a/releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml b/releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml new file mode 100644 index 000000000..13eb7ca2f --- /dev/null +++ b/releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml @@ -0,0 +1,7 @@ +--- +features: + - Added a 'meta' parameter to create_image which allows + for providing parameters to the API that will not have + any type conversions performed. For the simple case, + the existing kwargs approach to image metadata is still + the best bet. diff --git a/releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml b/releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml new file mode 100644 index 000000000..30a380225 --- /dev/null +++ b/releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml @@ -0,0 +1,15 @@ +--- +features: + - Add min_version and max_version to get_legacy_client + and to get_session_endpoint. At the moment this is only + really fully plumbed through for cinder, which has extra + special fun around volume, volumev2 and volumev3.
Min and max + versions passed to either method will look through the options available + in the service catalog and try to return the latest one available + from the span of requested versions. This means a user can say + volume_api_version=None, min_version=2, max_version=3 and will get + an endpoint from get_session_endpoint or a Client from cinderclient + that will be either v2 or v3 but not v1. In the future, min and max + version for get_session_endpoint should be able to sort out + appropriate endpoints via version discovery, but that does not + currently exist. diff --git a/releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml b/releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml new file mode 100644 index 000000000..5df3f6d51 --- /dev/null +++ b/releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml @@ -0,0 +1,14 @@ +--- +features: + - Removed unneeded calls that were made when deleting servers with + floating ips. + - Added pagination support for volume listing. +upgrade: + - Removed designateclient as a dependency. All designate operations + are now performed with direct REST calls using a keystoneauth + Adapter. + - Server creation calls are now done with direct REST calls. +fixes: + - Fixed a bug related to neutron endpoints that did not have trailing + slashes. + - Fixed an issue with ports not having a created_at attribute. diff --git a/releasenotes/notes/net_provider-dd64b697476b7094.yaml b/releasenotes/notes/net_provider-dd64b697476b7094.yaml new file mode 100644 index 000000000..65a007302 --- /dev/null +++ b/releasenotes/notes/net_provider-dd64b697476b7094.yaml @@ -0,0 +1,3 @@ +--- +features: + - Network provider options are now accepted in create_network(). diff --git a/releasenotes/notes/network-list-e6e9dafdd8446263.yaml b/releasenotes/notes/network-list-e6e9dafdd8446263.yaml new file mode 100644 index 000000000..8f793c2bc --- /dev/null +++ b/releasenotes/notes/network-list-e6e9dafdd8446263.yaml @@ -0,0 +1,10 @@ +--- +features: + - Support added for configuring metadata about networks + for a cloud in a list of dicts, rather than in the + external_network and internal_network entries. The dicts + support a name, a routes_externally field, a nat_destination + field and a default_interface field. +deprecations: + - external_network and internal_network are deprecated and + should be replaced with the list of network dicts. diff --git a/releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml b/releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml new file mode 100644 index 000000000..a58cbeab4 --- /dev/null +++ b/releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OperatorCloud.get_network_quotas(), OperatorCloud.set_network_quotas() and OperatorCloud.delete_network_quotas(), to manage neutron quotas for projects and users \ No newline at end of file diff --git a/releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml b/releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml new file mode 100644 index 000000000..61f4ec1db --- /dev/null +++ b/releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added support for created_at, updated_at, description + and revision_number attributes for floating ips.
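The network quota APIs noted above follow shade's usual name_or_id convention. A sketch with a hypothetical cloud and project; the keyword arguments are standard neutron quota keys:

.. code-block:: python

    import shade

    operator = shade.operator_cloud(cloud='mycloud')

    operator.set_network_quotas('demo-project', network=20, port=100)
    print(operator.get_network_quotas('demo-project'))

    # Reset the project back to the cloud defaults.
    operator.delete_network_quotas('demo-project')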
diff --git a/releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml b/releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml new file mode 100644 index 000000000..1096921a5 --- /dev/null +++ b/releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - troveclient is no longer a hard dependency. Users + who were using shade to construct a troveclient + Client object should use os_client_config.make_legacy_client + instead. diff --git a/releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml b/releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml new file mode 100644 index 000000000..39ee2765d --- /dev/null +++ b/releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Role assignments were being returned as plain dicts instead of Munch objects. + This has been corrected. diff --git a/releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml b/releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml new file mode 100644 index 000000000..bbe2dfb51 --- /dev/null +++ b/releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml @@ -0,0 +1,6 @@ +--- +features: + - Image dicts that are returned are now normalized across glance v1 + and glance v2. Extra key/value properties are now both in the root + dict and in a properties dict. Additionally, cloud and region have + been added like they are for servers. diff --git a/releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml b/releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml new file mode 100644 index 000000000..1e1f501c2 --- /dev/null +++ b/releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Nova flavor operations are now handled via REST calls + instead of via novaclient. There should be no noticeable + difference. diff --git a/releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml b/releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml new file mode 100644 index 000000000..013ed82fa --- /dev/null +++ b/releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - A Nova microversion is now being requested. Since shade is not yet + actively microversion aware and has been dealing with the 2.0 structures + anyway, this should not affect anyone. diff --git a/releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml b/releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml new file mode 100644 index 000000000..06e6bd2f6 --- /dev/null +++ b/releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - Reverse the order of option selection in + ``OpenStackConfig._validate_auth()`` to prefer auth options + passed in (from argparse) over those found in clouds.yaml. + This allows the application to override config profile + auth settings. diff --git a/releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml b/releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml new file mode 100644 index 000000000..249d1725b --- /dev/null +++ b/releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - magnumclient is no longer a direct dependency as + magnum API calls are now made directly via REST.
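For users affected by the troveclient change above, the suggested replacement looks roughly like this. 'database' is the conventional service key for trove and 'mycloud' is a hypothetical clouds.yaml entry; the exact keyword set mirrors the other os-client-config factory helpers:

.. code-block:: python

    import os_client_config

    # Construct a troveclient Client without going through shade.
    trove = os_client_config.make_legacy_client('database', cloud='mycloud')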
diff --git a/releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml b/releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml new file mode 100644 index 000000000..27db18cb9 --- /dev/null +++ b/releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - All Nova interactions are done via direct REST calls. + python-novaclient is no longer a direct dependency of + openstack.cloud. diff --git a/releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml b/releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml new file mode 100644 index 000000000..157e90e83 --- /dev/null +++ b/releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml @@ -0,0 +1,9 @@ +--- +prelude: > + This release marks the beginning of the path towards removing all + of the 'python-\*client' libraries as dependencies. Subsequent releases + should expect to have fewer and fewer library dependencies. +upgrade: + - Removed glanceclient as a dependency. All glance operations + are now performed with direct REST calls using a keystoneauth + Adapter. diff --git a/releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml b/releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml new file mode 100644 index 000000000..4927c1e68 --- /dev/null +++ b/releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Removed swiftclient as a dependency. All swift operations + are now performed with direct REST calls using a keystoneauth + Adapter. diff --git a/releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml b/releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml new file mode 100644 index 000000000..84d9a1ac0 --- /dev/null +++ b/releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - No longer fail in list_router_interfaces() if a router does + not have the external_gateway_info key. diff --git a/releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml b/releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml new file mode 100644 index 000000000..a18b57dc3 --- /dev/null +++ b/releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added a helper method for constructing OpenStack SDK + Connection objects. diff --git a/releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml b/releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml new file mode 100644 index 000000000..673c7dcb8 --- /dev/null +++ b/releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml @@ -0,0 +1,4 @@ +--- +features: + - Server creation errors now include the server id in the + exception to allow people to clean up. diff --git a/releasenotes/notes/server-security-groups-840ab28c04f359de.yaml b/releasenotes/notes/server-security-groups-840ab28c04f359de.yaml new file mode 100644 index 000000000..d9de793e9 --- /dev/null +++ b/releasenotes/notes/server-security-groups-840ab28c04f359de.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add the `add_server_security_groups` and `remove_server_security_groups` + functions to add and remove security groups from a specific server. diff --git a/releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml b/releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml new file mode 100644 index 000000000..089d297c9 --- /dev/null +++ b/releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Keystone service descriptions were missing an attribute describing whether + or not the service was enabled.
A new 'enabled' boolean attribute has been + added to the service data. diff --git a/releasenotes/notes/session-client-b581a6e5d18c8f04.yaml b/releasenotes/notes/session-client-b581a6e5d18c8f04.yaml new file mode 100644 index 000000000..11219016b --- /dev/null +++ b/releasenotes/notes/session-client-b581a6e5d18c8f04.yaml @@ -0,0 +1,6 @@ +--- +features: + - Added kwargs and argparse processing for session_client. +deprecations: + - Renamed simple_client to session_client. simple_client + will remain as an alias for backwards compat. diff --git a/releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml b/releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml new file mode 100644 index 000000000..70aab0a13 --- /dev/null +++ b/releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added a helper method for constructing shade + OpenStackCloud objects. diff --git a/releasenotes/notes/stack-update-5886e91fd6e423bf.yaml b/releasenotes/notes/stack-update-5886e91fd6e423bf.yaml new file mode 100644 index 000000000..29a155236 --- /dev/null +++ b/releasenotes/notes/stack-update-5886e91fd6e423bf.yaml @@ -0,0 +1,4 @@ +--- +features: + - Implement update_stack to perform the update action on existing + orchestration stacks. diff --git a/releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml b/releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml new file mode 100644 index 000000000..d7cfb5145 --- /dev/null +++ b/releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml @@ -0,0 +1,3 @@ +--- +other: +- Started using reno for release notes. diff --git a/releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml b/releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml new file mode 100644 index 000000000..60e6d64c8 --- /dev/null +++ b/releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml @@ -0,0 +1,3 @@ +--- +features: + - get_object now supports streaming output directly to a file. diff --git a/releasenotes/notes/strict-mode-d493abc0c3e87945.yaml b/releasenotes/notes/strict-mode-d493abc0c3e87945.yaml new file mode 100644 index 000000000..ea81b138b --- /dev/null +++ b/releasenotes/notes/strict-mode-d493abc0c3e87945.yaml @@ -0,0 +1,6 @@ +--- +features: + - Added 'strict' mode, which is set by passing strict=True + to the OpenStackCloud constructor. strict mode tells shade + to only return values in resources that are part of shade's + declared data model contract. diff --git a/releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml b/releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml new file mode 100644 index 000000000..27848a5d2 --- /dev/null +++ b/releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Fixed an issue where a section of code that was supposed to be resetting + the SwiftService object was instead resetting the protective mutex around + the SwiftService object, leading to an exception referencing "__exit__". diff --git a/releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml b/releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml new file mode 100644 index 000000000..a7b6a458b --- /dev/null +++ b/releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml @@ -0,0 +1,8 @@ +--- +features: + - Added update_endpoint as a new function that allows + the user to update a created endpoint with new values + rather than deleting and recreating that endpoint. + This feature only works with keystone v3; with v2 it + will raise an exception stating the feature is not + available.
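A sketch of the strict mode and update_endpoint features described above. The cloud name, endpoint id and URL are hypothetical, and update_endpoint only works against keystone v3:

.. code-block:: python

    import shade

    # strict=True trims returned resources down to shade's declared
    # data model contract.
    cloud = shade.openstack_cloud(cloud='mycloud', strict=True)

    # Update an existing endpoint in place rather than recreating it.
    operator = shade.operator_cloud(cloud='mycloud')
    operator.update_endpoint(
        'endpoint-id',
        url='https://compute.example.com/v2.1',
        interface='public')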
diff --git a/releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml b/releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml new file mode 100644 index 000000000..14a4fd4a1 --- /dev/null +++ b/releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml @@ -0,0 +1,13 @@ +--- +fixes: + - shade now correctly does not try to attach a floating ip with auto_ip + if the cloud has given a public IPv6 address and the calling context + supports IPv6 routing. shade has always used this logic to determine + the server 'interface_ip', but the auto floating ip was incorrectly only + looking at the 'public_v4' value to determine whether the server needed + additional networking. +upgrade: + - If your cloud presents a default split IPv4/IPv6 stack with a public + v6 and a private v4 address and you have the expectation that auto_ip + should procure a v4 floating ip, you need to set 'force_ipv4' to True in + your clouds.yaml entry for the cloud. diff --git a/releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml b/releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml new file mode 100644 index 000000000..e1d6d41a2 --- /dev/null +++ b/releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml @@ -0,0 +1,4 @@ +--- +other: + - Add citycloud regions for Buffalo, Frankfurt, Karlskrona and Los Angeles. + - Add new DreamCompute cloud and deprecate DreamHost cloud. diff --git a/releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml b/releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml new file mode 100644 index 000000000..c55792fe8 --- /dev/null +++ b/releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml @@ -0,0 +1,13 @@ +--- +features: + - Version discovery is now done via the keystoneauth + library. shade still has one behavioral difference + from default keystoneauth behavior, which is that + shade will use a version it understands if it can + find one even if the user has requested a different + version. This change opens the door for shade to + start being able to consume API microversions as + needed. +upgrade: + - keystoneauth version 3.2.0 or higher is required + because of version discovery. diff --git a/releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml b/releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml new file mode 100644 index 000000000..dfb3b1cd6 --- /dev/null +++ b/releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OperatorCloud.get_volume_quotas(), OperatorCloud.set_volume_quotas() and OperatorCloud.delete_volume_quotas(), to manage cinder quotas for projects and users \ No newline at end of file diff --git a/releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml b/releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml new file mode 100644 index 000000000..59fea21bb --- /dev/null +++ b/releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for listing volume types. + - Add support for managing volume type access. diff --git a/releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml b/releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml new file mode 100644 index 000000000..ae434e28b --- /dev/null +++ b/releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml @@ -0,0 +1,7 @@ +--- +features: + - Adds a new pair of options to create_image_snapshot(), wait and timeout, + to have the function wait until the image snapshot being created goes + into an active state.
+ - Adds a new function wait_for_image() which will wait for an image to go + into an active state. diff --git a/releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml b/releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml new file mode 100644 index 000000000..58bc54c5c --- /dev/null +++ b/releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml @@ -0,0 +1,3 @@ +--- +features: + - New wait_for_server() API call to wait for a server to reach ACTIVE status. diff --git a/releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml b/releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml new file mode 100644 index 000000000..aa1b361dd --- /dev/null +++ b/releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - Added requests and Babel to the direct dependencies list to work around + issues with pip installation, entrypoints and transitive dependencies + with conflicting exclusion ranges. Packagers of shade do not need to + add these two new requirements to shade's dependency list - they are + transitive dependencies and should be satisfied by the other things in the + requirements list. Both will be removed from the list again once the + python client libraries that pull them in have been removed. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder new file mode 100644 index 000000000..e69de29bb diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py new file mode 100644 index 000000000..1a6db74cd --- /dev/null +++ b/releasenotes/source/conf.py @@ -0,0 +1,285 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# oslo.config Release Notes documentation build configuration file, created by +# sphinx-quickstart on Tue Nov 3 17:40:50 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones.
+extensions = [
+    'openstackdocstheme',
+    'reno.sphinxext',
+]
+
+# openstackdocstheme options
+repository_name = 'openstack/python-openstacksdk'
+bug_project = '760'
+bug_tag = ''
+html_last_updated_fmt = '%Y-%m-%d %H:%M'
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'OpenStack SDK Release Notes'
+copyright = u'2017, Various members of the OpenStack Foundation'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+import pbr.version
+version_info = pbr.version.VersionInfo('openstacksdk')
+# The full version, including alpha/beta/rc tags.
+release = version_info.version_string_with_vcs()
+# The short X.Y version.
+version = version_info.canonical_version_string()
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+# language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'openstackdocs'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'shadeReleaseNotesdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    # 'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    # 'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    # 'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    ('index', 'shadeReleaseNotes.tex',
+     u'Shade Release Notes Documentation',
+     u'Shade Developers', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'shadereleasenotes',
+     u'shade Release Notes Documentation',
+     [u'shade Developers'], 1)
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    ('index', 'shadeReleaseNotes',
+     u'shade Release Notes Documentation',
+     u'shade Developers', 'shadeReleaseNotes',
+     u'A client library for interacting with OpenStack clouds',
+     u'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
new file mode 100644
index 000000000..74d2b566c
--- /dev/null
+++ b/releasenotes/source/index.rst
@@ -0,0 +1,10 @@
+=====================
+ Shade Release Notes
+=====================
+
+.. toctree::
+   :maxdepth: 1
+
+   mainline
+   unreleased
+   pike
diff --git a/releasenotes/source/mainline.rst b/releasenotes/source/mainline.rst
new file mode 100644
index 000000000..065da7150
--- /dev/null
+++ b/releasenotes/source/mainline.rst
@@ -0,0 +1,5 @@
+=========================
+ Mainline Release Series
+=========================
+
+.. release-notes::
diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst
new file mode 100644
index 000000000..e54560965
--- /dev/null
+++ b/releasenotes/source/mitaka.rst
@@ -0,0 +1,6 @@
+===================================
+ Mitaka Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: origin/stable/mitaka
diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst
new file mode 100644
index 000000000..97036ed25
--- /dev/null
+++ b/releasenotes/source/newton.rst
@@ -0,0 +1,6 @@
+===================================
+ Newton Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: origin/stable/newton
diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst
new file mode 100644
index 000000000..ebe62f42e
--- /dev/null
+++ b/releasenotes/source/ocata.rst
@@ -0,0 +1,6 @@
+===================================
+ Ocata Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: origin/stable/ocata
diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst
new file mode 100644
index 000000000..e43bfc0ce
--- /dev/null
+++ b/releasenotes/source/pike.rst
@@ -0,0 +1,6 @@
+===================================
+ Pike Series Release Notes
+===================================
+
+.. release-notes::
+   :branch: stable/pike
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
new file mode 100644
index 000000000..abed3d2e3
--- /dev/null
+++ b/releasenotes/source/unreleased.rst
@@ -0,0 +1,11 @@
+=====================
+ Unreleased Versions
+=====================
+
+.. NOTE(dhellmann): The earliest-version field is set to avoid
+   duplicating *all* of the history on this page. When we start
+   creating stable branches the history should be truncated
+   automatically and we can remove the setting.
+
+.. release-notes::
+   :earliest-version: 1.17.0
diff --git a/requirements.txt b/requirements.txt
index b4d574ed5..bb087c608 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,9 +2,21 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
+PyYAML>=3.10 # MIT
+appdirs>=1.3.0 # MIT License
+requestsexceptions>=1.2.0 # Apache-2.0
 jsonpatch>=1.16 # BSD
 six>=1.9.0 # MIT
 stevedore>=1.20.0 # Apache-2.0
-os-client-config>=1.28.0 # Apache-2.0
 keystoneauth1>=3.2.0 # Apache-2.0
 deprecation>=1.0 # Apache-2.0
+
+munch>=2.1.0 # MIT
+decorator>=3.4.0 # BSD
+jmespath>=0.9.0 # MIT
+ipaddress>=1.0.16;python_version<'3.3' # PSF
+futures>=3.0.0;python_version=='2.7' or python_version=='2.6' # BSD
+iso8601>=0.1.11 # MIT
+netifaces>=0.10.4 # MIT
+
+dogpile.cache>=0.6.2 # BSD
diff --git a/setup.cfg b/setup.cfg
index d006218f5..37e19d4bb 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -16,13 +16,17 @@ classifier =
     Programming Language :: Python :: 2
     Programming Language :: Python :: 2.7
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.3
     Programming Language :: Python :: 3.5
 
 [files]
 packages =
     openstack
 
+# TODO(mordred) Move this to an OSC command before 1.0
+[entry_points]
+console_scripts =
+    openstack-inventory = openstack.cloud.cmd.inventory:main
+
 [build_sphinx]
 source-dir = doc/source
 build-dir = doc/build
diff --git a/test-requirements.txt b/test-requirements.txt
index 40cb35668..c81a47b4f 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -6,13 +6,20 @@ hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
 beautifulsoup4>=4.6.0 # MIT
 coverage!=4.4,>=4.0 # Apache-2.0
 doc8>=0.6.0 # Apache-2.0
+docutils>=0.11 # OSI-Approved Open Source, Public Domain
+extras>=0.0.3 # MIT
 fixtures>=3.0.0 # Apache-2.0/BSD
+jsonschema<3.0.0,>=2.6.0 # MIT
 mock>=2.0.0 # BSD
+python-subunit>=0.0.18 # Apache-2.0/BSD
 openstackdocstheme>=1.17.0 # Apache-2.0
-os-testr>=1.0.0 # Apache-2.0
-requests>=2.14.2 # Apache-2.0
+oslotest>=1.10.0 # Apache-2.0
+reno>=2.5.0 # Apache-2.0
 requests-mock>=1.1.0 # Apache-2.0
 sphinx>=1.6.2 # BSD
+stestr>=1.0.0 # Apache-2.0
 testrepository>=0.0.18 # Apache-2.0/BSD
 testscenarios>=0.4 # Apache-2.0/BSD
 testtools>=1.4.0 # MIT
+python-glanceclient>=2.8.0 # Apache-2.0
+python-ironicclient>=1.14.0 # Apache-2.0
diff --git a/tools/keystone_version.py b/tools/keystone_version.py
new file mode 100644
index 000000000..418505c6b
--- /dev/null
+++ b/tools/keystone_version.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
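+#
+# Print the identity API versions advertised by each cloud in clouds.yaml:
+# fetch each cloud's auth_url, print any CURRENT/stable version documents
+# found there, then strip the version suffix from the path and probe the
+# unversioned endpoint as well.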
+
+import openstack.config
+import pprint
+import sys
+# 'import urlparse' only exists on Python 2; six.moves provides the same
+# module on both Python 2 and 3, and six is already in requirements.txt.
+from six.moves.urllib import parse as urlparse
+
+
+def print_versions(r):
+    if 'version' in r:
+        for version in r['version']:
+            print_version(version)
+    if 'values' in r:
+        for version in r['values']:
+            print_version(version)
+    if isinstance(r, list):
+        for version in r:
+            print_version(version)
+
+
+def print_version(version):
+    if version['status'] in ('CURRENT', 'stable'):
+        print(
+            "\tVersion ID: {id} updated {updated}".format(
+                id=version.get('id'),
+                updated=version.get('updated')))
+
+
+verbose = '-v' in sys.argv
+ran = []
+for cloud in openstack.config.OpenStackConfig().get_all_clouds():
+    if cloud.name in ran:
+        continue
+    ran.append(cloud.name)
+    # We don't actually need a compute client - but we'll be getting full urls
+    # anyway. Without this SSL cert info becomes wrong.
+    c = cloud.get_session_client('compute')
+    endpoint = cloud.config['auth']['auth_url']
+    try:
+        print(endpoint)
+        r = c.get(endpoint).json()
+        if verbose:
+            pprint.pprint(r)
+    except Exception as e:
+        print("Error with {cloud}: {e}".format(cloud=cloud.name, e=str(e)))
+        continue
+    if 'version' in r:
+        print_version(r['version'])
+        url = urlparse.urlparse(endpoint)
+        parts = url.path.split(':')
+        if len(parts) == 2:
+            path, port = parts
+        else:
+            path = url.path
+            port = None
+        stripped = path.rsplit('/', 2)[0]
+        if port:
+            stripped = '{stripped}:{port}'.format(stripped=stripped, port=port)
+        # urlunsplit takes (scheme, netloc, path, query, fragment); the
+        # params and query values were previously passed in the wrong slots.
+        endpoint = urlparse.urlunsplit(
+            (url.scheme, url.netloc, stripped, url.query, url.fragment))
+        print(" also {endpoint}".format(endpoint=endpoint))
+        try:
+            r = c.get(endpoint).json()
+            if verbose:
+                pprint.pprint(r)
+        except Exception:
+            print("\tUnauthorized")
+            continue
+        if 'version' in r:
+            print_version(r['version'])
+        elif 'versions' in r:
+            print_versions(r['versions'])
+        else:
+            print("\n\nUNKNOWN\n\n{r}".format(r=r))
+    else:
+        print_versions(r['versions'])
diff --git a/tools/nova_version.py b/tools/nova_version.py
new file mode 100644
index 000000000..955812251
--- /dev/null
+++ b/tools/nova_version.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
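+#
+# Print the compute API versions advertised by each cloud in clouds.yaml:
+# query the unversioned compute endpoint and report the id, maximum and
+# minimum microversion of each CURRENT version, falling back to SUPPORTED
+# versions when no CURRENT one is published.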
+
+import openstack.config
+
+ran = []
+for cloud in openstack.config.OpenStackConfig().get_all_clouds():
+    if cloud.name in ran:
+        continue
+    ran.append(cloud.name)
+    c = cloud.get_session_client('compute')
+    try:
+        raw_endpoint = c.get_endpoint()
+        have_current = False
+        endpoint = raw_endpoint.rsplit('/', 2)[0]
+        print(endpoint)
+        r = c.get(endpoint).json()
+    except Exception:
+        print("Error with %s" % cloud.name)
+        continue
+    for version in r['versions']:
+        if version['status'] == 'CURRENT':
+            have_current = True
+            print(
+                "\tVersion ID: {id} updated {updated}".format(
+                    id=version.get('id'),
+                    updated=version.get('updated')))
+            print(
+                "\tVersion Max: {max}".format(max=version.get('version')))
+            print(
+                "\tVersion Min: {min}".format(min=version.get('min_version')))
+    if not have_current:
+        for version in r['versions']:
+            if version['status'] == 'SUPPORTED':
+                have_current = True
+                print(
+                    "\tVersion ID: {id} updated {updated}".format(
+                        id=version.get('id'),
+                        updated=version.get('updated')))
+                print(
+                    "\tVersion Max: {max}".format(max=version.get('version')))
+                print(
+                    "\tVersion Min: {min}".format(
+                        min=version.get('min_version')))
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
new file mode 100755
index 000000000..43468e450
--- /dev/null
+++ b/tools/tox_install.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Client constraint file contains this client version pin that is in conflict
+# with installing the client from source. We should remove the version pin in
+# the constraints file before applying it for from-source installation.
+
+CONSTRAINTS_FILE=$1
+shift 1
+
+set -e
+
+# NOTE(tonyb): Place this in the tox environment's log dir so it will get
+# published to logs.openstack.org for easy debugging.
+localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
+
+if [[ $CONSTRAINTS_FILE != http* ]]; then
+    CONSTRAINTS_FILE=file://$CONSTRAINTS_FILE
+fi
+# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
+curl $CONSTRAINTS_FILE --insecure --progress-bar --output $localfile
+
+pip install -c$localfile openstack-requirements
+
+# This is the main purpose of the script: Allow local installation of
+# the current repo. It is listed in constraints file and thus any
+# install will be constrained and we need to unconstrain it.
+edit-constraints $localfile -- $CLIENT_NAME
+
+pip install -c$localfile -U $*
+exit $?
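Both version-probing tools above follow the same pattern: iterate over every cloud that openstack.config knows about, build a session client, and GET the unversioned endpoint's discovery document. A minimal sketch of that shared pattern, assuming a populated clouds.yaml (error handling trimmed; every call used here appears in the tools above):

.. code-block:: python

    import openstack.config

    for cloud in openstack.config.OpenStackConfig().get_all_clouds():
        client = cloud.get_session_client('compute')
        # Strip the version/project suffix, as nova_version.py does above,
        # to reach the discovery document.
        endpoint = client.get_endpoint().rsplit('/', 2)[0]
        try:
            versions = client.get(endpoint).json().get('versions', [])
        except Exception as exc:
            print("Error with {cloud}: {e}".format(cloud=cloud.name, e=exc))
            continue
        for version in versions:
            if version.get('status') in ('CURRENT', 'stable'):
                print(cloud.name, version.get('id'), version.get('updated'))

The real tools add verbose response dumping and keystone-specific path munging on top of this skeleton.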
diff --git a/tox.ini b/tox.ini
index ac1e695a9..a419cf316 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,48 +5,78 @@ skipsdist = True
 
 [testenv]
 usedevelop = True
-install_command = pip install -U {opts} {packages}
+basepython = {env:OPENSTACKSDK_TOX_PYTHON:python2}
+passenv = UPPER_CONSTRAINTS_FILE
+install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
 setenv =
-    VIRTUAL_ENV={envdir}
+    VIRTUAL_ENV={envdir}
+    LANG=en_US.UTF-8
+    LANGUAGE=en_US:en
+    LC_ALL=C
+    BRANCH_NAME=master
+    CLIENT_NAME=openstacksdk
 deps = -r{toxinidir}/test-requirements.txt
-commands = ostestr {posargs}
+commands = stestr run {posargs}
+           stestr slowest
 
 [testenv:examples]
-setenv = OS_TEST_PATH=./openstack/tests/examples
-passenv = OS_*
-
-[functionalbase]
-setenv = OS_TEST_PATH=./openstack/tests/functional
-passenv = OS_*
+passenv = OS_* OPENSTACKSDK_* UPPER_CONSTRAINTS_FILE
+commands = stestr --test-path ./openstack/tests/examples run {posargs}
+           stestr slowest
 
 [testenv:functional]
-basepython = python2.7
-setenv = {[functionalbase]setenv}
-passenv = {[functionalbase]passenv}
-
-[testenv:functional3]
-basepython = python3.4
-setenv = {[functionalbase]setenv}
-passenv = {[functionalbase]passenv}
+passenv = OS_* OPENSTACKSDK_* UPPER_CONSTRAINTS_FILE
+commands = stestr --test-path ./openstack/tests/functional run --serial {posargs}
+           stestr slowest
 
 [testenv:pep8]
-commands = flake8
+commands =
+    doc8 doc/source
+    flake8
 
 [testenv:venv]
 commands = {posargs}
 
-; If this fails for you, you may be running an old version of tox.
-; Run 'pip install tox' to install a newer version of tox.
+[testenv:debug]
+whitelist_externals = find
+commands =
+    find . -type f -name "*.pyc" -delete
+    oslo_debug_helper {posargs}
+
 [testenv:cover]
-commands = python setup.py test --coverage --coverage-package-name=openstack --testr-args='{posargs}'
+setenv =
+    {[testenv]setenv}
+    PYTHON=coverage run --source shade --parallel-mode
+commands =
+    stestr run {posargs}
+    coverage combine
+    coverage html -d cover
+    coverage xml -o cover/coverage.xml
+
+[testenv:ansible]
+# Need to pass some env vars for the Ansible playbooks
+passenv = HOME USER
+commands = {toxinidir}/extras/run-ansible-tests.sh -e {envdir} {posargs}
 
 [testenv:docs]
-commands =
-    doc8 doc/source
-    python setup.py build_sphinx
+skip_install = True
+deps = -r{toxinidir}/test-requirements.txt
+commands = sphinx-build -b html doc/source/ doc/build
+
+[testenv:releasenotes]
+commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
 
 [flake8]
-ignore=D100,D101,D102,D103,D104,D105,D200,D202,D204,D205,D211,D301,D400,D401
+# The following are ignored on purpose. It's not super worth it to fix them.
+# However, if you feel strongly about it, patches will be accepted to fix them
+# if they fix ALL of the occurrences of one and only one of them.
+# H103 Is about the Apache license. It's strangely strict about the use of
+#      single vs double quotes in the license text. If someone decides to fix
+#      this, please be sure to preserve all copyright lines.
+# H306 Is about alphabetical imports - there's a lot to fix.
+# H4 Are about docstrings and there's just a huge pile of pre-existing issues.
+# D* Came from sdk, unknown why they're skipped.
+ignore = H103,H306,H4,D100,D101,D102,D103,D104,D105,D200,D202,D204,D205,D211,D301,D400,D401
 show-source = True
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
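For reference, the new calls named in the release notes earlier in this change compose roughly as follows. This is an untested sketch: the factory function follows shade, where these notes originate (in this tree the equivalent code lives under ``openstack.cloud``), and the cloud name, project name, and quota keyword arguments are placeholders rather than values taken from the change.

.. code-block:: python

    import shade

    # Operator-level cloud object for a cloud defined in clouds.yaml
    # ('mycloud' is a placeholder name).
    cloud = shade.operator_cloud(cloud='mycloud')

    # New cinder quota management calls from the release notes; the keyword
    # arguments passed to set_volume_quotas are assumptions.
    print(cloud.get_volume_quotas('demo-project'))
    cloud.set_volume_quotas('demo-project', volumes=20, gigabytes=500)
    cloud.delete_volume_quotas('demo-project')  # revert to the defaults

    # New helper that blocks until a server reaches ACTIVE status.
    server = cloud.get_server('my-server')
    server = cloud.wait_for_server(server)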