From 448209459dfca5f361daefe526b3281976b0c732 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 10 Apr 2024 12:44:21 -0400 Subject: [PATCH] CI: Add codespell to pep8 Fix existing spelling errors Change-Id: Ie689cf5a344aaa630a4860448b09242333a8e119 --- .codespell-ignore | 2 ++ ansible/group_vars/all.yml | 2 +- ansible/module_utils/kolla_container_worker.py | 2 +- ansible/module_utils/kolla_podman_worker.py | 6 +++--- ansible/roles/common/defaults/main.yml | 6 +++--- .../roles/neutron/templates/sriov_agent.ini.j2 | 2 +- ansible/roles/nova-cell/defaults/main.yml | 4 ++-- .../tasks/cell_proxy_loadbalancer.yml | 2 +- ansible/roles/nova-cell/tasks/loadbalancer.yml | 6 +++--- ansible/roles/nova-cell/tasks/reload.yml | 2 +- ansible/roles/opensearch/tasks/post-config.yml | 2 +- ansible/roles/ovs-dpdk/defaults/main.yml | 2 +- ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh | 18 +++++++++--------- ansible/roles/ovs-dpdk/tasks/config.yml | 2 +- ansible/roles/rabbitmq/tasks/precheck.yml | 4 ++-- .../high-availability/haproxy-guide.rst | 4 ++-- .../central-logging-guide.rst | 4 ++-- .../prometheus-guide.rst | 8 ++++---- .../reference/networking/designate-guide.rst | 2 +- doc/source/reference/networking/neutron.rst | 2 +- .../shared-services/keystone-guide.rst | 4 ++-- doc/source/user/security.rst | 6 +++--- etc/kolla/globals.yml | 2 +- ...eful-timeout-argument-a8b71a389351599b.yaml | 2 +- ...-encrypt-intergration-9e5f9846536379af.yaml | 2 +- ...-monasca-notification-03283c42a8df3d71.yaml | 2 +- ...manager-notifications-27f5d0474f470512.yaml | 2 +- ...dd-trove-singletenant-dd02a7b7cc1a4f99.yaml | 2 +- .../notes/bug-1947710-6d0975ae72f43ada.yaml | 2 +- ...-pid-flag-in-template-d915fe4b71548da0.yaml | 2 +- ...precate-sanity-checks-928ef3af2dc0f187.yaml | 2 +- .../enable-influxdb-tsi-858cc95e99605107.yaml | 2 +- ...vices-backend-haproxy-29467a9771e99917.yaml | 2 +- ...crypt-backend-haproxy-fb96285d74fb464c.yaml | 2 +- ...d-coordination-config-b1c9f900ef13be13.yaml | 2 +- 
.../fix-etcd-protocol-3c9482f90070ee6e.yaml | 2 +- ...zun_cni_daemon-mounts-9a7664896cfc7a9f.yaml | 2 +- ...izon-local-settings-d-32c9ad3d40ca23f4.yaml | 2 +- ...proxy-max-connections-df6aff5c82fdef24.yaml | 2 +- .../notes/kolla-host-584270e3aee6dfd6.yaml | 4 ++-- .../octavia_jobboard-823f44393f3e109e.yaml | 2 +- .../openstack-exporter-hammering-os-apis.yaml | 2 +- .../ovn-monitor-all-782e7cdc04688fc6.yaml | 2 +- .../notes/ovs-dpdk-a48404777d3836a3.yaml | 2 +- ...abbitmq-configuration-6b100a390734dc29.yaml | 2 +- ...te-on-shutdown-always-e8db9ad15fd1b8fb.yaml | 2 +- .../reduce-ceph-pgs-27e88e3b6e3b809c.yaml | 2 +- ...t-replication-network-40ecd13e4339f299.yaml | 4 ++-- .../tacker-conductor-dc90739426381e14.yaml | 2 +- specs/internal-tls-endpoints.rst | 2 +- specs/kubernetes-deployment.rst | 4 ++-- specs/logging-with-heka.rst | 2 +- specs/multiple-globals-files.rst | 2 +- specs/template.rst | 2 +- tests/templates/globals-default.j2 | 2 +- tests/test-core-openstack.sh | 2 +- tools/cleanup-host | 10 +++++----- tox.ini | 2 ++ 58 files changed, 91 insertions(+), 87 deletions(-) create mode 100644 .codespell-ignore diff --git a/.codespell-ignore b/.codespell-ignore new file mode 100644 index 0000000000..2276aa9aa0 --- /dev/null +++ b/.codespell-ignore @@ -0,0 +1,2 @@ +ist +solum diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index fa40744836..649800e3e6 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -1407,7 +1407,7 @@ octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, ################################### # Identity federation configuration ################################### -# Here we configure all of the IdPs meta informations that will be required to implement identity federation with OpenStack Keystone. +# Here we configure all of the IdPs meta information that will be required to implement identity federation with OpenStack Keystone. 
# We require the administrator to enter the following metadata: # * name (internal name of the IdP in Keystone); # * openstack_domain (the domain in Keystone that the IdP belongs to) diff --git a/ansible/module_utils/kolla_container_worker.py b/ansible/module_utils/kolla_container_worker.py index 8e85c7fd9a..c3889bab4f 100644 --- a/ansible/module_utils/kolla_container_worker.py +++ b/ansible/module_utils/kolla_container_worker.py @@ -30,7 +30,7 @@ class ContainerWorker(ABC): self.systemd = SystemdWorker(self.params) - # NOTE(mgoddard): The names used by Docker are inconsisent between + # NOTE(mgoddard): The names used by Docker are inconsistent between # configuration of a container's resources and the resources in # container_info['HostConfig']. This provides a mapping between the # two. diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index 1cba08a127..9e27dece4f 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ b/ansible/module_utils/kolla_podman_worker.py @@ -24,7 +24,7 @@ CONTAINER_PARAMS = [ 'name', # string 'cap_add', # list 'cgroupns', # 'str',choices=['private', 'host'] - 'command', # arrray of strings -- docker string + 'command', # array of strings -- docker string # this part is hidden inside dimensions 'cpu_period', # int @@ -56,7 +56,7 @@ CONTAINER_PARAMS = [ 'privileged', # bool 'restart_policy', # set to none, handled by systemd 'remove', # bool - 'restart_tries', # int doesnt matter done by systemd + 'restart_tries', # int doesn't matter done by systemd 'stop_timeout', # int 'tty' # bool # VOLUMES NOT WORKING HAS TO BE DONE WITH MOUNTS @@ -390,7 +390,7 @@ class PodmanWorker(ContainerWorker): def compare_dimensions(self, container_info): new_dimensions = self.params.get('dimensions') - # NOTE(mgoddard): The names used by Docker are inconsisent between + # NOTE(mgoddard): The names used by Docker are inconsistent between # configuration of a container's resources and the resources in # 
container_info['HostConfig']. This provides a mapping between the # two. diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml index cdad806d6e..4b70c8ae17 100644 --- a/ansible/roles/common/defaults/main.yml +++ b/ansible/roles/common/defaults/main.yml @@ -30,9 +30,9 @@ common_services: volumes: "{{ cron_default_volumes + cron_extra_volumes }}" dimensions: "{{ cron_dimensions }}" -####################### -# TLS and authenication -####################### +######################## +# TLS and authentication +######################## fluentd_elasticsearch_path: "" fluentd_elasticsearch_scheme: "{{ internal_protocol }}" diff --git a/ansible/roles/neutron/templates/sriov_agent.ini.j2 b/ansible/roles/neutron/templates/sriov_agent.ini.j2 index cfc9b817e7..fb8ba15077 100644 --- a/ansible/roles/neutron/templates/sriov_agent.ini.j2 +++ b/ansible/roles/neutron/templates/sriov_agent.ini.j2 @@ -5,7 +5,7 @@ extensions = qos [sriov_nic] # 'physical_device_mappings' is a comma separated list -# Maps a physical network to network inferface used for SRIOV +# Maps a physical network to network interface used for SRIOV # This template should be modified for specific environments # See Official OpenStack SRIOV documentation for all available options physical_device_mappings = {{ neutron_sriov_physnets }} diff --git a/ansible/roles/nova-cell/defaults/main.yml b/ansible/roles/nova-cell/defaults/main.yml index 4e07db6e89..829fc1ba24 100644 --- a/ansible/roles/nova-cell/defaults/main.yml +++ b/ansible/roles/nova-cell/defaults/main.yml @@ -552,7 +552,7 @@ libvirt_tls: false # also means the deployer is responsible for restarting the nova_compute and # nova_libvirt containers when the key changes, as we can't know when to do that libvirt_tls_manage_certs: true -# When using tls we are verfiying the hostname we are connected to matches the +# When using tls we are verifying the hostname we are connected to matches the # libvirt cert we are presented. 
As such we can't use IP's here, but keep the # ability for people to override the hostname to use. migration_hostname: "{{ ansible_facts.nodename }}" @@ -579,7 +579,7 @@ nova_dev_mode: "{{ kolla_dev_mode }}" nova_source_version: "{{ kolla_source_version }}" ################################### -# Enable Shared Bind Propogation +# Enable Shared Bind Propagation ################################### enable_shared_var_lib_nova_mnt: "{{ enable_cinder_backend_nfs | bool or enable_cinder_backend_quobyte | bool }}" diff --git a/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml b/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml index bcf95227b9..e57839e800 100644 --- a/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml +++ b/ansible/roles/nova-cell/tasks/cell_proxy_loadbalancer.yml @@ -4,7 +4,7 @@ name: loadbalancer-config vars: project_services: "{{ cell_proxy_project_services | namespace_haproxy_for_cell(cell_name) }}" - # Default is necessary because this play may not be targetting the hosts in + # Default is necessary because this play may not be targeting the hosts in # the cell_proxy_group group, and therefore they would not have role # defaults defined. If we put this variable in group_vars, then it cannot # be overridden by the inventory. diff --git a/ansible/roles/nova-cell/tasks/loadbalancer.yml b/ansible/roles/nova-cell/tasks/loadbalancer.yml index 2c47ad139f..16b6e53cbe 100644 --- a/ansible/roles/nova-cell/tasks/loadbalancer.yml +++ b/ansible/roles/nova-cell/tasks/loadbalancer.yml @@ -20,7 +20,7 @@ - import_tasks: proxy_loadbalancer.yml vars: - # Default is necessary because this play may not be targetting the hosts in + # Default is necessary because this play may not be targeting the hosts in # the nova-novncproxy group, and therefore they would not have role # defaults defined. If we put these variables in group_vars, then they # cannot be overridden by the inventory. 
@@ -57,7 +57,7 @@ - import_tasks: proxy_loadbalancer.yml vars: - # Default is necessary because this play may not be targetting the hosts in + # Default is necessary because this play may not be targeting the hosts in # the nova-spicehtml5proxy group, and therefore they would not have role # defaults defined. If we put these variables in group_vars, then they # cannot be overridden by the inventory. @@ -94,7 +94,7 @@ - import_tasks: proxy_loadbalancer.yml vars: - # Default is necessary because this play may not be targetting the hosts in + # Default is necessary because this play may not be targeting the hosts in # the nova-serialproxy group, and therefore they would not have role # defaults defined. If we put these variables in group_vars, then they # cannot be overridden by the inventory. diff --git a/ansible/roles/nova-cell/tasks/reload.yml b/ansible/roles/nova-cell/tasks/reload.yml index f365035b11..7f61efb302 100644 --- a/ansible/roles/nova-cell/tasks/reload.yml +++ b/ansible/roles/nova-cell/tasks/reload.yml @@ -7,7 +7,7 @@ # Speaking to the nova team, this seems to be an issue in oslo.service, # with a fix proposed here: https://review.openstack.org/#/c/641907. # This issue also seems to affect the proxy services, which exit non-zero in -# reponse to a SIGHUP, so restart those too. +# response to a SIGHUP, so restart those too. # The issue actually affects all nova services, since they remain with RPC # version pinned to the previous release: # https://bugs.launchpad.net/kolla-ansible/+bug/1833069. 
diff --git a/ansible/roles/opensearch/tasks/post-config.yml b/ansible/roles/opensearch/tasks/post-config.yml index ac26052449..c8723707da 100644 --- a/ansible/roles/opensearch/tasks/post-config.yml +++ b/ansible/roles/opensearch/tasks/post-config.yml @@ -45,7 +45,7 @@ changed_when: opensearch_retention_policy_create.status == 201 when: opensearch_retention_policy_check.status == 404 -- name: Apply retention policy to existing indicies +- name: Apply retention policy to existing indices become: true vars: opensearch_set_policy_body: {"policy_id": "retention"} diff --git a/ansible/roles/ovs-dpdk/defaults/main.yml b/ansible/roles/ovs-dpdk/defaults/main.yml index 2b052f27dd..209eb95fbe 100644 --- a/ansible/roles/ovs-dpdk/defaults/main.yml +++ b/ansible/roles/ovs-dpdk/defaults/main.yml @@ -48,7 +48,7 @@ ovs_hugepage_mountpoint: /dev/hugepages # ovs <2.7 required dpdk phyical port names to be index # in pci address order as dpdkX where X is the index -# ovs>=2.7 allows arbitray names but the pci address +# ovs>=2.7 allows arbitrary names but the pci address # must be set in a new dpdkdev-opt field # valid values are indexed or named. ovs_physical_port_policy: named diff --git a/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh b/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh index a09662805f..14223915e5 100755 --- a/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh +++ b/ansible/roles/ovs-dpdk/files/ovs-dpdkctl.sh @@ -386,8 +386,8 @@ function usage { ovs-dpdkctl.sh: A tool to configure ovs with dpdk. - This tool automate the process of binding host insterfacesto a dpdk - compaible driver (uio_pci_generic | vfio-pci) at boot. -- This tool automate bootstraping ovs so that it can use the + compatible driver (uio_pci_generic | vfio-pci) at boot. +- This tool automate bootstrapping ovs so that it can use the dpdk accelerated netdev datapath. commands: @@ -403,14 +403,14 @@ commands: - removes ovs-dpdkctl configuration file. 
- bind_nics: - iterates over all dpdk interfaces defined in ovs-dpdkctl config - and binds the interface to the target driver specifed in the config + and binds the interface to the target driver specified in the config if current driver does not equal target. - unbind_nics: - iterates over all dpdk interfaces defined in ovs-dpdkctl config and restores the interface to its original non dpdk driver. - init: - - defines dpdk specific configuration paramater in the ovsdb. - - creates bridges as spcified by ovs bridge_mappings in + - defines dpdk specific configuration parameter in the ovsdb. + - creates bridges as specified by ovs bridge_mappings in ovs-dpdkctl config. - creates dpdk ports as defined by ovs port_mappings in ovs-dpdkctl config. @@ -418,10 +418,10 @@ commands: - prints this message options: - - debuging: - - To enable debuging export OVS_DPDK_CTL_DEBUG=True + - debugging: + - To enable debugging export OVS_DPDK_CTL_DEBUG=True - install: - - The varibles described below can be defined to customise + - The variables described below can be defined to customise installation of ovs-dpdkctl. = ovs-dpdkctl.sh install - bridge_mappings: @@ -462,7 +462,7 @@ options: - Example: ovs_mem_channels=2 - Default: "4" - ovs_socket_mem: - - A comma separated list of hugepage memory, specifed in MBs per numa node, + - A comma separated list of hugepage memory, specified in MBs per numa node, allocated to the ovs-vswitchd to use for the dpdk dataplane. - For best performance memory should be allocated evenly across all numa node that will run a pmd. 
diff --git a/ansible/roles/ovs-dpdk/tasks/config.yml b/ansible/roles/ovs-dpdk/tasks/config.yml index f318b3abc4..c6e8421e74 100644 --- a/ansible/roles/ovs-dpdk/tasks/config.yml +++ b/ansible/roles/ovs-dpdk/tasks/config.yml @@ -45,7 +45,7 @@ hugepage_mountpoint: "{{ ovs_hugepage_mountpoint }}" ovs_physical_port_policy: "{{ ovs_physical_port_policy }}" -- name: Binds the interface to the target driver specifed in the config +- name: Binds the interface to the target driver specified in the config become: True command: "{{ node_config_directory }}/ovsdpdk-db/ovs-dpdkctl.sh bind_nics" environment: diff --git a/ansible/roles/rabbitmq/tasks/precheck.yml b/ansible/roles/rabbitmq/tasks/precheck.yml index b1e8c5fdb5..3c03853600 100644 --- a/ansible/roles/rabbitmq/tasks/precheck.yml +++ b/ansible/roles/rabbitmq/tasks/precheck.yml @@ -215,7 +215,7 @@ om_enable_rabbitmq_high_availability is True but no mirroring policy has been found. Currently the procedure to migrate from transient non-mirrored queues to durable mirrored queues is manual. Please follow the process described here: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#high-availability. - Note that this process may take several hours on larger systems, and may cause a degredation in performance at large scale. + Note that this process may take several hours on larger systems, and may cause a degradation in performance at large scale. If you do not wish to enable this feature, set om_enable_rabbitmq_high_availability to False. run_once: true @@ -239,7 +239,7 @@ om_enable_rabbitmq_quorum_queues is True but {{ item.name }} is a non-quorum queue. Currently the procedure to migrate to quorum queues is manual. Please follow the process described here: https://docs.openstack.org/kolla-ansible/latest/reference/message-queues/rabbitmq.html#high-availability. - Note that this process may take several hours on larger systems, and may cause a degredation in performance at large scale. 
+ Note that this process may take several hours on larger systems, and may cause a degradation in performance at large scale. If you do not wish to enable this feature, set om_enable_rabbitmq_quorum_queues to False. loop: "{{ (rabbitmq_queues.stdout | from_json) if rabbitmq_queues is not skipped else [] }}" loop_control: diff --git a/doc/source/reference/high-availability/haproxy-guide.rst b/doc/source/reference/high-availability/haproxy-guide.rst index 418ad534fb..e5327e70b0 100644 --- a/doc/source/reference/high-availability/haproxy-guide.rst +++ b/doc/source/reference/high-availability/haproxy-guide.rst @@ -6,7 +6,7 @@ HAProxy Guide Kolla Ansible supports a Highly Available (HA) deployment of Openstack and other services. High-availability in Kolla -is implented as via Keepalived and HAProxy. Keepalived manages virtual IP +is implemented as via Keepalived and HAProxy. Keepalived manages virtual IP addresses, while HAProxy load-balances traffic to service backends. These two components must be installed on the same hosts and they are deployed to hosts in the ``loadbalancer`` group. @@ -71,7 +71,7 @@ Backend weights When different baremetal are used in infrastructure as haproxy backends or they are overloaded for some reason, kolla-ansible is able to change -weight of backend per sevice. Weight can be any integer value from 1 to +weight of backend per service. Weight can be any integer value from 1 to 256. 
To set weight of backend per service, modify inventory file as below: diff --git a/doc/source/reference/logging-and-monitoring/central-logging-guide.rst b/doc/source/reference/logging-and-monitoring/central-logging-guide.rst index 4988f4ca57..edfdc845d6 100644 --- a/doc/source/reference/logging-and-monitoring/central-logging-guide.rst +++ b/doc/source/reference/logging-and-monitoring/central-logging-guide.rst @@ -39,8 +39,8 @@ Applying log retention policies To stop your disks filling up, the Index State Management plugin for OpenSearch can be used to define log retention policies. A default -retention policy is applied to all indicies which match the -``opensearch_log_index_prefix``. This policy first closes old indicies, +retention policy is applied to all indices which match the +``opensearch_log_index_prefix``. This policy first closes old indices, and then eventually deletes them. It can be customised via the following variables: diff --git a/doc/source/reference/logging-and-monitoring/prometheus-guide.rst b/doc/source/reference/logging-and-monitoring/prometheus-guide.rst index a36e906f2e..9a3c8333e4 100644 --- a/doc/source/reference/logging-and-monitoring/prometheus-guide.rst +++ b/doc/source/reference/logging-and-monitoring/prometheus-guide.rst @@ -40,9 +40,9 @@ Basic Auth Prometheus is protected with basic HTTP authentication. Kolla-ansible will create the following users: ``admin``, ``grafana`` (if grafana is enabled) and ``skyline`` (if skyline is enabled). The grafana username can -be overidden using the variable +be overridden using the variable ``prometheus_grafana_user``, the skyline username can -be overidden using the variable ``prometheus_skyline_user``. +be overridden using the variable ``prometheus_skyline_user``. 
The passwords are defined by the ``prometheus_password``, ``prometheus_grafana_password`` and ``prometheus_skyline_password`` variables in @@ -56,7 +56,7 @@ The passwords are defined by the password: hello enabled: true -or completely overriden with the ``prometheus_basic_auth_users`` variable. +or completely overridden with the ``prometheus_basic_auth_users`` variable. Extending the default command line options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -124,7 +124,7 @@ Extra files Sometimes it is necessary to reference additional files from within ``prometheus.yml``, for example, when defining file service discovery -configuration. To enable you to do this, kolla-ansible will resursively +configuration. To enable you to do this, kolla-ansible will recursively discover any files in ``{{ node_custom_config }}/prometheus/extras`` and template them. The templated output is then copied to ``/etc/prometheus/extras`` within the container on startup. For example to diff --git a/doc/source/reference/networking/designate-guide.rst b/doc/source/reference/networking/designate-guide.rst index b59b5fcfea..058af33b5f 100644 --- a/doc/source/reference/networking/designate-guide.rst +++ b/doc/source/reference/networking/designate-guide.rst @@ -74,7 +74,7 @@ Infoblox Backend .. important:: When using Infoblox as the Designate backend the MDNS node - requires the container to listen on port 53. As this is a privilaged + requires the container to listen on port 53. As this is a privileged port you will need to build your designate-mdns container to run as the user root rather than designate. 
diff --git a/doc/source/reference/networking/neutron.rst b/doc/source/reference/networking/neutron.rst index 57b50efcbb..c748c6d4e4 100644 --- a/doc/source/reference/networking/neutron.rst +++ b/doc/source/reference/networking/neutron.rst @@ -225,7 +225,7 @@ Mellanox Infiniband (ml2/mlnx) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to add ``mlnx_infiniband`` to the list of mechanism driver -for ``neutron`` to support Infiniband virtual funtions, you need to +for ``neutron`` to support Infiniband virtual functions, you need to set the following (assuming neutron SR-IOV agent is also enabled using ``enable_neutron_sriov`` flag): diff --git a/doc/source/reference/shared-services/keystone-guide.rst b/doc/source/reference/shared-services/keystone-guide.rst index c0f157cb84..d0958a3f92 100644 --- a/doc/source/reference/shared-services/keystone-guide.rst +++ b/doc/source/reference/shared-services/keystone-guide.rst @@ -32,8 +32,8 @@ a buffer key - three in total. If the rotation interval is set lower than the sum of the token expiry and token allow expired window, more active keys will be configured in Keystone as necessary. -Further infomation on Fernet tokens is available in the :keystone-doc:`Keystone -documentation `. +Further information on Fernet tokens is available in the +:keystone-doc:`Keystone documentation `. Federated identity ------------------ diff --git a/doc/source/user/security.rst b/doc/source/user/security.rst index d0e185bce5..b1cb4149aa 100644 --- a/doc/source/user/security.rst +++ b/doc/source/user/security.rst @@ -41,7 +41,7 @@ This absolutely solves the problem of persistent data, but it introduces another security issue, permissions. With this host bind mount solution the data in ``var/lib/mysql`` will be owned by the mysql user in the container. 
Unfortunately, that mysql user in the container could have -any UID/GID and thats who will own the data outside the container +any UID/GID and that's who will own the data outside the container introducing a potential security risk. Additionally, this method dirties the host and requires host permissions to the directories to bind mount. @@ -98,8 +98,8 @@ The following variables should be configured in Kolla Ansible's * Bool - set to true or false -Prerequsites -============ +Prerequisites +============= Firewalld needs to be installed beforehand. Kayobe can be used to automate the installation and configuration of firewalld diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml index 286c36b2b9..feb9a47bea 100644 --- a/etc/kolla/globals.yml +++ b/etc/kolla/globals.yml @@ -32,7 +32,7 @@ workaround_ansible_issue_8743: yes # scenarios with all facts cached (as there is no task to fail). #kolla_ansible_setup_any_errors_fatal: false -# This variable may be used to set the maxiumum failure percentage for all +# This variable may be used to set the maximum failure percentage for all # plays. More fine-grained control is possible via per-service variables, e.g. # nova_max_fail_percentage. The default behaviour is to set a max fail # percentage of 100, which is equivalent to not setting it. diff --git a/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml b/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml index 0775df5825..371c0c7daf 100644 --- a/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml +++ b/releasenotes/notes/add-graceful-timeout-argument-a8b71a389351599b.yaml @@ -1,4 +1,4 @@ --- features: - - Add graceful timeout argument to kolla_docker library for stoping, + - Add graceful timeout argument to kolla_docker library for stopping, restaring container. 
diff --git a/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml b/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml index 47687f627d..f5b27f8c77 100644 --- a/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml +++ b/releasenotes/notes/add-lets-encrypt-intergration-9e5f9846536379af.yaml @@ -2,7 +2,7 @@ features: - Add Lets Encrypt TLS certificate service integration into Openstack deployment. Enables trusted TLS certificate generation option for - secure communcation with OpenStack HAProxy instances using + secure communication with OpenStack HAProxy instances using ``letsencrypt_email``, ``kolla_internal_fqdn`` and/or ``kolla_external_fqdn`` is required. One container runs an Apache ACME client webserver and one runs Lego for certificate retrieval diff --git a/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml b/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml index aab4c19bdd..ccabf36751 100644 --- a/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml +++ b/releasenotes/notes/add-monasca-notification-03283c42a8df3d71.yaml @@ -2,6 +2,6 @@ features: - | Add support for deploying the Monasca Notification service. The - Notification service is responsible for notifiying users when + Notification service is responsible for notifying users when an alert, as defined via the Monasca API, is generated by the Monasca Thresh topology. 
diff --git a/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml b/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml index 7c25f5c51f..c7fd1ae99f 100644 --- a/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml +++ b/releasenotes/notes/add-support-for-custom-alertmanager-notifications-27f5d0474f470512.yaml @@ -1,5 +1,5 @@ --- features: - | - Adds possibility for inlcuding custom alert notification templates with + Adds possibility for including custom alert notification templates with Prometheus Alertmanager. diff --git a/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml b/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml index 8615136879..bf88cd08e2 100644 --- a/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml +++ b/releasenotes/notes/add-trove-singletenant-dd02a7b7cc1a4f99.yaml @@ -2,5 +2,5 @@ features: - | Add "enable_trove_singletenant" option to enable the Trove single - tenant functionnality. This feature will allow Trove to create + tenant functionality. This feature will allow Trove to create Nova instances in a different tenant than the user tenant. diff --git a/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml b/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml index c3c62719dc..028a7143cc 100644 --- a/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml +++ b/releasenotes/notes/bug-1947710-6d0975ae72f43ada.yaml @@ -2,6 +2,6 @@ fixes: - | Fixes the copy job for grafana custom home dashboard file. - The copy job for the grafana home dashboard file needs to run priviliged, + The copy job for the grafana home dashboard file needs to run privileged, otherwise permission denied error occurs. 
`LP#1947710 `__ diff --git a/releasenotes/notes/correct-ovn-ctl-pid-flag-in-template-d915fe4b71548da0.yaml b/releasenotes/notes/correct-ovn-ctl-pid-flag-in-template-d915fe4b71548da0.yaml index 6ecd421be6..b3fd0e1477 100644 --- a/releasenotes/notes/correct-ovn-ctl-pid-flag-in-template-d915fe4b71548da0.yaml +++ b/releasenotes/notes/correct-ovn-ctl-pid-flag-in-template-d915fe4b71548da0.yaml @@ -1,7 +1,7 @@ --- fixes: - | - The flags ``--db-nb-pid`` and ``--db-sb-pid`` have been corected to be + The flags ``--db-nb-pid`` and ``--db-sb-pid`` have been corrected to be ``--db-nb-pidfile`` and ``--db-sb-pidfile`` respectively. See here for reference: https://github.com/ovn-org/ovn/blob/6c6a7ad1c64a21923dc9b5bea7069fd88bcdd6a8/utilities/ovn-ctl#L1045 diff --git a/releasenotes/notes/deprecate-sanity-checks-928ef3af2dc0f187.yaml b/releasenotes/notes/deprecate-sanity-checks-928ef3af2dc0f187.yaml index 035c584925..1fec824ef3 100644 --- a/releasenotes/notes/deprecate-sanity-checks-928ef3af2dc0f187.yaml +++ b/releasenotes/notes/deprecate-sanity-checks-928ef3af2dc0f187.yaml @@ -11,7 +11,7 @@ deprecations: is not possible any more. features: - | - Sanity checks have been removed. These "smoke tests" orignially + Sanity checks have been removed. These "smoke tests" originally were implemented for barbican, cinder, glance and keystone. 
upgrade: - | diff --git a/releasenotes/notes/enable-influxdb-tsi-858cc95e99605107.yaml b/releasenotes/notes/enable-influxdb-tsi-858cc95e99605107.yaml index 6a17a687ac..09b857cef6 100644 --- a/releasenotes/notes/enable-influxdb-tsi-858cc95e99605107.yaml +++ b/releasenotes/notes/enable-influxdb-tsi-858cc95e99605107.yaml @@ -6,5 +6,5 @@ upgrade: Instructions to migrate existing data to the new, disk based format can be found at https://docs.influxdata.com/influxdb/v1.7/administration/upgrading/ - If you do not follow the migration proceedure, InfluxDB should continue + If you do not follow the migration procedure, InfluxDB should continue to work, but this is not recommended. diff --git a/releasenotes/notes/encrypt-additional-services-backend-haproxy-29467a9771e99917.yaml b/releasenotes/notes/encrypt-additional-services-backend-haproxy-29467a9771e99917.yaml index 624e4dde72..4af625c41e 100644 --- a/releasenotes/notes/encrypt-additional-services-backend-haproxy-29467a9771e99917.yaml +++ b/releasenotes/notes/encrypt-additional-services-backend-haproxy-29467a9771e99917.yaml @@ -3,5 +3,5 @@ features: - | Adds configuration options to enable backend TLS encryption from HAProxy to the Nova, Ironic, and Neutron services. When used in conjunction with - enabling TLS for service API endpoints, network communcation will be + enabling TLS for service API endpoints, network communication will be encrypted end to end, from client through HAProxy to the backend service. diff --git a/releasenotes/notes/encrypt-backend-haproxy-fb96285d74fb464c.yaml b/releasenotes/notes/encrypt-backend-haproxy-fb96285d74fb464c.yaml index 8307b120e0..2e5d418f20 100644 --- a/releasenotes/notes/encrypt-backend-haproxy-fb96285d74fb464c.yaml +++ b/releasenotes/notes/encrypt-backend-haproxy-fb96285d74fb464c.yaml @@ -4,5 +4,5 @@ features: Adds configuration options to enable backend TLS encryption from HAProxy to the Keystone, Glance, Heat, Placement, Horizon, Barbican, and Cinder services. 
When used in conjunction with enabling TLS for service API - endpoints, network communcation will be encrypted end to end, from client + endpoints, network communication will be encrypted end to end, from client through HAProxy to the backend service. diff --git a/releasenotes/notes/fix-etcd-coordination-config-b1c9f900ef13be13.yaml b/releasenotes/notes/fix-etcd-coordination-config-b1c9f900ef13be13.yaml index ec8c3dcb94..4711bb908d 100644 --- a/releasenotes/notes/fix-etcd-coordination-config-b1c9f900ef13be13.yaml +++ b/releasenotes/notes/fix-etcd-coordination-config-b1c9f900ef13be13.yaml @@ -3,6 +3,6 @@ fixes: - | Set the etcd internal hostname and cacert for tls internal enabled deployments. This allows services to work with etcd when - coordination is enabled for TLS interal deployments. Without this + coordination is enabled for TLS internal deployments. Without this fix, the coordination backend fails to connect to etcd and the service itself crashes. diff --git a/releasenotes/notes/fix-etcd-protocol-3c9482f90070ee6e.yaml b/releasenotes/notes/fix-etcd-protocol-3c9482f90070ee6e.yaml index a4ea2544ef..c3b3e6d23a 100644 --- a/releasenotes/notes/fix-etcd-protocol-3c9482f90070ee6e.yaml +++ b/releasenotes/notes/fix-etcd-protocol-3c9482f90070ee6e.yaml @@ -2,7 +2,7 @@ fixes: - | Fix the configuration of the etcd service so that its protocol is - independant of the value of the ``internal_protocol`` parameter. The etcd + independent of the value of the ``internal_protocol`` parameter. The etcd service is not load balanced by HAProxy, so there is no proxy layer to do TLS termination when ``internal_protocol`` is configured to be ``https``. 
diff --git a/releasenotes/notes/fix-zun_cni_daemon-mounts-9a7664896cfc7a9f.yaml b/releasenotes/notes/fix-zun_cni_daemon-mounts-9a7664896cfc7a9f.yaml index 52d92d67c2..911b35646d 100644 --- a/releasenotes/notes/fix-zun_cni_daemon-mounts-9a7664896cfc7a9f.yaml +++ b/releasenotes/notes/fix-zun_cni_daemon-mounts-9a7664896cfc7a9f.yaml @@ -1,5 +1,5 @@ --- fixes: - | - Fixes Zun capsules loosing network namespaces after + Fixes Zun capsules losing network namespaces after restarting zun_cni_daemon container diff --git a/releasenotes/notes/horizon-local-settings-d-32c9ad3d40ca23f4.yaml b/releasenotes/notes/horizon-local-settings-d-32c9ad3d40ca23f4.yaml index b2821e87fb..278602a66d 100644 --- a/releasenotes/notes/horizon-local-settings-d-32c9ad3d40ca23f4.yaml +++ b/releasenotes/notes/horizon-local-settings-d-32c9ad3d40ca23f4.yaml @@ -1,7 +1,7 @@ --- upgrade: - | - Horizon role was reworked to preffered local_settings.d + Horizon role was reworked to the preferred local_settings.d configuration model. Files ``local_settings`` and ``custom_local_settings`` were renamed to ``_9998-kolla-settings.py`` and ``_9999-custom-settings.py`` Users who use horizon's custom diff --git a/releasenotes/notes/increase-haproxy-max-connections-df6aff5c82fdef24.yaml b/releasenotes/notes/increase-haproxy-max-connections-df6aff5c82fdef24.yaml index 44f521991f..56a654efd1 100644 --- a/releasenotes/notes/increase-haproxy-max-connections-df6aff5c82fdef24.yaml +++ b/releasenotes/notes/increase-haproxy-max-connections-df6aff5c82fdef24.yaml @@ -2,7 +2,7 @@ upgrade: - | The default connection limit for HAProxy backends is 2000 however, MariaDB - defaults to a max of 10000 conections. This has been changed to match the + defaults to a max of 10000 connections. This has been changed to match the MariaDB limit.
'haproxy_max_connections' has also been increased to 40000 to accommodate diff --git a/releasenotes/notes/kolla-host-584270e3aee6dfd6.yaml b/releasenotes/notes/kolla-host-584270e3aee6dfd6.yaml index 8dca4ca0c4..009019d794 100644 --- a/releasenotes/notes/kolla-host-584270e3aee6dfd6.yaml +++ b/releasenotes/notes/kolla-host-584270e3aee6dfd6.yaml @@ -9,10 +9,10 @@ prelude: > ubuntu 16.04 and centos 7.2 as target servers. See features section for more details. features: - - The kolla-host playbook supports bootrapping + - The kolla-host playbook supports bootstrapping clean os installations to enable them to be used as kolla hosts. When the playbook completes - the bootstraped systems should pass the kolla + the bootstrapped systems should pass the kolla prechecks. - The kolla-host playbook will install docker 1.11 and docker-py on all baremetal nodes. diff --git a/releasenotes/notes/octavia_jobboard-823f44393f3e109e.yaml b/releasenotes/notes/octavia_jobboard-823f44393f3e109e.yaml index b2156e04c2..6c1e630218 100644 --- a/releasenotes/notes/octavia_jobboard-823f44393f3e109e.yaml +++ b/releasenotes/notes/octavia_jobboard-823f44393f3e109e.yaml @@ -15,5 +15,5 @@ upgrade: The Octavia amphora provider by default is now deployed with the jobboard feature enabled. This requires the Redis service to be enabled as a dependency, please update your configuration accordingly if needed. - For futher information see + For further information see `Amphorav2 docs `_ diff --git a/releasenotes/notes/openstack-exporter-hammering-os-apis.yaml b/releasenotes/notes/openstack-exporter-hammering-os-apis.yaml index 78ef561c9d..d87920e89d 100644 --- a/releasenotes/notes/openstack-exporter-hammering-os-apis.yaml +++ b/releasenotes/notes/openstack-exporter-hammering-os-apis.yaml @@ -8,7 +8,7 @@ fixes: With the previous behavior each openstack exporter was scraped at the same time. 
This caused each exporter to query the openstack APIs - simultaneously introducing unneccesary load and duplicate + simultaneously introducing unnecessary load and duplicate time series in the prometheus database due to the instance label being unique for each exporter. `LP#1972818 `__ diff --git a/releasenotes/notes/ovn-monitor-all-782e7cdc04688fc6.yaml b/releasenotes/notes/ovn-monitor-all-782e7cdc04688fc6.yaml index 76ee3651dd..0e49e07c5d 100644 --- a/releasenotes/notes/ovn-monitor-all-782e7cdc04688fc6.yaml +++ b/releasenotes/notes/ovn-monitor-all-782e7cdc04688fc6.yaml @@ -6,5 +6,5 @@ features: databases. Setting ``ovn-monitor-all`` variable to 'true' will remove some CPU load from OVN SouthBound DB but will effect with more updates - comming to ovn-controller. Might be helpfull in large deployments + coming to ovn-controller. Might be helpful in large deployments with many compute hosts. diff --git a/releasenotes/notes/ovs-dpdk-a48404777d3836a3.yaml b/releasenotes/notes/ovs-dpdk-a48404777d3836a3.yaml index cb7a031423..6bcb150caf 100644 --- a/releasenotes/notes/ovs-dpdk-a48404777d3836a3.yaml +++ b/releasenotes/notes/ovs-dpdk-a48404777d3836a3.yaml @@ -28,7 +28,7 @@ issues: issue has been addressed. upgrade: - | - When upgrading ovs-dpdk it should be noted that this will alway invovle a + When upgrading ovs-dpdk it should be noted that this will always involve a dataplane outage. Unlike kernel OVS the dataplane for ovs-dpdk executes in the ovs-vswitchd process. As such it is recommended to always evacuate all vm workloads from a node running ovs-dpdk prior to upgrading. 
diff --git a/releasenotes/notes/rabbitmq-configuration-6b100a390734dc29.yaml b/releasenotes/notes/rabbitmq-configuration-6b100a390734dc29.yaml index 86728bd711..2bfa79cce1 100644 --- a/releasenotes/notes/rabbitmq-configuration-6b100a390734dc29.yaml +++ b/releasenotes/notes/rabbitmq-configuration-6b100a390734dc29.yaml @@ -2,4 +2,4 @@ features: - | Adds the ability to configure rabbitmq via ``rabbitmq_extra_config`` - which can be overriden in globals.yml. + which can be overridden in globals.yml. diff --git a/releasenotes/notes/rabbitmq-set-ha-promote-on-shutdown-always-e8db9ad15fd1b8fb.yaml b/releasenotes/notes/rabbitmq-set-ha-promote-on-shutdown-always-e8db9ad15fd1b8fb.yaml index 71962b3493..68d6efe999 100755 --- a/releasenotes/notes/rabbitmq-set-ha-promote-on-shutdown-always-e8db9ad15fd1b8fb.yaml +++ b/releasenotes/notes/rabbitmq-set-ha-promote-on-shutdown-always-e8db9ad15fd1b8fb.yaml @@ -4,7 +4,7 @@ upgrade: The RabbitMQ variable `rabbitmq-ha-promote-on-shutdown` now defaults to `"always"`. This only has an effect if `om_enable_rabbitmq_high_availability` is set to `True`. When - `ha-promote-on-shutdown` is set to `always`, queue mirrors are promted on + `ha-promote-on-shutdown` is set to `always`, queue mirrors are promoted on shutdown even if they aren't fully synced. This means that value availability over the risk of losing some messages. Note that the contents of the RabbitMQ definitions.json are now changed, meaning RabbitMQ diff --git a/releasenotes/notes/reduce-ceph-pgs-27e88e3b6e3b809c.yaml b/releasenotes/notes/reduce-ceph-pgs-27e88e3b6e3b809c.yaml index 974b985e4f..fadc9620c2 100644 --- a/releasenotes/notes/reduce-ceph-pgs-27e88e3b6e3b809c.yaml +++ b/releasenotes/notes/reduce-ceph-pgs-27e88e3b6e3b809c.yaml @@ -3,7 +3,7 @@ issues: - | As of Ceph Luminous 12.2.1 the maximum number of PGs per OSD before the monitor issues a warning has been reduced from 300 to 200 PGs. 
In addition, - Ceph now fails with an error rather than a warning in the case of exeeding + Ceph now fails with an error rather than a warning in the case of exceeding the max value. In order to allow Kolla to continue to be used out of the box we have reduced the default values for pg_num and pgp_num from 128 to 8. This will diff --git a/releasenotes/notes/swift-replication-network-40ecd13e4339f299.yaml b/releasenotes/notes/swift-replication-network-40ecd13e4339f299.yaml index de806e069f..d774ae7484 100644 --- a/releasenotes/notes/swift-replication-network-40ecd13e4339f299.yaml +++ b/releasenotes/notes/swift-replication-network-40ecd13e4339f299.yaml @@ -1,11 +1,11 @@ --- features: - | - Adds support to seperate Swift access and replication traffic from other + Adds support to separate Swift access and replication traffic from other storage traffic. In a deployment where both Ceph and Swift have been deployed, - this changes adds functionalality to support optional seperation + this change adds functionality to support optional separation of storage network traffic. This adds two new network interfaces ``swift_storage_interface`` and ``swift_replication_interface`` which maintain backwards compatibility. diff --git a/releasenotes/notes/tacker-conductor-dc90739426381e14.yaml b/releasenotes/notes/tacker-conductor-dc90739426381e14.yaml index 2927088481..a12cfc6e9f 100644 --- a/releasenotes/notes/tacker-conductor-dc90739426381e14.yaml +++ b/releasenotes/notes/tacker-conductor-dc90739426381e14.yaml @@ -5,7 +5,7 @@ features: upgrade: - | To support new tacker-conductor service, tacker role has been - reformated, before upgrade, tacker-server and tacker-conductor + reformatted, before upgrade, tacker-server and tacker-conductor groups should be included in inventory files. - Tacker requires Mistral service to be enabled as of Pike release to implement vim monitoring.
diff --git a/specs/internal-tls-endpoints.rst b/specs/internal-tls-endpoints.rst index f2b23db0b5..08721b69a8 100644 --- a/specs/internal-tls-endpoints.rst +++ b/specs/internal-tls-endpoints.rst @@ -113,7 +113,7 @@ Work Items disables certificate verification. - Ensure that all tasks that interact with OpenStack APIs support disabling certificate verification. - - Fix heat-api bootstrap process, which currently requires valid certficate, + - Fix heat-api bootstrap process, which currently requires valid certificate, probably by moving domain/user creation out of the container, and into the ansible itself. - Allow for providing a CA used to verify connections to the service backends. diff --git a/specs/kubernetes-deployment.rst b/specs/kubernetes-deployment.rst index f96946e343..e451f643c1 100644 --- a/specs/kubernetes-deployment.rst +++ b/specs/kubernetes-deployment.rst @@ -199,7 +199,7 @@ At the broadest level, OpenStack can split up into two main roles, Controller and Compute. With Kubernetes, the role definition layer changes. Kolla-kubernetes will still need to define Compute nodes, but not Controller nodes. Compute nodes hold the libvirt container and the running vms. That -service cannont migrate because the vms associated with it exist on the node. +service cannot migrate because the vms associated with it exist on the node. However, the Controller role is more flexible. The Kubernetes layer provides IP persistence so that APIs will remain active and abstracted from the operator's view [15]. kolla-kubernetes can direct Controller services away from the Compute @@ -245,7 +245,7 @@ kibana as the default logging mechanism. The community will implement centralized logging by using a 'side car' container in the Kubernetes pod [17]. The logging service will trace the logs from the -shared volume of the running serivce and send the data to elastic search. This +shared volume of the running service and send the data to elastic search. 
This solution is ideal because volumes are shared among the containers in a pod. Implementation diff --git a/specs/logging-with-heka.rst b/specs/logging-with-heka.rst index 8087a6a41a..c8abc03cb9 100644 --- a/specs/logging-with-heka.rst +++ b/specs/logging-with-heka.rst @@ -167,7 +167,7 @@ Handling HAProxy and Keepalived As already mentioned HAProxy and Keepalived do not support logging to files. This means that some other mechanism should be used for these two services (and -any other services that only suppport logging to Syslog). +any other services that only support logging to Syslog). Our prototype has demonstrated that we can make Heka act as a Syslog server. This works by using Heka's ``UdpInput`` plugin with its ``net`` option set diff --git a/specs/multiple-globals-files.rst b/specs/multiple-globals-files.rst index ab9dc8767b..1c0ed93144 100644 --- a/specs/multiple-globals-files.rst +++ b/specs/multiple-globals-files.rst @@ -36,7 +36,7 @@ more granular control, without the need to add the ``-e @/path/to/file`` flag. Use cases --------- -1. Allow a more granular controler over individual service's options +1. Allow a more granular control over individual service's options 2. Better file and directory structure Proposed change diff --git a/specs/template.rst b/specs/template.rst index 15a1886afb..70c80c2b66 100644 --- a/specs/template.rst +++ b/specs/template.rst @@ -45,7 +45,7 @@ Include where in the kolla tree hierarchy this will reside. Security impact --------------- -How does this feature impact the securtiy of the deployed OpenStack. +How does this feature impact the security of the deployed OpenStack.
Performance Impact ------------------ diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2 index 75316e5f27..8f358b845c 100644 --- a/tests/templates/globals-default.j2 +++ b/tests/templates/globals-default.j2 @@ -70,7 +70,7 @@ openstack_service_rpc_workers: "1" {% endif %} {% if need_build_image and not is_previous_release %} -# NOTE(Jeffrey4l): use different a docker namespace name in case it pull image from hub.docker.io when deplying +# NOTE(Jeffrey4l): use different a docker namespace name in case it pull image from hub.docker.io when deploying docker_namespace: "lokolla" # NOTE(yoctozepto): use hostname or FQDN to be compatible between IPv4 and IPv6 # docker does not support referencing registry via an IPv6 address diff --git a/tests/test-core-openstack.sh b/tests/test-core-openstack.sh index c887ed5a49..4062b78ce7 100755 --- a/tests/test-core-openstack.sh +++ b/tests/test-core-openstack.sh @@ -246,7 +246,7 @@ function unset_cirros_image_q35_machine_type { function test_neutron_modules { # Exit the function if scenario is "ovn" or if there's an upgrade - # as inly concerns ml2/ovs + # as it only concerns ml2/ovs if [[ $SCENARIO == "ovn" ]] || [[ $HAS_UPGRADE == "yes" ]]; then return fi diff --git a/tools/cleanup-host b/tools/cleanup-host index f6361d8559..b0dfb47829 100755 --- a/tools/cleanup-host +++ b/tools/cleanup-host @@ -44,27 +44,27 @@ if [[ "$enable_swift" == "yes" ]]; then fi if [[ "$glance_file_datadir_volume" != "glance" && -d "$glance_file_datadir_volume" ]]; then - echo "Removing glance volume if it is customzied" + echo "Removing glance volume if it is customized" rm -rfv $glance_file_datadir_volume fi if [[ "$nova_instance_datadir_volume" != "nova_compute" && -d "$nova_instance_datadir_volume" ]]; then - echo "Removing nova_compute volume if it is customzied" + echo "Removing nova_compute volume if it is customized" rm -rfv $nova_instance_datadir_volume fi if [[ "$gnocchi_metric_datadir_volume" != "gnocchi" && -d 
"$gnocchi_metric_datadir_volume" ]]; then - echo "Removing gnocchi volume if it is customzied" + echo "Removing gnocchi volume if it is customized" rm -rfv $gnocchi_metric_datadir_volume fi if [[ "$influxdb_datadir_volume" != "influxdb" && -d "$influxdb_datadir_volume" ]]; then - echo "Removing influxdb volume if it is customzied" + echo "Removing influxdb volume if it is customized" rm -rfv $influxdb_datadir_volume fi if [[ "$opensearch_datadir_volume" != "opensearch" && -d "$opensearch_datadir_volume" ]]; then - echo "Removing opensearch volume if it is customzied" + echo "Removing opensearch volume if it is customized" rm -rfv $opensearch_datadir_volume fi diff --git a/tox.ini b/tox.ini index 206f08cc28..2598c35334 100644 --- a/tox.ini +++ b/tox.ini @@ -100,6 +100,7 @@ deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/lint-requirements.txt + codespell allowlist_externals = bash find commands = @@ -115,6 +116,7 @@ commands = deps = {[testenv:linters]deps} commands = flake8 {posargs} + codespell -I {toxinidir}/.codespell-ignore [flake8] show-source = True