From 3f8ad21583f9ef0a77d818f0f483e47a2b771c31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= Date: Wed, 3 Jan 2018 17:56:09 +0100 Subject: [PATCH 001/207] neutron: add support for apic multipod The opflex section of the proposal now is a sequence. Each element of the seqence is a specific opflex configuration that is applied to the nodes of a given pod. Two new attributes are added: - pod: This is a descriptive tag of the pod. It is not applied to any final configuration, it is just there to help the administrator handle the proposal. - nodes: This is a sequence containing all the host names of the compute nodes in the pod. --- chef/cookbooks/neutron/attributes/default.rb | 27 ++++++++++----- .../neutron/recipes/cisco_apic_agents.rb | 19 +++++++---- .../neutron/114_add_cisco_apic_multipod.rb | 21 ++++++++++++ chef/data_bags/crowbar/template-neutron.json | 8 +++-- .../data_bags/crowbar/template-neutron.schema | 34 +++++++++++-------- 5 files changed, 75 insertions(+), 34 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/114_add_cisco_apic_multipod.rb diff --git a/chef/cookbooks/neutron/attributes/default.rb b/chef/cookbooks/neutron/attributes/default.rb index a56f484dd2..a30687c24f 100644 --- a/chef/cookbooks/neutron/attributes/default.rb +++ b/chef/cookbooks/neutron/attributes/default.rb @@ -70,15 +70,24 @@ default[:neutron][:apic][:username] = "admin" default[:neutron][:apic][:password] = "" -default[:neutron][:apic][:opflex][:peer_ip] = "" -default[:neutron][:apic][:opflex][:peer_port] = 8009 -default[:neutron][:apic][:opflex][:encap] = "vxlan" -default[:neutron][:apic][:opflex][:vxlan][:uplink_iface] = "vlan.4093" -default[:neutron][:apic][:opflex][:vxlan][:uplink_vlan] = 4093 -default[:neutron][:apic][:opflex][:vxlan][:encap_iface] = "br-int_vxlan0" -default[:neutron][:apic][:opflex][:vxlan][:remote_ip] = "" -default[:neutron][:apic][:opflex][:vxlan][:remote_port] = 8472 
-default[:neutron][:apic][:opflex][:vlan][:encap_iface] = "" +default[:neutron][:apic][:opflex] = [{ + pod: "", + nodes: [], + peer_ip: "", + peer_port: "", + encap: "vxlan", + vxlan: { + uplink_iface: "vlan.4093", + uplink_vlan: 4093, + encap_iface: "br-int_vxlan0", + remote_ip: "", + remote_port: 8472 + }, + vlan: { + encap_iface: "" + } +}] + case node[:platform_family] when "suse" diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb index 85fc599a88..f5c35aef90 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb @@ -93,6 +93,11 @@ # Update config file from template opflex_agent_conf = "/etc/opflex-agent-ovs/conf.d/10-opflex-agent-ovs.conf" + apic = neutron[:neutron][:apic] + opflex_list = apic[:opflex].select { |i| i[:nodes].include? node[:hostname] } + opflex_list.any? || raise("Opflex instance not found for node '#{node[:hostname]}'") + opflex_list.one? 
|| raise("Multiple opflex instances found for node '#{node[:hostname]}'") + opflex = opflex_list.first template opflex_agent_conf do cookbook "neutron" source "10-opflex-agent-ovs.conf.erb" @@ -103,13 +108,13 @@ opflex_apic_domain_name: neutron[:neutron][:apic][:system_id], hostname: node[:hostname], socketgroup: neutron[:neutron][:platform][:group], - opflex_peer_ip: neutron[:neutron][:apic][:opflex][:peer_ip], - opflex_peer_port: neutron[:neutron][:apic][:opflex][:peer_port], - opflex_vxlan_encap_iface: neutron[:neutron][:apic][:opflex][:vxlan][:encap_iface], - opflex_vxlan_uplink_iface: neutron[:neutron][:apic][:opflex][:vxlan][:uplink_iface], - opflex_vxlan_uplink_vlan: neutron[:neutron][:apic][:opflex][:vxlan][:uplink_vlan], - opflex_vxlan_remote_ip: neutron[:neutron][:apic][:opflex][:vxlan][:remote_ip], - opflex_vxlan_remote_port: neutron[:neutron][:apic][:opflex][:vxlan][:remote_port], + opflex_peer_ip: opflex[:peer_ip], + opflex_peer_port: opflex[:peer_port], + opflex_vxlan_encap_iface: opflex[:vxlan][:encap_iface], + opflex_vxlan_uplink_iface: opflex[:vxlan][:uplink_iface], + opflex_vxlan_uplink_vlan: opflex[:vxlan][:uplink_vlan], + opflex_vxlan_remote_ip: opflex[:vxlan][:remote_ip], + opflex_vxlan_remote_port: opflex[:vxlan][:remote_port], # TODO(mmnelemane) : update VLAN encapsulation config when it works. # Currently set to VXLAN by default but can be modified from proposal. 
ml2_type_drivers: neutron[:neutron][:ml2_type_drivers] diff --git a/chef/data_bags/crowbar/migrate/neutron/114_add_cisco_apic_multipod.rb b/chef/data_bags/crowbar/migrate/neutron/114_add_cisco_apic_multipod.rb new file mode 100644 index 0000000000..885e943d64 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/114_add_cisco_apic_multipod.rb @@ -0,0 +1,21 @@ +def upgrade(ta, td, a, d) + if a.key?("apic") && a["apic"]["opflex"].is_a?(Hash) + nodes = a["apic"]["apic_switches"] + .map { |_, value| value["switch_ports"].keys } + .flatten + .uniq + a["apic"]["opflex"]["nodes"] = nodes + opflex = [ta["apic"]["opflex"].first.merge(a["apic"]["opflex"])] + a["apic"]["opflex"] = opflex + end + return a, d +end + +def downgrade(ta, td, a, d) + if a.key?("apic") && ta["apic"]["opflex"].is_a?(Array) + a["apic"]["opflex"] = a["apic"]["opflex"].first + a["apic"]["opflex"].delete("pod") + a["apic"]["opflex"].delete("nodes") + end + return a, d +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 86c46ccc7d..84e3a0f91a 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -45,7 +45,9 @@ "system_id": "soc", "username": "admin", "password": "", - "opflex": { + "opflex": [{ + "pod": "", + "nodes" : [], "peer_ip": "", "peer_port": 8009, "encap": "vxlan", @@ -59,7 +61,7 @@ "vlan": { "encap_iface": "" } - }, + }], "apic_switches": { "101": { "switch_ports": { @@ -175,7 +177,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 113, + "schema-revision": 114, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ] diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index 2736c43cf4..b740e26390 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ 
b/chef/data_bags/crowbar/template-neutron.schema @@ -50,21 +50,25 @@ "system_id": { "type" : "str", "required" : true }, "username": { "type" : "str", "required": true }, "password": { "type" : "str", "required": true }, - "opflex": { "type": "map", "required": true, "mapping": { - "peer_ip": { "type": "str", "required" : true }, - "peer_port": { "type": "int", "required" : true }, - "encap": { "type": "str", "required": true }, - "vxlan": { "type": "map", "required": true, "mapping" : { - "encap_iface": {"type": "str", "required": true }, - "uplink_iface": { "type": "str", "required": true }, - "uplink_vlan": { "type": "int", "required": true }, - "remote_ip": { "type": "str", "required": true }, - "remote_port": { "type": "int", "required": true } - }}, - "vlan": { "type": "map", "required": true, "mapping": { - "encap_iface": { "type": "str", "required": true } - }} - }}, + "opflex": { "type": "seq", "required": true, "sequence": [ { + "type": "map", "required": true, "mapping": { + "pod": { "type" : "str", "required" : false }, + "nodes": { "type" : "seq", "required" : true, "sequence": [ { "type": "str" } ] }, + "peer_ip": { "type": "str", "required" : true }, + "peer_port": { "type": "int", "required" : true }, + "encap": { "type": "str", "required": true }, + "vxlan": { "type": "map", "required": true, "mapping" : { + "encap_iface": {"type": "str", "required": true }, + "uplink_iface": { "type": "str", "required": true }, + "uplink_vlan": { "type": "int", "required": true }, + "remote_ip": { "type": "str", "required": true }, + "remote_port": { "type": "int", "required": true } + }}, + "vlan": { "type": "map", "required": true, "mapping": { + "encap_iface": { "type": "str", "required": true } + }} + } + } ] }, "apic_switches": { "type" : "map", "required" : true, "mapping" : { = : { "type" : "map", "required" : true, "mapping" : { "switch_ports": { "type" : "map", "required" : true, "mapping" : { From 055dc1382a0b9a37bf29403f8b6ef44f839d34b3 Mon Sep 17 
00:00:00 2001 From: Johannes Grassler Date: Tue, 23 Jan 2018 15:45:44 +0100 Subject: [PATCH 002/207] monasca: fix configuration glitches This commit fixes the two configuration glitches described in bsc#1077231 by * Exposing monitor_ceph in the Crowbar UI * Correcting the type of monasca['agent']['plugins']['libvirt']['ping_check'] and providing a new default value. (cherry picked from commit f8dd2e6990c0e0cdfa564904fd9fbd2243f6d3d3) --- .../monasca/103_fix_ping_check_type.rb | 24 +++++++++++++++++++ chef/data_bags/crowbar/template-monasca.json | 4 ++-- .../data_bags/crowbar/template-monasca.schema | 2 +- .../monasca/_edit_attributes.html.haml | 1 + .../config/locales/monasca/en.yml | 3 ++- 5 files changed, 30 insertions(+), 4 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/monasca/103_fix_ping_check_type.rb diff --git a/chef/data_bags/crowbar/migrate/monasca/103_fix_ping_check_type.rb b/chef/data_bags/crowbar/migrate/monasca/103_fix_ping_check_type.rb new file mode 100644 index 0000000000..c4934907e3 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/monasca/103_fix_ping_check_type.rb @@ -0,0 +1,24 @@ +def upgrade(ta, td, a, d) + key_pingcheck = a["agent"]["plugins"]["libvirt"].key?("ping_check") + ta_pingcheck = ta["agent"]["plugins"]["libvirt"]["ping_check"] + + # If there is no ping_check key at all, simply migrate to current value + unless key_pingcheck + a["agent"]["plugins"]["libvirt"]["ping_check"] = ta_pingcheck + return a, d + end + + a_pingcheck = a["agent"]["plugins"]["libvirt"]["ping_check"] + + # Only override existing value if it is boolean + a["agent"]["plugins"]["libvirt"]["ping_check"] = ta_pingcheck if + a_pingcheck.is_a?(TrueClass) || a_pingcheck.is_a?(FalseClass) + + return a, d +end + +def downgrade(ta, td, a, d) + a["agent"]["plugins"]["libvirt"]["ping_check"] = false + + return a, d +end diff --git a/chef/data_bags/crowbar/template-monasca.json b/chef/data_bags/crowbar/template-monasca.json index abbb1b3f22..30e7fc5c4c 100644 
--- a/chef/data_bags/crowbar/template-monasca.json +++ b/chef/data_bags/crowbar/template-monasca.json @@ -24,7 +24,7 @@ "tenant_name" ], "nova_refresh": 14400, - "ping_check": false, + "ping_check": "/bin/ip netns exec NAMESPACE /usr/bin/ping", "vm_cpu_check_enable": true, "vm_disks_check_enable": true, "vm_extended_disks_check_enable": false, @@ -132,7 +132,7 @@ "monasca": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 102, + "schema-revision": 103, "element_states": { "monasca-server": [ "readying", "ready", "applying" ], "monasca-master": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-monasca.schema b/chef/data_bags/crowbar/template-monasca.schema index 402f899733..9ef270d511 100644 --- a/chef/data_bags/crowbar/template-monasca.schema +++ b/chef/data_bags/crowbar/template-monasca.schema @@ -40,7 +40,7 @@ "max_ping_concurrency": { "type": "int", "required": true }, "metadata": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, "nova_refresh": { "type": "int", "required": true }, - "ping_check": { "type": "bool", "required": true }, + "ping_check": { "type": "str", "required": true }, "vm_cpu_check_enable": { "type": "bool", "required": true }, "vm_disks_check_enable": { "type": "bool", "required": true }, "vm_extended_disks_check_enable": { "type": "bool", "required": true }, diff --git a/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml index f75aeeb847..65d3d68367 100644 --- a/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml @@ -10,6 +10,7 @@ = select_field %w(agent log_level), :collection => :agent_log_levels = integer_field %w(agent statsd_port) + = boolean_field %w(agent monitor_ceph) = boolean_field %w(agent monitor_libvirt) = integer_field %w(agent check_frequency) = integer_field 
%w(agent num_collector_threads) diff --git a/crowbar_framework/config/locales/monasca/en.yml b/crowbar_framework/config/locales/monasca/en.yml index 5defda6cd1..dfbd174768 100644 --- a/crowbar_framework/config/locales/monasca/en.yml +++ b/crowbar_framework/config/locales/monasca/en.yml @@ -35,7 +35,8 @@ en: insecure: 'Do you want insecure connection?' ca_file: 'Sets the path to the ca certs file if using certificates. Required only if insecure is set to False (ca_file)' log_level: 'Log level' - monitor_libvirt: 'Whether to monitor the libvirt process on compute nodes' + monitor_ceph: 'Monitor Ceph' + monitor_libvirt: 'Monitor libvirt' statsd_port: 'Monasca Statsd port' check_frequency: 'Time to wait between collection runs (check_frequency)' num_collector_threads: 'Number of Collector Threads to run (num_collector_threads)' From 344b6e2071163e5965c30cda29de322bda240858 Mon Sep 17 00:00:00 2001 From: sayalilunkad Date: Fri, 26 Jan 2018 15:15:46 +0100 Subject: [PATCH 003/207] neutron-ha-tool: Add insecure flag (bsc#1075394) The insecure flag was missing from the status command which was causing the neutron-l3-ha-service to fail. 
(cherry picked from commit 506bee760d6da279e263bd9b37da41736128dd78) --- chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb b/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb index 3d3007828e..d8c19d8dcc 100644 --- a/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb +++ b/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb @@ -334,7 +334,7 @@ def insecure_flag end def status_command - [@options.program, "--l3-agent-check", "--quiet"] + [@options.program, "--l3-agent-check", "--quiet"] + insecure_flag end def migration_command From d515eb6f0b26a721f4722fec6dbb0aaca9fa84bc Mon Sep 17 00:00:00 2001 From: Madhu Mohan Nelemane Date: Fri, 15 Dec 2017 12:14:54 +0100 Subject: [PATCH 004/207] neutron: Fix for GBP authentication issue with ACI With OpenStack Newton, the Group-Based-Policy library expects special authentication mechanism with the Cisco ACI fabric. It expects configuration of [apic_aim_auth] with similar parameters as the [keystone_authtoken] section. This commit adds this additional section to neutron.conf along with the data from keystone_settings and a variable in the recipe to add this section only when the APIC GBP mode is used. 
(cherry picked from commit 3b5e6b8fc6bc8e4fb13972bed63b7ff3b7865123) --- chef/cookbooks/neutron/recipes/common_config.rb | 6 +++++- .../neutron/templates/default/neutron.conf.erb | 12 ++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/recipes/common_config.rb b/chef/cookbooks/neutron/recipes/common_config.rb index 41afad1f04..40194a002a 100644 --- a/chef/cookbooks/neutron/recipes/common_config.rb +++ b/chef/cookbooks/neutron/recipes/common_config.rb @@ -24,6 +24,9 @@ neutron = node end +use_apic_gbp = neutron[:neutron][:networking_plugin] == "ml2" && + neutron[:neutron][:ml2_mechanism_drivers].include?("apic_gbp") + # RDO package magic (non-standard packages) if node[:platform_family] == "rhel" net_core_pkgs=%w(kernel-*openstack* iproute-*el6ost.netns* iputils) @@ -137,7 +140,8 @@ mtu_value: mtu_value, infoblox: infoblox_settings, ipam_driver: ipam_driver, - rpc_workers: neutron[:neutron][:rpc_workers] + rpc_workers: neutron[:neutron][:rpc_workers], + use_apic_gbp: use_apic_gbp ) end diff --git a/chef/cookbooks/neutron/templates/default/neutron.conf.erb b/chef/cookbooks/neutron/templates/default/neutron.conf.erb index c8abcc3f41..9cbf553e14 100644 --- a/chef/cookbooks/neutron/templates/default/neutron.conf.erb +++ b/chef/cookbooks/neutron/templates/default/neutron.conf.erb @@ -53,6 +53,18 @@ project_name = <%= @keystone_settings['service_tenant'] %> project_domain_name = <%= @keystone_settings['admin_domain'] %> user_domain_name = <%= @keystone_settings['admin_domain'] %> +<% if @use_apic_gbp -%> +[apic_aim_auth] +auth_plugin = v3password +auth_url = <%= @keystone_settings['internal_auth_url'] %> +user_domain_name = <%= @keystone_settings['admin_domain'] %> +project_name = <%= @keystone_settings['service_tenant'] %> +project_domain_name = <%= @keystone_settings['admin_domain'] %> +username = <%= @keystone_settings['service_user'] %> +password = <%= @keystone_settings['service_password'] %> + +<% end -%> + [nova] 
region_name = <%= @keystone_settings['endpoint_region'] %> endpoint_type = internal From 76c93d4c67665f6a87877cd55d5067a7ed437775 Mon Sep 17 00:00:00 2001 From: Madhu Mohan Nelemane Date: Fri, 15 Dec 2017 12:16:38 +0100 Subject: [PATCH 005/207] neutron: Allow enable/disable of optimized metadata/dhcp options for Cisco ACI. There has been recent customer use-cases specially involving vmware and kvm running together to be able to disable optimized_dhcp option. Also the Cisco ACI configuration guides recommend it to have optimized_metadata and optimized_dhcp to be optional. These were not configurable from crowbar proposals. This commit adds these as configurable parameters and sets the config file as configured from the crowbar proposal. The commit basically adds two more parameters to neutron[:apic] section called "optimized_metadata" and "optimized_dhcp". (cherry picked from commit 21883d5688abd5ee405ab396e613e46fb82323ba) --- chef/cookbooks/neutron/attributes/default.rb | 3 ++- .../templates/default/ml2_conf_cisco_apic.ini.erb | 3 ++- .../neutron/templates/default/neutron.conf.erb | 1 - .../neutron/115_add_apic_optimized_dhcp_metadata.rb | 12 ++++++++++++ chef/data_bags/crowbar/template-neutron.json | 4 +++- chef/data_bags/crowbar/template-neutron.schema | 2 ++ 6 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/115_add_apic_optimized_dhcp_metadata.rb diff --git a/chef/cookbooks/neutron/attributes/default.rb b/chef/cookbooks/neutron/attributes/default.rb index a30687c24f..8bf57d6467 100644 --- a/chef/cookbooks/neutron/attributes/default.rb +++ b/chef/cookbooks/neutron/attributes/default.rb @@ -69,7 +69,8 @@ default[:neutron][:apic][:hosts] = "" default[:neutron][:apic][:username] = "admin" default[:neutron][:apic][:password] = "" - +default[:neutron][:apic][:optimized_metadata] = true +default[:neutron][:apic][:optimized_dhcp] = true default[:neutron][:apic][:opflex] = [{ pod: "", nodes: [], diff --git 
a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb index 90935dda63..06077fff92 100644 --- a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb +++ b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb @@ -11,7 +11,8 @@ apic_name_mapping = use_name apic_clear_node_profiles = True enable_aci_routing = True apic_arp_flooding = True -enable_optimized_metadata = True +enable_optimized_metadata = <%= node[:neutron][:apic][:optimized_metadata] %> +enable_optimized_dhcp = <%= node[:neutron][:apic][:optimized_dhcp] %> apic_provision_infra = True apic_provision_hostlinks = True <% @apic_switches.keys.each do |ip| -%> diff --git a/chef/cookbooks/neutron/templates/default/neutron.conf.erb b/chef/cookbooks/neutron/templates/default/neutron.conf.erb index 9cbf553e14..35a7b4d9fd 100644 --- a/chef/cookbooks/neutron/templates/default/neutron.conf.erb +++ b/chef/cookbooks/neutron/templates/default/neutron.conf.erb @@ -64,7 +64,6 @@ username = <%= @keystone_settings['service_user'] %> password = <%= @keystone_settings['service_password'] %> <% end -%> - [nova] region_name = <%= @keystone_settings['endpoint_region'] %> endpoint_type = internal diff --git a/chef/data_bags/crowbar/migrate/neutron/115_add_apic_optimized_dhcp_metadata.rb b/chef/data_bags/crowbar/migrate/neutron/115_add_apic_optimized_dhcp_metadata.rb new file mode 100644 index 0000000000..d83193af05 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/115_add_apic_optimized_dhcp_metadata.rb @@ -0,0 +1,12 @@ +def upgrade(ta, td, a, d) + a["apic"]["optimized_metadata"] = ta["apic"]["optimized_metadata"] \ + unless a["apic"].key? "optimized_metadata" + a["apic"]["optimized_dhcp"] = ta["apic"]["optimized_dhcp"] unless a["apic"].key? "optimized_dhcp" + return a, d +end + +def downgrade(ta, td, a, d) + a["apic"].delete("optimized_metadata") unless ta["apic"].key? 
"optimized_metadata" + a["apic"].delete("optimized_dhcp") unless ta["apic"].key? "optimized_dhcp" + return a, d +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 84e3a0f91a..27ef442474 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -45,6 +45,8 @@ "system_id": "soc", "username": "admin", "password": "", + "optimized_metadata": true, + "optimized_dhcp": true, "opflex": [{ "pod": "", "nodes" : [], @@ -177,7 +179,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 114, + "schema-revision": 115, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ] diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index b740e26390..e934751503 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -50,6 +50,8 @@ "system_id": { "type" : "str", "required" : true }, "username": { "type" : "str", "required": true }, "password": { "type" : "str", "required": true }, + "optimized_metadata": { "type" : "bool", "required" : true }, + "optimized_dhcp": { "type" : "bool", "required" : true }, "opflex": { "type": "seq", "required": true, "sequence": [ { "type": "map", "required": true, "mapping": { "pod": { "type" : "str", "required" : false }, From 26593e94f0c8729a388011805f26da4daf15a93d Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Wed, 14 Feb 2018 00:48:12 +0100 Subject: [PATCH 006/207] Revert "mysql: Disable use of galera-clustercheck" This reverts commit a5b0329ee2a1a1e5bd9786a7b5005cf3bd792c0c. 
--- chef/cookbooks/mysql/attributes/server.rb | 3 -- chef/cookbooks/mysql/recipes/ha_galera.rb | 63 ++++++++++------------- 2 files changed, 28 insertions(+), 38 deletions(-) diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb index 8709dd3782..6c99ca68b5 100644 --- a/chef/cookbooks/mysql/attributes/server.rb +++ b/chef/cookbooks/mysql/attributes/server.rb @@ -36,6 +36,3 @@ # in pacemamker default[:mysql][:ha][:op][:monitor][:interval] = "20s" default[:mysql][:ha][:op][:monitor][:role] = "Master" - -# Let users override this if galera-python-clustercheck is available to them -default[:mysql][:ha][:clustercheck] = false diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 0689ac6028..4a1684de24 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -19,10 +19,9 @@ resource_agent = "ocf:heartbeat:galera" -["galera-3-wsrep-provider", "mariadb-tools", "xtrabackup", "socat"].each do |p| +["galera-3-wsrep-provider", "mariadb-tools", "xtrabackup", "socat", "galera-python-clustercheck"].each do |p| package p end -package "galera-python-clustercheck" if node[:mysql][:ha][:clustercheck] unless node[:database][:galera_bootstrapped] directory "/var/run/mysql/" do @@ -259,7 +258,6 @@ revision node[:database]["crowbar-revision"] end -if node[:mysql][:ha][:clustercheck] # Configuration files for galera-python-clustercheck template "/etc/galera-python-clustercheck/galera-python-clustercheck.conf" do source "galera-python-clustercheck.conf.erb" @@ -281,38 +279,37 @@ ) end - # Start galera-clustercheck which serves the cluster state as http return codes - # on port 5555 - transaction_objects = [] - service_name = "galera-python-clustercheck" +# Start galera-clustercheck which serves the cluster state as http return codes +# on port 5555 +transaction_objects = [] +service_name = "galera-python-clustercheck" - pacemaker_primitive 
service_name do - agent "systemd:#{service_name}" - action :update - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end +pacemaker_primitive service_name do + agent "systemd:#{service_name}" + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end - transaction_objects.push("pacemaker_primitive[#{service_name}]") +transaction_objects.push("pacemaker_primitive[#{service_name}]") - clone_name = "cl-#{service_name}" - pacemaker_clone clone_name do - rsc service_name - meta CrowbarPacemakerHelper.clone_meta(node, remote: false) - action :update - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end +clone_name = "cl-#{service_name}" +pacemaker_clone clone_name do + rsc service_name + meta CrowbarPacemakerHelper.clone_meta(node, remote: false) + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end - transaction_objects.push("pacemaker_clone[#{clone_name}]") +transaction_objects.push("pacemaker_clone[#{clone_name}]") - clone_location_name = openstack_pacemaker_controller_only_location_for clone_name - transaction_objects << "pacemaker_location[#{clone_location_name}]" +clone_location_name = openstack_pacemaker_controller_only_location_for clone_name +transaction_objects << "pacemaker_location[#{clone_location_name}]" - pacemaker_transaction "clustercheck" do - cib_objects transaction_objects - action :commit_new - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end -end # if node[:mysql][:ha][:clustercheck] +pacemaker_transaction "clustercheck" do + cib_objects transaction_objects + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end include_recipe "crowbar-pacemaker::haproxy" @@ -339,12 +336,8 @@ mode "tcp" # leave some room for pacemaker health checks max_connections node[:database][:mysql][:max_connections] - 10 - if node[:mysql][:ha][:clustercheck] - options ["httpchk", "clitcpka"] - default_server "port 5555" - else - options 
["mysql-check user monitoring", "clitcpka"] - end + options ["httpchk", "clitcpka"] + default_server "port 5555" stick ({ "on" => "dst" }) servers ha_servers action :nothing From cd5d368ac427fbfb6eff2d79da086b4ce6d91509 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 1 Dec 2017 12:18:44 +0100 Subject: [PATCH 007/207] mariadb: Turn package list into attribute (cherry picked from commit 7384aae5eb328977cc8ec80c46a702d140964772) --- chef/cookbooks/mysql/attributes/server.rb | 8 ++++++++ chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb index 6c99ca68b5..7c7ccf597e 100644 --- a/chef/cookbooks/mysql/attributes/server.rb +++ b/chef/cookbooks/mysql/attributes/server.rb @@ -36,3 +36,11 @@ # in pacemamker default[:mysql][:ha][:op][:monitor][:interval] = "20s" default[:mysql][:ha][:op][:monitor][:role] = "Master" + +default[:mysql][:galera_packages] = [ + "galera-3-wsrep-provider", + "mariadb-tools", + "xtrabackup", + "socat", + "galera-python-clustercheck" +] diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 4a1684de24..279f84cbaf 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -19,7 +19,7 @@ resource_agent = "ocf:heartbeat:galera" -["galera-3-wsrep-provider", "mariadb-tools", "xtrabackup", "socat", "galera-python-clustercheck"].each do |p| +node[:mysql][:galera_packages].each do |p| package p end From fa228cf79161950f94ddd57fb339bb337ae3d2e3 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 1 Dec 2017 12:19:25 +0100 Subject: [PATCH 008/207] mariadb: Update package list for mariadb 10.2 We need the new "mariadb-galera" subpackage installed. It contains the wsrep_sst helpers. 
(cherry picked from commit 2f41251163a044ae87169c6b9a7d8b0f64189c34) --- chef/cookbooks/mysql/attributes/server.rb | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb index 7c7ccf597e..bbea6cc777 100644 --- a/chef/cookbooks/mysql/attributes/server.rb +++ b/chef/cookbooks/mysql/attributes/server.rb @@ -37,6 +37,10 @@ default[:mysql][:ha][:op][:monitor][:interval] = "20s" default[:mysql][:ha][:op][:monitor][:role] = "Master" +# If needed we can enhance this to set the mariadb version +# depeding on "platform" and "platform_version". But currently +# this should be enough +default[:mysql][:mariadb][:version] = "10.1" default[:mysql][:galera_packages] = [ "galera-3-wsrep-provider", "mariadb-tools", @@ -44,3 +48,8 @@ "socat", "galera-python-clustercheck" ] + +# newer version need an additional package on SLES +unless node[:mysql][:mariadb][:version] == "10.1" + default[:mysql][:galera_packages] << "mariadb-galera" +end From 639cfc90d48e5076b0606b5d8c2a26491249b930 Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Mon, 12 Feb 2018 17:02:46 +0100 Subject: [PATCH 009/207] mysql: Switch to mariadb 10.2 (cherry picked from commit bb6fc8a0b318335e56f5e687b6121bf7d866e242) --- chef/cookbooks/mysql/attributes/server.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb index bbea6cc777..ab87e8ac3f 100644 --- a/chef/cookbooks/mysql/attributes/server.rb +++ b/chef/cookbooks/mysql/attributes/server.rb @@ -40,7 +40,7 @@ # If needed we can enhance this to set the mariadb version # depeding on "platform" and "platform_version". 
But currently # this should be enough -default[:mysql][:mariadb][:version] = "10.1" +default[:mysql][:mariadb][:version] = "10.2" default[:mysql][:galera_packages] = [ "galera-3-wsrep-provider", "mariadb-tools", From bebcb7a5e53de351a47ff52cf49b7cb0e6ae9e02 Mon Sep 17 00:00:00 2001 From: sayalilunkad Date: Tue, 13 Feb 2018 15:20:54 +0100 Subject: [PATCH 010/207] magnum: Add domain name to keystone_auth (bsc#1080335) This adds the domain_names to the keystone_auth section as well as this is needed when magnum uses barbican as the cert-manager. (cherry picked from commit 0f6a08ff145e70f96ac132d87d633f16474100a8) --- chef/cookbooks/magnum/templates/default/magnum.conf.erb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chef/cookbooks/magnum/templates/default/magnum.conf.erb b/chef/cookbooks/magnum/templates/default/magnum.conf.erb index 1fe68357e4..2aef6fe4bb 100644 --- a/chef/cookbooks/magnum/templates/default/magnum.conf.erb +++ b/chef/cookbooks/magnum/templates/default/magnum.conf.erb @@ -41,6 +41,8 @@ project_name = <%= @keystone_settings['service_tenant'] %> username = <%= @keystone_settings['service_user'] %> password = <%= @keystone_settings['service_password'] %> insecure = <%= @keystone_settings['insecure'] %> +project_domain_name = <%= @keystone_settings["admin_domain"]%> +user_domain_name = <%= @keystone_settings["admin_domain"] %> [keystone_authtoken] auth_uri = <%= @keystone_settings['public_auth_url'] %> From 26cb1b5d9796318d6be03ee11dc3e181f4b746fc Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 31 Jan 2018 14:07:19 +0100 Subject: [PATCH 011/207] ironic: Fix failing migrations During migration service objects are created without logger parameter. Missing default value caused the process to fail. 
(cherry picked from commit 3e6a24b82517b5c5c8777f1c69d631848318fabf) --- crowbar_framework/app/models/ironic_service.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crowbar_framework/app/models/ironic_service.rb b/crowbar_framework/app/models/ironic_service.rb index f19b550613..20e3ecc8e0 100644 --- a/crowbar_framework/app/models/ironic_service.rb +++ b/crowbar_framework/app/models/ironic_service.rb @@ -16,7 +16,7 @@ # class IronicService < ServiceObject - def initialize(thelogger) + def initialize(thelogger = nil) super(thelogger) @bc_name = "ironic" end From e842d91d70c7afcf3017df9cfe6672909f968aac Mon Sep 17 00:00:00 2001 From: Jan Zerebecki Date: Mon, 19 Feb 2018 18:14:14 +0100 Subject: [PATCH 012/207] barbican: Add creator role (bsc#1081573) Barbican defaults assume that a creator role exists, which allows: creation of resources, reading all resources, deleting resources one owns, but not deleting resources owned by others. (cherry picked from commit a097f888e6b6d1468534d27cbf6758465e6512ed) Backport of https://github.com/crowbar/crowbar-openstack/pull/1566 --- chef/cookbooks/barbican/recipes/api.rb | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/barbican/recipes/api.rb b/chef/cookbooks/barbican/recipes/api.rb index 75bd1d98dd..6e7ba9d60f 100644 --- a/chef/cookbooks/barbican/recipes/api.rb +++ b/chef/cookbooks/barbican/recipes/api.rb @@ -112,7 +112,7 @@ action :add_user end -keystone_register "give barbican user access" do +keystone_register "give barbican user access as admin" do protocol keystone_settings["protocol"] insecure keystone_settings["insecure"] host keystone_settings["internal_url_host"] @@ -124,6 +124,28 @@ action :add_access end +keystone_register "add creator role for barbican" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth 
register_auth_hash
+  role_name "creator"
+  action :add_role
+end
+
+keystone_register "give barbican user access as creator" do
+  protocol keystone_settings["protocol"]
+  insecure keystone_settings["insecure"]
+  host keystone_settings["internal_url_host"]
+  port keystone_settings["admin_port"]
+  auth register_auth_hash
+  user_name keystone_settings["service_user"]
+  tenant_name keystone_settings["service_tenant"]
+  role_name "creator"
+  action :add_access
+end
+
 crowbar_pacemaker_sync_mark "create-barbican_register" if ha_enabled
 
 if node[:barbican][:ha][:enabled]

From d7afbc09e0a74f436b0b882de740fe4295610427 Mon Sep 17 00:00:00 2001
From: Ivan Lausuch
Date: Fri, 5 Jan 2018 10:29:19 +0100
Subject: [PATCH 013/207] rabbitmq: allow creating extra users

with the introduction of the definitions file it looks like rabbit will
disregard any existing users when resetting, using the definitions file
as the blank slate to start from.

This gets in the middle of having any extra users for other tasks like
monitoring and there is no mechanism to add users.

This patch allows creating an arbitrary number of users, with
permissions and tags so in case of a rabbit reset it would keep those
extra users intact.
(cherry picked from commit 1ade84888fd3da0c606d4421986973c3e401c6b5) --- chef/cookbooks/rabbitmq/recipes/default.rb | 3 +- chef/cookbooks/rabbitmq/recipes/rabbit.rb | 29 ++++++++++ .../templates/default/definitions.json.erb | 16 ++++++ .../migrate/rabbitmq/105_add_extra_users.rb | 9 ++++ chef/data_bags/crowbar/template-rabbitmq.json | 4 +- .../crowbar/template-rabbitmq.schema | 14 +++++ .../app/models/rabbitmq_service.rb | 41 +++++++++++++- .../rabbitmq/_edit_attributes.html.haml | 53 +++++++++++++++++++ .../config/locales/rabbitmq/en.yml | 11 ++++ 9 files changed, 176 insertions(+), 4 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/rabbitmq/105_add_extra_users.rb diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index 0a6d3242bf..3deb5aff72 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -132,7 +132,8 @@ json_trove_password: node[:rabbitmq][:trove][:password].to_json, json_trove_vhost: node[:rabbitmq][:trove][:vhost].to_json, ha_all_policy: cluster_enabled, - quorum: quorum + quorum: quorum, + extra_users: node[:rabbitmq][:users] ) # no notification to restart rabbitmq, as we still do changes with # rabbitmqctl in the rabbit.rb recipe (this is less disruptive) diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index 8b0c890574..829f102fd0 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -134,6 +134,35 @@ only_if only_if_command if ha_enabled end +node[:rabbitmq][:users].each do |user| + # create extra users + rabbitmq_user "adding user #{user[:username]}" do + user user[:username] + password user[:password] + address node[:rabbitmq][:management_address] + port node[:rabbitmq][:management_port] + action :add + only_if only_if_command if ha_enabled + end + + # add permisions to those extra users into the default vhost + rabbitmq_user 
"setting permissions for #{user[:username]}" do + user user[:username] + vhost node[:rabbitmq][:vhost] + # permissions is a list but the resource needs an escaped string + permissions user[:permissions].map { |x| "'#{x}'" }.join(" ") + action :set_permissions + only_if only_if_command if ha_enabled + end + + # tag those users as management + execute "rabbitmqctl set_user_tags #{user[:username]} #{user[:tags]}" do + not_if "rabbitmqctl list_users | grep #{user[:username]} | grep -q #{user[:tags].join(",")}" + action :run + only_if only_if_command if ha_enabled + end +end + if cluster_enabled quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 diff --git a/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb b/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb index d31e26f02a..7c77fc0532 100644 --- a/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb +++ b/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb @@ -34,6 +34,13 @@ "password": <%= @json_trove_password %>, "tags": "" }, +<% end -%> +<% @extra_users.each do |user| -%> + { + "name": "<%= user[:username] %>", + "password": "<%= user[:password] %>", + "tags": "<%= user[:tags].join(',') %>" + }, <% end -%> { "name": <%= @json_user %>, @@ -50,6 +57,15 @@ "read": ".*", "write": ".*" }, +<% end -%> +<% @extra_users.each do |user| -%> + { + "user": "<%= user[:username] %>", + "vhost": <%= @json_vhost %>, + "configure": "<%= user[:permissions][0] %>", + "read": "<%= user[:permissions][2] %>", + "write": "<%= user[:permissions][1] %>" + }, <% end -%> { "user": <%= @json_user %>, diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/105_add_extra_users.rb b/chef/data_bags/crowbar/migrate/rabbitmq/105_add_extra_users.rb new file mode 100644 index 0000000000..22dfda4169 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/rabbitmq/105_add_extra_users.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["extra_users"] = ta["extra_users"] unless a["extra_users"] + return 
a, d +end + +def downgrade(ta, td, a, d) + a.delete("extra_users") unless ta.key?("extra_users") + return a, d +end diff --git a/chef/data_bags/crowbar/template-rabbitmq.json b/chef/data_bags/crowbar/template-rabbitmq.json index 60783dc047..0f2886baf9 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.json +++ b/chef/data_bags/crowbar/template-rabbitmq.json @@ -7,6 +7,7 @@ "port": 5672, "password": "", "user": "nova", + "extra_users": {}, "vhost": "/nova", "ssl": { "enabled": false, @@ -57,7 +58,7 @@ "rabbitmq": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 104, + "schema-revision": 105, "element_states": { "rabbitmq-server": [ "readying", "ready", "applying" ] }, @@ -74,4 +75,3 @@ } } } - diff --git a/chef/data_bags/crowbar/template-rabbitmq.schema b/chef/data_bags/crowbar/template-rabbitmq.schema index b57d43d236..12dfe6f113 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.schema +++ b/chef/data_bags/crowbar/template-rabbitmq.schema @@ -16,6 +16,20 @@ "port": { "type": "int", "required": true }, "password": { "type": "str", "required": true }, "user": { "type": "str", "required": true }, + "extra_users": { + "type": "map", + "required": false, + "mapping": { + = : { + "type": "map", + "required": false, + "mapping": { + "permissions": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, + "tags": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] } + } + } + } + }, "vhost": { "type": "str", "required": true }, "ssl": { "type": "map", "required": true, "mapping": { diff --git a/crowbar_framework/app/models/rabbitmq_service.rb b/crowbar_framework/app/models/rabbitmq_service.rb index 822f17c7f3..bb566de691 100644 --- a/crowbar_framework/app/models/rabbitmq_service.rb +++ b/crowbar_framework/app/models/rabbitmq_service.rb @@ -71,6 +71,35 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Rabbitmq apply_role_pre_chef_call: entering #{all_nodes.inspect}") return if 
all_nodes.empty? + # prepare extra users + save_role = false + old_attrs = old_role.nil? ? nil : old_role.default_attributes[@bc_name] + role.default_attributes[@bc_name]["users"] ||= [] + role.default_attributes[@bc_name]["extra_users"].each do |username, user| + save_role = true + updated_user = { + username: username, + tags: user["tags"], + permissions: user["permissions"] + } + if !old_attrs.nil? && old_attrs.include?("users") && !old_attrs["users"].each.select do |u| + u["username"] == user["username"] + end.empty? + # reuse the existing pass + pass = old_attrs["users"].each.select do |u| + u["username"] == user["username"] + end.first["password"] + + updated_user.update(password: pass) + else + # new user, so create a random pass + updated_user.update(password: random_password) + end + role.default_attributes[@bc_name]["users"].push(updated_user) + end + + role.save if save_role + rabbitmq_elements, rabbitmq_nodes, rabbitmq_ha_enabled = role_expand_elements(role, "rabbitmq-server") Openstack::HA.set_controller_role(rabbitmq_nodes) if rabbitmq_ha_enabled @@ -125,6 +154,17 @@ def validate_proposal_after_save(proposal) servers = proposal["deployment"][@bc_name]["elements"]["rabbitmq-server"] ha_enabled = !(servers.nil? || servers.first.nil? || !is_cluster?(servers.first)) + # extra users validation for permissions + unless attributes["extra_users"].empty? 
+ attributes["extra_users"].each do |username, user| + if user["permissions"].length != 3 + validation_error I18n.t( + "barclamp.#{bc_name}.validation.wrong_permissions", user: username + ) + end + end + end + # Shared storage validation for HA if ha_enabled && !attributes["cluster"] storage_mode = attributes["ha"]["storage"]["mode"] @@ -154,4 +194,3 @@ def validate_proposal_after_save(proposal) super end end - diff --git a/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml index 9408427e99..a389eff252 100644 --- a/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml @@ -7,6 +7,36 @@ = integer_field :port = string_field :user + %fieldset + %legend + = t(".extra_users.title") + + %table.table.table-middle{ "data-dynamic" => "#extrauser-entries", "data-namespace" => "extra_users", "data-optional" => "tags", + "data-invalid" => t(".extra_users.error_invalid"), "data-duplicate" => t(".extra_users.error_duplicate") } + %thead + %tr + %th.col-sm-2 + = t(".extra_users.username") + %th.col-sm-3 + = t(".extra_users.permissions") + %th.col-sm-3 + = t(".extra_users.tags") + %th.col-sm-1 + %tbody + %tfoot + %tr + %td + = text_field_tag "extrauser[name]", "", :placeholder => t(".extra_users.username"), + :class => "form-control", "data-name" => "name", "data-type" => "string" + %td + = text_field_tag "extrauser[permissions]", "", :placeholder => t(".extra_users.permissions"), + :class => "form-control", "data-name" => "permissions", "data-type" => "array-string" + %td + = text_field_tag "extrauser[tags]", "", :placeholder => t(".extra_users.tags"), + :class => "form-control", "data-name" => "tags", "data-type" => "array-string" + %td + = link_to t(".extra_users.add"), "#", :class => "btn btn-default btn-block", "data-add" => true + %fieldset %legend = t(".ssl_header") @@ -48,3 +78,26 
@@ = string_field %w(ha storage shared device) = string_field %w(ha storage shared fstype) = string_field %w(ha storage shared options) + + +%script#extrauser-entries{ :type => "text/x-handlebars-template" } + {{#each entries}} + %tr.edit + %td + = text_field_tag "extrauser[name]", "{{name}}", :placeholder => t(".extra_users.username"), + :class => "form-control", :disabled => "disabled" + %td + = text_field_tag "extrauser[permissions]", "{{permissions}}", :placeholder => t(".extra_users.permissions"), + :class => "form-control", "data-update" => "extra_users/{{name}}/permissions", "data-name" => "permissions", + "data-type" => "array-string" + %td + = text_field_tag "extrauser[tags]", "{{tags}}", :placeholder => t(".extra_users.tags"), :class => "form-control", + "data-update" => "extra_users/{{name}}/tags", "data-name" => "tags", "data-type" => "array-string" + %td + = link_to t(".extra_users.record_remove"), "#", :class => "btn btn-default btn-block", "data-remove" => "{{name}}" + {{else}} + %tr + %td{ :colspan => 4 } + .empty.alert.alert-info.text-center + = t(".extra_users.no_records") + {{/each}} diff --git a/crowbar_framework/config/locales/rabbitmq/en.yml b/crowbar_framework/config/locales/rabbitmq/en.yml index f8c512379b..e3d6081a41 100644 --- a/crowbar_framework/config/locales/rabbitmq/en.yml +++ b/crowbar_framework/config/locales/rabbitmq/en.yml @@ -21,6 +21,16 @@ en: edit_attributes: vhost: 'Virtual host' user: 'User' + extra_users: + username: 'Username' + permissions: 'Permissions (3 comma separated items for configure, write, read; e.g. ".*,.*,.*")' + tags: 'Tags (comma separated)' + error_invalid: 'Username and Permissions cannot be empty' + error_duplicate: 'There is a user with this username' + title: 'Extra users' + add: 'Add' + record_remove: 'Delete' + no_records: 'No records' port: 'Port' ssl_header: 'SSL Support' ssl: @@ -54,3 +64,4 @@ en: no_filesystem: 'No filesystem type specified for shared storage.' 
drbd: 'DRBD is not enabled for cluster %{cluster_name}.' invalid_size: 'Invalid size for DRBD device.' + wrong_permissions: 'Wrong permissions for user %{user}. Permissions need to be 3 comma separated items (configure, write, read)' From 897eda87f10bc9f8139181c56178f49b304c066f Mon Sep 17 00:00:00 2001 From: Stefan Nica Date: Wed, 24 Jan 2018 12:57:44 +0100 Subject: [PATCH 014/207] apache: don't collect Listen ports from wsgi vhosts (bsc#1077234) The listen_ports_crowbar attribute controls which ports are added as Listen directives to the /etc/apache2/listen.conf file. However, all wsgi vhosts aside from horizon already have an explicit Listen directive in the vhost configuration file, so there's no need to add these ports to the /etc/apache2/listen.conf file. To align horizon to the other WSGI vhosts, a Listen directive is added to the horizon WSGI vhost configuration file. Since it doesn't also have to be added to the /etc/apache2/listen.conf file, the usage of listen_ports_crowbar can be removed altogether. 
(cherry picked from commit 5ea09e7cadd31615ceef188acee9e9c64c918f0c) --- chef/cookbooks/aodh/recipes/aodh.rb | 3 --- chef/cookbooks/ceilometer/recipes/server.rb | 3 --- chef/cookbooks/horizon/recipes/server.rb | 13 ------------- .../templates/default/openstack-dashboard.conf.erb | 5 +++++ .../templates/suse/openstack-dashboard.conf.erb | 6 ++++++ chef/cookbooks/keystone/recipes/server.rb | 3 --- 6 files changed, 11 insertions(+), 22 deletions(-) diff --git a/chef/cookbooks/aodh/recipes/aodh.rb b/chef/cookbooks/aodh/recipes/aodh.rb index f39a48aa56..3a0dc06472 100644 --- a/chef/cookbooks/aodh/recipes/aodh.rb +++ b/chef/cookbooks/aodh/recipes/aodh.rb @@ -145,9 +145,6 @@ bind_port = node[:aodh][:api][:port] end -node.normal[:apache][:listen_ports_crowbar] ||= {} -node.normal[:apache][:listen_ports_crowbar][:aodh] = { plain: [bind_port] } - template node[:aodh][:config_file] do source "aodh.conf.erb" owner "root" diff --git a/chef/cookbooks/ceilometer/recipes/server.rb b/chef/cookbooks/ceilometer/recipes/server.rb index 6f7df194c6..211ec916eb 100644 --- a/chef/cookbooks/ceilometer/recipes/server.rb +++ b/chef/cookbooks/ceilometer/recipes/server.rb @@ -207,9 +207,6 @@ bind_port = node[:ceilometer][:api][:port] end -node.normal[:apache][:listen_ports_crowbar] ||= {} -node.normal[:apache][:listen_ports_crowbar][:ceilometer] = { plain: [bind_port] } - if ceilometer_protocol == "https" ssl_setup "setting up ssl for ceilometer" do generate_certs node[:ceilometer][:ssl][:generate_certs] diff --git a/chef/cookbooks/horizon/recipes/server.rb b/chef/cookbooks/horizon/recipes/server.rb index edb8c98f1d..1a0557dca6 100644 --- a/chef/cookbooks/horizon/recipes/server.rb +++ b/chef/cookbooks/horizon/recipes/server.rb @@ -492,21 +492,8 @@ bind_port_ssl = 443 end -node.normal[:apache][:listen_ports_crowbar] ||= {} - -if node[:horizon][:apache][:ssl] - node.normal[:apache][:listen_ports_crowbar][:horizon] = { plain: [bind_port], ssl: [bind_port_ssl] } -else - 
node.normal[:apache][:listen_ports_crowbar][:horizon] = { plain: [bind_port] } -end - -# we can only include the recipe after having defined the listen_ports_crowbar attribute include_recipe "horizon::ha" if ha_enabled -# Override what the apache2 cookbook does since it enforces the ports -resource = resources(template: "#{node[:apache][:dir]}/ports.conf") -resource.variables({apache_listen_ports: node.normal[:apache][:listen_ports_crowbar].values.map{ |p| p.values }.flatten.uniq.sort}) - if node[:horizon][:apache][:ssl] && node[:horizon][:apache][:generate_certs] package "apache2-utils" diff --git a/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb b/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb index 919cebadeb..4b5b05faf8 100644 --- a/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb +++ b/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb @@ -6,6 +6,9 @@ RewriteEngine On RewriteCond %{SERVER_PORT} ^<%= @bind_port %>$ RewriteRule / https://%{HTTP_HOST}%{REQUEST_URI} [L,R] +Listen <%= @bind_host %>:<%= @bind_port %>> +Listen <%= @bind_host %>:<%= @bind_port_ssl %> + :<%= @bind_port_ssl %>> SSLEngine On SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH @@ -17,6 +20,8 @@ RewriteRule / https://%{HTTP_HOST}%{REQUEST_URI} [L,R] <% end %> <% else %> +Listen <%= @bind_host %>:<%= @bind_port %>> + :<%= @bind_port %>> <% end %> WSGIDaemonProcess horizon user=<%= @user %> group=<%= @group %> processes=3 threads=10 home=<%= @horizon_dir %> display-name=%{GROUP} diff --git a/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb b/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb index 81dbed809a..01ee231930 100644 --- a/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb +++ b/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb @@ -2,6 +2,8 @@ +Listen <%= @bind_host %>:<%= @bind_port %> + # Redirect non-SSL traffic to SSL 
:<%= @bind_port %>> RewriteEngine On @@ -24,6 +26,8 @@ RewriteRule / https://%1%{REQUEST_URI} [L,R] +Listen <%= @bind_host %>:<%= @bind_port_ssl %> + :<%= @bind_port_ssl %>> SSLEngine On SSLCipherSuite DEFAULT_SUSE @@ -35,6 +39,8 @@ <% end %> <% else %> +Listen <%= @bind_host %>:<%= @bind_port %> + :<%= @bind_port %>> <% end %> WSGIScriptAlias / <%= @horizon_dir %>/openstack_dashboard/wsgi/django.wsgi diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index d9837b4871..2eacc9867b 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -48,9 +48,6 @@ bind_service_port = node[:keystone][:api][:service_port] end -node.normal[:apache][:listen_ports_crowbar] ||= {} -node.normal[:apache][:listen_ports_crowbar][:keystone] = { admin: [bind_admin_port], service: [bind_service_port] } - # Ideally this would be called admin_host, but that's already being # misleadingly used to store a value which actually represents the # service bind address. From d404a604dfb1f5c754cd25c8b1d72e0a354ad025 Mon Sep 17 00:00:00 2001 From: Stefan Nica Date: Wed, 11 Oct 2017 16:55:11 +0200 Subject: [PATCH 015/207] keystone: store old admin password in node attr for update Use a node attribute to store a backup copy of the admin password, to be used when the admin password is changed. This replaces the previous method of managing the admin password change through a dedicated UI updated_password attribute and allows the user to change the password by simply modifying the regular password UI attribute. The advantage of having a backup copy of the admin password in the node attributes is that the new password value is immediately available after a password change, even during the chef compilation phase. 
The implications of this: - the new password value can immediately be used by all recipes during the chef compilation phase, which removes the need of having to deal with a password change scenario in every openstack cookbook using keystone for authentication - reconfiguring keystone with the new password doesn't need to happen in the compilation phase anymore. Doing it in the convergence phase, at the same time as everything else, removes this unnecessary exception from the keystone cookbook and aligns everything together better (cherry picked from commit de52e39e3884103ed081aa24cf612f817bc25e09) --- chef/cookbooks/keystone/recipes/server.rb | 66 +++++++++---------- .../keystone/113_remove_updated_password.rb | 14 ++++ chef/data_bags/crowbar/template-keystone.json | 3 +- .../crowbar/template-keystone.schema | 3 +- .../app/models/keystone_service.rb | 20 ------ .../keystone/_edit_attributes.html.haml | 7 +- .../config/locales/keystone/en.yml | 2 - 7 files changed, 50 insertions(+), 65 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/keystone/113_remove_updated_password.rb diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index d9837b4871..b433bbaed6 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -574,6 +574,39 @@ keystone_insecure = node["keystone"]["api"]["protocol"] == "https" && node[:keystone][:ssl][:insecure] +register_auth_hash = { user: node[:keystone][:admin][:username], + password: node[:keystone][:admin][:password], + tenant: node[:keystone][:admin][:tenant] } + +old_password = node[:keystone][:admin][:old_password] +old_register_auth_hash = register_auth_hash.clone +old_register_auth_hash[:password] = old_password + +keystone_register "update admin password" do + protocol node[:keystone][:api][:protocol] + insecure keystone_insecure + host my_admin_host + port node[:keystone][:api][:admin_port] + auth old_register_auth_hash + 
user_name node[:keystone][:admin][:username] + user_password node[:keystone][:admin][:password] + tenant_name node[:keystone][:admin][:tenant] + action :add_user + only_if do + node[:keystone][:bootstrap] && + (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) && + old_password && !old_password.empty? && + old_password != node[:keystone][:admin][:password] + end +end + +ruby_block "backup current admin password on node attributes" do + block do + node.set[:keystone][:admin][:old_password] = node[:keystone][:admin][:password] + node.save + end +end + # Creates admin user, admin role and admin project execute "keystone-manage bootstrap" do command "keystone-manage bootstrap \ @@ -593,39 +626,6 @@ end end -register_auth_hash = { user: node[:keystone][:admin][:username], - password: node[:keystone][:admin][:password], - tenant: node[:keystone][:admin][:tenant] } - -updated_password = node[:keystone][:admin][:updated_password] - -unless updated_password.nil? || - updated_password.empty? 
|| - updated_password == node[:keystone][:admin][:password] - - if !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) - keystone_register "update admin password" do - protocol node[:keystone][:api][:protocol] - insecure keystone_insecure - host my_admin_host - port node[:keystone][:api][:admin_port] - auth register_auth_hash - user_name node[:keystone][:admin][:username] - user_password updated_password - tenant_name node[:keystone][:admin][:tenant] - action :nothing - end.run_action(:add_user) - end - - ruby_block "update admin password on node attributes" do - block do - node.set[:keystone][:admin][:password] = updated_password - node.save - register_auth_hash[:password] = updated_password - end - action :nothing - end.run_action(:create) -end # Silly wake-up call - this is a hack; we use retries because the server was # just (re)started, and might not answer on the first try diff --git a/chef/data_bags/crowbar/migrate/keystone/113_remove_updated_password.rb b/chef/data_bags/crowbar/migrate/keystone/113_remove_updated_password.rb new file mode 100644 index 0000000000..1ab52963ad --- /dev/null +++ b/chef/data_bags/crowbar/migrate/keystone/113_remove_updated_password.rb @@ -0,0 +1,14 @@ +def upgrade(ta, td, a, d) + a["admin"].delete("updated_password") + nodes = NodeObject.find("roles:keystone-server") + nodes.each do |node| + node[:keystone][:admin][:old_password] = node[:keystone][:admin][:password] + node.save + end + return a, d +end + +def downgrade(ta, td, a, d) + a["admin"]["updated_password"] = ta["admin"]["updated_password"] + return a, d +end diff --git a/chef/data_bags/crowbar/template-keystone.json b/chef/data_bags/crowbar/template-keystone.json index cb9b78011a..9f1a44f243 100644 --- a/chef/data_bags/crowbar/template-keystone.json +++ b/chef/data_bags/crowbar/template-keystone.json @@ -41,8 +41,7 @@ "admin": { "tenant": "admin", "username": "admin", - "password": "crowbar", - "updated_password": "" + "password": "crowbar" }, "service": { 
"tenant": "service", diff --git a/chef/data_bags/crowbar/template-keystone.schema b/chef/data_bags/crowbar/template-keystone.schema index e16594cf64..668068ef1a 100644 --- a/chef/data_bags/crowbar/template-keystone.schema +++ b/chef/data_bags/crowbar/template-keystone.schema @@ -46,8 +46,7 @@ "admin": { "type": "map", "required": true, "mapping": { "tenant": { "type" : "str", "required" : true }, "username": { "type" : "str", "required" : true }, - "password": { "type" : "str", "required" : true }, - "updated_password": { "type" : "str", "required" : false } + "password": { "type" : "str", "required" : true } }}, "service": { "type": "map", "required": true, "mapping": { "tenant": { "type" : "str", "required" : true }, diff --git a/crowbar_framework/app/models/keystone_service.rb b/crowbar_framework/app/models/keystone_service.rb index 19e9c0f417..878e9d8816 100644 --- a/crowbar_framework/app/models/keystone_service.rb +++ b/crowbar_framework/app/models/keystone_service.rb @@ -133,26 +133,6 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Keystone apply_role_pre_chef_call: leaving") end - def update_proposal_status(inst, status, message, bc = @bc_name) - @logger.debug("update_proposal_status: enter #{inst} #{bc} #{status} #{message}") - - prop = Proposal.where(barclamp: bc, name: inst).first - unless prop.nil? - prop["deployment"][bc]["crowbar-status"] = status - prop["deployment"][bc]["crowbar-failed"] = message - # save the updated_password into the password field to update the raw_view - if status == "success" && !prop["attributes"][bc]["admin"]["updated_password"].blank? 
- prop["attributes"][bc]["admin"]["password"] = prop["attributes"][bc]["admin"]["updated_password"] - end - res = prop.save - else - res = true - end - - @logger.debug("update_proposal_status: exit #{inst} #{bc} #{status} #{message}") - res - end - def apply_role_post_chef_call(old_role, role, all_nodes) @logger.debug("Keystone apply_role_post_chef_call: entering #{all_nodes.inspect}") diff --git a/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml index 04b642918e..006f6c849d 100644 --- a/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml @@ -14,12 +14,7 @@ = string_field %w(default tenant) = string_field %w(admin username) - - if @proposal.active? - = password_field %w(admin updated_password) - .alert.alert-info - = t('.admin.updated_password_hint') - - else - = password_field %w(admin password) + = password_field %w(admin password) = boolean_field %w(default create_user), "data-showit" => "true", diff --git a/crowbar_framework/config/locales/keystone/en.yml b/crowbar_framework/config/locales/keystone/en.yml index 11fabcdafc..ac615688ce 100644 --- a/crowbar_framework/config/locales/keystone/en.yml +++ b/crowbar_framework/config/locales/keystone/en.yml @@ -32,8 +32,6 @@ en: admin: username: 'Administrator Username' password: 'Administrator Password' - updated_password: 'Update Administrator Password' - updated_password_hint: 'Changing the admin password directly in OpenStack its not supported. You can change the admin password directly using this field.' 
ssl_header: 'SSL Support' api: protocol: 'Protocol' From eb8fc99bcb1f09ec0ba4d89a3938e54f5b44aa3f Mon Sep 17 00:00:00 2001 From: Itxaka Date: Wed, 13 Sep 2017 10:43:42 +0200 Subject: [PATCH 016/207] Revert "rabbitmq: dont let the template changes restart if in cluster mode" This reverts commit 0dd6cc373e97832e0738bd2c8fe537eb4782cb7f. After commit e9dbeb20594b52dffc9c312b609cf65080f7c621 the service status check for pacemaker services works correctly for master nodes so this is not needed. (cherry picked from commit 05b312f87db5af354247acb48630d803b5ad5179) --- chef/cookbooks/rabbitmq/recipes/default.rb | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index 0a6d3242bf..5f2ebc5ab0 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -21,8 +21,6 @@ ha_enabled = node[:rabbitmq][:ha][:enabled] # we only do cluster if we do HA cluster_enabled = node[:rabbitmq][:cluster] && ha_enabled -# dont let the changes to the templates restart the rabbitmq in cluster mode -service_action = cluster_enabled ? 
:nothing : :restart quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 cluster_partition_handling = if cluster_enabled @@ -94,7 +92,7 @@ owner "root" group "root" mode 0644 - notifies service_action, "service[rabbitmq-server]" + notifies :restart, "service[rabbitmq-server]" end `systemd-detect-virt -v -q` @@ -111,7 +109,7 @@ cluster_partition_handling: cluster_partition_handling, hipe_compile: hipe_compile ) - notifies service_action, "service[rabbitmq-server]" + notifies :restart, "service[rabbitmq-server]" end # create a file with definitions to load on start, to be 100% sure we always @@ -154,7 +152,7 @@ environment "HOME" => "/root/" code "#{rabbitmq_plugins} #{rabbitmq_plugins_param} enable rabbitmq_management > /dev/null" not_if "#{rabbitmq_plugins} list -E | grep rabbitmq_management -q", environment: {"HOME" => "/root/"} - notifies service_action, "service[rabbitmq-server]" + notifies :restart, "service[rabbitmq-server]" end service "rabbitmq-server" do From f443ca27224f35aedc209547d566e6d0e301c39e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Fri, 2 Mar 2018 10:36:55 +0100 Subject: [PATCH 017/207] cinder: Set os_privileged_* values When the privileged user is not set, cinder does not provide correct credentials to nova client and migration of attached volumes fails. See https://bugs.launchpad.net/cinder/+bug/1614344 for upstream bug. The relevant code was rewritten for Pike so providing those options won't be necessary in newer releases. But as the work was not backported, we have to provide os_privileged values in Newton codebase if we want to make the migration work (bsc#1079763). 
--- chef/cookbooks/cinder/templates/default/cinder.conf.erb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/chef/cookbooks/cinder/templates/default/cinder.conf.erb b/chef/cookbooks/cinder/templates/default/cinder.conf.erb index 3a4decda66..127e72d4f2 100644 --- a/chef/cookbooks/cinder/templates/default/cinder.conf.erb +++ b/chef/cookbooks/cinder/templates/default/cinder.conf.erb @@ -12,6 +12,12 @@ wsgi_keep_alive = false state_path = /var/lib/cinder my_ip = <%= node[:cinder][:my_ip] %> +# os_privileged_* values are required for migrations of attached volumes +# See bsc#1079763 +os_privileged_user_name = <%= @keystone_settings['service_user'] %> +os_privileged_user_password = <%= @keystone_settings['service_password'] %> +os_privileged_user_tenant = <%= @keystone_settings['service_tenant'] %> + glance_api_servers = <%= @glance_server_protocol %>://<%= @glance_server_host %>:<%= @glance_server_port %> glance_api_version = 2 <% unless @glance_server_insecure.nil? -%> From 3f0c93ec915cc3203cf3417dad0c045a0c9d994d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 22 Feb 2018 13:37:07 +0100 Subject: [PATCH 018/207] nova: Add option to modify scheduler_default_filters In some cases user needs to modify default filters, e.g. 
remove DiskFilter when instances are only booted from volumes (bsc#1080997) --- chef/cookbooks/nova/recipes/config.rb | 1 + .../nova/templates/default/nova.conf.erb | 17 ++++++++++++++--- .../nova/122_add_scheduler_default_filters.rb | 9 +++++++++ chef/data_bags/crowbar/template-nova.json | 5 +++-- chef/data_bags/crowbar/template-nova.schema | 3 ++- 5 files changed, 29 insertions(+), 6 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/nova/122_add_scheduler_default_filters.rb diff --git a/chef/cookbooks/nova/recipes/config.rb b/chef/cookbooks/nova/recipes/config.rb index 6b98f8f1f8..c2ff5726f1 100644 --- a/chef/cookbooks/nova/recipes/config.rb +++ b/chef/cookbooks/nova/recipes/config.rb @@ -388,6 +388,7 @@ oat_appraiser_host: oat_server[:hostname], oat_appraiser_port: "8443", has_itxt: has_itxt, + default_filters: node[:nova][:scheduler][:default_filters], reserved_host_memory: reserved_host_memory, use_baremetal_filters: use_baremetal_filters, track_instance_changes: track_instance_changes, diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index 45269a1ebf..ae52fedd6a 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -34,9 +34,20 @@ reserved_host_memory_mb = <%= @reserved_host_memory %> cpu_allocation_ratio = <%= node[:nova][:scheduler][:cpu_allocation_ratio] %> ram_allocation_ratio = <%= node[:nova][:scheduler][:ram_allocation_ratio] %> disk_allocation_ratio = <%= node[:nova][:scheduler][:disk_allocation_ratio] %> -<% if @use_baremetal_filters %>scheduler_use_baremetal_filters = true<% end -%> -<% if @has_itxt -%>scheduler_available_filters = nova.scheduler.filters.standard_filters<% end %> -<% if @has_itxt %>scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,TrustedFilter<% end %> +<% if @use_baremetal_filters %> 
+scheduler_use_baremetal_filters = true +<% end -%> +<% if @has_itxt %> +scheduler_available_filters = nova.scheduler.filters.standard_filters +<% if @default_filters.empty? %> +scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,TrustedFilter +<% else %> +scheduler_default_filters = <%= @default_filters %> +<% end %> +<% elsif !@default_filters.empty? %> +scheduler_available_filters = nova.scheduler.filters.all_filters +scheduler_default_filters = <%= @default_filters %> +<% end %> <% unless @track_instance_changes %>scheduler_tracks_instance_changes = false<% end %> <% if @libvirt_type.eql?('vmware') -%> compute_driver = vmwareapi.VMwareVCDriver diff --git a/chef/data_bags/crowbar/migrate/nova/122_add_scheduler_default_filters.rb b/chef/data_bags/crowbar/migrate/nova/122_add_scheduler_default_filters.rb new file mode 100644 index 0000000000..0a64fb1f7f --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/122_add_scheduler_default_filters.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["scheduler"]["default_filters"] = ta["scheduler"]["default_filters"] + return a, d +end + +def downgrade(ta, td, a, d) + a["scheduler"].delete("default_filters") + return a, d +end diff --git a/chef/data_bags/crowbar/template-nova.json b/chef/data_bags/crowbar/template-nova.json index 99d78a4a0c..440640487a 100644 --- a/chef/data_bags/crowbar/template-nova.json +++ b/chef/data_bags/crowbar/template-nova.json @@ -36,7 +36,8 @@ "ram_allocation_ratio": 1.0, "cpu_allocation_ratio": 16.0, "disk_allocation_ratio": 1.0, - "reserved_host_memory_mb": 512 + "reserved_host_memory_mb": 512, + "default_filters": "" }, "ec2-api": { "db": { @@ -174,7 +175,7 @@ "nova": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 121, + "schema-revision": 122, "element_states": { "nova-controller": [ "readying", "ready", "applying" ], "nova-compute-ironic": [ "readying", "ready", "applying" ], diff 
--git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index cd07ef299f..56d296e684 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -53,7 +53,8 @@ "ram_allocation_ratio": { "type": "number" }, "cpu_allocation_ratio": { "type": "number" }, "disk_allocation_ratio": { "type": "number" }, - "reserved_host_memory_mb": { "type": "number" } + "reserved_host_memory_mb": { "type": "number" }, + "default_filters": { "type": "str", "required": true } } }, "ec2-api": { From af00a7aaa8791aba35becc123895a420bb881ff0 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Fri, 15 Sep 2017 14:54:51 +0200 Subject: [PATCH 019/207] rabbitmq: do not create the erlang cookie file on cluster deployment There should be no need to create the erlang cookie file for rabbit on cluster deployments as the pacemaker resource agent already creates it for us. (cherry picked from commit 0a00afc7cc23d18769a0febfe52555ceb8455456) --- chef/cookbooks/rabbitmq/attributes/default.rb | 1 - chef/cookbooks/rabbitmq/recipes/ha_cluster.rb | 9 --------- 2 files changed, 10 deletions(-) diff --git a/chef/cookbooks/rabbitmq/attributes/default.rb b/chef/cookbooks/rabbitmq/attributes/default.rb index 1ccd279d1d..3c4d0f4344 100644 --- a/chef/cookbooks/rabbitmq/attributes/default.rb +++ b/chef/cookbooks/rabbitmq/attributes/default.rb @@ -40,7 +40,6 @@ default[:rabbitmq][:cluster] = false default[:rabbitmq][:clustername] = "rabbit@#{node[:hostname]}" -default[:rabbitmq][:erlang_cookie_path] = "/var/lib/rabbitmq/.erlang.cookie" # ha default[:rabbitmq][:ha][:enabled] = false diff --git a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb index e2e7bca39e..49da5d12bb 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb @@ -17,15 +17,6 @@ agent_name = "ocf:rabbitmq:rabbitmq-server-ha" -# set the shared rabbitmq cookie -# cookie 
is automatically set during barclamp apply -# on the apply_role_pre_chef_call method -file node[:rabbitmq][:erlang_cookie_path] do - content node[:rabbitmq][:erlang_cookie] - owner node[:rabbitmq][:rabbitmq_user] - group node[:rabbitmq][:rabbitmq_group] -end - # create file that will be sourced by OCF resource agent on promote template "/etc/rabbitmq/ocf-promote" do source "ocf-promote.erb" From 42ce2b2a634e820e1350ac2a8d351762c03bee04 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Thu, 1 Mar 2018 10:33:45 +0100 Subject: [PATCH 020/207] neutron: enable automatic lbaas reschedule Enable the ability to automatically reschedule load balancers from LBaaS agents the server detects to have died. Previously, load balancers could be scheduled and realized across multiple LBaaS agents, however if a hypervisor died, the load balancers scheduled to that node would cease operation. Now, these load balancers will be automatically rescheduled to a different agent. (cherry picked from commit 40744d5f1bee5fd87c82677ad652241eafe03b2e) --- chef/cookbooks/neutron/recipes/network_agents.rb | 1 + .../neutron/templates/default/lbaas_agent.ini.erb | 1 + .../116_add_allow_automatic_lbass_agent_failover.rb | 11 +++++++++++ chef/data_bags/crowbar/template-neutron.json | 3 ++- chef/data_bags/crowbar/template-neutron.schema | 1 + 5 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/116_add_allow_automatic_lbass_agent_failover.rb diff --git a/chef/cookbooks/neutron/recipes/network_agents.rb b/chef/cookbooks/neutron/recipes/network_agents.rb index 8c143d32c0..d3a13f946c 100644 --- a/chef/cookbooks/neutron/recipes/network_agents.rb +++ b/chef/cookbooks/neutron/recipes/network_agents.rb @@ -180,6 +180,7 @@ debug: node[:neutron][:debug], interface_driver: interface_driver, user_group: node[:neutron][:platform][:lbaas_haproxy_group], + allow_automatic_lbaas_agent_failover: node[:neutron][:allow_automatic_lbaas_agent_failover], device_driver: 
"neutron_lbaas.drivers.haproxy.namespace_driver.HaproxyNSDriver" ) end diff --git a/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb b/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb index e0958c381d..eb3830e702 100644 --- a/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb @@ -2,5 +2,6 @@ device_driver = <%= @device_driver %> interface_driver = <%= @interface_driver %> debug = <%= @debug %> +allow_automatic_lbaas_agent_failover = <%= @allow_automatic_lbaas_agent_failover %> [haproxy] user_group = <%= @user_group %> diff --git a/chef/data_bags/crowbar/migrate/neutron/116_add_allow_automatic_lbass_agent_failover.rb b/chef/data_bags/crowbar/migrate/neutron/116_add_allow_automatic_lbass_agent_failover.rb new file mode 100644 index 0000000000..33f690705d --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/116_add_allow_automatic_lbass_agent_failover.rb @@ -0,0 +1,11 @@ +def upgrade(ta, td, a, d) + attr = "allow_automatic_lbaas_agent_failover" + a[attr] = ta[attr] unless a.key? attr + return a, d +end + +def downgrade(ta, td, a, d) + attr = "allow_automatic_lbaas_agent_failover" + a.delete(attr) unless ta.key? 
attr + return a, d +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 27ef442474..5a0c974fe0 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -14,6 +14,7 @@ "rpc_workers": 1, "use_lbaas": true, "lbaasv2_driver": "haproxy", + "allow_automatic_lbaas_agent_failover": true, "use_l2pop": false, "l2pop": { "agent_boot_time": 180 @@ -179,7 +180,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 115, + "schema-revision": 116, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ] diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index e934751503..4deb492348 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -19,6 +19,7 @@ "rpc_workers": { "type": "int", "required": true }, "use_lbaas": { "type": "bool", "required": true }, "lbaasv2_driver": { "type": "str", "required": true }, + "allow_automatic_lbaas_agent_failover": { "type": "bool", "required": true }, "use_l2pop": { "type": "bool", "required": true }, "l2pop": { "type": "map", "required": true, "mapping": { "agent_boot_time": { "type" : "int", "required" : true } From 0d4db777136186ffafcfe7ae7631b5874a1ac8c6 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Fri, 2 Mar 2018 14:24:16 +0100 Subject: [PATCH 021/207] rabbitmq: allow pacemaker service restart Up until now we were relaying on the restart for rabbit while on HA to silently fail due to the resource service name called rabbitmq-server while the pacemaker reasource is called rabbitmq. This triggered a restart but it silently failed. Instead pass the correct service name to allow for the services to be properly promoted/demoted/restarted even on HA. 
(cherry picked from commit adee5bb2b5cc24b2abc5e707a13b18043f2d1bf3) --- chef/cookbooks/rabbitmq/recipes/default.rb | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index 3e981c38dc..868981885d 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -22,6 +22,8 @@ # we only do cluster if we do HA cluster_enabled = node[:rabbitmq][:cluster] && ha_enabled quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 +crm_resource_stop_cmd = cluster_enabled ? "force-demote" : "force-stop" +crm_resource_start_cmd = cluster_enabled ? "force-promote" : "force-start" cluster_partition_handling = if cluster_enabled if CrowbarPacemakerHelper.num_corosync_nodes(node) > 2 @@ -157,7 +159,14 @@ end service "rabbitmq-server" do - supports restart: true, start: true, stop: true, status: true + supports restart: true, + start: true, + stop: true, + status: true, + crm_resource_stop_cmd: crm_resource_stop_cmd, + crm_resource_start_cmd: crm_resource_start_cmd, + restart_crm_resource: true, + pacemaker_resource_name: "rabbitmq" action [:enable, :start] provider Chef::Provider::CrowbarPacemakerService if ha_enabled end From a1ed6d0eb24adb09a42ef92286916808ac5e664b Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Mon, 29 Jan 2018 14:04:02 +0100 Subject: [PATCH 022/207] swift: Fix switch detection 1. The addresses in `crowbar_wall` are full CIDR entries so netmask needs to be removed before comparing to plain `storage_ip`. 2. Sometimes switch_config is not fully populated for all nodes (yet). Fall back to default in such cases. 
(cherry picked from commit 186ebc0213234ff40f32408dd4bc28692e1812e6) --- chef/cookbooks/swift/libraries/rack_awareness.rb | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/swift/libraries/rack_awareness.rb b/chef/cookbooks/swift/libraries/rack_awareness.rb index 266aeee01c..2ad76ebdcb 100644 --- a/chef/cookbooks/swift/libraries/rack_awareness.rb +++ b/chef/cookbooks/swift/libraries/rack_awareness.rb @@ -38,7 +38,10 @@ def get_node_sw(n) #get storage iface iface="" n[:crowbar_wall][:network][:interfaces].keys.each do |ifn| - if n[:crowbar_wall][:network][:interfaces][ifn.to_s][:addresses].include?(storage_ip) + ifaddrs = n[:crowbar_wall][:network][:interfaces][ifn.to_s][:addresses] + # strip netmasks from CIDR addresses + ifaddrs.map! { |addr| addr[%r{^[^/]+}] } + if ifaddrs.include?(storage_ip) iface=ifn break end @@ -52,7 +55,7 @@ def get_node_sw(n) #fallback to something default iface=n[:crowbar_ohai][:switch_config].keys[0] end - sw_name=n[:crowbar_ohai][:switch_config][iface][:switch_name] + return n[:crowbar_ohai][:switch_config].fetch(iface, switch_name: -1)[:switch_name] end def switch_to_zone() From 47f380ce50c80a54fa4981cccc978e27f3c73739 Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Fri, 16 Feb 2018 14:22:32 +0100 Subject: [PATCH 023/207] mariadb: Remove redundant config values After the MariaDB 10.2 switch the new default makes these entrys redundant so there are not needed anymore. 
(cherry picked from commit 1aaa8c20d1e34022354ca2f0e39e4d1532910c8a) --- chef/cookbooks/mysql/attributes/server.rb | 3 --- chef/cookbooks/mysql/templates/default/my.cnf.erb | 4 ---- chef/cookbooks/mysql/templates/default/tuning.cnf.erb | 5 ----- 3 files changed, 12 deletions(-) diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb index ab87e8ac3f..93c7b39ef1 100644 --- a/chef/cookbooks/mysql/attributes/server.rb +++ b/chef/cookbooks/mysql/attributes/server.rb @@ -26,9 +26,6 @@ default[:database][:mysql][:ebs_vol_size] = 50 end -default[:database][:mysql][:tunable][:max_allowed_packet] = "16M" -default[:database][:mysql][:tunable][:thread_cache_size] = 8 - # Ports to bind to when haproxy is used default[:mysql][:ha][:ports][:admin_port] = 3306 diff --git a/chef/cookbooks/mysql/templates/default/my.cnf.erb b/chef/cookbooks/mysql/templates/default/my.cnf.erb index 669d02690d..a659b1b087 100644 --- a/chef/cookbooks/mysql/templates/default/my.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/my.cnf.erb @@ -13,7 +13,3 @@ tmpdir = <%= node[:database][:mysql][:tmpdir] %> # Instead of skip-networking the default is now to listen only on # localhost which is more compatible and is not less secure. 
bind-address = <%= node[:database][:mysql][:bind_address] %> - -[mysqldump] -# FIXME: Remove after MariaDB 10.2.X switch (new default is 16777216) -max_allowed_packet = <%= node[:database][:mysql][:tunable][:max_allowed_packet] %> diff --git a/chef/cookbooks/mysql/templates/default/tuning.cnf.erb b/chef/cookbooks/mysql/templates/default/tuning.cnf.erb index f32f63a420..7634b64be6 100644 --- a/chef/cookbooks/mysql/templates/default/tuning.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/tuning.cnf.erb @@ -15,8 +15,3 @@ tmp_table_size = <%= @tmp_table_size %>M max_heap_table_size = <%= @max_heap_table_size %>M skip_name_resolve = 1 - -# FIXME: Remove after MariaDB 10.2.X switch (new default is auto) -thread_cache_size = <%= node[:database][:mysql][:tunable][:thread_cache_size] %> -# FIXME: Remove after MariaDB 10.2.X switch (new default is 16777216) -max_allowed_packet = <%= node[:database][:mysql][:tunable][:max_allowed_packet] %> From ecfbd9d99892f31823b56fe9d7661e9e20293ae5 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 9 Mar 2018 10:00:39 +0100 Subject: [PATCH 024/207] memcached: Disable UDP by default (bsc#1083903) This reduces the ability to mis-use it for cache amplification attacks, and we don't really need it. (cherry picked from commit 07f036903c5fbf95d2f7bb95e1c26dc61b62ec14) --- .../memcached/templates/default/memcached.sysconfig.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb b/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb index 8a5451d7f7..917b6c976e 100644 --- a/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb +++ b/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb @@ -10,7 +10,7 @@ # # see man 1 memcached for more # -MEMCACHED_PARAMS="<%= @daemonize ? "-d" : "" %> -m <%= @memory %> -l <%= @listen %> -p <%= @port %> -c <%= @max_connections %>" +MEMCACHED_PARAMS="<%= @daemonize ? 
"-d" : "" %> -U 0 -m <%= @memory %> -l <%= @listen %> -p <%= @port %> -c <%= @max_connections %>" ## Path: Network/WWW/Memcached ## Description: username memcached should run as From 052d29a371380cb86a890d8a27aa9182bd47516d Mon Sep 17 00:00:00 2001 From: Nicolas Bock Date: Thu, 9 Feb 2017 04:31:56 -0700 Subject: [PATCH 025/207] nova: Added placement api service The nova-placement-api service is mandatory in Ocata. (cherry picked from commit 7714c5614d211f004e348683e3f4f12ff246b35e) --- chef/cookbooks/nova/attributes/default.rb | 17 +++ chef/cookbooks/nova/recipes/config.rb | 16 +++ chef/cookbooks/nova/recipes/database.rb | 2 +- chef/cookbooks/nova/recipes/placement_api.rb | 117 ++++++++++++++++++ .../nova/recipes/role_nova_controller.rb | 1 + .../templates/default/nova-placement.conf.erb | 14 +++ .../crowbar/migrate/nova/123_placement_api.rb | 15 +++ chef/data_bags/crowbar/template-nova.json | 8 +- chef/data_bags/crowbar/template-nova.schema | 13 ++ crowbar_framework/app/models/nova_service.rb | 1 + 10 files changed, 201 insertions(+), 3 deletions(-) create mode 100644 chef/cookbooks/nova/recipes/placement_api.rb create mode 100644 chef/cookbooks/nova/templates/default/nova-placement.conf.erb create mode 100644 chef/data_bags/crowbar/migrate/nova/123_placement_api.rb diff --git a/chef/cookbooks/nova/attributes/default.rb b/chef/cookbooks/nova/attributes/default.rb index 4341793981..87d63bab49 100644 --- a/chef/cookbooks/nova/attributes/default.rb +++ b/chef/cookbooks/nova/attributes/default.rb @@ -21,6 +21,7 @@ default[:nova][:debug] = false default[:nova][:max_header_line] = 16384 default[:nova][:config_file] = "/etc/nova/nova.conf.d/100-nova.conf" +default[:nova][:placement_config_file] = "/etc/nova/nova.conf.d/101-nova-placement.conf" # # Database Settings @@ -47,6 +48,14 @@ default[:nova][:api_db][:max_overflow] = nil default[:nova][:api_db][:pool_timeout] = nil +# +# Placement API database settings +# +default[:nova][:placement_db][:password] = nil 
+default[:nova][:placement_db][:user] = "placement" +default[:nova][:placement_db][:database] = "placement" + + # Feature settings default[:nova][:use_migration] = false default[:nova][:setup_shared_instance_storage] = false @@ -83,6 +92,12 @@ default[:nova][:scheduler][:disk_allocation_ratio] = 1.0 default[:nova][:scheduler][:reserved_host_memory_mb] = 512 +# +# Placement Settings +# +default[:nova][:placement_service_user] = "placement" +default[:nova][:placement_service_password] = "placement" + # # Shared Settings # @@ -117,6 +132,7 @@ default[:nova][:ports][:api_ec2] = 8788 default[:nova][:ports][:api] = 8774 +default[:nova][:ports][:placement_api] = 8780 default[:nova][:ports][:metadata] = 8775 default[:nova][:ports][:objectstore] = 3333 default[:nova][:ports][:novncproxy] = 6080 @@ -142,6 +158,7 @@ default[:nova][:ha][:ports][:objectstore] = 5553 default[:nova][:ha][:ports][:novncproxy] = 5554 default[:nova][:ha][:ports][:serialproxy] = 5556 +default[:nova][:ha][:ports][:placement_api] = 5560 default[:nova][:ha][:compute][:enabled] = false default[:nova][:ha][:compute][:compute][:op][:monitor][:interval] = "10s" diff --git a/chef/cookbooks/nova/recipes/config.rb b/chef/cookbooks/nova/recipes/config.rb index c2ff5726f1..79a9185f16 100644 --- a/chef/cookbooks/nova/recipes/config.rb +++ b/chef/cookbooks/nova/recipes/config.rb @@ -48,9 +48,11 @@ include_recipe "#{db_settings[:backend_name]}::python-client" database_connection = fetch_database_connection_string(node[:nova][:db]) + placement_database_connection = fetch_database_connection_string(node[:nova][:placement_db]) api_database_connection = fetch_database_connection_string(node[:nova][:api_db]) else database_connection = nil + placement_database_connection = nil api_database_connection = nil end @@ -324,6 +326,20 @@ include_recipe "crowbar-openstack::common" end +template node[:nova][:placement_config_file] do + source "nova-placement.conf.erb" + user "root" + group node[:nova][:group] + mode 0640 + 
variables( + keystone_settings: keystone_settings, + placement_database_connection: placement_database_connection, + placement_service_user: node["nova"]["placement_service_user"], + placement_service_password: node["nova"]["placement_service_password"] + ) +end + + template node[:nova][:config_file] do source "nova.conf.erb" user "root" diff --git a/chef/cookbooks/nova/recipes/database.rb b/chef/cookbooks/nova/recipes/database.rb index 89d73f8d01..7582b919c4 100644 --- a/chef/cookbooks/nova/recipes/database.rb +++ b/chef/cookbooks/nova/recipes/database.rb @@ -31,7 +31,7 @@ only_if { ha_enabled } end -[node[:nova][:db], node[:nova][:api_db]].each do |d| +[node[:nova][:db], node[:nova][:api_db], node[:nova][:placement_db]].each do |d| # Creates empty nova database database "create #{d[:database]} database" do connection db_settings[:connection] diff --git a/chef/cookbooks/nova/recipes/placement_api.rb b/chef/cookbooks/nova/recipes/placement_api.rb new file mode 100644 index 0000000000..46fa962c6a --- /dev/null +++ b/chef/cookbooks/nova/recipes/placement_api.rb @@ -0,0 +1,117 @@ +# +# Cookbook Name:: nova +# Recipe:: placement_api +# +# Copyright 2017, SUSE Linux GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +include_recipe "apache2" +include_recipe "apache2::mod_wsgi" +include_recipe "nova::config" + +keystone_settings = KeystoneHelper.keystone_settings(node, @cookbook_name) + +package "openstack-nova-placement-api" + +api_ha_enabled = node[:nova][:ha][:enabled] +admin_api_host = CrowbarHelper.get_host_for_admin_url(node, api_ha_enabled) +public_api_host = CrowbarHelper.get_host_for_public_url( + node, node[:nova][:ssl][:enabled], api_ha_enabled +) +api_port = node[:nova][:ports][:placement_api] + +api_protocol = node[:nova][:ssl][:enabled] ? "https" : "http" + +crowbar_pacemaker_sync_mark "wait-nova-placement_register" if api_ha_enabled + +register_auth_hash = { user: keystone_settings["admin_user"], + password: keystone_settings["admin_password"], + tenant: keystone_settings["admin_tenant"] } + +keystone_register "register placement user '#{node["nova"]["placement_service_user"]}'" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name node["nova"]["placement_service_user"] + user_password node["nova"]["placement_service_password"] + tenant_name keystone_settings["service_tenant"] + action :add_user +end + +keystone_register "give placement user '#{node["nova"]["placement_service_user"]}' access" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name node["nova"]["placement_service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "admin" + action :add_access +end + +keystone_register "register placement service" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + service_name "placement" + 
service_type "placement" + service_description "Openstack Placement Service" + action :add_service +end + +keystone_register "register placement endpoint" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + endpoint_service "placement" + endpoint_region keystone_settings["endpoint_region"] + endpoint_publicURL "#{api_protocol}://#{public_api_host}:#{api_port}" + endpoint_adminURL "#{api_protocol}://#{admin_api_host}:#{api_port}" + endpoint_internalURL "#{api_protocol}://#{admin_api_host}:#{api_port}" + action :add_endpoint_template +end + +if node[:nova][:ha][:enabled] + admin_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "admin").address + bind_host = admin_address + bind_port = node[:nova][:ha][:ports][:placement_api] +else + bind_host = "0.0.0.0" + bind_port = node[:nova][:ports][:placement_api] +end + +node.normal[:apache][:listen_ports_crowbar] ||= {} +node.normal[:apache][:listen_ports_crowbar][:nova] = { plain: [bind_port] } + +crowbar_openstack_wsgi "WSGI entry for nova-placement-api" do + bind_host bind_host + bind_port bind_port + daemon_process "nova-placement-api" + user node[:nova][:user] + group node[:nova][:group] +end + +apache_site "nova-placement-api.conf" do + enable true +end + +crowbar_pacemaker_sync_mark "create-nova-placement_register" if api_ha_enabled diff --git a/chef/cookbooks/nova/recipes/role_nova_controller.rb b/chef/cookbooks/nova/recipes/role_nova_controller.rb index 51e9a9b8dd..cc83b38362 100644 --- a/chef/cookbooks/nova/recipes/role_nova_controller.rb +++ b/chef/cookbooks/nova/recipes/role_nova_controller.rb @@ -18,6 +18,7 @@ include_recipe "nova::config" include_recipe "nova::database" include_recipe "nova::api" + include_recipe "nova::placement_api" include_recipe "nova::cert" include_recipe "nova::instances" include_recipe "nova::scheduler" diff --git 
a/chef/cookbooks/nova/templates/default/nova-placement.conf.erb b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb new file mode 100644 index 0000000000..454030575a --- /dev/null +++ b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb @@ -0,0 +1,14 @@ +[placement] +os_region_name = <%= @keystone_settings['endpoint_region'] %> +auth_url = <%= @keystone_settings['admin_auth_url'] %> +project_name = <%= @keystone_settings['service_tenant'] %> +project_domain_name = <%= @keystone_settings["admin_domain"] %> +user_domain_name = <%= @keystone_settings['admin_domain'] %> +auth_type = password +username = <%= @placement_service_user %> +password = <%= @placement_service_password %> + +<% if @placement_database_connection -%> +[placement_database] +connection = <%= @placement_database_connection %> +<% end -%> \ No newline at end of file diff --git a/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb b/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb new file mode 100644 index 0000000000..fa95865afc --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb @@ -0,0 +1,15 @@ +def upgrade(ta, td, a, d) + a["placement_db"] = ta["placement_db"] + + if a["placement_db"]["password"].nil? || a["placement_db"]["password"].empty? 
+ service = ServiceObject.new "fake-logger" + a["placement_db"]["password"] = service.random_password + end + + return a, d +end + +def downgrade(ta, td, a, d) + a.delete("placement_db") + return a, d +end diff --git a/chef/data_bags/crowbar/template-nova.json b/chef/data_bags/crowbar/template-nova.json index 440640487a..55adf6e00a 100644 --- a/chef/data_bags/crowbar/template-nova.json +++ b/chef/data_bags/crowbar/template-nova.json @@ -65,7 +65,11 @@ "user": "nova_api", "database": "nova_api" }, - + "placement_db": { + "password": "", + "user": "placement", + "database": "placement" + }, "rbd": { "user": "", "secret_uuid": "" @@ -175,7 +179,7 @@ "nova": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 122, + "schema-revision": 123, "element_states": { "nova-controller": [ "readying", "ready", "applying" ], "nova-compute-ironic": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index 56d296e684..53ac604341 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -111,6 +111,19 @@ "min_pool_size": { "type": "int", "required": false } } }, + "placement_db": { + "type": "map", + "required": true, + "mapping": { + "password": { "type": "str", "required": true }, + "user": { "type": "str", "required": true }, + "database": { "type": "str", "required": true }, + "max_pool_size": { "type": "int", "required": false }, + "max_overflow": { "type": "int", "required": false }, + "pool_timeout": { "type": "int", "required": false }, + "min_pool_size": { "type": "int", "required": false } + } + }, "rbd": { "type": "map", "required": false, "mapping": { "user": { "type": "str", "required": true }, diff --git a/crowbar_framework/app/models/nova_service.rb b/crowbar_framework/app/models/nova_service.rb index f0728079a3..8a9a8c0638 100644 --- a/crowbar_framework/app/models/nova_service.rb +++ 
b/crowbar_framework/app/models/nova_service.rb @@ -187,6 +187,7 @@ def create_proposal base["attributes"]["nova"]["service_password"] = random_password base["attributes"]["nova"]["api_db"]["password"] = random_password + base["attributes"]["nova"]["placement_db"]["password"] = random_password base["attributes"]["nova"]["db"]["password"] = random_password base["attributes"]["nova"]["neutron_metadata_proxy_shared_secret"] = random_password From 19ba0cfa9c223c1780219da8ab35f702586b0754 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Tue, 25 Jul 2017 17:17:22 +0200 Subject: [PATCH 026/207] nova: Fix placement API setup in HA haproxy needs to be setup in front of the placement API services. Otherwise no service is listening on the correct port and the placement API is not responsive. (cherry picked from commit 224e6f61c2546406b975ecef434ed4dbc8604d60) --- chef/cookbooks/nova/recipes/controller_ha.rb | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/chef/cookbooks/nova/recipes/controller_ha.rb b/chef/cookbooks/nova/recipes/controller_ha.rb index 9a5b135716..92ef398545 100644 --- a/chef/cookbooks/nova/recipes/controller_ha.rb +++ b/chef/cookbooks/nova/recipes/controller_ha.rb @@ -33,6 +33,14 @@ action :nothing end.run_action(:create) +haproxy_loadbalancer "nova-placement-api" do + address "0.0.0.0" + port node[:nova][:ports][:placement_api] + use_ssl node[:nova][:ssl][:enabled] + servers CrowbarPacemakerHelper.haproxy_servers_for_service(node, "nova", "nova-controller", "placement_api") + action :nothing +end.run_action(:create) + haproxy_loadbalancer "nova-metadata" do address cluster_admin_ip port node[:nova][:ports][:metadata] From 07f8333709a73210ed90bb8e6fb2d7bd7ebe567d Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Wed, 23 Aug 2017 15:19:02 +0200 Subject: [PATCH 027/207] nova: Randomize placement password Without this change, the nova placement user has a default password of "placement", set in attributes.rb, and it is impossible to change it. 
This is a massive security flaw for obvious reasons. This change adds the ability to change the placement service password via the raw view of the nova barclamp and also ensures it defaults to a random one. (cherry picked from commit 6a28e53e93ef4a09999d1f277ac6726415f2012c) --- chef/data_bags/crowbar/migrate/nova/123_placement_api.rb | 6 ++++++ chef/data_bags/crowbar/template-nova.schema | 1 + crowbar_framework/app/models/nova_service.rb | 1 + 3 files changed, 8 insertions(+) diff --git a/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb b/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb index fa95865afc..0cf3764d78 100644 --- a/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb +++ b/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb @@ -6,10 +6,16 @@ def upgrade(ta, td, a, d) a["placement_db"]["password"] = service.random_password end + if a["placement_service_password"].nil? || a["placement_service_password"].empty? + service = ServiceObject.new "fake-logger" + a["placement_service_password"] = service.random_password + end + return a, d end def downgrade(ta, td, a, d) a.delete("placement_db") + a.delete("placement_service_password") return a, d end diff --git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index 53ac604341..c0ad6c37cc 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -18,6 +18,7 @@ "keystone_instance": { "type": "str", "required": true }, "service_user": { "type": "str", "required": true }, "service_password": { "type": "str" }, + "placement_service_password": { "type": "str" }, "glance_instance": { "type": "str", "required": true }, "cinder_instance": { "type": "str", "required": true }, "neutron_instance": { "type": "str", "required": true }, diff --git a/crowbar_framework/app/models/nova_service.rb b/crowbar_framework/app/models/nova_service.rb index 8a9a8c0638..20d7603ab6 100644 --- 
a/crowbar_framework/app/models/nova_service.rb +++ b/crowbar_framework/app/models/nova_service.rb @@ -186,6 +186,7 @@ def create_proposal base["attributes"][@bc_name]["neutron_instance"] = find_dep_proposal("neutron") base["attributes"]["nova"]["service_password"] = random_password + base["attributes"]["nova"]["placement_service_password"] = random_password base["attributes"]["nova"]["api_db"]["password"] = random_password base["attributes"]["nova"]["placement_db"]["password"] = random_password base["attributes"]["nova"]["db"]["password"] = random_password From db3f404699f2b32a3d376cd8bcb5b57fbc44c2dd Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Wed, 23 Aug 2017 14:49:39 +0200 Subject: [PATCH 028/207] nova: Subscribe to placement config (bsc#1055188) Without this change, changing the placement configuration after the initial deployment will have no effect on any of the nova services. This is a problem if we want to change the way the compute service connects to the placement API after the initial deployment. This patch ensures that nova services subscribe to both configuration files. The placement config file is really just an extension of the main config file, so it makes sense for the services to subscribe to both. 
(cherry picked from commit cc63475ba3fe31bcce82f53f8b0a60bef19a6c90) --- chef/cookbooks/nova/definitions/nova_package.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/nova/definitions/nova_package.rb b/chef/cookbooks/nova/definitions/nova_package.rb index d133f38f1d..c6ed620e96 100644 --- a/chef/cookbooks/nova/definitions/nova_package.rb +++ b/chef/cookbooks/nova/definitions/nova_package.rb @@ -55,7 +55,8 @@ end end - subscribes :restart, resources(template: node[:nova][:config_file]) + subscribes :restart, [resources(template: node[:nova][:config_file]), + resources(template: node[:nova][:placement_config_file])] provider Chef::Provider::CrowbarPacemakerService if params[:use_pacemaker_provider] end From 56cccba211f452a212e8976dfcd893bf1372e2d9 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Wed, 23 Aug 2017 23:04:29 +0200 Subject: [PATCH 029/207] nova: Use internal placement url (bsc#1055188) By default, the nova compute service tries to access the public endpoint of the placement API. The "public" network defined in network.json isn't accessible to the compute nodes. Without this patch, the nova-compute service is unable to connect to the placement API and so is unable to be scheduled. This patch forces the service to request the internal placement endpoint, which it can reach. 
(cherry picked from commit ce956698852f957598c5d02ca2f1b992c72f0fc6) --- chef/cookbooks/nova/templates/default/nova-placement.conf.erb | 1 + 1 file changed, 1 insertion(+) diff --git a/chef/cookbooks/nova/templates/default/nova-placement.conf.erb b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb index 454030575a..d536bb3dbd 100644 --- a/chef/cookbooks/nova/templates/default/nova-placement.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb @@ -7,6 +7,7 @@ user_domain_name = <%= @keystone_settings['admin_domain'] %> auth_type = password username = <%= @placement_service_user %> password = <%= @placement_service_password %> +os_interface = internal <% if @placement_database_connection -%> [placement_database] From 4db160a796ba8530a8f09b465209b4e198b04aad Mon Sep 17 00:00:00 2001 From: Stefan Nica Date: Tue, 26 Sep 2017 14:04:00 +0200 Subject: [PATCH 030/207] nova: fix SSL configuration for nova-placement Add the missing SSL parameters to the WSGI configuration for the nova-placement API service, when SSL is enabled for nova . Also add the missing [placement]/insecure configuration option to the nova placement configuration file. 
(cherry picked from commit 2dc9358c6405a60e9fe8ba58592928a402c51f38) --- chef/cookbooks/nova/recipes/config.rb | 3 ++- chef/cookbooks/nova/recipes/placement_api.rb | 6 ++++++ .../nova/templates/default/nova-placement.conf.erb | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/nova/recipes/config.rb b/chef/cookbooks/nova/recipes/config.rb index 79a9185f16..5173021913 100644 --- a/chef/cookbooks/nova/recipes/config.rb +++ b/chef/cookbooks/nova/recipes/config.rb @@ -335,7 +335,8 @@ keystone_settings: keystone_settings, placement_database_connection: placement_database_connection, placement_service_user: node["nova"]["placement_service_user"], - placement_service_password: node["nova"]["placement_service_password"] + placement_service_password: node["nova"]["placement_service_password"], + placement_service_insecure: node[:nova][:ssl][:insecure] ) end diff --git a/chef/cookbooks/nova/recipes/placement_api.rb b/chef/cookbooks/nova/recipes/placement_api.rb index 46fa962c6a..59c9f84de7 100644 --- a/chef/cookbooks/nova/recipes/placement_api.rb +++ b/chef/cookbooks/nova/recipes/placement_api.rb @@ -108,6 +108,12 @@ daemon_process "nova-placement-api" user node[:nova][:user] group node[:nova][:group] + ssl_enable node[:nova][:ssl][:enabled] + ssl_certfile node[:nova][:ssl][:certfile] + ssl_keyfile node[:nova][:ssl][:keyfile] + if node[:nova][:ssl][:cert_required] + ssl_cacert node[:nova][:ssl][:ca_certs] + end end apache_site "nova-placement-api.conf" do diff --git a/chef/cookbooks/nova/templates/default/nova-placement.conf.erb b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb index d536bb3dbd..07487d6680 100644 --- a/chef/cookbooks/nova/templates/default/nova-placement.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb @@ -8,6 +8,7 @@ auth_type = password username = <%= @placement_service_user %> password = <%= @placement_service_password %> os_interface = internal +insecure = <%= 
@placement_service_insecure %> <% if @placement_database_connection -%> [placement_database] From ccff01f3d3da49a8a4590dbe9fdd41ec97abeb47 Mon Sep 17 00:00:00 2001 From: Jan Zerebecki Date: Mon, 19 Mar 2018 14:06:26 +0100 Subject: [PATCH 031/207] barbican: Add missing roles used in policy.json (bsc#1081573) Add the key-manager:service-admin, observer and audit roles. These roles are used in the policy.json file for the barbican API. (cherry picked from commit c553445307a390d679020af43c5d88c2d525cf71) Backport of: https://github.com/crowbar/crowbar-openstack/pull/1593 --- chef/cookbooks/barbican/recipes/api.rb | 66 ++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/chef/cookbooks/barbican/recipes/api.rb b/chef/cookbooks/barbican/recipes/api.rb index 6e7ba9d60f..4aa6d429ad 100644 --- a/chef/cookbooks/barbican/recipes/api.rb +++ b/chef/cookbooks/barbican/recipes/api.rb @@ -124,6 +124,28 @@ action :add_access end +keystone_register "add key-manager:service-admin role for barbican" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + role_name "key-manager:service-admin" + action :add_role +end + +keystone_register "give barbican user access as key-manager:service-admin" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name keystone_settings["service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "key-manager:service-admin" + action :add_access +end + keystone_register "add creator role for barbican" do protocol keystone_settings["protocol"] insecure keystone_settings["insecure"] @@ -146,6 +168,50 @@ action :add_access end +keystone_register "add observer role for barbican" do + protocol keystone_settings["protocol"] + insecure 
keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + role_name "observer" + action :add_role +end + +keystone_register "give barbican user access as observer" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name keystone_settings["service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "observer" + action :add_access +end + +keystone_register "add audit role for barbican" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + role_name "audit" + action :add_role +end + +keystone_register "give barbican user access as audit" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name keystone_settings["service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "audit" + action :add_access +end + crowbar_pacemaker_sync_mark "create-barbican_register" if ha_enabled if node[:barbican][:ha][:enabled] From 3c514bff6089fde2a405b634b5b98e57f39f8636 Mon Sep 17 00:00:00 2001 From: Madhu Mohan Nelemane Date: Wed, 21 Mar 2018 17:46:21 +0100 Subject: [PATCH 032/207] neutron: [cisco-aci]: Add support for vpc_pairs. Cisco ACI supports connecting to the fabric either through direct port mappings that in which case, we need to specify the exact ports under [apic-switches] configuration. The pattern usually looks like "1/34" for interface Eth1 and Port 34. Associating these ports creates new policy profiles and groups for the integration. 
This is not feasible if the ACI is already running another production cloud and we need to integrate openstack as a supplement (which is the case with most ACI customers). However, ACI also allows for associating pre-existing profiles using VPC_Pairs. This can be configured by specifying the apic_vpc_pairs in ml2_conf_cisco_apic.ini. This commit provides an option to test with either modes (VPC Pairs or direct connection) as the use-case may be. (cherry picked from commit 7e4c3da1063f94ec31e2f353a66a6eb949a6953e) --- chef/cookbooks/neutron/recipes/cisco_apic_support.rb | 1 + .../neutron/templates/default/ml2_conf_cisco_apic.ini.erb | 4 ++++ chef/data_bags/crowbar/template-neutron.schema | 5 +++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb index 3ae366a60f..f37b6d81d1 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb @@ -28,6 +28,7 @@ owner "root" group node[:neutron][:platform][:group] variables( + vpc_pairs: node[:neutron][:apic][:vpc_pairs], apic_switches: aciswitches, ml2_mechanism_drivers: node[:neutron][:ml2_mechanism_drivers], policy_drivers: "implicit_policy,apic", diff --git a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb index 06077fff92..f54c369586 100644 --- a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb +++ b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb @@ -15,6 +15,10 @@ enable_optimized_metadata = <%= node[:neutron][:apic][:optimized_metadata] %> enable_optimized_dhcp = <%= node[:neutron][:apic][:optimized_dhcp] %> apic_provision_infra = True apic_provision_hostlinks = True +<% unless @vpc_pairs.nil? 
-%> +apic_vpc_pairs = <%= @vpc_pairs %> +<% end -%> + <% @apic_switches.keys.each do |ip| -%> [apic_switch:<%=ip%>] <% if @apic_switches[ip].key?(:switch_ports) -%> diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index 4deb492348..e9e29bed4c 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -51,8 +51,9 @@ "system_id": { "type" : "str", "required" : true }, "username": { "type" : "str", "required": true }, "password": { "type" : "str", "required": true }, - "optimized_metadata": { "type" : "bool", "required" : true }, - "optimized_dhcp": { "type" : "bool", "required" : true }, + "optimized_metadata": { "type" : "bool", "required": true }, + "optimized_dhcp": { "type" : "bool", "required": true }, + "vpc_pairs": { "type": "str", "required": false }, "opflex": { "type": "seq", "required": true, "sequence": [ { "type": "map", "required": true, "mapping": { "pod": { "type" : "str", "required" : false }, From 5fe91c472971133823875915143b55e65d11ca5e Mon Sep 17 00:00:00 2001 From: Itxaka Date: Tue, 27 Mar 2018 15:14:26 +0200 Subject: [PATCH 033/207] Revert "[4.0] rabbitmq: allow pacemaker service restart" After way too many tests, pacemaker restarting rabbitmq can lead to unwanted issues like rabbitmq dying while the restart is ongoing and pacemaker ignoring it due to maintenance mode, or simply the restart taking way too long and affecting the whole chef run and other services too much. 
--- chef/cookbooks/rabbitmq/recipes/default.rb | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index 868981885d..3e981c38dc 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -22,8 +22,6 @@ # we only do cluster if we do HA cluster_enabled = node[:rabbitmq][:cluster] && ha_enabled quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 -crm_resource_stop_cmd = cluster_enabled ? "force-demote" : "force-stop" -crm_resource_start_cmd = cluster_enabled ? "force-promote" : "force-start" cluster_partition_handling = if cluster_enabled if CrowbarPacemakerHelper.num_corosync_nodes(node) > 2 @@ -159,14 +157,7 @@ end service "rabbitmq-server" do - supports restart: true, - start: true, - stop: true, - status: true, - crm_resource_stop_cmd: crm_resource_stop_cmd, - crm_resource_start_cmd: crm_resource_start_cmd, - restart_crm_resource: true, - pacemaker_resource_name: "rabbitmq" + supports restart: true, start: true, stop: true, status: true action [:enable, :start] provider Chef::Provider::CrowbarPacemakerService if ha_enabled end From 1e6271c51344d1d7eb54f8ef46201324e3ede4ec Mon Sep 17 00:00:00 2001 From: Madhu Mohan Nelemane Date: Tue, 27 Mar 2018 12:05:48 +0200 Subject: [PATCH 034/207] [cisco_aci]neutron: Add support for L3Out External Connectivity This commit provides an option in neutron proposal to configure external connectivity for the Openstack Instances (VMs) when using Cisco ACI as ML2 backend. The commit adds a new section [apic_external_network] in ml2_conf_cisco_apic.ini. The name of the section and the EPG (End Point Group) provided will be reflected on the ACI as External Routed Network and External EPG on the ACI. These elements facilitate external connectivity through the L3Out on the ACI fabric. 
With this configuration, the neutron subnets get mapped to the subnets on the ACI fabric and therefore will be subject to the Contracts and Policies defined on the corresponding ACI tenant. (cherry picked from commit 3e09d2235f38d9821407916489a854c1220cff66) --- .../neutron/recipes/cisco_apic_support.rb | 2 +- .../default/ml2_conf_cisco_apic.ini.erb | 18 +++++++++++++----- ...117_add_apic_external_network_attributes.rb | 9 +++++++++ chef/data_bags/crowbar/template-neutron.json | 8 +++++++- chef/data_bags/crowbar/template-neutron.schema | 7 +++++++ 5 files changed, 37 insertions(+), 7 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/117_add_apic_external_network_attributes.rb diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb index f37b6d81d1..f08d9e50c4 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb @@ -32,7 +32,7 @@ apic_switches: aciswitches, ml2_mechanism_drivers: node[:neutron][:ml2_mechanism_drivers], policy_drivers: "implicit_policy,apic", - default_ip_pool: "192.168.0.0/16", + default_ip_pool: "192.168.0.0/16" ) notifies :restart, "service[#{node[:neutron][:platform][:service_name]}]" end diff --git a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb index f54c369586..30e24dc022 100644 --- a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb +++ b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb @@ -21,11 +21,11 @@ apic_vpc_pairs = <%= @vpc_pairs %> <% @apic_switches.keys.each do |ip| -%> [apic_switch:<%=ip%>] -<% if @apic_switches[ip].key?(:switch_ports) -%> -<% @apic_switches[ip][:switch_ports].each do |name, values| -%> -<%= name %> = <%= values[:switch_port] %> -<% end -%> -<% end -%> + <% if @apic_switches[ip].key?(:switch_ports) -%> + <% 
@apic_switches[ip][:switch_ports].each do |name, values| -%> +<%= name %> = <%= values[:switch_port] %> + <% end -%> + <% end -%> <% end -%> <% if @ml2_mechanism_drivers.include?("apic_gbp") -%> [group_policy] @@ -33,3 +33,11 @@ policy_drivers = <%= @policy_drivers %> [group_policy_implicit_policy] default_ip_pool = <%= @default_ip_pool %> <% end -%> + +[apic_external_network:<%=node[:neutron][:apic][:ext_net][:name]%>] +preexisting = <%= node[:neutron][:apic][:ext_net][:preexisting] %> +<% unless node[:neutron][:apic][:ext_net][:nat_enabled].nil? -%> +enable_nat = <%= node[:neutron][:apic][:ext_net][:nat_enabled] %> +<% end -%> +external_epg = <%= node[:neutron][:apic][:ext_net][:ext_epg] %> +host_pool_cidr = <%= node[:neutron][:apic][:ext_net][:host_pool_cidr] %> diff --git a/chef/data_bags/crowbar/migrate/neutron/117_add_apic_external_network_attributes.rb b/chef/data_bags/crowbar/migrate/neutron/117_add_apic_external_network_attributes.rb new file mode 100644 index 0000000000..bcc0fb0738 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/117_add_apic_external_network_attributes.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["apic"]["ext_net"] = ta["apic"]["ext_net"] unless a["apic"].key? "ext_net" + return a, d +end + +def downgrade(ta, td, a, d) + a["apic"].delete("ext_net") unless ta["apic"].key? 
"ext_net" + return a, d +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 5a0c974fe0..8dbad1b978 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -48,6 +48,12 @@ "password": "", "optimized_metadata": true, "optimized_dhcp": true, + "ext_net": { + "name": "l3out", + "preexisting": true, + "ext_epg": "l3out-epg", + "host_pool_cidr": "" + }, "opflex": [{ "pod": "", "nodes" : [], @@ -180,7 +186,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 116, + "schema-revision": 117, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ] diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index e9e29bed4c..a767fb9dd1 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -54,6 +54,13 @@ "optimized_metadata": { "type" : "bool", "required": true }, "optimized_dhcp": { "type" : "bool", "required": true }, "vpc_pairs": { "type": "str", "required": false }, + "ext_net": { "type" : "map", "required" : true, "mapping" : { + "name": { "type" : "str", "required" : true }, + "preexisting": { "type" : "bool", "required" : true }, + "nat_enabled": { "type" : "bool", "required" : false }, + "ext_epg": { "type" : "str", "required" : true }, + "host_pool_cidr": { "type" : "str", "required" : true } + }}, "opflex": { "type": "seq", "required": true, "sequence": [ { "type": "map", "required": true, "mapping": { "pod": { "type" : "str", "required" : false }, From 444a10328e02439146a1a915f7759ed88f747a3a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 6 Apr 2018 22:29:08 +0200 Subject: [PATCH 035/207] keystone: Increase WSGI request timeout to 10 minutes Some keystone requests can be incredibly timeconsuming in real world environmentes 
with large, slow LDAP servers. Increase timeout from the default of 60 seconds. (cherry picked from commit fda7393edff10e406e9e6040f5dde9eb5fcab62d) --- chef/cookbooks/keystone/recipes/server.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index dc8f5c6c99..1b624ac111 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -136,6 +136,8 @@ ssl_certfile node[:keystone][:ssl][:certfile] ssl_keyfile node[:keystone][:ssl][:keyfile] ssl_cacert node[:keystone][:ssl][:ca_certs] + # LDAP backend can be slow.. + timeout 600 end apache_site "keystone-public.conf" do @@ -157,6 +159,8 @@ ssl_certfile node[:keystone][:ssl][:certfile] ssl_keyfile node[:keystone][:ssl][:keyfile] ssl_cacert node[:keystone][:ssl][:ca_certs] + # LDAP backend can be slow.. + timeout 600 end apache_site "keystone-admin.conf" do From 782cae6f1510c0152b9bc6c9a3b583c9b2f31dc6 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Mon, 9 Apr 2018 10:50:45 +0200 Subject: [PATCH 036/207] mysql: Set monitor to 10s for galera-python-clustercheck This wasn't set and the default is 0, which means it wasn't monitored at all. 
(cherry picked from commit ee9b6ea7f4985d7f78655da0a89a9a113bb040fc) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 279f84cbaf..4e0a102229 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -284,8 +284,13 @@ transaction_objects = [] service_name = "galera-python-clustercheck" +clustercheck_op = {} +clustercheck_op["monitor"] = {} +clustercheck_op["monitor"]["interval"] = "10s" + pacemaker_primitive service_name do agent "systemd:#{service_name}" + op clustercheck_op action :update only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } end From 42b6911d56494703779c382f2e6e14e0bc4dc96c Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 9 Apr 2018 10:23:04 +0200 Subject: [PATCH 037/207] nova: disable progress timeout for live migration The progress timeout is unreliable and was already disabled by default in Pike; however, it is still enabled in Newton, hence this is going to be backported. Also be a little bit more generous to slow compute nodes and allow migration to go as slow as 1MByte/s before aborting it. This tremendously helps live migration to succeed on very busy compute nodes (where migrating away busy workload is the best thing to do). Also remove an outdated setting that is defaulting to 30 already.
(cherry picked from commit 4e1feb7cea89092f17e838f8df3b7993ed6bc539) --- chef/cookbooks/nova/templates/default/nova.conf.erb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index ae52fedd6a..126cebb613 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -203,6 +203,9 @@ live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MI <% else -%> block_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC, VIR_MIGRATE_LIVE <% end -%> +# Timeout migration if less than 1MB/s RAM can be copied +live_migration_progress_timeout=0 +live_migration_completion_timeout=1000 <% end -%> <% end -%> <%= "disk_prefix = xvd" if @libvirt_type.eql?('xen') %> From 526b8087c41205ce8e5ecd0fb81a26b0cbea9590 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Wed, 7 Mar 2018 13:41:27 +0100 Subject: [PATCH 038/207] Make sure pacemaker hash exists before accessing the contents (bsc#1083427) At least in the special case of adding new member to existing cluster, Openstack::HA.set_controller_role is called and node[:pacemaker] accessed. 
(cherry picked from commit c17a43cecc650ff89bb7dd3c82685af56a16cc86) --- crowbar_framework/lib/openstack/ha.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crowbar_framework/lib/openstack/ha.rb b/crowbar_framework/lib/openstack/ha.rb index f25e5ed1ce..29fdfd099b 100644 --- a/crowbar_framework/lib/openstack/ha.rb +++ b/crowbar_framework/lib/openstack/ha.rb @@ -21,6 +21,8 @@ def self.set_role(nodes, role) save_it = false node = NodeObject.find_node_by_name nodename + node[:pacemaker] ||= {} + node[:pacemaker][:attributes] ||= {} if node[:pacemaker][:attributes]["OpenStack-role"] != role node[:pacemaker][:attributes]["OpenStack-role"] = role From f06a568ee508f142ba9666af1f56fe40a349c065 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 15 Mar 2018 09:15:06 +0100 Subject: [PATCH 039/207] cinder: Do not access role attributes with symbol keys At the time when cinder's apply_role_pre_chef_call is called from pacemaker's apply_cluster_roles_to_new_nodes, role attributes are not accessible by symbolic keys. (cherry picked from commit 115bffb48f35d0b52724872552a5cf162a43d90e) --- crowbar_framework/app/models/cinder_service.rb | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/crowbar_framework/app/models/cinder_service.rb b/crowbar_framework/app/models/cinder_service.rb index cdaabbbebb..f5924fd4ec 100644 --- a/crowbar_framework/app/models/cinder_service.rb +++ b/crowbar_framework/app/models/cinder_service.rb @@ -242,14 +242,13 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) # Generate secrets uuid for libvirt rbd backend dirty = false proposal = Proposal.find_by(barclamp: "cinder", name: role.inst) - role.default_attributes[:cinder][:volumes].each_with_index do |volume, volid| - next unless volume[:backend_driver] == "rbd" - if volume[:rbd][:secret_uuid].empty? 
- secret_uuid = `uuidgen`.strip - volume[:rbd][:secret_uuid] = secret_uuid - proposal[:attributes][:cinder][:volumes][volid][:rbd][:secret_uuid] = secret_uuid - dirty = true - end + role.default_attributes["cinder"]["volumes"].each_with_index do |volume, volid| + next unless volume["backend_driver"] == "rbd" + next unless volume["rbd"]["secret_uuid"].empty? + secret_uuid = `uuidgen`.strip + volume["rbd"]["secret_uuid"] = secret_uuid + proposal["attributes"]["cinder"]["volumes"][volid]["rbd"]["secret_uuid"] = secret_uuid + dirty = true end if dirty # This makes the proposal in the UI looked as 'applied', even if you make changes to it From 0f62cbf8b949489c7b8425faca3be1ceb66903f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 15 Mar 2018 09:17:55 +0100 Subject: [PATCH 040/207] neutron: Check network presence before accessing it The content of node[:crowbar_wall][:network][:nets] might not be available in the compile phase. (cherry picked from commit b2bf70c9a0fb8476d096c2953385e5b38511079e) --- chef/cookbooks/neutron/recipes/common_agent.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/chef/cookbooks/neutron/recipes/common_agent.rb b/chef/cookbooks/neutron/recipes/common_agent.rb index d0d4183636..020af2f733 100644 --- a/chef/cookbooks/neutron/recipes/common_agent.rb +++ b/chef/cookbooks/neutron/recipes/common_agent.rb @@ -184,6 +184,7 @@ external_networks.concat(neutron[:neutron][:additional_external_networks]) ext_physnet_map = NeutronHelper.get_neutron_physnets(node, external_networks) external_networks.each do |net| + next if node[:crowbar_wall][:network][:nets][net].nil? 
ext_iface = node[:crowbar_wall][:network][:nets][net].last # we can't do "floating:br-public, physnet1:br-public"; this also means # that all relevant nodes here must have a similar bridge_mappings From 8342a7d5577bcc4728a410dfaeaa963cdef0dffc Mon Sep 17 00:00:00 2001 From: Ivan Lausuch Date: Mon, 9 Apr 2018 11:14:17 +0200 Subject: [PATCH 041/207] rabbitmq: block client port on startup This script blocks the connection to the rabbitmq cluster in case the number of nodes decays below half of the total. In this case the remaining rabbit nodes won't accept new connections unless the quorum is reached. It takes advantage of the pacemaker notifications that notify when a rabbitmq node has failed or has been restored. All nodes' rabbitmq ports will be blocked if the total number of alive nodes is below half of the nodes of the cluster, or unblocked if it is over this value. The script is divided into two parts: On the one hand, the alert handler manages the alerts, discarding non-interesting alerts, and launches the blocker script on this node and other cluster nodes (via SSH). On the other hand, the blocker script checks the number of running nodes and blocks or unblocks the rabbitmq client port.
(cherry picked from commit 60fb8cea03569884d5cdbc8b3af465896211d631) --- .../rabbitmq/files/default/rabbitmq.tmpfiles | 1 + chef/cookbooks/rabbitmq/recipes/ha_cluster.rb | 68 +++++++++++++++++++ .../templates/default/hacluster_sudoers.erb | 4 ++ .../default/rabbitmq-alert-handler.erb | 19 ++++++ .../default/rabbitmq-port-blocker.erb | 31 +++++++++ 5 files changed, 123 insertions(+) create mode 100644 chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles create mode 100644 chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb create mode 100644 chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb create mode 100644 chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb diff --git a/chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles b/chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles new file mode 100644 index 0000000000..fffdfe7945 --- /dev/null +++ b/chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles @@ -0,0 +1 @@ +d /var/lock/rabbit 0755 root root - diff --git a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb index 49da5d12bb..ff60faca10 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb @@ -120,3 +120,71 @@ end end # block end # ruby_block + +if CrowbarPacemakerHelper.cluster_nodes(node).size > 2 + # create the directory to lock rabbitmq-port-blocker + cookbook_file "/etc/tmpfiles.d/rabbitmq.conf" do + owner "root" + group "root" + mode "0644" + action :create + source "rabbitmq.tmpfiles" + end + + bash "create tmpfiles.d files for rabbitmq" do + code "systemd-tmpfiles --create /etc/tmpfiles.d/rabbitmq.conf" + action :nothing + subscribes :run, resources("cookbook_file[/etc/tmpfiles.d/rabbitmq.conf]"), :immediately + end + + # create the scripts to block the client port on startup + template "/usr/bin/rabbitmq-alert-handler.sh" do + source "rabbitmq-alert-handler.erb" + owner "root" + group "root" + 
mode "0755" + variables(node: node, nodes: CrowbarPacemakerHelper.cluster_nodes(node)) + end + + template "/usr/bin/rabbitmq-port-blocker.sh" do + source "rabbitmq-port-blocker.erb" + owner "root" + group "root" + mode "0755" + variables(total_nodes: CrowbarPacemakerHelper.cluster_nodes(node).size) + end + + template "/etc/sudoers.d/rabbitmq-port-blocker" do + source "hacluster_sudoers.erb" + owner "root" + group "root" + mode "0440" + end + + # create the alert + pacemaker_alert "rabbitmq-alert-handler" do + handler "/usr/bin/rabbitmq-alert-handler.sh" + action :create + end +else + pacemaker_alert "rabbitmq-alert-handler" do + handler "/usr/bin/rabbitmq-alert-handler.sh" + action :delete + end + + cookbook_file "/etc/tmpfiles.d/rabbitmq.conf" do + action :delete + end + + file "/usr/bin/rabbitmq-alert-handler.sh" do + action :delete + end + + file "/usr/bin/rabbitmq-port-blocker.sh" do + action :delete + end + + file "/etc/sudoers.d/rabbitmq-port-blocker" do + action :delete + end +end diff --git a/chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb b/chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb new file mode 100644 index 0000000000..64c607b9bd --- /dev/null +++ b/chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb @@ -0,0 +1,4 @@ +Defaults:hacluster !requiretty + +hacluster ALL = (root) NOPASSWD: /usr/bin/ssh +hacluster ALL = (root) NOPASSWD: /usr/bin/flock diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb new file mode 100644 index 0000000000..ab9d0de355 --- /dev/null +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb @@ -0,0 +1,19 @@ +#!/bin/sh +if [ -z "$CRM_alert_version" ]; then + echo "$0 must be run by Pacemaker version 1.1.15 or later" + exit 0 +fi + +# exit if isn't a rabbitmq alert or is not a monitor task +[ "$CRM_alert_kind" = "resource" -a "$CRM_alert_rsc" = "rabbitmq" -a 
"$CRM_alert_task" = "monitor" ] || exit 0 + +# launch the blocker in exclusive mode +nohup sudo flock /var/lock/rabbit /usr/bin/rabbitmq-port-blocker.sh & + +# for each node in the cluster +<% @nodes.each do |cluster_node| -%> + # unless this is the current node launch remotely the blocker in exclusive mode + <% unless cluster_node.name==@node.name -%> + nohup /usr/bin/timeout 15 sudo /usr/bin/ssh -o TCPKeepAlive=no -o ServerAliveInterval=15 root@<%= cluster_node.name %> nohup flock /var/lock/rabbit /usr/bin/rabbitmq-port-blocker.sh & + <% end -%> +<% end -%> diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb new file mode 100644 index 0000000000..f4a0aa084b --- /dev/null +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb @@ -0,0 +1,31 @@ +#!/bin/sh + +# calcules the blocking level applying the formula +total_nodes=<%= @total_nodes %> +blocking_level=$(expr $total_nodes / 2) +comment_text="rabbitmq port blocker (no quorum)" + +# get the number of running nodes of rabbitmq in the current cluster +function running_nodes() +{ + rabbitmqctl cluster_status 2>/dev/null | tr -d "\n" | sed -e 's/running_nodes,/\nrunning_nodes/g'| grep running_nodes | cut -d "[" -f2 | cut -d "]" -f1 | tr "," "\n" | wc -l +} + +# check if exists the blocking rule for rabbitmq clients +function check_rule() +{ + iptables -L -n | grep -F "tcp dpt:5672 /* $comment_text */" | grep DROP | wc -l +} + +# if the running nodes is les that the blocking level, then... +if [ $(running_nodes) -le $blocking_level ]; then + # if rule not exists the rule will be added to block the clients port + if [ $(check_rule) -eq 0 ]; then + iptables -A INPUT -p tcp --destination-port 5672 -m comment --comment "$comment_text" -j DROP + fi +else + # finally if the rule exists it will be deleted. 
If there are more than one, will remove all + if [[ $(check_rule) -gt 0 ]]; then + iptables -D INPUT -p tcp --destination-port 5672 -m comment --comment "$comment_text" -j DROP + fi +fi From fb26c4161926d4fbf7c077661056ac3e9f644ac8 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 11 Apr 2018 16:41:49 +0200 Subject: [PATCH 042/207] mysql: Create clustercheck config on nodes early enough (bsc#1059530) The config files were created very late, just before we create and start the galera-python-clustercheck pacemaker resource. As a result, only the founder nodes had the config files when the resource is started, which resulted in the other nodes not having galera-python-clustercheck listening on the right port. Part of https://bugzilla.suse.com/show_bug.cgi?id=1059530 (cherry picked from commit dc6c7e35a4e1e1cf5ab5f8e9ffa0d85f94c0c87c) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 4e0a102229..0caad83a18 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -141,6 +141,27 @@ ) end +# Configuration files for galera-python-clustercheck +template "/etc/galera-python-clustercheck/galera-python-clustercheck.conf" do + source "galera-python-clustercheck.conf.erb" + owner "galera-python-clustercheck" + group "mysql" + mode "0640" + variables( + node_address: node_address + ) +end + +template "/etc/galera-python-clustercheck/my.cnf" do + source "galera-python-clustercheck-my.cnf.erb" + owner "galera-python-clustercheck" + group "mysql" + mode "0640" + variables( + node_address: node_address + ) +end + # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages and configurations installed before we create the # pacemaker resources @@ -258,27 +279,6 @@ revision node[:database]["crowbar-revision"] 
end - # Configuration files for galera-python-clustercheck - template "/etc/galera-python-clustercheck/galera-python-clustercheck.conf" do - source "galera-python-clustercheck.conf.erb" - owner "galera-python-clustercheck" - group "mysql" - mode "0640" - variables( - node_address: node_address - ) - end - - template "/etc/galera-python-clustercheck/my.cnf" do - source "galera-python-clustercheck-my.cnf.erb" - owner "galera-python-clustercheck" - group "mysql" - mode "0640" - variables( - node_address: node_address - ) - end - # Start galera-clustercheck which serves the cluster state as http return codes # on port 5555 transaction_objects = [] From f3d3db3594de92072844c79fda2dcd5bba68e8f4 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 11 Apr 2018 17:20:01 +0200 Subject: [PATCH 043/207] mysql: Ensure that the galera resource has a monitor op (bsc#1059530) The monitor op was dropped by accident with 2c5d20d15f8e96ad206eddb04d726433b0f2615e: when we moved the definition of the some of the ops in the proposal, what happened is that the all hash defining the ops in default attributes has been overridden by the hash from the proposal, hence the monitor op getting lost. Now we merge the two to get the desired results. This makes sure that if mariadb is stopped, pacemaker will restart it. 
https://bugzilla.suse.com/show_bug.cgi?id=1059530 (cherry picked from commit 7d381fe6bfc7514624090d88a44908db77f5727c) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 0caad83a18..1a79ea94b0 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -176,6 +176,12 @@ revision node[:database]["crowbar-revision"] end +# some of the op attributes are now in the proposal, so we need to merge the +# default attributes and the proposal attributes (that actually completely +# override the default attributes, even the ones not defined in the proposal) +primitive_op = node.default_attrs[:mysql][:ha][:op].to_hash +primitive_op.merge!(node[:database][:mysql][:ha][:op].to_hash) + pacemaker_primitive service_name do agent resource_agent params({ @@ -186,7 +192,7 @@ "datadir" => node[:database][:mysql][:datadir], "log" => "/var/log/mysql/mysql_error.log" }) - op node[:database][:mysql][:ha][:op] + op primitive_op action :update only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } end From 64c2eb4039c1b256794d1a5f32484fc3f485f23d Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Fri, 16 Feb 2018 14:26:41 +0100 Subject: [PATCH 044/207] mariadb: Add prefix to configs All default MariaDB configs have now a prefix to allow overwriting of the options. Let's follow the same to not conflict with the defaults. 
(cherry picked from commit 4cc520ab12bc5d9b0a488652742a2afb074bf879) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 11 ++++++--- chef/cookbooks/mysql/recipes/server.rb | 28 +++++++++++++++++++---- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 1a79ea94b0..fce618833e 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -44,8 +44,8 @@ # To bootstrap for the first time, start galera on one node # to set up the seed sst and monitoring users. - template "temporary bootstrap /etc/my.cnf.d/galera.cnf" do - path "/etc/my.cnf.d/galera.cnf" + template "temporary bootstrap /etc/my.cnf.d/75-galera.cnf" do + path "/etc/my.cnf.d/75-galera.cnf" source "galera.cnf.erb" owner "root" group "mysql" @@ -126,7 +126,7 @@ cluster_addresses = "gcomm://" + nodes_names.join(",") -template "/etc/my.cnf.d/galera.cnf" do +template "/etc/my.cnf.d/75-galera.cnf" do source "galera.cnf.erb" owner "root" group "mysql" @@ -162,6 +162,11 @@ ) end +file "/etc/my.cnf.d/galera.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages and configurations installed before we create the # pacemaker resources diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index 1c43f3603f..c107ff2332 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -95,7 +95,7 @@ end end -template "/etc/my.cnf.d/openstack.cnf" do +template "/etc/my.cnf.d/72-openstack.cnf" do source "my.cnf.erb" owner "root" group "mysql" @@ -103,7 +103,12 @@ notifies :restart, "service[mysql]", :immediately end -template "/etc/my.cnf.d/ssl.cnf" do +file "/etc/my.cnf.d/openstack.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + +template "/etc/my.cnf.d/73-ssl.cnf" do 
source "ssl.cnf.erb" owner "root" group "mysql" @@ -111,7 +116,12 @@ notifies :restart, "service[mysql]", :immediately end -template "/etc/my.cnf.d/logging.cnf" do +file "/etc/my.cnf.d/ssl.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + +template "/etc/my.cnf.d/71-logging.cnf" do source "logging.cnf.erb" owner "root" group "mysql" @@ -122,7 +132,12 @@ notifies :restart, "service[mysql]", :immediately end -template "/etc/my.cnf.d/tuning.cnf" do +file "/etc/my.cnf.d/logging.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + +template "/etc/my.cnf.d/74-tuning.cnf" do source "tuning.cnf.erb" owner "root" group "mysql" @@ -138,6 +153,11 @@ notifies :restart, "service[mysql]", :immediately end +file "/etc/my.cnf.d/tuning.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + unless Chef::Config[:solo] ruby_block "save node data" do block do From 9affee67efa75b2491426c7c9ac1e4252beeefc0 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 27 Apr 2018 11:27:47 +0200 Subject: [PATCH 045/207] keystone: Add retry loop to _get_token (bsc#1087466) Sometimes _get_token fails with 502 HTTP code because Apache is being restarted while chef-client tries to access it. Small retry loop will give it a bit more chance to complete the operation. 
(backported from commit 9ee935f86f5e085eba10cc5ed802f8515d361b00) --- chef/cookbooks/keystone/providers/register.rb | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/chef/cookbooks/keystone/providers/register.rb b/chef/cookbooks/keystone/providers/register.rb index 4178486c6f..e5ff13317c 100644 --- a/chef/cookbooks/keystone/providers/register.rb +++ b/chef/cookbooks/keystone/providers/register.rb @@ -547,15 +547,26 @@ def _get_token(http, user_name, password, tenant = "") path = "/v2.0/tokens" headers = _build_headers body = _build_auth(user_name, password, tenant) - resp = http.send_request("POST", path, JSON.generate(body), headers) - if resp.is_a?(Net::HTTPCreated) || resp.is_a?(Net::HTTPOK) - data = JSON.parse(resp.read_body) - data["access"]["token"]["id"] - else + + resp = nil + count = 0 + error = true + while error && count < 10 + count += 1 + Chef::Log.debug "Trying to get keystone token for user '#{user_name}' (try #{count})" + resp = http.send_request("POST", path, JSON.generate(body), headers) + error = !(resp.is_a?(Net::HTTPCreated) || resp.is_a?(Net::HTTPOK)) + sleep 5 if error + end + + if error Chef::Log.info "Failed to get token for User '#{user_name}' Tenant '#{tenant}'" Chef::Log.info "Response Code: #{resp.code}" Chef::Log.info "Response Message: #{resp.message}" nil + else + data = JSON.parse(resp.read_body) + data["access"]["token"]["id"] end end From b8308c74fe8ddcae8c9f3c15266456de268db5af Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 5 Apr 2018 10:52:01 +0200 Subject: [PATCH 046/207] nova: Configure a rng device for guest VM entropy (bsc#985882) This allows the administrator to select flavors or images to pass through entropy devices to the guest to give them a chance of securely generating crypto nounces. 
Note the default seems to be off, it needs to be enabled in the nova flavor via an "hw_rng:allowed=True" extra specs (cherry picked from commit 31e29a55320cea7b44a2ff211689e6887b5e77d8) --- chef/cookbooks/nova/recipes/config.rb | 10 ++++++++++ chef/cookbooks/nova/templates/default/nova.conf.erb | 3 +++ 2 files changed, 13 insertions(+) diff --git a/chef/cookbooks/nova/recipes/config.rb b/chef/cookbooks/nova/recipes/config.rb index 5173021913..1596368c78 100644 --- a/chef/cookbooks/nova/recipes/config.rb +++ b/chef/cookbooks/nova/recipes/config.rb @@ -301,6 +301,7 @@ cpu_mode = "" cpu_model = "" +rng_device = nil if node.roles.include? "nova-compute-kvm" compute_flags = node[:nova][:compute]["kvm-#{node[:kernel][:machine]}"] @@ -313,6 +314,14 @@ cpu_mode = compute_flags["cpu_mode"] end +if File.exist?("/sys/devices/virtual/misc/hw_random/rng_current") && + !File.read("/sys/devices/virtual/misc/hw_random/rng_current").include?("none") + # Unfortunately that file isn't readable by non-root so we can not set it + # rng_device = "/dev/hwrng" +else + rng_device = "/dev/random" +end + # lock path prevents race conditions for cinder-volume and nova-compute on same # node. Keep code in sync between cinder and nova recipes. 
For reference check # http://docs.openstack.org/releasenotes/nova/newton.html @@ -350,6 +359,7 @@ cpu_mode: cpu_mode, cpu_model: cpu_model, bind_host: bind_host, + rng_device: rng_device, bind_port_api: bind_port_api, bind_port_api_ec2: bind_port_api_ec2, bind_port_metadata: bind_port_metadata, diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index 126cebb613..c0a6a406f5 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -214,6 +214,9 @@ live_migration_completion_timeout=1000 <% if @libvirt_type.eql?('kvm') %>use_virtio_for_bridges = true<% end %> <%= "volume_use_multipath = true" if @use_multipath %> <%= "iser_use_multipath = true" if @use_multipath %> +<% if @rng_device %> +rng_dev_path = <%= @rng_device %> +<% end %> [neutron] service_metadata_proxy = true From 1e2d647daa70f4db308cf4e5c0a73f44b7628e2f Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 29 Nov 2017 22:21:57 +0100 Subject: [PATCH 047/207] openstack: turn off automatic wsgi script reloading Installation order isn't properly defined, so we don't want apache2 to restart in the middle of while we're still updating . 
Chef will trigger a restart when we're ready (cherry picked from commit 7c4e11116fd0ade760bbd545887f1bec78c00f4a) --- .../crowbar-openstack/templates/default/vhost-wsgi.conf.erb | 1 + 1 file changed, 1 insertion(+) diff --git a/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb b/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb index 563f2b7ee1..63d0638014 100644 --- a/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb +++ b/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb @@ -5,6 +5,7 @@ Listen <%= @bind_host %>:<%= @bind_port %> WSGIProcessGroup <%= @process_group %> WSGIScriptAlias / <%= @script_alias %> WSGIApplicationGroup %{GLOBAL} + WSGIScriptReloading Off <% if @pass_authorization %> WSGIPassAuthorization On <% end %> From e523fba8a0ee699aae799b84d0ba0019e79b9496 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 29 Nov 2017 14:40:46 +0100 Subject: [PATCH 048/207] keystone: lower threads to 1 by default This seems to be the upstream default, and more than one thread seems to act really weirdly (slow) when using the LDAP backend (causing random multi second hangs) (cherry picked from commit 5ff2560432c632df3043b4aeb2c9cac5d0f65a4b) --- chef/data_bags/crowbar/template-keystone.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/data_bags/crowbar/template-keystone.json b/chef/data_bags/crowbar/template-keystone.json index 9f1a44f243..f6c852ccfd 100644 --- a/chef/data_bags/crowbar/template-keystone.json +++ b/chef/data_bags/crowbar/template-keystone.json @@ -36,7 +36,7 @@ "version": "3", "region": "RegionOne", "processes" : 8, - "threads" : 8 + "threads" : 1 }, "admin": { "tenant": "admin", From 433188bd0a1910cd16f9518df2d474aabe6d01d4 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Thu, 12 Apr 2018 15:02:24 +0200 Subject: [PATCH 049/207] neutron: enable trunk service plugin This enables the creation of trunk ports in neutron e.g. 
to be able to use tagged vlans inside VM instances. (cherry picked from commit 0d63f7ebb2790f8dc02bb307e5fdb42db25c69f4) --- chef/cookbooks/neutron/recipes/common_config.rb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/chef/cookbooks/neutron/recipes/common_config.rb b/chef/cookbooks/neutron/recipes/common_config.rb index 40194a002a..81ccb4b387 100644 --- a/chef/cookbooks/neutron/recipes/common_config.rb +++ b/chef/cookbooks/neutron/recipes/common_config.rb @@ -81,6 +81,12 @@ if neutron[:neutron][:networking_plugin] == "ml2" service_plugins.unshift("neutron.services.l3_router.l3_router_plugin.L3RouterPlugin") + + if neutron[:neutron][:ml2_mechanism_drivers].include?("linuxbridge") || + neutron[:neutron][:ml2_mechanism_drivers].include?("openvswitch") + service_plugins.push("neutron.services.trunk.plugin.TrunkPlugin") + end + if neutron[:neutron][:ml2_mechanism_drivers].include?("cisco_apic_ml2") service_plugins = ["cisco_apic_l3"] elsif neutron[:neutron][:ml2_mechanism_drivers].include?("apic_gbp") From 15f8eaf98b1616393b5017e915707c5ceae74b70 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 10 Apr 2018 10:14:41 +0200 Subject: [PATCH 050/207] nova: avoid scheduling conflicts on HA CP When all schedulers are picking the "best" host, in a 3 node HA case they will all pick the same host, which is suboptimal. Try to scale out a bit and increase retries in case of conflicts. Even worse, a node that had still capacity when the scheduling started might be out of memory at the time the VM is launched, causing failed starts of VMs. 
(cherry picked from commit 89071b47aef48f90146713867fca1047b5766fe9) --- chef/cookbooks/nova/templates/default/nova.conf.erb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index c0a6a406f5..ed38bb616e 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -48,6 +48,9 @@ scheduler_default_filters = <%= @default_filters %> scheduler_available_filters = nova.scheduler.filters.all_filters scheduler_default_filters = <%= @default_filters %> <% end %> +# Avoid scheduler conflicts when using HA +scheduler_max_attempts = 9 +scheduler_host_subset_size = 4 <% unless @track_instance_changes %>scheduler_tracks_instance_changes = false<% end %> <% if @libvirt_type.eql?('vmware') -%> compute_driver = vmwareapi.VMwareVCDriver From fad58e6f86054f938fbee7199297323048b54d47 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 13 Apr 2018 18:43:01 +0200 Subject: [PATCH 051/207] nova: make disk_cachemodes configurable Also default to writeback for network, which is improving the performance quite a bit for ceph backed volumes and is safe to enable (and recommended by almost every openstack install guide) (cherry picked from commit d5f5567dbbd9d6ca4c05d9966d4c8d642a6b9ead) --- chef/cookbooks/nova/templates/default/nova.conf.erb | 1 + .../crowbar/migrate/nova/124_add_cachemodes.rb | 13 +++++++++++++ chef/data_bags/crowbar/template-nova.json | 5 +++-- chef/data_bags/crowbar/template-nova.schema | 3 ++- 4 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/nova/124_add_cachemodes.rb diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index ed38bb616e..421ce7f64d 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -220,6 
+220,7 @@ live_migration_completion_timeout=1000 <% if @rng_device %> rng_dev_path = <%= @rng_device %> <% end %> +disk_cachemodes = <%= node[:nova][:kvm][:disk_cachemodes] %> [neutron] service_metadata_proxy = true diff --git a/chef/data_bags/crowbar/migrate/nova/124_add_cachemodes.rb b/chef/data_bags/crowbar/migrate/nova/124_add_cachemodes.rb new file mode 100644 index 0000000000..e15adfa91b --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/124_add_cachemodes.rb @@ -0,0 +1,13 @@ +def upgrade(ta, td, a, d) + unless a["kvm"].key? "disk_cachemodes" + a["kvm"]["disk_cachemodes"] = ta["kvm"]["disk_cachemodes"] + end + return a, d +end + +def downgrade(ta, td, a, d) + unless ta["kvm"].key? "disk_cachemodes" + a["kvm"].delete("disk_cachemodes") + end + return a, d +end diff --git a/chef/data_bags/crowbar/template-nova.json b/chef/data_bags/crowbar/template-nova.json index 55adf6e00a..2fc4301a2c 100644 --- a/chef/data_bags/crowbar/template-nova.json +++ b/chef/data_bags/crowbar/template-nova.json @@ -75,7 +75,8 @@ "secret_uuid": "" }, "kvm": { - "ksm_enabled": false + "ksm_enabled": false, + "disk_cachemodes": "network=writeback" }, "vcenter": { "host": "", @@ -179,7 +180,7 @@ "nova": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 123, + "schema-revision": 124, "element_states": { "nova-controller": [ "readying", "ready", "applying" ], "nova-compute-ironic": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index c0ad6c37cc..fceb697903 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -133,7 +133,8 @@ }, "kvm": { "type": "map", "required": true, "mapping": { - "ksm_enabled": { "type": "bool", "required": true } + "ksm_enabled": { "type": "bool", "required": true }, + "disk_cachemodes": { "type": "str", "required": true } } }, "vcenter": { From d3e28ab1c75f6a6470cc91cbd3f8cef565333795 Mon Sep 
17 00:00:00 2001 From: Dirk Mueller Date: Tue, 10 Oct 2017 23:04:15 +0200 Subject: [PATCH 052/207] tempest: remove world-readable permission from tempest.conf This is probably not a good idea, it contains passwords. (cherry picked from commit bfbcc2d0c43dd99698d1caa664acef8e77e465e2) --- chef/cookbooks/tempest/recipes/config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/tempest/recipes/config.rb b/chef/cookbooks/tempest/recipes/config.rb index b11ddb8773..ef08589380 100644 --- a/chef/cookbooks/tempest/recipes/config.rb +++ b/chef/cookbooks/tempest/recipes/config.rb @@ -485,7 +485,7 @@ template "/etc/tempest/tempest.conf" do source "tempest.conf.erb" - mode 0644 + mode 0o640 variables( lazy { { From 2aa74a91ff449719884f4955a56678f709bd686a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Wed, 2 May 2018 10:31:04 +0200 Subject: [PATCH 053/207] rabbitmq: create empty users list which is expected by some recipes (cherry picked from commit 2787b72e40c5b6a2f48fc2af94075711b9dda507) --- chef/cookbooks/rabbitmq/attributes/default.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/chef/cookbooks/rabbitmq/attributes/default.rb b/chef/cookbooks/rabbitmq/attributes/default.rb index 3c4d0f4344..f819d98235 100644 --- a/chef/cookbooks/rabbitmq/attributes/default.rb +++ b/chef/cookbooks/rabbitmq/attributes/default.rb @@ -65,3 +65,6 @@ default[:rabbitmq][:ha][:clustered_rmq_features] = true end end + +# create empty users list as it is expected by some recipes +default[:rabbitmq][:users] = [] From f04cbdccc42ad44f7f959dff99eba827b5873948 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Mon, 7 May 2018 17:08:13 +0200 Subject: [PATCH 054/207] Revert "[4.0] rabbitmq: block client port on startup" --- .../rabbitmq/files/default/rabbitmq.tmpfiles | 1 - chef/cookbooks/rabbitmq/recipes/ha_cluster.rb | 68 ------------------- .../templates/default/hacluster_sudoers.erb | 4 -- .../default/rabbitmq-alert-handler.erb | 19 ------ 
.../default/rabbitmq-port-blocker.erb | 31 --------- 5 files changed, 123 deletions(-) delete mode 100644 chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles delete mode 100644 chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb delete mode 100644 chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb delete mode 100644 chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb diff --git a/chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles b/chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles deleted file mode 100644 index fffdfe7945..0000000000 --- a/chef/cookbooks/rabbitmq/files/default/rabbitmq.tmpfiles +++ /dev/null @@ -1 +0,0 @@ -d /var/lock/rabbit 0755 root root - diff --git a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb index ff60faca10..49da5d12bb 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb @@ -120,71 +120,3 @@ end end # block end # ruby_block - -if CrowbarPacemakerHelper.cluster_nodes(node).size > 2 - # create the directory to lock rabbitmq-port-blocker - cookbook_file "/etc/tmpfiles.d/rabbitmq.conf" do - owner "root" - group "root" - mode "0644" - action :create - source "rabbitmq.tmpfiles" - end - - bash "create tmpfiles.d files for rabbitmq" do - code "systemd-tmpfiles --create /etc/tmpfiles.d/rabbitmq.conf" - action :nothing - subscribes :run, resources("cookbook_file[/etc/tmpfiles.d/rabbitmq.conf]"), :immediately - end - - # create the scripts to block the client port on startup - template "/usr/bin/rabbitmq-alert-handler.sh" do - source "rabbitmq-alert-handler.erb" - owner "root" - group "root" - mode "0755" - variables(node: node, nodes: CrowbarPacemakerHelper.cluster_nodes(node)) - end - - template "/usr/bin/rabbitmq-port-blocker.sh" do - source "rabbitmq-port-blocker.erb" - owner "root" - group "root" - mode "0755" - variables(total_nodes: 
CrowbarPacemakerHelper.cluster_nodes(node).size) - end - - template "/etc/sudoers.d/rabbitmq-port-blocker" do - source "hacluster_sudoers.erb" - owner "root" - group "root" - mode "0440" - end - - # create the alert - pacemaker_alert "rabbitmq-alert-handler" do - handler "/usr/bin/rabbitmq-alert-handler.sh" - action :create - end -else - pacemaker_alert "rabbitmq-alert-handler" do - handler "/usr/bin/rabbitmq-alert-handler.sh" - action :delete - end - - cookbook_file "/etc/tmpfiles.d/rabbitmq.conf" do - action :delete - end - - file "/usr/bin/rabbitmq-alert-handler.sh" do - action :delete - end - - file "/usr/bin/rabbitmq-port-blocker.sh" do - action :delete - end - - file "/etc/sudoers.d/rabbitmq-port-blocker" do - action :delete - end -end diff --git a/chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb b/chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb deleted file mode 100644 index 64c607b9bd..0000000000 --- a/chef/cookbooks/rabbitmq/templates/default/hacluster_sudoers.erb +++ /dev/null @@ -1,4 +0,0 @@ -Defaults:hacluster !requiretty - -hacluster ALL = (root) NOPASSWD: /usr/bin/ssh -hacluster ALL = (root) NOPASSWD: /usr/bin/flock diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb deleted file mode 100644 index ab9d0de355..0000000000 --- a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh -if [ -z "$CRM_alert_version" ]; then - echo "$0 must be run by Pacemaker version 1.1.15 or later" - exit 0 -fi - -# exit if isn't a rabbitmq alert or is not a monitor task -[ "$CRM_alert_kind" = "resource" -a "$CRM_alert_rsc" = "rabbitmq" -a "$CRM_alert_task" = "monitor" ] || exit 0 - -# launch the blocker in exclusive mode -nohup sudo flock /var/lock/rabbit /usr/bin/rabbitmq-port-blocker.sh & - -# for each node in the cluster -<% @nodes.each do |cluster_node| -%> - # unless this is the 
current node launch remotely the blocker in exclusive mode - <% unless cluster_node.name==@node.name -%> - nohup /usr/bin/timeout 15 sudo /usr/bin/ssh -o TCPKeepAlive=no -o ServerAliveInterval=15 root@<%= cluster_node.name %> nohup flock /var/lock/rabbit /usr/bin/rabbitmq-port-blocker.sh & - <% end -%> -<% end -%> diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb deleted file mode 100644 index f4a0aa084b..0000000000 --- a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh - -# calcules the blocking level applying the formula -total_nodes=<%= @total_nodes %> -blocking_level=$(expr $total_nodes / 2) -comment_text="rabbitmq port blocker (no quorum)" - -# get the number of running nodes of rabbitmq in the current cluster -function running_nodes() -{ - rabbitmqctl cluster_status 2>/dev/null | tr -d "\n" | sed -e 's/running_nodes,/\nrunning_nodes/g'| grep running_nodes | cut -d "[" -f2 | cut -d "]" -f1 | tr "," "\n" | wc -l -} - -# check if exists the blocking rule for rabbitmq clients -function check_rule() -{ - iptables -L -n | grep -F "tcp dpt:5672 /* $comment_text */" | grep DROP | wc -l -} - -# if the running nodes is les that the blocking level, then... -if [ $(running_nodes) -le $blocking_level ]; then - # if rule not exists the rule will be added to block the clients port - if [ $(check_rule) -eq 0 ]; then - iptables -A INPUT -p tcp --destination-port 5672 -m comment --comment "$comment_text" -j DROP - fi -else - # finally if the rule exists it will be deleted. 
If there are more than one, will remove all - if [[ $(check_rule) -gt 0 ]]; then - iptables -D INPUT -p tcp --destination-port 5672 -m comment --comment "$comment_text" -j DROP - fi -fi From c0d8a1c2094d853e5a997efd0666952affeaaedb Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 4 May 2018 22:19:35 +0200 Subject: [PATCH 055/207] nova: fix variable naming typo (cherry picked from commit fd3505c6d97aa5766944bdc24f074b741c66e708) --- chef/cookbooks/nova/recipes/compute.rb | 4 ++-- .../templates/default/crowbar-compute-set-sys-options.erb | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/chef/cookbooks/nova/recipes/compute.rb b/chef/cookbooks/nova/recipes/compute.rb index b3483c0a16..6b913deedf 100644 --- a/chef/cookbooks/nova/recipes/compute.rb +++ b/chef/cookbooks/nova/recipes/compute.rb @@ -341,8 +341,8 @@ source "crowbar-compute-set-sys-options.erb" variables({ ksm_enabled: node[:nova][:kvm][:ksm_enabled] ? 1 : 0, - tranparent_hugepage_enabled: node[:nova][:kvm][:ksm_enabled] ? "never" : "always", - tranparent_hugepage_defrag: node[:nova][:kvm][:ksm_enabled] ? "never" : "always" + transparent_hugepage_enabled: node[:nova][:kvm][:ksm_enabled] ? "never" : "always", + transparent_hugepage_defrag: node[:nova][:kvm][:ksm_enabled] ? 
"never" : "always" }) mode "0755" end diff --git a/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb b/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb index e6c4785dc1..be0dd27c7d 100644 --- a/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb +++ b/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb @@ -8,11 +8,11 @@ if test -w /sys/kernel/mm/ksm/run; then fi if test -w /sys/kernel/mm/transparent_hugepage/enabled; then - echo <%= @tranparent_hugepage_enabled %> > /sys/kernel/mm/transparent_hugepage/enabled + echo <%= @transparent_hugepage_enabled %> > /sys/kernel/mm/transparent_hugepage/enabled fi if test -w /sys/kernel/mm/transparent_hugepage/defrag; then - echo <%= @tranparent_hugepage_defrag %> > /sys/kernel/mm/transparent_hugepage/defrag + echo <%= @transparent_hugepage_defrag %> > /sys/kernel/mm/transparent_hugepage/defrag fi find /sys/block -type l -name 'sd*' -exec sh -c 'echo deadline > {}/queue/scheduler' \; From 675b189efb2f3571ef131405c8fdef5c03ca2f5b Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 4 May 2018 22:20:16 +0200 Subject: [PATCH 056/207] nova: default thp defragt to madvise The "always" setting has dubious negative performance impact and the SLES default is "madvise", so we should be using that instead. See https://www.kernel.org/doc/Documentation/vm/transhuge.txt for details. (cherry picked from commit f5bb2c5ae19cb5d844c34dcfba442fc96401b0f6) --- chef/cookbooks/nova/recipes/compute.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/nova/recipes/compute.rb b/chef/cookbooks/nova/recipes/compute.rb index 6b913deedf..69ca3306db 100644 --- a/chef/cookbooks/nova/recipes/compute.rb +++ b/chef/cookbooks/nova/recipes/compute.rb @@ -342,7 +342,7 @@ variables({ ksm_enabled: node[:nova][:kvm][:ksm_enabled] ? 1 : 0, transparent_hugepage_enabled: node[:nova][:kvm][:ksm_enabled] ? 
"never" : "always", - transparent_hugepage_defrag: node[:nova][:kvm][:ksm_enabled] ? "never" : "always" + transparent_hugepage_defrag: node[:nova][:kvm][:ksm_enabled] ? "never" : "madvise" }) mode "0755" end From 022c8ebf206e7d8b94495925dd4f4173619555da Mon Sep 17 00:00:00 2001 From: Stefan Nica Date: Tue, 8 May 2018 12:59:08 +0200 Subject: [PATCH 057/207] keystone: avoid race condition during admin password change (bsc#1091829) Issuing a new keystone token immediately after updating the admin user password may sometimes return an invalid token. In the context of crowbar, this issue can be triggered when calling the keystone_register 'wakeup' action immediately after the admin password has been updated. When triggered, it results in timeout errors on non-founder nodes, while the founder node is stuck doing retry iterations with an expired token. As a workaround for bsc#1091829, the 'wakeup' action is updated with an optional 'reissue_token_on_error' argument, which, when set, will re-issue a token *before* checking the keystone API again, instead of reusing the same token for subsequent attempts. 
(cherry picked from commit 3d664edfc29b29c64e32b84bc3d71e4fe8d128b1) --- chef/cookbooks/keystone/providers/register.rb | 7 +++++-- chef/cookbooks/keystone/recipes/server.rb | 13 ++++++------- chef/cookbooks/keystone/resources/register.rb | 3 +++ 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/chef/cookbooks/keystone/providers/register.rb b/chef/cookbooks/keystone/providers/register.rb index e5ff13317c..67eb4a491a 100644 --- a/chef/cookbooks/keystone/providers/register.rb +++ b/chef/cookbooks/keystone/providers/register.rb @@ -27,10 +27,13 @@ # Lets verify that the service does not exist yet count = 0 error = true - while error and count < 50 do + loop do count = count + 1 item_id, error = _find_id(http, headers, "fred", path, dir) - sleep 1 if error + break unless error && count < 50 + sleep 1 + next unless new_resource.reissue_token_on_error + http, headers = _build_connection(new_resource) end raise "Failed to validate keystone is wake" if error diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index 1b624ac111..d728ec9aaf 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -582,6 +582,10 @@ old_password = node[:keystone][:admin][:old_password] old_register_auth_hash = register_auth_hash.clone old_register_auth_hash[:password] = old_password +update_admin_password = node[:keystone][:bootstrap] && + (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) && + old_password && !old_password.empty? && + old_password != node[:keystone][:admin][:password] keystone_register "update admin password" do protocol node[:keystone][:api][:protocol] @@ -593,12 +597,7 @@ user_password node[:keystone][:admin][:password] tenant_name node[:keystone][:admin][:tenant] action :add_user - only_if do - node[:keystone][:bootstrap] && - (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) && - old_password && !old_password.empty? 
&& - old_password != node[:keystone][:admin][:password] - end + only_if { update_admin_password } end ruby_block "backup current admin password on node attributes" do @@ -627,7 +626,6 @@ end end - # Silly wake-up call - this is a hack; we use retries because the server was # just (re)started, and might not answer on the first try keystone_register "wakeup keystone" do @@ -638,6 +636,7 @@ auth register_auth_hash retries 5 retry_delay 10 + reissue_token_on_error update_admin_password action :wakeup end diff --git a/chef/cookbooks/keystone/resources/register.rb b/chef/cookbooks/keystone/resources/register.rb index dc3a4f6044..9c3d8a014f 100644 --- a/chef/cookbooks/keystone/resources/register.rb +++ b/chef/cookbooks/keystone/resources/register.rb @@ -64,3 +64,6 @@ # :add_ec2 specific attributes attribute :user_name, kind_of: String attribute :tenant_name, kind_of: String + +# :wakeup specific attributes +attribute :reissue_token_on_error, kind_of: [TrueClass, FalseClass], default: false From 402cb4e76120f40b85e199045fcab8087afd5184 Mon Sep 17 00:00:00 2001 From: "Bernhard M. Wiedemann" Date: Tue, 12 Sep 2017 11:23:18 +0200 Subject: [PATCH 058/207] nova: allow to enable nested virt on Intel because it defaults to off but a lot of people rely on nested virt being available While in https://fate.suse.com/320082 the virtualisation team declined to promote nested virt to fully supported status for SLE12, we are using this since 2012 in all kinds of places without problems. 
(cherry picked from commit afbcc5c3a4f4e552e2f130b45fe70932f82094be) --- chef/cookbooks/nova/recipes/compute.rb | 14 ++++++++++++++ chef/data_bags/crowbar/template-nova.json | 1 + chef/data_bags/crowbar/template-nova.schema | 1 + 3 files changed, 16 insertions(+) diff --git a/chef/cookbooks/nova/recipes/compute.rb b/chef/cookbooks/nova/recipes/compute.rb index 69ca3306db..33c863a78f 100644 --- a/chef/cookbooks/nova/recipes/compute.rb +++ b/chef/cookbooks/nova/recipes/compute.rb @@ -109,6 +109,20 @@ package "qemu-block-rbd" end + execute "enable kvm intel nested virt" do + command <<-SHELL + grep -q nested /etc/modprobe.d/80-kvm-intel.conf || + echo "options kvm_intel nested=1" > /etc/modprobe.d/80-kvm-intel.conf + ! grep -q N /sys/module/kvm_intel/parameters/nested || + /sbin/modprobe -r kvm_intel + SHELL + only_if do + node[:nova][:kvm][:nested_virt] && + `uname -r`.include?("default") && + system("grep -qw vmx /proc/cpuinfo") + end + end + # load modules only when appropriate kernel is present execute "loading kvm modules" do command <<-EOF diff --git a/chef/data_bags/crowbar/template-nova.json b/chef/data_bags/crowbar/template-nova.json index 2fc4301a2c..856ac624a8 100644 --- a/chef/data_bags/crowbar/template-nova.json +++ b/chef/data_bags/crowbar/template-nova.json @@ -75,6 +75,7 @@ "secret_uuid": "" }, "kvm": { + "nested_virt": false, "ksm_enabled": false, "disk_cachemodes": "network=writeback" }, diff --git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index fceb697903..52674c087b 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -133,6 +133,7 @@ }, "kvm": { "type": "map", "required": true, "mapping": { + "nested_virt": { "type": "bool", "required": false }, "ksm_enabled": { "type": "bool", "required": true }, "disk_cachemodes": { "type": "str", "required": true } } From 1b7a561acd52b0b858cd02fae9bc2814c8ab0ecb Mon Sep 17 00:00:00 2001 From: Jan Zerebecki 
Date: Mon, 4 Jun 2018 14:14:24 +0200 Subject: [PATCH 059/207] Add rate limiting for glance api (bsc#1005886) Disabled by default. It can be set to avoid filling up image related tables. Though the tables are only filled by POST requests this limit is for all request types. See https://wiki.openstack.org/wiki/OSSN/OSSN-0076 for details. (cherry picked from commit c4c1b8e0f27489dbeaa7503b920051b67a4523d1) Backport of https://github.com/crowbar/crowbar-openstack/pull/1677 --- chef/cookbooks/glance/recipes/ha.rb | 1 + .../crowbar/migrate/glance/105_add_rate_limit.rb | 9 +++++++++ chef/data_bags/crowbar/template-glance.json | 7 +++++-- chef/data_bags/crowbar/template-glance.schema | 7 ++++++- 4 files changed, 21 insertions(+), 3 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/glance/105_add_rate_limit.rb diff --git a/chef/cookbooks/glance/recipes/ha.rb b/chef/cookbooks/glance/recipes/ha.rb index 90ecf2e08d..120b227539 100644 --- a/chef/cookbooks/glance/recipes/ha.rb +++ b/chef/cookbooks/glance/recipes/ha.rb @@ -29,6 +29,7 @@ port network_settings[:api][:ha_bind_port] use_ssl (node[:glance][:api][:protocol] == "https") servers CrowbarPacemakerHelper.haproxy_servers_for_service(node, "glance", "glance-server", "api") + rate_limit node[:glance][:ha_rate_limit]["glance-api"] action :nothing end.run_action(:create) diff --git a/chef/data_bags/crowbar/migrate/glance/105_add_rate_limit.rb b/chef/data_bags/crowbar/migrate/glance/105_add_rate_limit.rb new file mode 100644 index 0000000000..64051f37d0 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/glance/105_add_rate_limit.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["ha_rate_limit"] = ta["ha_rate_limit"] unless a.key? "ha_rate_limit" + return a, d +end + +def downgrade(ta, td, a, d) + a.delete("ha_rate_limit") unless ta.key? 
"ha_rate_limit" + return a, d +end diff --git a/chef/data_bags/crowbar/template-glance.json b/chef/data_bags/crowbar/template-glance.json index 23a5334f57..c38caa31bc 100644 --- a/chef/data_bags/crowbar/template-glance.json +++ b/chef/data_bags/crowbar/template-glance.json @@ -65,14 +65,17 @@ "keystone_instance": "none", "service_user": "glance", "database_instance": "none", - "rabbitmq_instance": "none" + "rabbitmq_instance": "none", + "ha_rate_limit": { + "glance-api": 0 + } } }, "deployment": { "glance": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 104, + "schema-revision": 105, "element_states": { "glance-server": [ "readying", "ready", "applying" ] }, diff --git a/chef/data_bags/crowbar/template-glance.schema b/chef/data_bags/crowbar/template-glance.schema index abd2c2e357..d4142d8d8b 100644 --- a/chef/data_bags/crowbar/template-glance.schema +++ b/chef/data_bags/crowbar/template-glance.schema @@ -96,7 +96,12 @@ "keystone_instance": { "type": "str", "required": true }, "service_user": { "type": "str", "required": true }, "service_password": { "type": "str" }, - "database_instance": { "type": "str", "required": true } + "database_instance": { "type": "str", "required": true }, + "ha_rate_limit": { + "type": "map", "required": true, "mapping": { + "glance-api": { "type": "int", "required": true } + } + } } } } From 25c1200dd3005e5a036368d1140b1d103fceca0f Mon Sep 17 00:00:00 2001 From: Stephanie Miller Date: Mon, 30 Apr 2018 15:10:48 -0700 Subject: [PATCH 060/207] aodh: Add config for alarm_history_ttl (bsc#1073703) The alarm_history_ttl config option for aodh was not previously configurable. 
(cherry picked from commit 47a7d26c864e7ca1a21053e6b50b3916558f1fcc) --- chef/cookbooks/aodh/attributes/default.rb | 1 + chef/cookbooks/aodh/recipes/aodh.rb | 3 ++- chef/cookbooks/aodh/templates/default/aodh.conf.erb | 1 + .../migrate/aodh/101_add_alarm_history_ttl.rb | 13 +++++++++++++ chef/data_bags/crowbar/template-aodh.json | 3 ++- chef/data_bags/crowbar/template-aodh.schema | 1 + .../views/barclamp/aodh/_edit_attributes.html.haml | 2 +- crowbar_framework/config/locales/aodh/en.yml | 2 ++ 8 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/aodh/101_add_alarm_history_ttl.rb diff --git a/chef/cookbooks/aodh/attributes/default.rb b/chef/cookbooks/aodh/attributes/default.rb index 8a151751a0..cd3e89f18e 100644 --- a/chef/cookbooks/aodh/attributes/default.rb +++ b/chef/cookbooks/aodh/attributes/default.rb @@ -55,6 +55,7 @@ default[:aodh][:notifier][:service_name] = notifier_service_name default[:aodh][:listener][:service_name] = listener_service_name default[:aodh][:evaluation_interval] = 600 +default[:aodh][:alarm_history_ttl] = -1 default[:aodh][:debug] = false default[:aodh][:verbose] = false diff --git a/chef/cookbooks/aodh/recipes/aodh.rb b/chef/cookbooks/aodh/recipes/aodh.rb index 3a0dc06472..a9f0ad3cdf 100644 --- a/chef/cookbooks/aodh/recipes/aodh.rb +++ b/chef/cookbooks/aodh/recipes/aodh.rb @@ -160,7 +160,8 @@ database_connection: db_connection, node_hostname: node["hostname"], aodh_ssl: node[:aodh][:ssl], - evaluation_interval: node[:aodh][:evaluation_interval] + evaluation_interval: node[:aodh][:evaluation_interval], + alarm_history_ttl: node[:aodh][:alarm_history_ttl] ) notifies :reload, resources(service: "apache2") end diff --git a/chef/cookbooks/aodh/templates/default/aodh.conf.erb b/chef/cookbooks/aodh/templates/default/aodh.conf.erb index 3c702255cf..dcdec2fbf2 100644 --- a/chef/cookbooks/aodh/templates/default/aodh.conf.erb +++ b/chef/cookbooks/aodh/templates/default/aodh.conf.erb @@ -9,6 +9,7 @@ 
transport_url = <%= @rabbit_settings[:url] %> workers = <%= [node["cpu"]["total"], 2, 4].sort[1] %> [database] +alarm_history_time_to_live = <%= @alarm_history_ttl %> connection = <%= @database_connection %> [keystone_authtoken] diff --git a/chef/data_bags/crowbar/migrate/aodh/101_add_alarm_history_ttl.rb b/chef/data_bags/crowbar/migrate/aodh/101_add_alarm_history_ttl.rb new file mode 100644 index 0000000000..9a81fde5b2 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/aodh/101_add_alarm_history_ttl.rb @@ -0,0 +1,13 @@ +def upgrade(ta, td, a, d) + unless a.key? "alarm_history_ttl" + a["alarm_history_ttl"] = ta["alarm_history_ttl"] + end + return a, d +end + +def downgrade(ta, td, a, d) + unless ta.key? "alarm_history_ttl" + a.delete("alarm_history_ttl") + end + return a, d +end diff --git a/chef/data_bags/crowbar/template-aodh.json b/chef/data_bags/crowbar/template-aodh.json index d7b6eb1e6a..0fd5b2ed8e 100644 --- a/chef/data_bags/crowbar/template-aodh.json +++ b/chef/data_bags/crowbar/template-aodh.json @@ -6,6 +6,7 @@ "debug": false, "verbose": true, "evaluation_interval": 600, + "alarm_history_ttl": -1, "rabbitmq_instance": "none", "database_instance": "none", "keystone_instance": "none", @@ -35,7 +36,7 @@ "aodh": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 100, + "schema-revision": 101, "element_states": { "aodh-server": [ "readying", "ready", "applying" ] }, diff --git a/chef/data_bags/crowbar/template-aodh.schema b/chef/data_bags/crowbar/template-aodh.schema index 83747d4b30..d2e51765e1 100644 --- a/chef/data_bags/crowbar/template-aodh.schema +++ b/chef/data_bags/crowbar/template-aodh.schema @@ -15,6 +15,7 @@ "debug": { "type": "bool", "required": true }, "verbose": { "type": "bool", "required": true }, "evaluation_interval": { "type": "int", "required": true }, + "alarm_history_ttl": { "type": "int", "required": true }, "database_instance": { "type": "str", "required": true }, "rabbitmq_instance": { "type": "str", "required": 
true }, "keystone_instance": { "type": "str", "required": true }, diff --git a/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml index 9692919632..c8b9a0705f 100644 --- a/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml @@ -9,7 +9,7 @@ = instance_field :ceilometer = integer_field :evaluation_interval - + = integer_field :alarm_history_ttl %fieldset %legend diff --git a/crowbar_framework/config/locales/aodh/en.yml b/crowbar_framework/config/locales/aodh/en.yml index da7e097fef..7256c91390 100644 --- a/crowbar_framework/config/locales/aodh/en.yml +++ b/crowbar_framework/config/locales/aodh/en.yml @@ -27,6 +27,8 @@ en: evaluation_interval: 'Evaluation interval for threshold alarms (in seconds).' logging_header: 'Logging' verbose: 'Verbose Logging' + alarm_history_ttl: 'Number of seconds that alarm histories are kept in the database for (<= 0 means forever).' + api: protocol: 'Protocol' ssl_header: 'SSL Support' From 839e79a79f55bae706eed4abf11b093c06b887f4 Mon Sep 17 00:00:00 2001 From: Johannes Grassler Date: Wed, 30 May 2018 10:45:34 +0200 Subject: [PATCH 061/207] monasca: add elasticsearch tunables (bsc#1090343) This commit adds various new elasticsearch tunables and passes them to monasca-installer. 
(cherry picked from commit a91f9c5d0fde868540b85ea14c7cf864a564f53a) --- chef/cookbooks/monasca/recipes/master.rb | 1 + .../monasca/templates/default/crowbar_vars.yml.erb | 3 +++ .../monasca/104_add_elasticsearch_tunables.rb | 14 ++++++++++++++ chef/data_bags/crowbar/template-monasca.json | 13 +++++++++++-- chef/data_bags/crowbar/template-monasca.schema | 13 +++++++++++++ 5 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/monasca/104_add_elasticsearch_tunables.rb diff --git a/chef/cookbooks/monasca/recipes/master.rb b/chef/cookbooks/monasca/recipes/master.rb index 9cdf2bfd85..a03c295c91 100644 --- a/chef/cookbooks/monasca/recipes/master.rb +++ b/chef/cookbooks/monasca/recipes/master.rb @@ -124,6 +124,7 @@ curator_cron_config: [curator_cron_config].to_yaml.split("\n")[1..-1], curator_excluded_index: curator_excluded_index.to_yaml.split("\n")[1..-1], elasticsearch_repo_dir: node[:monasca][:elasticsearch][:repo_dir].to_yaml.split("\n")[1..-1], + elasticsearch_tunables: node[:monasca][:elasticsearch][:tunables], monitor_libvirt: node[:monasca][:agent][:monitor_libvirt], delegate_role: node[:monasca][:delegate_role] ) diff --git a/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb b/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb index c331da07b8..edc5f34b8d 100644 --- a/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb +++ b/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb @@ -58,6 +58,9 @@ monasca_log_api_url: "http://<%= @pub_net_ip %>:<%= @log_api_settings['bind_port memcached_nodes: ["<%= @monasca_net_ip %>:11211"] elasticsearch_nodes: ["<%= @monasca_net_ip %>"] elasticsearch_hosts: <%= @monasca_net_ip %> +<%- @elasticsearch_tunables.each_key do |t| %> +elasticsearch_<%= t %>: <%= @elasticsearch_tunables[t] %> +<%- end %> monasca_api_log_level: <%= @api_settings['log_level'] %> log_api_log_level: <%= @log_api_settings['log_level'] %> diff --git 
a/chef/data_bags/crowbar/migrate/monasca/104_add_elasticsearch_tunables.rb b/chef/data_bags/crowbar/migrate/monasca/104_add_elasticsearch_tunables.rb new file mode 100644 index 0000000000..4662a7f8a3 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/monasca/104_add_elasticsearch_tunables.rb @@ -0,0 +1,14 @@ +def upgrade(ta, td, a, d) + # this migration already happened if the tsdb key exists + return a, d if a["elasticsearch"].key?("tunables") + + a["elasticsearch"]["tunables"] = ta["elasticsearch"]["tunables"] + + return a, d +end + +def downgrade(ta, td, a, d) + a["elasticsearch"].delete("tunables") + + return a, d +end diff --git a/chef/data_bags/crowbar/template-monasca.json b/chef/data_bags/crowbar/template-monasca.json index 30e7fc5c4c..9f611c1e90 100644 --- a/chef/data_bags/crowbar/template-monasca.json +++ b/chef/data_bags/crowbar/template-monasca.json @@ -69,7 +69,16 @@ "log_level": "INFO" }, "elasticsearch": { - "repo_dir": [] + "repo_dir": [], + "tunables": { + "heap_size": "4g", + "max_locked_memory": "infinity", + "max_open_files_hard_limit": 65536, + "max_open_files_soft_limit": 16384, + "max_procs": 65536, + "memory_lock": true, + "vm_max_map_count": 262144 + } }, "elasticsearch_curator": { "delete_exclude_index": [ ".kibana" ], @@ -132,7 +141,7 @@ "monasca": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 103, + "schema-revision": 104, "element_states": { "monasca-server": [ "readying", "ready", "applying" ], "monasca-master": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-monasca.schema b/chef/data_bags/crowbar/template-monasca.schema index 9ef270d511..06bfa2194d 100644 --- a/chef/data_bags/crowbar/template-monasca.schema +++ b/chef/data_bags/crowbar/template-monasca.schema @@ -107,6 +107,19 @@ "required": true, "type": "seq", "sequence": [ { "type": "str" } ] + }, + "tunables": { + "required": true, + "type": "map", + "mapping": { + "heap_size": { "type": "str", "required": true }, + 
"max_locked_memory": { "type": "str", "required": true }, + "max_open_files_hard_limit": { "type": "int", "required": true }, + "max_open_files_soft_limit": { "type": "int", "required": true }, + "max_procs": { "type": "int", "required": true }, + "memory_lock": { "type": "bool", "required": true }, + "vm_max_map_count": { "type": "int", "required": true } + } } } }, From c0d204d1f9f2c01644485f5aba651a494849d088 Mon Sep 17 00:00:00 2001 From: Steve Kowalik Date: Wed, 27 Jun 2018 15:20:44 +1000 Subject: [PATCH 062/207] manila: Correct field name for cluster name The correct field name for the Ceph cluster name is cephfs_cluster_name, correct it for the custom view so configurations using CephFS can be successfully applied. (cherry picked from commit 72a0f7f0d6ea5e2e56ed1bc4a386a69e3589cc02) --- .../app/views/barclamp/manila/_edit_attributes.html.haml | 2 +- crowbar_framework/config/locales/manila/en.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml index f9abd67d65..637301d315 100644 --- a/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml @@ -72,7 +72,7 @@ = boolean_field %w(shares {{@index}} cephfs use_crowbar), "data-hideit" => "true", "data-hideit-target" => "#cephfs_conf_path_{{@index}}", "data-hideit-direct" => "true" %div{:id => "cephfs_conf_path_{{@index}}"} = string_field %w(shares {{@index}} cephfs cephfs_conf_path) - = string_field %w(shares {{@index}} cephfs cephfs_clustername) + = string_field %w(shares {{@index}} cephfs cephfs_cluster_name) = string_field %w(shares {{@index}} cephfs cephfs_auth_id) {{/if_eq}} diff --git a/crowbar_framework/config/locales/manila/en.yml b/crowbar_framework/config/locales/manila/en.yml index 3d7c13126b..df5714314c 100644 --- 
a/crowbar_framework/config/locales/manila/en.yml +++ b/crowbar_framework/config/locales/manila/en.yml @@ -74,7 +74,7 @@ en: cephfs: use_crowbar: 'Use Ceph deployed by Crowbar' cephfs_conf_path: 'Path to Ceph configuration file' - cephfs_clustername: 'Cluster name' + cephfs_cluster_name: 'Cluster name' cephfs_auth_id: 'Authentication ID' manual: config: 'Options' From 9c9c7590b54f624cae3ab6f67228ff2a342832fc Mon Sep 17 00:00:00 2001 From: Itxaka Date: Wed, 13 Dec 2017 12:46:04 +0100 Subject: [PATCH 063/207] rabbitmq: Make sure rabbit is running on cluster As the resource agent for rabbitmq with cluster HA restarts the rabbitmq service several times, the current check can fail to validate rabbitmq status, as it could do the check just on one of those times that rabbit is up while creating/joining the cluster. Then if the check passed and continued the chef execution, the next steps could fail as they are dependent on having a running rabbitmq, while the rabbitmq server may still be restarting. Instead expand the checks to first look for a rabbit master for the resource and expand the check for a local running rabbit to make sure we are checking for the local copy. Also add an extra check after the crm checks to make sure there are no pending operations for the resource so we can try to avoid continuing if there is a promotion going on.
(cherry picked from commit 3060a3ed829df22394c83b4fcf0cc436980b338b) --- chef/cookbooks/rabbitmq/recipes/ha_cluster.rb | 27 ++++++++++++++----- 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb index 49da5d12bb..9977ba2f2b 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb @@ -95,12 +95,26 @@ block do require "timeout" begin - Timeout.timeout(240) do - # Check that the service is running + Timeout.timeout(300) do + # Check that the service has a master cmd = "crm resource show #{ms_name} 2> /dev/null " - cmd << "| grep -q \"is running on\"" + cmd << "| grep \"is running on\" | grep -q \"Master\"" until ::Kernel.system(cmd) - Chef::Log.debug("#{ms_name} still not started") + Chef::Log.info("#{ms_name} still without master") + sleep(2) + end + # Check that the service is running on this node + cmd = "crm resource show #{ms_name} 2> /dev/null " + cmd << "| grep -q \"is running on: #{node.hostname}\"" + until ::Kernel.system(cmd) + Chef::Log.info("#{ms_name} still not running locally") + sleep(2) + end + # Check that we dont have any pending resource operations + cmd = "crm resource operations #{ms_name} 2> /dev/null " + cmd << "| grep -q \"pending\"" + while ::Kernel.system(cmd) + Chef::Log.info("resource #{ms_name} still has pending operations") sleep(2) end # The sed command grabs everything between '{running_applications' @@ -109,12 +123,13 @@ cmd = "rabbitmqctl -q status 2> /dev/null " cmd << "| sed -n '/{running_applications/,/\]}/p' | grep -q '{rabbit,'" until ::Kernel.system(cmd) - Chef::Log.debug("#{ms_name} still not answering") + Chef::Log.info("#{ms_name} still not answering") sleep(2) end end rescue Timeout::Error - message = "The #{ms_name} pacemaker resource is not started. Please manually check for an error." 
+ message = "The #{ms_name} pacemaker resource is not started or doesn't have a master yet." + message << " Please manually check for an error." Chef::Log.fatal(message) raise message end From 99aa7c7d2e336c2ad2a2108df299c1d31a65a894 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Mon, 26 Mar 2018 12:32:52 +0200 Subject: [PATCH 064/207] rabbitmq: check for rabbit readiness As the other checks are not enough, as pacemaker keeps restarting rabbitmq, we need a more robust way of checking that rabbit has entered an stable situation. So check that rabbit is up 5 times in a row with a delay of 2 seconds between checks to make sure pacemaker has left it alone. Also, only trigger that check for rabbit if the pacemaker_transaction is updated, otherwise there is no need to do so (cherry picked from commit 8b568947b4ab43f9dc13d130d5ffbc9b246a74f4) --- chef/cookbooks/rabbitmq/recipes/ha_cluster.rb | 113 ++++++++++-------- 1 file changed, 64 insertions(+), 49 deletions(-) diff --git a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb index 9977ba2f2b..9aa086cb79 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb @@ -17,6 +17,9 @@ agent_name = "ocf:rabbitmq:rabbitmq-server-ha" +service_name = "rabbitmq" +ms_name = "ms-#{service_name}" + # create file that will be sourced by OCF resource agent on promote template "/etc/rabbitmq/ocf-promote" do source "ocf-promote.erb" @@ -28,16 +31,74 @@ ) end +# wait for service to have a master, and to be active +ruby_block "wait for #{ms_name} to be started" do + block do + require "timeout" + begin + Timeout.timeout(360) do + # Check that the service has a master + cmd = "crm resource show #{ms_name} 2> /dev/null " + cmd << "| grep \"is running on\" | grep -q \"Master\"" + until ::Kernel.system(cmd) + Chef::Log.info("#{ms_name} still without master") + sleep(2) + end + + # Check that the service is running on this node + cmd = "crm resource 
show #{ms_name} 2> /dev/null " + cmd << "| grep -q \"is running on: #{node.hostname}\"" + until ::Kernel.system(cmd) + Chef::Log.info("#{ms_name} still not running locally") + sleep(2) + end + + # The sed command grabs everything between '{running_applications' + # and ']}', and what we want is that the rabbit application is + # running + # Checks if the actual rabbit app is running properly at least 5 times in a row + # as to prevent continuing when its not stable enough + cmd = "rabbitmqctl -q status 2> /dev/null " + cmd << "| sed -n '/{running_applications/,/\]}/p' | grep -q '{rabbit,'" + count = 0 + until count == 5 + if ::Kernel.system(cmd) + count += 1 + sleep(2) + else + count = 0 + end + end + + # Check that we dont have any pending pacemaker resource operations + cmd = "crm resource operations #{ms_name} 2> /dev/null " + cmd << "| grep -q \"pending\"" + while ::Kernel.system(cmd) + Chef::Log.info("resource #{ms_name} still has pending operations") + sleep(2) + end + end + rescue Timeout::Error + message = "The #{ms_name} pacemaker resource is not started or doesn't have a master yet." + message << " Please manually check for an error." 
+ Chef::Log.fatal(message) + raise message + end + end + action :nothing +end + # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages installed before we create the pacemaker # resources crowbar_pacemaker_sync_mark "sync-rabbitmq_before_ha" -crowbar_pacemaker_sync_mark "wait-rabbitmq_ha_resources" +crowbar_pacemaker_sync_mark "wait-rabbitmq_ha_resources" do + timeout 300 +end transaction_objects = [] -service_name = "rabbitmq" pacemaker_primitive service_name do agent agent_name # nodename is empty so that we explicitly depend on the config files @@ -62,7 +123,6 @@ # no location on the role here: the ms resource will have this constraint -ms_name = "ms-#{service_name}" pacemaker_ms ms_name do rsc service_name meta ({ @@ -86,52 +146,7 @@ # note that this will also automatically start the resources action :commit_new only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + notifies :create, resources(ruby_block: "wait for #{ms_name} to be started"), :immediately end crowbar_pacemaker_sync_mark "create-rabbitmq_ha_resources" - -# wait for service to have a master, and to be active -ruby_block "wait for #{ms_name} to be started" do - block do - require "timeout" - begin - Timeout.timeout(300) do - # Check that the service has a master - cmd = "crm resource show #{ms_name} 2> /dev/null " - cmd << "| grep \"is running on\" | grep -q \"Master\"" - until ::Kernel.system(cmd) - Chef::Log.info("#{ms_name} still without master") - sleep(2) - end - # Check that the service is running on this node - cmd = "crm resource show #{ms_name} 2> /dev/null " - cmd << "| grep -q \"is running on: #{node.hostname}\"" - until ::Kernel.system(cmd) - Chef::Log.info("#{ms_name} still not running locally") - sleep(2) - end - # Check that we dont have any pending resource operations - cmd = "crm resource operations #{ms_name} 2> /dev/null " - cmd << "| grep -q \"pending\"" - while ::Kernel.system(cmd) - Chef::Log.info("resource #{ms_name} 
still has pending operations") - sleep(2) - end - # The sed command grabs everything between '{running_applications' - # and ']}', and what we want is that the rabbit application is - # running - cmd = "rabbitmqctl -q status 2> /dev/null " - cmd << "| sed -n '/{running_applications/,/\]}/p' | grep -q '{rabbit,'" - until ::Kernel.system(cmd) - Chef::Log.info("#{ms_name} still not answering") - sleep(2) - end - end - rescue Timeout::Error - message = "The #{ms_name} pacemaker resource is not started or doesn't have a master yet." - message << " Please manually check for an error." - Chef::Log.fatal(message) - raise message - end - end # block -end # ruby_block From acd69dd2a99e2fec2943cbc65c7418f85d328dca Mon Sep 17 00:00:00 2001 From: Johannes Grassler Date: Wed, 30 May 2018 10:45:34 +0200 Subject: [PATCH 065/207] monasca: various monasca-installer improvements This commit improves the execution of monasca-installer in various ways: * Run monasca-installer from dedicated wrapper script * Determine whether to run monasca-installer in wrapper script * Signal changed resources by deleting wrapper script's version information file (causes a re-run) * Add time stamps to /var/log/monasca-installer.log (cherry picked from commit 5e05e2445eb4b6b493af9aa13df9a2012fdf35da) --- chef/cookbooks/monasca/recipes/master.rb | 44 ++++++++++--------- .../default/run-monasca-installer.erb | 23 ++++++++++ 2 files changed, 46 insertions(+), 21 deletions(-) create mode 100644 chef/cookbooks/monasca/templates/default/run-monasca-installer.erb diff --git a/chef/cookbooks/monasca/recipes/master.rb b/chef/cookbooks/monasca/recipes/master.rb index 9cdf2bfd85..8207965d5d 100644 --- a/chef/cookbooks/monasca/recipes/master.rb +++ b/chef/cookbooks/monasca/recipes/master.rb @@ -26,7 +26,7 @@ package "ansible" package "monasca-installer" do - notifies :run, "execute[run ansible]", :delayed + notifies :run, "execute[remove lock file]", :immediately end cookbook_file "/etc/ansible/ansible.cfg" do @@ 
-66,7 +66,7 @@ ansible_ssh_user: "root", keystone_host: keystone_settings["internal_url_host"] ) - notifies :run, "execute[run ansible]", :delayed + notifies :run, "execute[remove lock file]", :immediately end monasca_net_ip = MonascaHelper.get_host_for_monitoring_url(monasca_node) @@ -127,7 +127,7 @@ monitor_libvirt: node[:monasca][:agent][:monitor_libvirt], delegate_role: node[:monasca][:delegate_role] ) - notifies :run, "execute[run ansible]", :delayed + notifies :run, "execute[remove lock file]", :immediately end # This file is used to mark that ansible installer run successfully. @@ -137,15 +137,6 @@ # and monasca-installer. If they change re-execute ansible installer. lock_file = "/opt/monasca-installer/.installed" -previous_versions = if Pathname.new(lock_file).file? - File.read(lock_file).gsub(/^$\n/, "") - else - "" - end - -get_versions = "rpm -qa | grep -e crowbar-openstack -e monasca-installer | sort" -actual_versions = IO.popen(get_versions, &:read).gsub(/^$\n/, "") - cookbook_file "/etc/logrotate.d/monasca-installer" do owner "root" group "root" @@ -154,15 +145,26 @@ source "monasca-installer.logrotate" end -ansible_cmd = - "rm -f #{lock_file} " \ - "&& ansible-playbook " \ - "-i monasca-hosts -e '@/opt/monasca-installer/crowbar_vars.yml' " \ - "monasca.yml -vvv >> /var/log/monasca-installer.log 2>&1 " \ - "&& echo '#{actual_versions}' > #{lock_file}" +template "/usr/sbin/run-monasca-installer" do + source "run-monasca-installer.erb" + owner "root" + group "root" + mode "0555" + variables( + lock_file: lock_file + ) + notifies :run, "execute[remove lock file]", :immediately +end + +# Remove lock file. This gets notified if parameters change and ensures the +# version check in run-monasca-installer fails. 
+execute "remove lock file" do + command "rm -f #{lock_file}" + action :nothing +end execute "run ansible" do - command ansible_cmd - cwd "/opt/monasca-installer" - action :nothing unless actual_versions != previous_versions + command "/usr/sbin/run-monasca-installer 2>&1"\ + " | awk '{ print strftime(\"[%Y-%m-%d %H:%M:%S]\"), $0 }'"\ + " >> /var/log/monasca-installer.log" end diff --git a/chef/cookbooks/monasca/templates/default/run-monasca-installer.erb b/chef/cookbooks/monasca/templates/default/run-monasca-installer.erb new file mode 100644 index 0000000000..19fc3ed235 --- /dev/null +++ b/chef/cookbooks/monasca/templates/default/run-monasca-installer.erb @@ -0,0 +1,23 @@ +#!/bin/sh + +set -e + +actual_versions=$(LC_ALL=C rpm -qa crowbar-openstack openstack-monasca-installer | sort) +previous_versions=$(cat <%= @lock_file %> 2> /dev/null || echo) + +# No need to run if versions match (Crowbar will ensure a mismatch by deleting +# <%= @lock_file %> if any parameters change. +if [ "$actual_versions" = "$previous_versions" ]; then + echo "No package version changes, skipping monasca-installer run" + exit 0 +fi + +cd "/opt/monasca-installer" +rm -f <%= @lock_file %> + /usr/bin/ansible-playbook \ + -i monasca-hosts \ + -e '@/opt/monasca-installer/crowbar_vars.yml' \ + monasca.yml -vvv + +# Record version information to indicate a successful run. 
+LC_ALL=C rpm -qa crowbar-openstack openstack-monasca-installer | sort > <%= @lock_file %> From 1a1c28b0f5e808b7d2121a5c4ea50ff302f0dcf3 Mon Sep 17 00:00:00 2001 From: Boris Bobrov Date: Tue, 6 Mar 2018 16:41:06 +0100 Subject: [PATCH 066/207] copytruncate apache logs instead of creating apache2 reload causes responses 406 from keystone bso#1083093 (cherry picked from commit cfda2347d15a16dfccbd69f0cdd08cbf0a9e31de) --- .../default/openstack-dashboard.logrotate.erb | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb b/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb index 40a2e8c05d..17d863a5e6 100644 --- a/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb +++ b/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb @@ -1,27 +1,21 @@ /var/log/apache2/openstack-dashboard-access_log { compress + copytruncate dateext maxage 365 rotate 99 size=+4096k notifempty missingok - create 644 root root - postrotate - /etc/init.d/apache2 reload - endscript } /var/log/apache2/openstack-dashboard-error_log { compress + copytruncate dateext maxage 365 rotate 99 size=+1024k notifempty missingok - create 644 root root - postrotate - /etc/init.d/apache2 reload - endscript } From 9d2cbfe4e3acd23ba1255b541d97d7b0eb3a08f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Wed, 18 Jul 2018 18:12:02 +0200 Subject: [PATCH 067/207] Do not automatically put manila-share roles to compute nodes Such scenario is not supported by non-disruptive upgrade. User can still place the role to compute nodes manually, this only affects automatic allocation. 
--- crowbar_framework/app/models/manila_service.rb | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crowbar_framework/app/models/manila_service.rb b/crowbar_framework/app/models/manila_service.rb index 5ce22009da..3ab354de33 100644 --- a/crowbar_framework/app/models/manila_service.rb +++ b/crowbar_framework/app/models/manila_service.rb @@ -78,10 +78,17 @@ def create_proposal storage = select_nodes_for_role( nodes, "manila-share", "storage") || [] + # Do not put manila-share roles to compute nodes + # (it does not work with non-disruptive upgrade) + shares = storage.reject { |n| n.roles.include? "nova-compute-kvm" } + + # Take at least one manila-share role if it was emptied by previous filter + shares << controllers.first if shares.empty? + base["deployment"][@bc_name]["elements"] = { "manila-server" => controllers.empty? ? [] : [controllers.first.name], - "manila-share" => storage.map(&:name) + "manila-share" => shares.map(&:name) } base["attributes"][@bc_name]["database_instance"] = From 015d208b4d7e997d79981841ce71c23f14e3c922 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 22 Jun 2018 17:23:14 +0200 Subject: [PATCH 068/207] database: Split database-server role into backend specific roles This commit splits the "database-server" role into backend specific roles: PostgreSQL deployments keep the "database-server" role, while MySQL deployments move to a new "mysql-server" role. This is the first change in a series to allow deploying both databases from a single proposal to the same (or different) set of nodes. The current state still only allows a single database to be deployed; follow-up commits will enhance the code to allow for parallel deployments. Schema migrations were added to reassign the roles correctly on existing deployments. 
(cherry picked from commit 5b23644341613d296bcecb6f8320aa7cb72714f8) --- .../crowbar-openstack/libraries/helpers.rb | 14 +++++- .../database/recipes/role_database_server.rb | 6 +-- .../{server.rb => role_mysql_server.rb} | 16 ++----- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- .../migrate/database/109_separate_db_roles.rb | 47 +++++++++++++++++++ chef/data_bags/crowbar/template-database.json | 10 ++-- chef/roles/database-server.rb | 3 +- chef/roles/mysql-server.rb | 5 ++ .../app/models/database_service.rb | 39 +++++++++++---- 9 files changed, 111 insertions(+), 31 deletions(-) rename chef/cookbooks/database/recipes/{server.rb => role_mysql_server.rb} (64%) create mode 100644 chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb create mode 100644 chef/roles/mysql-server.rb diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 73c76c14ee..1e3b8431e3 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -79,7 +79,19 @@ def self.database_settings(node, barclamp) Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") else @database_settings ||= Hash.new - database = get_node(node, "database-server", "database", instance) + db_roles, = Chef::Search::Query.new.search( + :role, + "name:database-config-#{instance}" + ) + db_proposal_role = db_roles.first unless db_roles.empty? + sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] + db_role = if sql_engine == "postgresql" + "database-server" + else + "mysql-server" + end + + database = get_node(node, db_role, "database", instance) if database.nil? 
Chef::Log.warn("No database server found!") diff --git a/chef/cookbooks/database/recipes/role_database_server.rb b/chef/cookbooks/database/recipes/role_database_server.rb index 65a6817a08..a71952ff35 100644 --- a/chef/cookbooks/database/recipes/role_database_server.rb +++ b/chef/cookbooks/database/recipes/role_database_server.rb @@ -1,5 +1,5 @@ # -# Copyright 2016, SUSE LINUX GmbH +# Copyright 2018, SUSE LINUX GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # - if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "database-server") include_recipe "database::crowbar" - include_recipe "database::server" + Chef::Log.info("Running database::server for PostgreSQL") + include_recipe "postgresql::server" end diff --git a/chef/cookbooks/database/recipes/server.rb b/chef/cookbooks/database/recipes/role_mysql_server.rb similarity index 64% rename from chef/cookbooks/database/recipes/server.rb rename to chef/cookbooks/database/recipes/role_mysql_server.rb index 97ea5c1ce0..ef6113fe87 100644 --- a/chef/cookbooks/database/recipes/server.rb +++ b/chef/cookbooks/database/recipes/role_mysql_server.rb @@ -1,14 +1,11 @@ # -# Cookbook Name:: database -# Recipe:: server -# -# Copyright 2012, SUSE Linux Products GmbH +# Copyright 2018, SUSE LINUX GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -16,9 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - -backend = node[:database][:sql_engine] - -Chef::Log.info("Running database::server for #{backend}") - -include_recipe "#{backend}::server" +if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "mysql-server") + include_recipe "mysql::server" +end diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index fce618833e..095ea536ab 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -330,7 +330,7 @@ include_recipe "crowbar-pacemaker::haproxy" ha_servers = CrowbarPacemakerHelper.haproxy_servers_for_service( - node, "mysql", "database-server", "admin_port" + node, "mysql", "mysql-server", "admin_port" ) # Let all nodes but one act as backup (standby) servers. diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb new file mode 100644 index 0000000000..4837975fbf --- /dev/null +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb @@ -0,0 +1,47 @@ +def upgrade(ta, td, a, d) + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + + if a["sql_engine"] == "mysql" + d["elements"]["mysql-server"] = d["elements"]["database-server"] + d["elements"]["database-server"] = [] + if d.fetch("elements_expanded", {}).key?
"database-server" + d["elements_expanded"]["mysql-server"] = d["elements_expanded"]["database-server"] + d["elements_expanded"].delete("database-server") + end + + chef_order = BarclampCatalog.chef_order("database") + nodes = NodeObject.find("run_list_map:database-server") + nodes.each do |node| + node.add_to_run_list("mysql-server", chef_order, + td["element_states"]["mysql-server"]) + node.delete_from_run_list("database-server") + node.save + end + end + return a, d +end + +def downgrade(ta, td, a, d) + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + + if a["sql_engine"] == "mysql" + d["elements"]["database-server"] = d["elements"]["mysql-server"] + d["elements"].delete("mysql-server") + if d.fetch("elements_expanded", {}).key? "mysql-server" + d["elements_expanded"]["database-server"] = d["elements_expanded"]["mysql-server"] + d["elements_expanded"].delete("mysql-server") + end + + chef_order = BarclampCatalog.chef_order("database") + nodes = NodeObject.find("run_list_map:mysql-server") + nodes.each do |node| + node.add_to_run_list("database-server", chef_order, + td["element_states"]["database-server"]) + node.delete_from_run_list("mysql-server") + node.save + end + end + return a, d +end diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 599a55474d..6132bf41e2 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -83,15 +83,17 @@ "database": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 108, + "schema-revision": 109, "element_states": { - "database-server": [ "readying", "ready", "applying" ] + "database-server": [ "readying", "ready", "applying" ], + "mysql-server": [ "readying", "ready", "applying" ] }, "elements": { - "database-server": [] + "database-server": [], + "mysql-server": [] }, "element_order": [ - [ "database-server" ] + [ "database-server", "mysql-server" ] ], 
"config": { "environment": "database-base-config", diff --git a/chef/roles/database-server.rb b/chef/roles/database-server.rb index 0651705110..ad9b8ca79c 100644 --- a/chef/roles/database-server.rb +++ b/chef/roles/database-server.rb @@ -1,6 +1,5 @@ name "database-server" -description "Database Server Role" +description "PostgreSQL Server Role" run_list("recipe[database::role_database_server]") default_attributes() override_attributes() - diff --git a/chef/roles/mysql-server.rb b/chef/roles/mysql-server.rb new file mode 100644 index 0000000000..ac82c000c0 --- /dev/null +++ b/chef/roles/mysql-server.rb @@ -0,0 +1,5 @@ +name "mysql-server" +description "MySQL/MariaDB Server Role" +run_list("recipe[database::role_mysql_server]") +default_attributes() +override_attributes() diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index ae5b17d230..32260202a1 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -38,6 +38,16 @@ def role_constraints "suse" => "< 12.2", "windows" => "/.*/" } + }, + "mysql-server" => { + "unique" => false, + "count" => 1, + "cluster" => true, + "admin" => false, + "exclude_platform" => { + "suse" => "< 12.2", + "windows" => "/.*/" + } } } end @@ -111,17 +121,22 @@ def validate_ha_attributes(attributes, cluster) end def validate_proposal_after_save(proposal) - validate_one_for_role proposal, "database-server" - attributes = proposal["attributes"][@bc_name] - db_engine = attributes["sql_engine"] + sql_engine = attributes["sql_engine"] + db_role = if sql_engine == "postgresql" + "database-server" + else + "mysql-server" + end + validate_one_for_role proposal, db_role + validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_db_engine", - db_engine: db_engine - ) unless %w(mysql postgresql).include?(db_engine) + db_engine: sql_engine + ) unless %w(mysql postgresql).include?(sql_engine) # HA validation - servers = 
proposal["deployment"][@bc_name]["elements"]["database-server"] + servers = proposal["deployment"][@bc_name]["elements"][db_role] unless servers.nil? || servers.first.nil? || !is_cluster?(servers.first) cluster = servers.first validate_ha_attributes(attributes, cluster) @@ -134,10 +149,18 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Database apply_role_pre_chef_call: entering #{all_nodes.inspect}") return if all_nodes.empty? - database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, "database-server") + sql_engine = role.default_attributes["database"]["sql_engine"] + db_role = if engine == "postgresql" + "database-server" + else + "mysql-server" + end + + database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled vip_networks = ["admin"] + dirty = prepare_role_for_ha_with_haproxy(role, ["database", "ha", "enabled"], database_ha_enabled, database_elements, @@ -146,8 +169,6 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) reset_sync_marks_on_clusters_founders(database_elements) - sql_engine = role.default_attributes["database"]["sql_engine"] - if database_ha_enabled net_svc = NetworkService.new @logger case sql_engine From 201c14b68cf0f31e63070abea7e05522802ab6f4 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Mon, 23 Jul 2018 16:21:32 +0200 Subject: [PATCH 069/207] rabbitmq: set client timout to default value A very low client timeout can lead to multiple disconnections of the clients due to missing heartbeats, depending on the load on the rabbit nodes. 
Instead of configuring the clients with a very low value, lets just use the default openstack values for safety (cherry picked from commit ebc4922866eae1ae714ee30875847563b6c2d50c) --- chef/data_bags/crowbar/template-rabbitmq.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/data_bags/crowbar/template-rabbitmq.json b/chef/data_bags/crowbar/template-rabbitmq.json index 0f2886baf9..88f8b1db05 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.json +++ b/chef/data_bags/crowbar/template-rabbitmq.json @@ -21,7 +21,7 @@ "client_ca_certs": "/etc/ssl/certs/rabbitca.pem" }, "client": { - "heartbeat_timeout": 10 + "heartbeat_timeout": 60 }, "cluster": false, "ha": { From fcdbd4ae9d9c670ae895d0329a9f357f68e68f44 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 22 Jun 2018 17:29:22 +0200 Subject: [PATCH 070/207] database: Allow parallel deployments of postgresql and mysql todo: cleanup, re-enable caching of db_settings, HA (cherry picked from commit c49adf8eb94412843951f3d8c695e0274927c69c) --- .../crowbar-openstack/libraries/helpers.rb | 13 ++++++++----- .../database/libraries/database_library.rb | 9 +++------ chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- chef/cookbooks/mysql/recipes/server.rb | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 1e3b8431e3..495f1928ec 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -75,7 +75,7 @@ def self.database_settings(node, barclamp) @database_settings_cache_time = node[:ohai_time] end - if @database_settings && @database_settings.include?(instance) + if @database_settings && @database_settings.include?(instance) && false Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") else @database_settings ||= Hash.new @@ -85,6 +85,9 @@ def 
self.database_settings(node, barclamp) ) db_proposal_role = db_roles.first unless db_roles.empty? sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] + if barclamp == "mysql" || barclamp == "postgresql" + sql_engine = barclamp + end db_role = if sql_engine == "postgresql" "database-server" else @@ -97,7 +100,7 @@ def self.database_settings(node, barclamp) Chef::Log.warn("No database server found!") else address = CrowbarDatabaseHelper.get_listen_address(database) - backend_name = DatabaseLibrary::Database::Util.get_backend_name(database) + backend_name = sql_engine ssl_opts = {} if backend_name == "mysql" @@ -112,9 +115,9 @@ def self.database_settings(node, barclamp) address: address, url_scheme: backend_name, backend_name: backend_name, - provider: DatabaseLibrary::Database::Util.get_database_provider(database), - user_provider: DatabaseLibrary::Database::Util.get_user_provider(database), - privs: DatabaseLibrary::Database::Util.get_default_priviledges(database), + provider: DatabaseLibrary::Database::Util.get_database_provider(database, backend_name), + user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, backend_name), + privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, backend_name), connection: { host: address, username: "db_maker", diff --git a/chef/cookbooks/database/libraries/database_library.rb b/chef/cookbooks/database/libraries/database_library.rb index 2e343db085..8164f42373 100644 --- a/chef/cookbooks/database/libraries/database_library.rb +++ b/chef/cookbooks/database/libraries/database_library.rb @@ -19,8 +19,7 @@ module DatabaseLibrary class Database class Util - def self.get_database_provider(node) - backend = node[:database][:sql_engine] + def self.get_database_provider(node, backend = node[:database][:sql_engine]) db_provider = nil case backend when "postgresql" @@ -33,8 +32,7 @@ def self.get_database_provider(node) db_provider end - def self.get_user_provider(node) - backend = 
node[:database][:sql_engine] + def self.get_user_provider(node, backend = node[:database][:sql_engine]) db_provider = nil case backend when "postgresql" @@ -51,8 +49,7 @@ def self.get_backend_name(node) node[:database][:sql_engine] end - def self.get_default_priviledges(node) - backend = node[:database][:sql_engine] + def self.get_default_priviledges(node, backend = node[:database][:sql_engine]) privs = nil case backend when "postgresql" diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 095ea536ab..2fcc98d835 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -71,7 +71,7 @@ # unauthenticated root user is later removed in server.rb after the # bootstraping. Once the cluster has started other nodes will pick up on # the sstuser and we are able to use these credentails. - db_settings = fetch_database_settings + db_settings = fetch_database_settings(@cookbook_name) db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index c107ff2332..869f15a6de 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -183,7 +183,7 @@ only_if "/usr/bin/mysql -u root -e 'show databases;'" end -db_settings = fetch_database_settings +db_settings = fetch_database_settings(@cookbook_name) db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" From 761c7b9233ffa8cac5c817917b60aac0236fed61 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Mon, 25 Jun 2018 17:08:49 +0200 Subject: [PATCH 071/207] database: Allow parallel HA deployment of PostgreSQL and MariaDB This fixes the Crowbar Model and cookbooks to allow for the mysql- and postgresql-server roles being applied to the same cluster. 
The schema migration is update to put the "ha" attributes subtree in a per backend specific location to avoid overlap. (cherry picked from commit 86a1893c7a415a0d2c8c20fe36562367ec30c092) --- chef/cookbooks/database/attributes/default.rb | 6 +- chef/cookbooks/database/libraries/crowbar.rb | 12 +-- chef/cookbooks/mysql/recipes/server.rb | 4 +- chef/cookbooks/postgresql/recipes/ha.rb | 6 +- .../postgresql/recipes/ha_storage.rb | 18 ++-- chef/cookbooks/postgresql/recipes/server.rb | 2 +- .../postgresql/recipes/server_debian.rb | 2 +- .../postgresql/recipes/server_redhat.rb | 2 +- .../migrate/database/109_separate_db_roles.rb | 8 ++ chef/data_bags/crowbar/template-database.json | 24 ++--- .../crowbar/template-database.schema | 40 ++++---- .../app/models/database_service.rb | 99 +++++++++++-------- 12 files changed, 125 insertions(+), 98 deletions(-) diff --git a/chef/cookbooks/database/attributes/default.rb b/chef/cookbooks/database/attributes/default.rb index 32f3e09f79..0440382476 100644 --- a/chef/cookbooks/database/attributes/default.rb +++ b/chef/cookbooks/database/attributes/default.rb @@ -18,5 +18,7 @@ # # ha -default[:database][:ha][:enabled] = false -default[:database][:ha][:storage][:mode] = nil +default[:database][:postgresql][:ha][:enabled] = false +default[:database][:postgresql][:ha][:storage][:mode] = nil + +default[:database][:mysql][:ha][:enabled] = false diff --git a/chef/cookbooks/database/libraries/crowbar.rb b/chef/cookbooks/database/libraries/crowbar.rb index ed13e9e4fd..548be83637 100644 --- a/chef/cookbooks/database/libraries/crowbar.rb +++ b/chef/cookbooks/database/libraries/crowbar.rb @@ -1,10 +1,10 @@ module CrowbarDatabaseHelper - def self.get_ha_vhostname(node) - if node[:database][:ha][:enabled] + def self.get_ha_vhostname(node, sql_engine=node[:database][:sql_engine]) + if node["database"][sql_engine]["ha"]["enabled"] cluster_name = CrowbarPacemakerHelper.cluster_name(node) # Any change in the generation of the vhostname here must be 
reflected in # apply_role_pre_chef_call of the database barclamp model - if node[:database][:sql_engine] == "postgresql" + if sql_engine == "postgresql" "#{node[:database][:config][:environment].gsub("-config", "")}-#{cluster_name}".tr("_", "-") else "cluster-#{cluster_name}".tr("_", "-") @@ -14,10 +14,10 @@ def self.get_ha_vhostname(node) end end - def self.get_listen_address(node) + def self.get_listen_address(node, sql_engine=node[:database][:sql_engine]) # For SSL we prefer a cluster hostname (for certificate validation) - use_ssl = node[:database][:sql_engine] == "mysql" && node[:database][:mysql][:ssl][:enabled] - if node[:database][:ha][:enabled] + use_ssl = sql_engine == "mysql" && node[:database][:mysql][:ssl][:enabled] + if node["database"][sql_engine]["ha"]["enabled"] vhostname = get_ha_vhostname(node) use_ssl ? "#{vhostname}.#{node[:domain]}" : CrowbarPacemakerHelper.cluster_vip(node, "admin", vhostname) else diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index 869f15a6de..f19219514e 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -20,7 +20,7 @@ include_recipe "mysql::client" include_recipe "database::client" -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:mysql][:ha][:enabled] # For Crowbar, we need to set the address to bind - default to admin node. addr = node[:database][:mysql][:bind_address] || "" @@ -91,7 +91,7 @@ node[:database][:mysql][:ssl][:generate_certs] || node[:database][:mysql][:ssl][:insecure]) group "mysql" - fqdn CrowbarDatabaseHelper.get_listen_address(node) + fqdn CrowbarDatabaseHelper.get_listen_address(node, "mysql") end end diff --git a/chef/cookbooks/postgresql/recipes/ha.rb b/chef/cookbooks/postgresql/recipes/ha.rb index 313759f993..0b9305133f 100644 --- a/chef/cookbooks/postgresql/recipes/ha.rb +++ b/chef/cookbooks/postgresql/recipes/ha.rb @@ -22,14 +22,14 @@ # # This is the second step. 
-vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node)}" +vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node, "postgresql")}" service_name = "postgresql" fs_primitive = "fs-#{service_name}" group_name = "g-#{service_name}" agent_name = "ocf:heartbeat:pgsql" -ip_addr = CrowbarDatabaseHelper.get_listen_address(node) +ip_addr = CrowbarDatabaseHelper.get_listen_address(node, "postgresql") postgres_op = {} postgres_op["monitor"] = {} @@ -85,7 +85,7 @@ end transaction_objects << "pacemaker_primitive[#{service_name}]" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{service_name}" pacemaker_colocation colocation_constraint do diff --git a/chef/cookbooks/postgresql/recipes/ha_storage.rb b/chef/cookbooks/postgresql/recipes/ha_storage.rb index 837b4f52bb..2e13291cc0 100644 --- a/chef/cookbooks/postgresql/recipes/ha_storage.rb +++ b/chef/cookbooks/postgresql/recipes/ha_storage.rb @@ -37,21 +37,21 @@ fs_params = {} fs_params["directory"] = "/var/lib/pgsql" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" include_recipe "crowbar-pacemaker::drbd" crowbar_pacemaker_drbd drbd_resource do - size "#{node[:database][:ha][:storage][:drbd][:size]}G" + size "#{node[:database][:postgresql][:ha][:storage][:drbd][:size]}G" action :nothing end.run_action(:create) fs_params["device"] = node["drbd"]["rsc"][drbd_resource]["device"] fs_params["fstype"] = "xfs" -elsif node[:database][:ha][:storage][:mode] == "shared" - fs_params["device"] = node[:database][:ha][:storage][:shared][:device] - fs_params["fstype"] = node[:database][:ha][:storage][:shared][:fstype] - unless node[:database][:ha][:storage][:shared][:options].empty? 
- fs_params["options"] = node[:database][:ha][:storage][:shared][:options] +elsif node[:database][:postgresql][:ha][:storage][:mode] == "shared" + fs_params["device"] = node[:database][:postgresql][:ha][:storage][:shared][:device] + fs_params["fstype"] = node[:database][:postgresql][:ha][:storage][:shared][:fstype] + unless node[:database][:postgresql][:ha][:storage][:shared][:options].empty? + fs_params["options"] = node[:database][:postgresql][:ha][:storage][:shared][:options] end else raise "Invalid mode for HA storage!" @@ -71,7 +71,7 @@ transaction_objects = [] -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" drbd_params = {} drbd_params["drbd_resource"] = drbd_resource @@ -120,7 +120,7 @@ location_name = openstack_pacemaker_controller_only_location_for fs_primitive transaction_objects << "pacemaker_location[#{location_name}]" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{fs_primitive}" pacemaker_colocation colocation_constraint do score "inf" diff --git a/chef/cookbooks/postgresql/recipes/server.rb b/chef/cookbooks/postgresql/recipes/server.rb index 9d7d272df7..7bf681483e 100644 --- a/chef/cookbooks/postgresql/recipes/server.rb +++ b/chef/cookbooks/postgresql/recipes/server.rb @@ -121,7 +121,7 @@ notifies change_notify, "service[postgresql]", :immediately end -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:postgresql][:ha][:enabled] if ha_enabled log "HA support for postgresql is enabled" diff --git a/chef/cookbooks/postgresql/recipes/server_debian.rb b/chef/cookbooks/postgresql/recipes/server_debian.rb index 78da2a6c27..4258923b17 100644 --- a/chef/cookbooks/postgresql/recipes/server_debian.rb +++ b/chef/cookbooks/postgresql/recipes/server_debian.rb @@ -28,7 +28,7 @@ # We need to include the HA recipe early, before the config files are # generated, but after the 
postgresql packages are installed since they live in # the directory that will be mounted for HA -if node[:database][:ha][:enabled] +if node[:database][:postgresql][:ha][:enabled] include_recipe "postgresql::ha_storage" end diff --git a/chef/cookbooks/postgresql/recipes/server_redhat.rb b/chef/cookbooks/postgresql/recipes/server_redhat.rb index f2139a5c10..5c3a26a671 100644 --- a/chef/cookbooks/postgresql/recipes/server_redhat.rb +++ b/chef/cookbooks/postgresql/recipes/server_redhat.rb @@ -49,7 +49,7 @@ package pg_pack end -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:postgresql][:ha][:enabled] # We need to include the HA recipe early, before the config files are # generated, but after the postgresql packages are installed since they live in diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb index 4837975fbf..bd91018994 100644 --- a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb @@ -1,4 +1,12 @@ def upgrade(ta, td, a, d) + db_engine = a["sql_engine"] + a[db_engine]["ha"] = a["ha"] + a.delete("ha") + if db_engine == "postgresql" + a["mysql"]["ha"] = ta["mysql"]["ha"] + else + a["postgresql"]["ha"] = ta["postgresql"]["ha"] + end d["element_states"] = td["element_states"] d["element_order"] = td["element_order"] diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 6132bf41e2..364d565b59 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -57,18 +57,18 @@ "log_filename": "postgresql.log-%Y%m%d%H%M", "log_truncate_on_rotation": false, "log_min_duration_statement": -1 - } - }, - "ha": { - "storage": { - "mode": "shared", - "drbd": { - "size": 50 - }, - "shared": { - "device": "", - "fstype": "", - "options": "" + }, + "ha": { + "storage": { + 
"mode": "shared", + "drbd": { + "size": 50 + }, + "shared": { + "device": "", + "fstype": "", + "options": "" + } } } }, diff --git a/chef/data_bags/crowbar/template-database.schema b/chef/data_bags/crowbar/template-database.schema index 401d087c31..571799ea17 100644 --- a/chef/data_bags/crowbar/template-database.schema +++ b/chef/data_bags/crowbar/template-database.schema @@ -85,32 +85,32 @@ "log_filename": {"type": "str" }, "log_min_duration_statement": { "type": "int" } } - } - } - }, - "ha" : { - "type": "map", - "required": true, - "mapping" : { - "storage": { + }, + "ha" : { "type": "map", "required": true, "mapping" : { - "mode": { "type": "str", "required": true }, - "drbd": { + "storage": { "type": "map", "required": true, "mapping" : { - "size": { "type": "int", "required": true } - } - }, - "shared": { - "type": "map", - "required": true, - "mapping" : { - "device": { "type": "str", "required": true }, - "fstype": { "type": "str", "required": true }, - "options": { "type": "str", "required": true } + "mode": { "type": "str", "required": true }, + "drbd": { + "type": "map", + "required": true, + "mapping" : { + "size": { "type": "int", "required": true } + } + }, + "shared": { + "type": "map", + "required": true, + "mapping" : { + "device": { "type": "str", "required": true }, + "fstype": { "type": "str", "required": true }, + "options": { "type": "str", "required": true } + } + } } } } diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 32260202a1..3c7a062e2f 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -71,11 +71,13 @@ def create_proposal end def validate_ha_attributes(attributes, cluster) - storage_mode = attributes["ha"]["storage"]["mode"] + sql_engine = attributes["sql_engine"] role = available_clusters[cluster] case attributes["sql_engine"] when "postgresql" + ha_attr = attributes["postgresql"]["ha"] + 
storage_mode = ha_attr["storage"]["mode"] unless ["shared", "drbd"].include?(storage_mode) validation_error I18n.t( "barclamp.#{@bc_name}.validation.unknown_mode_ha", @@ -83,12 +85,12 @@ def validate_ha_attributes(attributes, cluster) ) end if storage_mode == "shared" - if attributes["ha"]["storage"]["shared"]["device"].blank? + if ha_attr["storage"]["shared"]["device"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_device" ) end - if attributes["ha"]["storage"]["shared"]["fstype"].blank? + if ha_attr["storage"]["shared"]["fstype"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_filesystem" ) @@ -100,7 +102,7 @@ def validate_ha_attributes(attributes, cluster) cluster_name: cluster_name(cluster) ) end - if attributes["ha"]["storage"]["drbd"]["size"] <= 0 + if ha_attr["storage"]["drbd"]["size"] <= 0 validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_size_drbd" ) @@ -150,56 +152,71 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) return if all_nodes.empty? 
sql_engine = role.default_attributes["database"]["sql_engine"] - db_role = if engine == "postgresql" - "database-server" - else - "mysql-server" - end - - database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) - Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled vip_networks = ["admin"] - - dirty = prepare_role_for_ha_with_haproxy(role, ["database", "ha", "enabled"], - database_ha_enabled, - database_elements, - vip_networks) - role.save if dirty - - reset_sync_marks_on_clusters_founders(database_elements) - - if database_ha_enabled - net_svc = NetworkService.new @logger - case sql_engine - when "postgresql" - unless database_elements.length == 1 && PacemakerServiceObject.is_cluster?(database_elements[0]) - raise "Internal error: HA enabled, but element is not a cluster" - end - cluster = database_elements[0] - cluster_name = PacemakerServiceObject.cluster_name(cluster) - # Any change in the generation of the vhostname here must be reflected in - # CrowbarDatabaseHelper.get_ha_vhostname - database_vhostname = "#{role.name.gsub("-config", "")}-#{cluster_name}.#{Crowbar::Settings.domain}".tr("_", "-") - net_svc.allocate_virtual_ip "default", "admin", "host", database_vhostname - when "mysql" - database_nodes.each do |n| - net_svc.allocate_ip "default", "admin", "host", n + dirty = false + net_svc = NetworkService.new @logger + db_enabled = { + "mysql" => { + "enabled" => false, + "ha" => false + }, + "postgresql" => { + "enabled" => false, + "ha" => false + } + } + ["postgresql", "mysql"].each do |engine| + db_role = if engine == "postgresql" + "database-server" + else + "mysql-server" + end + database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) + unless database_nodes.empty? 
+ db_enabled[engine]["enabled"] = true + end + db_enabled[engine]["ha"] = database_ha_enabled + Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled + dirty = prepare_role_for_ha_with_haproxy(role, + ["database", engine, "ha", "enabled"], + database_ha_enabled, + database_elements, + vip_networks) || dirty + reset_sync_marks_on_clusters_founders(database_elements) + if database_ha_enabled + case engine + when "postgresql" + unless database_elements.length == 1 && PacemakerServiceObject.is_cluster?(database_elements[0]) + raise "Internal error: HA enabled, but element is not a cluster" + end + cluster = database_elements[0] + cluster_name = PacemakerServiceObject.cluster_name(cluster) + # Any change in the generation of the vhostname here must be reflected in + # CrowbarDatabaseHelper.get_ha_vhostname + database_vhostname = "#{role.name.gsub("-config", "")}-#{cluster_name}.#{Crowbar::Settings.domain}".tr("_", "-") + net_svc.allocate_virtual_ip "default", "admin", "host", database_vhostname + when "mysql" + database_nodes.each do |n| + net_svc.allocate_ip "default", "admin", "host", n + end + allocate_virtual_ips_for_any_cluster_in_networks(database_elements, vip_networks) end - allocate_virtual_ips_for_any_cluster_in_networks(database_elements, vip_networks) end end + role.save if dirty role.default_attributes["database"][sql_engine] = {} if role.default_attributes["database"][sql_engine].nil? 
role.default_attributes["database"]["db_maker_password"] = (old_role && old_role.default_attributes["database"]["db_maker_password"]) || random_password - if ( sql_engine == "mysql" ) + if db_enabled["mysql"]["enabled"] role.default_attributes["database"]["mysql"]["server_root_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["server_root_password"]) || random_password - if database_ha_enabled + if db_enabled["mysql"]["ha"] role.default_attributes["database"]["mysql"]["sstuser_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["sstuser_password"]) || random_password end @logger.debug("setting mysql specific attributes") - elsif ( sql_engine == "postgresql" ) + end + if db_enabled["postgresql"]["enabled"] # Attribute is not living in "database" namespace, but that's because # it's for the postgresql cookbook. We're not using default_attributes # because the upstream cookbook use node.set_unless which would override From 3d6b8c9b7b06c867b5292b26ecf2530080775806 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 26 Jun 2018 15:21:54 +0200 Subject: [PATCH 072/207] database: Fix "Attributes" UI after role renaming (cherry picked from commit aa10d7d89cf54f1525bbb8ece42f006c3850841b) --- .../app/helpers/barclamp/database_helper.rb | 4 +-- .../database/_edit_attributes.html.haml | 12 ++++----- .../config/locales/database/en.yml | 26 +++++++++---------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/crowbar_framework/app/helpers/barclamp/database_helper.rb b/crowbar_framework/app/helpers/barclamp/database_helper.rb index f69020ff70..dbe1a2b295 100644 --- a/crowbar_framework/app/helpers/barclamp/database_helper.rb +++ b/crowbar_framework/app/helpers/barclamp/database_helper.rb @@ -30,8 +30,8 @@ def engines_for_database(selected) def ha_storage_mode_for_database(selected) options_for_select( [ - [t(".ha.storage.modes.drbd"), "drbd"], - [t(".ha.storage.modes.shared"), "shared"] + 
[t(".postgresql.ha.storage.modes.drbd"), "drbd"], + [t(".postgresql.ha.storage.modes.shared"), "shared"] ], selected.to_s ) diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index 3cd7af0441..8f2fd10ad0 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -40,14 +40,14 @@ %legend = t('.ha_header') - = select_field %w(ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" + = select_field %w(postgresql ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" #drbd_storage_container .alert.alert-info - = t('.ha.storage.drbd_info') - = integer_field %w(ha storage drbd size) + = t('.postgresql.ha.storage.drbd_info') + = integer_field %w(postgresql ha storage drbd size) #shared_storage_container - = string_field %w(ha storage shared device) - = string_field %w(ha storage shared fstype) - = string_field %w(ha storage shared options) + = string_field %w(postgresql ha storage shared device) + = string_field %w(postgresql ha storage shared fstype) + = string_field %w(postgresql ha storage shared options) diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index be5868d852..df661f1666 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -40,20 +40,20 @@ en: postgresql: config: max_connections: 'Global Connection Limit (max_connections)' + ha: + storage: + mode: 'Storage Mode' + modes: + drbd: 'DRBD' + 
shared: 'Shared Storage' + drbd_info: 'The cluster must have been setup for DRBD.' + drbd: + size: 'Size to Allocate for DRBD Device (in Gigabytes)' + shared: + device: 'Name of Block Device or NFS Mount Specification' + fstype: 'Filesystem Type' + options: 'Mount Options' ha_header: 'High Availability' - ha: - storage: - mode: 'Storage Mode' - modes: - drbd: 'DRBD' - shared: 'Shared Storage' - drbd_info: 'The cluster must have been setup for DRBD.' - drbd: - size: 'Size to Allocate for DRBD Device (in Gigabytes)' - shared: - device: 'Name of Block Device or NFS Mount Specification' - fstype: 'Filesystem Type' - options: 'Mount Options' validation: invalid_db_engine: 'Invalid database engine: %{db_engine}.' unknown_mode_ha: 'Unknown mode for HA storage: %{storage_mode}.' From 67998c71846925dcdf4e27ba890384314d4886bf Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 26 Jun 2018 15:22:40 +0200 Subject: [PATCH 073/207] monasca: Fix check for mysql after it got moved to a separate role (cherry picked from commit de3a03b5ea8368e9b6c9a7e291474e35b26103be) --- crowbar_framework/app/models/monasca_service.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crowbar_framework/app/models/monasca_service.rb b/crowbar_framework/app/models/monasca_service.rb index fcb9ac4c4c..a268da9c1d 100644 --- a/crowbar_framework/app/models/monasca_service.rb +++ b/crowbar_framework/app/models/monasca_service.rb @@ -93,7 +93,7 @@ def create_proposal nodes = NodeObject.all non_db_nodes = nodes.reject do |n| # Do not deploy monasca-server to the node running database cluster (already running mariadb) - n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" + n.roles.include?("mysql-server") end monasca_server = select_nodes_for_role(non_db_nodes, "monasca-server", "monitoring") || [] @@ -141,7 +141,7 @@ def validate_proposal_after_save(proposal) nodes = proposal["deployment"][@bc_name]["elements"] nodes["monasca-server"].each do |node| n = 
NodeObject.find_node_by_name(node) - if n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" + if n.roles.include?("mysql-server") validation_error( "monasca-server role cannot be deployed to the node with other MariaDB instance." ) From afb7fd0db1d8f07648eebe0756126f672ede7fce Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Wed, 18 Jul 2018 22:38:54 +1000 Subject: [PATCH 074/207] Restore caching of db_settings We need to cache settings for each combination of database barclamp instances and sql engines. To do this, we first check (and cache) the sql_engine selected by the role. The exception is when running the cookbooks for the engines themselves. (cherry picked from commit 824658f002ea5311fb6db885f9406df43ad6d864) --- .../crowbar-openstack/libraries/helpers.rb | 46 +++++++++++-------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 495f1928ec..1323c8d14d 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -72,38 +72,45 @@ def self.database_settings(node, barclamp) "on behalf of #{barclamp}") end @database_settings = nil + @sql_engine_cache = nil @database_settings_cache_time = node[:ohai_time] end - if @database_settings && @database_settings.include?(instance) && false - Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") + if barclamp == "mysql" || barclamp == "postgresql" + sql_engine = barclamp + elsif @sql_engine_cache && @sql_engine_cache.include?(instance) + sql_engine = @sql_engine_cache[instance] else - @database_settings ||= Hash.new db_roles, = Chef::Search::Query.new.search( - :role, - "name:database-config-#{instance}" - ) + :role, + "name:database-config-#{instance}" + ) db_proposal_role = db_roles.first unless db_roles.empty? 
+ # TODO(jhesketh): What if db_roles is empty here? sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] - if barclamp == "mysql" || barclamp == "postgresql" - sql_engine = barclamp - end + + @sql_engine_cache ||= Hash.new + @sql_engine_cache[instance] = sql_engine + end + + if @database_settings && @database_settings.include?(instance) && @database_settings[instance].include?(sql_engine) + Chef::Log.info("Database server found at #{@database_settings[instance][sql_engine][:address]} [cached]") + else + @database_settings ||= Hash.new db_role = if sql_engine == "postgresql" "database-server" else "mysql-server" end - database = get_node(node, db_role, "database", instance) if database.nil? Chef::Log.warn("No database server found!") else address = CrowbarDatabaseHelper.get_listen_address(database) - backend_name = sql_engine ssl_opts = {} - if backend_name == "mysql" + if sql_engine == "mysql" ssl_opts = { enabled: database["database"]["mysql"]["ssl"]["enabled"], ca_certs: database["database"]["mysql"]["ssl"]["ca_certs"], @@ -111,13 +118,14 @@ def self.database_settings(node, barclamp) database["database"]["mysql"]["ssl"]["insecure"] } end - @database_settings[instance] = { + @database_settings[instance] ||= Hash.new + @database_settings[instance][sql_engine] = { address: address, - url_scheme: backend_name, - backend_name: backend_name, - provider: DatabaseLibrary::Database::Util.get_database_provider(database, backend_name), - user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, backend_name), - privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, backend_name), + url_scheme: sql_engine, + backend_name: sql_engine, + provider: DatabaseLibrary::Database::Util.get_database_provider(database, sql_engine), + user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, sql_engine), + privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, sql_engine), connection: { host: 
address, username: "db_maker", @@ -130,7 +138,7 @@ def self.database_settings(node, barclamp) end end - @database_settings[instance] + @database_settings[instance][sql_engine] end def self.database_connection_string(db_settings, db_auth_attr) From 2a27ad03736d5f3a7ceb5d5f4d42fcfec063e6f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Fri, 20 Jul 2018 16:45:20 +0200 Subject: [PATCH 075/207] database: Migration fixes for separate DB roles Fix for correct copying of the HA settings. Also, some comments were added. --- .../migrate/database/109_separate_db_roles.rb | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb index bd91018994..a9dbc29921 100644 --- a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb @@ -1,18 +1,25 @@ def upgrade(ta, td, a, d) db_engine = a["sql_engine"] - a[db_engine]["ha"] = a["ha"] - a.delete("ha") + + # 'ha' hash needs to be moved under 'postgresql' to keep it consistent with mysql if db_engine == "postgresql" - a["mysql"]["ha"] = ta["mysql"]["ha"] + a["postgresql"]["ha"] = a["ha"] else a["postgresql"]["ha"] = ta["postgresql"]["ha"] end + a.delete("ha") if a.key? "ha" + d["element_states"] = td["element_states"] d["element_order"] = td["element_order"] - if a["sql_engine"] == "mysql" + if db_engine == "mysql" + # For the time of upgrade, we're adding new 'mysql-server role', while old 'database-server' + # is reserved for existing postgresql setup. + # For users that already have mysql (mariadb) deployed with 'database-server' role, we need to + # adapt the role assignments so the code that is looking for 'mysql-server' instances always finds + # correct mysql nodes. 
d["elements"]["mysql-server"] = d["elements"]["database-server"] - d["elements"]["atabase-server"] = [] + d["elements"]["database-server"] = [] if d.fetch("elements_expanded", {}).key? "database-server" d["elements_expanded"]["mysql-server"] = d["elements_expanded"]["database-server"] d["elements_expanded"].delete("database-server") From 9ffd7a3033cea7f13a3d6ea61f9f315ce4a4cc93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 26 Jul 2018 13:24:13 +0200 Subject: [PATCH 076/207] Revert "database: Migration fixes for separate DB roles" This reverts commit 2a27ad03736d5f3a7ceb5d5f4d42fcfec063e6f4. --- .../migrate/database/109_separate_db_roles.rb | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb index a9dbc29921..bd91018994 100644 --- a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb @@ -1,25 +1,18 @@ def upgrade(ta, td, a, d) db_engine = a["sql_engine"] - - # 'ha' hash needs to be moved under 'postgresql' to keep it consistent with mysql + a[db_engine]["ha"] = a["ha"] + a.delete("ha") if db_engine == "postgresql" - a["postgresql"]["ha"] = a["ha"] + a["mysql"]["ha"] = ta["mysql"]["ha"] else a["postgresql"]["ha"] = ta["postgresql"]["ha"] end - a.delete("ha") if a.key? "ha" - d["element_states"] = td["element_states"] d["element_order"] = td["element_order"] - if db_engine == "mysql" - # For the time of upgrade, we're adding new 'mysql-server role', while old 'database-server' - # is reserved for existing postgresql setup. - # For users that already have mysql (mariadb) deployed with 'database-server' role, we need to - # adapt the role assignments so the code that is looking for 'mysql-server' instances always finds - # correct mysql nodes. 
+ if a["sql_engine"] == "mysql" d["elements"]["mysql-server"] = d["elements"]["database-server"] - d["elements"]["database-server"] = [] + d["elements"]["atabase-server"] = [] if d.fetch("elements_expanded", {}).key? "database-server" d["elements_expanded"]["mysql-server"] = d["elements_expanded"]["database-server"] d["elements_expanded"].delete("database-server") From f1db14e9ba34d3969abec2d883d77223ab8e7842 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 26 Jul 2018 13:24:24 +0200 Subject: [PATCH 077/207] Revert "Restore caching of db_settings" This reverts commit afb7fd0db1d8f07648eebe0756126f672ede7fce. --- .../crowbar-openstack/libraries/helpers.rb | 46 ++++++++----------- 1 file changed, 19 insertions(+), 27 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 1323c8d14d..495f1928ec 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -72,45 +72,38 @@ def self.database_settings(node, barclamp) "on behalf of #{barclamp}") end @database_settings = nil - @sql_engine_cache = nil @database_settings_cache_time = node[:ohai_time] end - if barclamp == "mysql" || barclamp == "postgresql" - sql_engine = barclamp - elsif @sql_engine_cache && @sql_engine_cache.include?(instance) - sql_engine = @sql_engine_cache[instance] + if @database_settings && @database_settings.include?(instance) && false + Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") else + @database_settings ||= Hash.new db_roles, = Chef::Search::Query.new.search( - :role, - "name:database-config-#{instance}" - ) + :role, + "name:database-config-#{instance}" + ) db_proposal_role = db_roles.first unless db_roles.empty? - # TODO(jhesketh): What if db_roles is empty here? 
sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] - - @sql_engine_cache ||= Hash.new - @sql_engine_cache[instance] = sql_engine - end - - if @database_settings && @database_settings.include?(instance) && @database_settings[instance].include?(sql_engine) - Chef::Log.info("Database server found at #{@database_settings[instance][sql_engine][:address]} [cached]") - else - @database_settings ||= Hash.new + if barclamp == "mysql" || barclamp == "postgresql" + sql_engine = barclamp + end db_role = if sql_engine == "postgresql" "database-server" else "mysql-server" end + database = get_node(node, db_role, "database", instance) if database.nil? Chef::Log.warn("No database server found!") else address = CrowbarDatabaseHelper.get_listen_address(database) + backend_name = sql_engine ssl_opts = {} - if sql_engine == "mysql" + if backend_name == "mysql" ssl_opts = { enabled: database["database"]["mysql"]["ssl"]["enabled"], ca_certs: database["database"]["mysql"]["ssl"]["ca_certs"], @@ -118,14 +111,13 @@ def self.database_settings(node, barclamp) database["database"]["mysql"]["ssl"]["insecure"] } end - @database_settings[instance] ||= Hash.new - @database_settings[instance][sql_engine] = { + @database_settings[instance] = { address: address, - url_scheme: sql_engine, - backend_name: sql_engine, - provider: DatabaseLibrary::Database::Util.get_database_provider(database, sql_engine), - user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, sql_engine), - privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, sql_engine), + url_scheme: backend_name, + backend_name: backend_name, + provider: DatabaseLibrary::Database::Util.get_database_provider(database, backend_name), + user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, backend_name), + privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, backend_name), connection: { host: address, username: "db_maker", @@ -138,7 +130,7 @@ def 
self.database_settings(node, barclamp) end end - @database_settings[instance][sql_engine] + @database_settings[instance] end def self.database_connection_string(db_settings, db_auth_attr) From d727f18e148fdd31d82e705112bebbfdffe9de26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 26 Jul 2018 13:24:33 +0200 Subject: [PATCH 078/207] Revert "monasca: Fix check for mysql after it got moved to a separate role" This reverts commit 67998c71846925dcdf4e27ba890384314d4886bf. --- crowbar_framework/app/models/monasca_service.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crowbar_framework/app/models/monasca_service.rb b/crowbar_framework/app/models/monasca_service.rb index a268da9c1d..fcb9ac4c4c 100644 --- a/crowbar_framework/app/models/monasca_service.rb +++ b/crowbar_framework/app/models/monasca_service.rb @@ -93,7 +93,7 @@ def create_proposal nodes = NodeObject.all non_db_nodes = nodes.reject do |n| # Do not deploy monasca-server to the node running database cluster (already running mariadb) - n.roles.include?("mysql-server") + n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" end monasca_server = select_nodes_for_role(non_db_nodes, "monasca-server", "monitoring") || [] @@ -141,7 +141,7 @@ def validate_proposal_after_save(proposal) nodes = proposal["deployment"][@bc_name]["elements"] nodes["monasca-server"].each do |node| n = NodeObject.find_node_by_name(node) - if n.roles.include?("mysql-server") + if n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" validation_error( "monasca-server role cannot be deployed to the node with other MariaDB instance." ) From 3e71f8df5a67900ecd77537662322fe914b3de7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 26 Jul 2018 13:24:40 +0200 Subject: [PATCH 079/207] Revert "database: Fix "Attributes" UI after role renaming" This reverts commit 3d6b8c9b7b06c867b5292b26ecf2530080775806. 
--- .../app/helpers/barclamp/database_helper.rb | 4 +-- .../database/_edit_attributes.html.haml | 12 ++++----- .../config/locales/database/en.yml | 26 +++++++++---------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/crowbar_framework/app/helpers/barclamp/database_helper.rb b/crowbar_framework/app/helpers/barclamp/database_helper.rb index dbe1a2b295..f69020ff70 100644 --- a/crowbar_framework/app/helpers/barclamp/database_helper.rb +++ b/crowbar_framework/app/helpers/barclamp/database_helper.rb @@ -30,8 +30,8 @@ def engines_for_database(selected) def ha_storage_mode_for_database(selected) options_for_select( [ - [t(".postgresql.ha.storage.modes.drbd"), "drbd"], - [t(".postgresql.ha.storage.modes.shared"), "shared"] + [t(".ha.storage.modes.drbd"), "drbd"], + [t(".ha.storage.modes.shared"), "shared"] ], selected.to_s ) diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index 8f2fd10ad0..3cd7af0441 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -40,14 +40,14 @@ %legend = t('.ha_header') - = select_field %w(postgresql ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" + = select_field %w(ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" #drbd_storage_container .alert.alert-info - = t('.postgresql.ha.storage.drbd_info') - = integer_field %w(postgresql ha storage drbd size) + = t('.ha.storage.drbd_info') + = integer_field %w(ha storage drbd size) #shared_storage_container - = string_field 
%w(postgresql ha storage shared device) - = string_field %w(postgresql ha storage shared fstype) - = string_field %w(postgresql ha storage shared options) + = string_field %w(ha storage shared device) + = string_field %w(ha storage shared fstype) + = string_field %w(ha storage shared options) diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index df661f1666..be5868d852 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -40,20 +40,20 @@ en: postgresql: config: max_connections: 'Global Connection Limit (max_connections)' - ha: - storage: - mode: 'Storage Mode' - modes: - drbd: 'DRBD' - shared: 'Shared Storage' - drbd_info: 'The cluster must have been setup for DRBD.' - drbd: - size: 'Size to Allocate for DRBD Device (in Gigabytes)' - shared: - device: 'Name of Block Device or NFS Mount Specification' - fstype: 'Filesystem Type' - options: 'Mount Options' ha_header: 'High Availability' + ha: + storage: + mode: 'Storage Mode' + modes: + drbd: 'DRBD' + shared: 'Shared Storage' + drbd_info: 'The cluster must have been setup for DRBD.' + drbd: + size: 'Size to Allocate for DRBD Device (in Gigabytes)' + shared: + device: 'Name of Block Device or NFS Mount Specification' + fstype: 'Filesystem Type' + options: 'Mount Options' validation: invalid_db_engine: 'Invalid database engine: %{db_engine}.' unknown_mode_ha: 'Unknown mode for HA storage: %{storage_mode}.' From f424bcf2e5c36fdd3aa3403789b166fb2b9ff2bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 26 Jul 2018 13:24:47 +0200 Subject: [PATCH 080/207] Revert "database: Allow parallel HA deployment of PostgreSQL and MariaDB" This reverts commit 761c7b9233ffa8cac5c817917b60aac0236fed61. 
--- chef/cookbooks/database/attributes/default.rb | 6 +- chef/cookbooks/database/libraries/crowbar.rb | 12 +-- chef/cookbooks/mysql/recipes/server.rb | 4 +- chef/cookbooks/postgresql/recipes/ha.rb | 6 +- .../postgresql/recipes/ha_storage.rb | 18 ++-- chef/cookbooks/postgresql/recipes/server.rb | 2 +- .../postgresql/recipes/server_debian.rb | 2 +- .../postgresql/recipes/server_redhat.rb | 2 +- .../migrate/database/109_separate_db_roles.rb | 8 -- chef/data_bags/crowbar/template-database.json | 24 ++--- .../crowbar/template-database.schema | 40 ++++---- .../app/models/database_service.rb | 99 ++++++++----------- 12 files changed, 98 insertions(+), 125 deletions(-) diff --git a/chef/cookbooks/database/attributes/default.rb b/chef/cookbooks/database/attributes/default.rb index 0440382476..32f3e09f79 100644 --- a/chef/cookbooks/database/attributes/default.rb +++ b/chef/cookbooks/database/attributes/default.rb @@ -18,7 +18,5 @@ # # ha -default[:database][:postgresql][:ha][:enabled] = false -default[:database][:postgresql][:ha][:storage][:mode] = nil - -default[:database][:mysql][:ha][:enabled] = false +default[:database][:ha][:enabled] = false +default[:database][:ha][:storage][:mode] = nil diff --git a/chef/cookbooks/database/libraries/crowbar.rb b/chef/cookbooks/database/libraries/crowbar.rb index 548be83637..ed13e9e4fd 100644 --- a/chef/cookbooks/database/libraries/crowbar.rb +++ b/chef/cookbooks/database/libraries/crowbar.rb @@ -1,10 +1,10 @@ module CrowbarDatabaseHelper - def self.get_ha_vhostname(node, sql_engine=node[:database][:sql_engine]) - if node["database"][sql_engine]["ha"]["enabled"] + def self.get_ha_vhostname(node) + if node[:database][:ha][:enabled] cluster_name = CrowbarPacemakerHelper.cluster_name(node) # Any change in the generation of the vhostname here must be reflected in # apply_role_pre_chef_call of the database barclamp model - if sql_engine == "postgresql" + if node[:database][:sql_engine] == "postgresql" 
"#{node[:database][:config][:environment].gsub("-config", "")}-#{cluster_name}".tr("_", "-") else "cluster-#{cluster_name}".tr("_", "-") @@ -14,10 +14,10 @@ def self.get_ha_vhostname(node, sql_engine=node[:database][:sql_engine]) end end - def self.get_listen_address(node, sql_engine=node[:database][:sql_engine]) + def self.get_listen_address(node) # For SSL we prefer a cluster hostname (for certificate validation) - use_ssl = sql_engine == "mysql" && node[:database][:mysql][:ssl][:enabled] - if node["database"][sql_engine]["ha"]["enabled"] + use_ssl = node[:database][:sql_engine] == "mysql" && node[:database][:mysql][:ssl][:enabled] + if node[:database][:ha][:enabled] vhostname = get_ha_vhostname(node) use_ssl ? "#{vhostname}.#{node[:domain]}" : CrowbarPacemakerHelper.cluster_vip(node, "admin", vhostname) else diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index f19219514e..869f15a6de 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -20,7 +20,7 @@ include_recipe "mysql::client" include_recipe "database::client" -ha_enabled = node[:database][:mysql][:ha][:enabled] +ha_enabled = node[:database][:ha][:enabled] # For Crowbar, we need to set the address to bind - default to admin node. addr = node[:database][:mysql][:bind_address] || "" @@ -91,7 +91,7 @@ node[:database][:mysql][:ssl][:generate_certs] || node[:database][:mysql][:ssl][:insecure]) group "mysql" - fqdn CrowbarDatabaseHelper.get_listen_address(node, "mysql") + fqdn CrowbarDatabaseHelper.get_listen_address(node) end end diff --git a/chef/cookbooks/postgresql/recipes/ha.rb b/chef/cookbooks/postgresql/recipes/ha.rb index 0b9305133f..313759f993 100644 --- a/chef/cookbooks/postgresql/recipes/ha.rb +++ b/chef/cookbooks/postgresql/recipes/ha.rb @@ -22,14 +22,14 @@ # # This is the second step. 
-vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node, "postgresql")}" +vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node)}" service_name = "postgresql" fs_primitive = "fs-#{service_name}" group_name = "g-#{service_name}" agent_name = "ocf:heartbeat:pgsql" -ip_addr = CrowbarDatabaseHelper.get_listen_address(node, "postgresql") +ip_addr = CrowbarDatabaseHelper.get_listen_address(node) postgres_op = {} postgres_op["monitor"] = {} @@ -85,7 +85,7 @@ end transaction_objects << "pacemaker_primitive[#{service_name}]" -if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" +if node[:database][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{service_name}" pacemaker_colocation colocation_constraint do diff --git a/chef/cookbooks/postgresql/recipes/ha_storage.rb b/chef/cookbooks/postgresql/recipes/ha_storage.rb index 2e13291cc0..837b4f52bb 100644 --- a/chef/cookbooks/postgresql/recipes/ha_storage.rb +++ b/chef/cookbooks/postgresql/recipes/ha_storage.rb @@ -37,21 +37,21 @@ fs_params = {} fs_params["directory"] = "/var/lib/pgsql" -if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" +if node[:database][:ha][:storage][:mode] == "drbd" include_recipe "crowbar-pacemaker::drbd" crowbar_pacemaker_drbd drbd_resource do - size "#{node[:database][:postgresql][:ha][:storage][:drbd][:size]}G" + size "#{node[:database][:ha][:storage][:drbd][:size]}G" action :nothing end.run_action(:create) fs_params["device"] = node["drbd"]["rsc"][drbd_resource]["device"] fs_params["fstype"] = "xfs" -elsif node[:database][:postgresql][:ha][:storage][:mode] == "shared" - fs_params["device"] = node[:database][:postgresql][:ha][:storage][:shared][:device] - fs_params["fstype"] = node[:database][:postgresql][:ha][:storage][:shared][:fstype] - unless node[:database][:postgresql][:ha][:storage][:shared][:options].empty? 
- fs_params["options"] = node[:database][:postgresql][:ha][:storage][:shared][:options] +elsif node[:database][:ha][:storage][:mode] == "shared" + fs_params["device"] = node[:database][:ha][:storage][:shared][:device] + fs_params["fstype"] = node[:database][:ha][:storage][:shared][:fstype] + unless node[:database][:ha][:storage][:shared][:options].empty? + fs_params["options"] = node[:database][:ha][:storage][:shared][:options] end else raise "Invalid mode for HA storage!" @@ -71,7 +71,7 @@ transaction_objects = [] -if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" +if node[:database][:ha][:storage][:mode] == "drbd" drbd_params = {} drbd_params["drbd_resource"] = drbd_resource @@ -120,7 +120,7 @@ location_name = openstack_pacemaker_controller_only_location_for fs_primitive transaction_objects << "pacemaker_location[#{location_name}]" -if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" +if node[:database][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{fs_primitive}" pacemaker_colocation colocation_constraint do score "inf" diff --git a/chef/cookbooks/postgresql/recipes/server.rb b/chef/cookbooks/postgresql/recipes/server.rb index 7bf681483e..9d7d272df7 100644 --- a/chef/cookbooks/postgresql/recipes/server.rb +++ b/chef/cookbooks/postgresql/recipes/server.rb @@ -121,7 +121,7 @@ notifies change_notify, "service[postgresql]", :immediately end -ha_enabled = node[:database][:postgresql][:ha][:enabled] +ha_enabled = node[:database][:ha][:enabled] if ha_enabled log "HA support for postgresql is enabled" diff --git a/chef/cookbooks/postgresql/recipes/server_debian.rb b/chef/cookbooks/postgresql/recipes/server_debian.rb index 4258923b17..78da2a6c27 100644 --- a/chef/cookbooks/postgresql/recipes/server_debian.rb +++ b/chef/cookbooks/postgresql/recipes/server_debian.rb @@ -28,7 +28,7 @@ # We need to include the HA recipe early, before the config files are # generated, but after the postgresql packages are installed since they live in 
# the directory that will be mounted for HA -if node[:database][:postgresql][:ha][:enabled] +if node[:database][:ha][:enabled] include_recipe "postgresql::ha_storage" end diff --git a/chef/cookbooks/postgresql/recipes/server_redhat.rb b/chef/cookbooks/postgresql/recipes/server_redhat.rb index 5c3a26a671..f2139a5c10 100644 --- a/chef/cookbooks/postgresql/recipes/server_redhat.rb +++ b/chef/cookbooks/postgresql/recipes/server_redhat.rb @@ -49,7 +49,7 @@ package pg_pack end -ha_enabled = node[:database][:postgresql][:ha][:enabled] +ha_enabled = node[:database][:ha][:enabled] # We need to include the HA recipe early, before the config files are # generated, but after the postgresql packages are installed since they live in diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb index bd91018994..4837975fbf 100644 --- a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb @@ -1,12 +1,4 @@ def upgrade(ta, td, a, d) - db_engine = a["sql_engine"] - a[db_engine]["ha"] = a["ha"] - a.delete("ha") - if db_engine == "postgresql" - a["mysql"]["ha"] = ta["mysql"]["ha"] - else - a["postgresql"]["ha"] = ta["postgresql"]["ha"] - end d["element_states"] = td["element_states"] d["element_order"] = td["element_order"] diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 364d565b59..6132bf41e2 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -57,18 +57,18 @@ "log_filename": "postgresql.log-%Y%m%d%H%M", "log_truncate_on_rotation": false, "log_min_duration_statement": -1 - }, - "ha": { - "storage": { - "mode": "shared", - "drbd": { - "size": 50 - }, - "shared": { - "device": "", - "fstype": "", - "options": "" - } + } + }, + "ha": { + "storage": { + "mode": "shared", + "drbd": { + "size": 50 + }, 
+ "shared": { + "device": "", + "fstype": "", + "options": "" } } }, diff --git a/chef/data_bags/crowbar/template-database.schema b/chef/data_bags/crowbar/template-database.schema index 571799ea17..401d087c31 100644 --- a/chef/data_bags/crowbar/template-database.schema +++ b/chef/data_bags/crowbar/template-database.schema @@ -85,32 +85,32 @@ "log_filename": {"type": "str" }, "log_min_duration_statement": { "type": "int" } } - }, - "ha" : { + } + } + }, + "ha" : { + "type": "map", + "required": true, + "mapping" : { + "storage": { "type": "map", "required": true, "mapping" : { - "storage": { + "mode": { "type": "str", "required": true }, + "drbd": { "type": "map", "required": true, "mapping" : { - "mode": { "type": "str", "required": true }, - "drbd": { - "type": "map", - "required": true, - "mapping" : { - "size": { "type": "int", "required": true } - } - }, - "shared": { - "type": "map", - "required": true, - "mapping" : { - "device": { "type": "str", "required": true }, - "fstype": { "type": "str", "required": true }, - "options": { "type": "str", "required": true } - } - } + "size": { "type": "int", "required": true } + } + }, + "shared": { + "type": "map", + "required": true, + "mapping" : { + "device": { "type": "str", "required": true }, + "fstype": { "type": "str", "required": true }, + "options": { "type": "str", "required": true } } } } diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 3c7a062e2f..32260202a1 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -71,13 +71,11 @@ def create_proposal end def validate_ha_attributes(attributes, cluster) - sql_engine = attributes["sql_engine"] + storage_mode = attributes["ha"]["storage"]["mode"] role = available_clusters[cluster] case attributes["sql_engine"] when "postgresql" - ha_attr = attributes["postgresql"]["ha"] - storage_mode = ha_attr["storage"]["mode"] unless ["shared", 
"drbd"].include?(storage_mode) validation_error I18n.t( "barclamp.#{@bc_name}.validation.unknown_mode_ha", @@ -85,12 +83,12 @@ def validate_ha_attributes(attributes, cluster) ) end if storage_mode == "shared" - if ha_attr["storage"]["shared"]["device"].blank? + if attributes["ha"]["storage"]["shared"]["device"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_device" ) end - if ha_attr["storage"]["shared"]["fstype"].blank? + if attributes["ha"]["storage"]["shared"]["fstype"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_filesystem" ) @@ -102,7 +100,7 @@ def validate_ha_attributes(attributes, cluster) cluster_name: cluster_name(cluster) ) end - if ha_attr["storage"]["drbd"]["size"] <= 0 + if attributes["ha"]["storage"]["drbd"]["size"] <= 0 validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_size_drbd" ) @@ -152,71 +150,56 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) return if all_nodes.empty? sql_engine = role.default_attributes["database"]["sql_engine"] + db_role = if engine == "postgresql" + "database-server" + else + "mysql-server" + end + + database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) + Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled vip_networks = ["admin"] - dirty = false - net_svc = NetworkService.new @logger - db_enabled = { - "mysql" => { - "enabled" => false, - "ha" => false - }, - "postgresql" => { - "enabled" => false, - "ha" => false - } - } - ["postgresql", "mysql"].each do |engine| - db_role = if engine == "postgresql" - "database-server" - else - "mysql-server" - end - database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) - unless database_nodes.empty? 
- db_enabled[engine]["enabled"] = true - end - db_enabled[engine]["ha"] = database_ha_enabled - Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled - dirty = prepare_role_for_ha_with_haproxy(role, - ["database", engine, "ha", "enabled"], - database_ha_enabled, - database_elements, - vip_networks) || dirty - reset_sync_marks_on_clusters_founders(database_elements) - if database_ha_enabled - case engine - when "postgresql" - unless database_elements.length == 1 && PacemakerServiceObject.is_cluster?(database_elements[0]) - raise "Internal error: HA enabled, but element is not a cluster" - end - cluster = database_elements[0] - cluster_name = PacemakerServiceObject.cluster_name(cluster) - # Any change in the generation of the vhostname here must be reflected in - # CrowbarDatabaseHelper.get_ha_vhostname - database_vhostname = "#{role.name.gsub("-config", "")}-#{cluster_name}.#{Crowbar::Settings.domain}".tr("_", "-") - net_svc.allocate_virtual_ip "default", "admin", "host", database_vhostname - when "mysql" - database_nodes.each do |n| - net_svc.allocate_ip "default", "admin", "host", n - end - allocate_virtual_ips_for_any_cluster_in_networks(database_elements, vip_networks) + + dirty = prepare_role_for_ha_with_haproxy(role, ["database", "ha", "enabled"], + database_ha_enabled, + database_elements, + vip_networks) + role.save if dirty + + reset_sync_marks_on_clusters_founders(database_elements) + + if database_ha_enabled + net_svc = NetworkService.new @logger + case sql_engine + when "postgresql" + unless database_elements.length == 1 && PacemakerServiceObject.is_cluster?(database_elements[0]) + raise "Internal error: HA enabled, but element is not a cluster" end + cluster = database_elements[0] + cluster_name = PacemakerServiceObject.cluster_name(cluster) + # Any change in the generation of the vhostname here must be reflected in + # CrowbarDatabaseHelper.get_ha_vhostname + database_vhostname = "#{role.name.gsub("-config", 
"")}-#{cluster_name}.#{Crowbar::Settings.domain}".tr("_", "-") + net_svc.allocate_virtual_ip "default", "admin", "host", database_vhostname + when "mysql" + database_nodes.each do |n| + net_svc.allocate_ip "default", "admin", "host", n + end + allocate_virtual_ips_for_any_cluster_in_networks(database_elements, vip_networks) end end - role.save if dirty role.default_attributes["database"][sql_engine] = {} if role.default_attributes["database"][sql_engine].nil? role.default_attributes["database"]["db_maker_password"] = (old_role && old_role.default_attributes["database"]["db_maker_password"]) || random_password - if db_enabled["mysql"]["enabled"] + if ( sql_engine == "mysql" ) role.default_attributes["database"]["mysql"]["server_root_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["server_root_password"]) || random_password - if db_enabled["mysql"]["ha"] + if database_ha_enabled role.default_attributes["database"]["mysql"]["sstuser_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["sstuser_password"]) || random_password end @logger.debug("setting mysql specific attributes") - end - if db_enabled["postgresql"]["enabled"] + elsif ( sql_engine == "postgresql" ) # Attribute is not living in "database" namespace, but that's because # it's for the postgresql cookbook. We're not using default_attributes # because the upstream cookbook use node.set_unless which would override From db2882a8d8ed26aa13ed72a90ef32f71bda137eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 26 Jul 2018 13:24:56 +0200 Subject: [PATCH 081/207] Revert "database: Allow parallel deployments of postgresql and mysql" This reverts commit fcdbd4ae9d9c670ae895d0329a9f357f68e68f44. 
--- .../crowbar-openstack/libraries/helpers.rb | 13 +++++-------- .../database/libraries/database_library.rb | 9 ++++++--- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- chef/cookbooks/mysql/recipes/server.rb | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 495f1928ec..1e3b8431e3 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -75,7 +75,7 @@ def self.database_settings(node, barclamp) @database_settings_cache_time = node[:ohai_time] end - if @database_settings && @database_settings.include?(instance) && false + if @database_settings && @database_settings.include?(instance) Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") else @database_settings ||= Hash.new @@ -85,9 +85,6 @@ def self.database_settings(node, barclamp) ) db_proposal_role = db_roles.first unless db_roles.empty? 
sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] - if barclamp == "mysql" || barclamp == "postgresql" - sql_engine = barclamp - end db_role = if sql_engine == "postgresql" "database-server" else @@ -100,7 +97,7 @@ def self.database_settings(node, barclamp) Chef::Log.warn("No database server found!") else address = CrowbarDatabaseHelper.get_listen_address(database) - backend_name = sql_engine + backend_name = DatabaseLibrary::Database::Util.get_backend_name(database) ssl_opts = {} if backend_name == "mysql" @@ -115,9 +112,9 @@ def self.database_settings(node, barclamp) address: address, url_scheme: backend_name, backend_name: backend_name, - provider: DatabaseLibrary::Database::Util.get_database_provider(database, backend_name), - user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, backend_name), - privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, backend_name), + provider: DatabaseLibrary::Database::Util.get_database_provider(database), + user_provider: DatabaseLibrary::Database::Util.get_user_provider(database), + privs: DatabaseLibrary::Database::Util.get_default_priviledges(database), connection: { host: address, username: "db_maker", diff --git a/chef/cookbooks/database/libraries/database_library.rb b/chef/cookbooks/database/libraries/database_library.rb index 8164f42373..2e343db085 100644 --- a/chef/cookbooks/database/libraries/database_library.rb +++ b/chef/cookbooks/database/libraries/database_library.rb @@ -19,7 +19,8 @@ module DatabaseLibrary class Database class Util - def self.get_database_provider(node, backend = node[:database][:sql_engine]) + def self.get_database_provider(node) + backend = node[:database][:sql_engine] db_provider = nil case backend when "postgresql" @@ -32,7 +33,8 @@ def self.get_database_provider(node, backend = node[:database][:sql_engine]) db_provider end - def self.get_user_provider(node, backend = node[:database][:sql_engine]) + def 
self.get_user_provider(node) + backend = node[:database][:sql_engine] db_provider = nil case backend when "postgresql" @@ -49,7 +51,8 @@ def self.get_backend_name(node) node[:database][:sql_engine] end - def self.get_default_priviledges(node, backend = node[:database][:sql_engine]) + def self.get_default_priviledges(node) + backend = node[:database][:sql_engine] privs = nil case backend when "postgresql" diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 2fcc98d835..095ea536ab 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -71,7 +71,7 @@ # unauthenticated root user is later removed in server.rb after the # bootstraping. Once the cluster has started other nodes will pick up on # the sstuser and we are able to use these credentails. - db_settings = fetch_database_settings(@cookbook_name) + db_settings = fetch_database_settings db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index 869f15a6de..c107ff2332 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -183,7 +183,7 @@ only_if "/usr/bin/mysql -u root -e 'show databases;'" end -db_settings = fetch_database_settings(@cookbook_name) +db_settings = fetch_database_settings db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" From 51dc3782c0c551c8c0c010c25e8490a0d85394ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Thu, 26 Jul 2018 13:25:05 +0200 Subject: [PATCH 082/207] Revert "database: Split database-server role into backend specific roles" This reverts commit 015d208b4d7e997d79981841ce71c23f14e3c922. 
--- .../crowbar-openstack/libraries/helpers.rb | 14 +----- .../database/recipes/role_database_server.rb | 6 +-- .../{role_mysql_server.rb => server.rb} | 16 +++++-- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- .../migrate/database/109_separate_db_roles.rb | 47 ------------------- chef/data_bags/crowbar/template-database.json | 10 ++-- chef/roles/database-server.rb | 3 +- chef/roles/mysql-server.rb | 5 -- .../app/models/database_service.rb | 39 ++++----------- 9 files changed, 31 insertions(+), 111 deletions(-) rename chef/cookbooks/database/recipes/{role_mysql_server.rb => server.rb} (64%) delete mode 100644 chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb delete mode 100644 chef/roles/mysql-server.rb diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 1e3b8431e3..73c76c14ee 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -79,19 +79,7 @@ def self.database_settings(node, barclamp) Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") else @database_settings ||= Hash.new - db_roles, = Chef::Search::Query.new.search( - :role, - "name:database-config-#{instance}" - ) - db_proposal_role = db_roles.first unless db_roles.empty? - sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] - db_role = if sql_engine == "postgresql" - "database-server" - else - "mysql-server" - end - - database = get_node(node, db_role, "database", instance) + database = get_node(node, "database-server", "database", instance) if database.nil? 
Chef::Log.warn("No database server found!") diff --git a/chef/cookbooks/database/recipes/role_database_server.rb b/chef/cookbooks/database/recipes/role_database_server.rb index a71952ff35..65a6817a08 100644 --- a/chef/cookbooks/database/recipes/role_database_server.rb +++ b/chef/cookbooks/database/recipes/role_database_server.rb @@ -1,5 +1,5 @@ # -# Copyright 2018, SUSE LINUX GmbH +# Copyright 2016, SUSE LINUX GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # + if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "database-server") include_recipe "database::crowbar" - Chef::Log.info("Running database::server for PostgreSQL") - include_recipe "postgresql::server" + include_recipe "database::server" end diff --git a/chef/cookbooks/database/recipes/role_mysql_server.rb b/chef/cookbooks/database/recipes/server.rb similarity index 64% rename from chef/cookbooks/database/recipes/role_mysql_server.rb rename to chef/cookbooks/database/recipes/server.rb index ef6113fe87..97ea5c1ce0 100644 --- a/chef/cookbooks/database/recipes/role_mysql_server.rb +++ b/chef/cookbooks/database/recipes/server.rb @@ -1,11 +1,14 @@ # -# Copyright 2018, SUSE LINUX GmbH +# Cookbook Name:: database +# Recipe:: server +# +# Copyright 2012, SUSE Linux Products GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,6 +16,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # -if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "mysql-server") - include_recipe "mysql::server" -end + +backend = node[:database][:sql_engine] + +Chef::Log.info("Running database::server for #{backend}") + +include_recipe "#{backend}::server" diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 095ea536ab..fce618833e 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -330,7 +330,7 @@ include_recipe "crowbar-pacemaker::haproxy" ha_servers = CrowbarPacemakerHelper.haproxy_servers_for_service( - node, "mysql", "mysql-server", "admin_port" + node, "mysql", "database-server", "admin_port" ) # Let all nodes but one act as backup (standby) servers. diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb deleted file mode 100644 index 4837975fbf..0000000000 --- a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb +++ /dev/null @@ -1,47 +0,0 @@ -def upgrade(ta, td, a, d) - d["element_states"] = td["element_states"] - d["element_order"] = td["element_order"] - - if a["sql_engine"] == "mysql" - d["elements"]["mysql-server"] = d["elements"]["database-server"] - d["elements"]["atabase-server"] = [] - if d.fetch("elements_expanded", {}).key? 
"database-server" - d["elements_expanded"]["mysql-server"] = d["elements_expanded"]["database-server"] - d["elements_expanded"].delete("database-server") - end - - chef_order = BarclampCatalog.chef_order("database") - nodes = NodeObject.find("run_list_map:database-server") - nodes.each do |node| - node.add_to_run_list("mysql-server", chef_order, - td["element_states"]["mysql-server"]) - node.delete_from_run_list("database-server") - node.save - end - end - return a, d -end - -def downgrade(ta, td, a, d) - d["element_states"] = td["element_states"] - d["element_order"] = td["element_order"] - - if a["sql_engine"] == "mysql" - d["elements"]["database-server"] = d["elements"]["mysql-server"] - d["elements"].delete("mysql-server") - if d.fetch("elements_expanded", {}).key? "mysql-server" - d["elements_expanded"]["database-server"] = d["elements_expanded"]["mysql-server"] - d["elements_expanded"].delete("mysql-server") - end - - chef_order = BarclampCatalog.chef_order("database") - nodes = NodeObject.find("run_list_map:mysql-server") - nodes.each do |node| - node.add_to_run_list("database-server", chef_order, - td["element_states"]["database-server"]) - node.delete_from_run_list("mysql-server") - node.save - end - end - return a, d -end diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 6132bf41e2..599a55474d 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -83,17 +83,15 @@ "database": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 109, + "schema-revision": 108, "element_states": { - "database-server": [ "readying", "ready", "applying" ], - "mysql-server": [ "readying", "ready", "applying" ] + "database-server": [ "readying", "ready", "applying" ] }, "elements": { - "database-server": [], - "mysql-server": [] + "database-server": [] }, "element_order": [ - [ "database-server", "mysql-server" ] + [ "database-server" ] ], 
"config": { "environment": "database-base-config", diff --git a/chef/roles/database-server.rb b/chef/roles/database-server.rb index ad9b8ca79c..0651705110 100644 --- a/chef/roles/database-server.rb +++ b/chef/roles/database-server.rb @@ -1,5 +1,6 @@ name "database-server" -description "PostgreSQL Server Role" +description "Database Server Role" run_list("recipe[database::role_database_server]") default_attributes() override_attributes() + diff --git a/chef/roles/mysql-server.rb b/chef/roles/mysql-server.rb deleted file mode 100644 index ac82c000c0..0000000000 --- a/chef/roles/mysql-server.rb +++ /dev/null @@ -1,5 +0,0 @@ -name "mysql-server" -description "MySQL/MariaDB Server Role" -run_list("recipe[database::role_mysql_server]") -default_attributes() -override_attributes() diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 32260202a1..ae5b17d230 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -38,16 +38,6 @@ def role_constraints "suse" => "< 12.2", "windows" => "/.*/" } - }, - "mysql-server" => { - "unique" => false, - "count" => 1, - "cluster" => true, - "admin" => false, - "exclude_platform" => { - "suse" => "< 12.2", - "windows" => "/.*/" - } } } end @@ -121,22 +111,17 @@ def validate_ha_attributes(attributes, cluster) end def validate_proposal_after_save(proposal) - attributes = proposal["attributes"][@bc_name] - sql_engine = attributes["sql_engine"] - db_role = if sql_engine == "postgresql" - "database-server" - else - "mysql-server" - end - validate_one_for_role proposal, db_role + validate_one_for_role proposal, "database-server" + attributes = proposal["attributes"][@bc_name] + db_engine = attributes["sql_engine"] validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_db_engine", - db_engine: sql_engine - ) unless %w(mysql postgresql).include?(sql_engine) + db_engine: db_engine + ) unless %w(mysql 
postgresql).include?(db_engine) # HA validation - servers = proposal["deployment"][@bc_name]["elements"][db_role] + servers = proposal["deployment"][@bc_name]["elements"]["database-server"] unless servers.nil? || servers.first.nil? || !is_cluster?(servers.first) cluster = servers.first validate_ha_attributes(attributes, cluster) @@ -149,18 +134,10 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Database apply_role_pre_chef_call: entering #{all_nodes.inspect}") return if all_nodes.empty? - sql_engine = role.default_attributes["database"]["sql_engine"] - db_role = if engine == "postgresql" - "database-server" - else - "mysql-server" - end - - database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) + database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, "database-server") Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled vip_networks = ["admin"] - dirty = prepare_role_for_ha_with_haproxy(role, ["database", "ha", "enabled"], database_ha_enabled, database_elements, @@ -169,6 +146,8 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) reset_sync_marks_on_clusters_founders(database_elements) + sql_engine = role.default_attributes["database"]["sql_engine"] + if database_ha_enabled net_svc = NetworkService.new @logger case sql_engine From 9e9605159bc5dafc9bf8e3fb3e0e47b0314bcf1b Mon Sep 17 00:00:00 2001 From: Ivan Lausuch Date: Thu, 26 Jul 2018 13:57:49 +0200 Subject: [PATCH 083/207] neutron: add force_metadata attribute This changes allows to force_metadata from the proposal. 
Previously if this was changed directly in neutron network nodes it was removed in each chef-client execution (cherry picked from commit b25af4c8b95ccbeef93685c359209b429634c758) --- chef/cookbooks/neutron/recipes/network_agents.rb | 3 ++- .../neutron/templates/default/dhcp_agent.ini.erb | 3 ++- .../migrate/neutron/118_add_force_metadata_attributes.rb | 9 +++++++++ chef/data_bags/crowbar/template-neutron.json | 5 ++++- chef/data_bags/crowbar/template-neutron.schema | 7 ++++++- 5 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/118_add_force_metadata_attributes.rb diff --git a/chef/cookbooks/neutron/recipes/network_agents.rb b/chef/cookbooks/neutron/recipes/network_agents.rb index d3a13f946c..ca9acf95ac 100644 --- a/chef/cookbooks/neutron/recipes/network_agents.rb +++ b/chef/cookbooks/neutron/recipes/network_agents.rb @@ -153,7 +153,8 @@ dhcp_domain: node[:neutron][:dhcp_domain], enable_isolated_metadata: "True", enable_metadata_network: "False", - nameservers: dns_list + nameservers: dns_list, + force_metadata: node[:neutron][:metadata][:force] ) end diff --git a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb index 49976d7272..3c9b6bebc3 100644 --- a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb @@ -4,7 +4,8 @@ resync_interval = <%= @resync_interval %> dhcp_driver = <%= @dhcp_driver %> enable_isolated_metadata = <%= @enable_isolated_metadata %> enable_metadata_network = <%= @enable_metadata_network %> -dhcp_domain = <%= @dhcp_domain %> +force_metadata = <%= @force_metadata %> +dns_domain = <%= @dns_domain %> <% if @nameservers -%> dnsmasq_dns_servers = <%= @nameservers %> <% end -%> diff --git a/chef/data_bags/crowbar/migrate/neutron/118_add_force_metadata_attributes.rb b/chef/data_bags/crowbar/migrate/neutron/118_add_force_metadata_attributes.rb new file 
mode 100644 index 0000000000..2fa9bb4d88 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/118_add_force_metadata_attributes.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["metadata"] = ta["metadata"] unless a.key? "metadata" + return a, d +end + +def downgrade(ta, td, a, d) + a.delete("metadata") unless ta.key? "metadata" + return a, d +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 8dbad1b978..350139f85d 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -179,6 +179,9 @@ }, "ha_rate_limit": { "neutron-server": 0 + }, + "metadata": { + "force": false } } }, @@ -186,7 +189,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 117, + "schema-revision": 118, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ] diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index a767fb9dd1..ea80c2ee55 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -52,7 +52,7 @@ "username": { "type" : "str", "required": true }, "password": { "type" : "str", "required": true }, "optimized_metadata": { "type" : "bool", "required": true }, - "optimized_dhcp": { "type" : "bool", "required": true }, + "optimized_dhcp": { "type" : "bool", "required": true }, "vpc_pairs": { "type": "str", "required": false }, "ext_net": { "type" : "map", "required" : true, "mapping" : { "name": { "type" : "str", "required" : true }, @@ -222,6 +222,11 @@ "type": "map", "required": true, "mapping": { "neutron-server": { "type": "int", "required": true } } + }, + "metadata": { + "type": "map", "required": true, "mapping": { + "force": { "type": "bool", "required": true } + } } }} }}, From 853b9d714f8a0348e4f99ee267265675ab4d2510 Mon Sep 17 00:00:00 
2001 From: Ivan Lausuch Date: Fri, 3 Aug 2018 14:05:19 +0200 Subject: [PATCH 084/207] rabbitmq: fix extra users password regeneration Every time the rabbitmq proposal was applied the extra users passwords was regenerated. This fix mantain the paswords. (cherry picked from commit a2f7ac68262a5f02665c671a6b90a91b06bf0f6e) --- crowbar_framework/app/models/rabbitmq_service.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crowbar_framework/app/models/rabbitmq_service.rb b/crowbar_framework/app/models/rabbitmq_service.rb index bb566de691..7525f994d0 100644 --- a/crowbar_framework/app/models/rabbitmq_service.rb +++ b/crowbar_framework/app/models/rabbitmq_service.rb @@ -83,11 +83,11 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) permissions: user["permissions"] } if !old_attrs.nil? && old_attrs.include?("users") && !old_attrs["users"].each.select do |u| - u["username"] == user["username"] + u["username"] == username end.empty? # reuse the existing pass pass = old_attrs["users"].each.select do |u| - u["username"] == user["username"] + u["username"] == username end.first["password"] updated_user.update(password: pass) From 8021eb9de2377d967def387be961a66ab77a9f70 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Fri, 3 Aug 2018 17:04:28 +0200 Subject: [PATCH 085/207] nova: reload nova-placement-api (bsc#1103383) nova-placement-api service is behing WGSI (apache2), and in order to reload it when nova-placement.conf file change, we need to create a proxy service. This proxy will signal the WSGI apache modules for a reload, without a full restart of apache service. 
(cherry picked from commit db813e1ce3042eb17214532063ac8de9557fec98) --- chef/cookbooks/nova/recipes/config.rb | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/chef/cookbooks/nova/recipes/config.rb b/chef/cookbooks/nova/recipes/config.rb index 1596368c78..ad3bda74d8 100644 --- a/chef/cookbooks/nova/recipes/config.rb +++ b/chef/cookbooks/nova/recipes/config.rb @@ -39,6 +39,19 @@ action :install end +# Fake service to take control of the WSGI process from apache that +# runs Placement API. We replace the `reload` action, sending +# manually the signal SIGUSR1 to all the process that are part of +# `wsgi:nova-placement-api` +service "nova-placement-api" do + service_name "apache2" + if node[:platform_family] == "suse" + reload_command 'sleep 1 && pkill --signal SIGUSR1 -f "^\(wsgi:nova-placement" && sleep 1' + end + supports reload: true, restart: true, status: true + ignore_failure true +end + # don't expose database connection to the compute clients if is_controller db_settings = fetch_database_settings @@ -341,12 +354,13 @@ group node[:nova][:group] mode 0640 variables( - keystone_settings: keystone_settings, - placement_database_connection: placement_database_connection, - placement_service_user: node["nova"]["placement_service_user"], - placement_service_password: node["nova"]["placement_service_password"], - placement_service_insecure: node[:nova][:ssl][:insecure] + keystone_settings: keystone_settings, + placement_database_connection: placement_database_connection, + placement_service_user: node["nova"]["placement_service_user"], + placement_service_password: node["nova"]["placement_service_password"], + placement_service_insecure: node[:nova][:ssl][:insecure] ) + notifies :reload, "service[nova-placement-api]" end From fb7ecd7ce891a2005f3d0f6dab1802a2426daa8a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 8 Aug 2018 13:48:48 +0200 Subject: [PATCH 086/207] keystone: Fix missing keystone migration (bsc#1104182) 
The removal of the update_password was landed under revision 113, so the migration was never executed. Rename to 114 and make it idempotent --- ...ove_updated_password.rb => 114_remove_updated_password.rb} | 4 +++- chef/data_bags/crowbar/template-keystone.json | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) rename chef/data_bags/crowbar/migrate/keystone/{113_remove_updated_password.rb => 114_remove_updated_password.rb} (66%) diff --git a/chef/data_bags/crowbar/migrate/keystone/113_remove_updated_password.rb b/chef/data_bags/crowbar/migrate/keystone/114_remove_updated_password.rb similarity index 66% rename from chef/data_bags/crowbar/migrate/keystone/113_remove_updated_password.rb rename to chef/data_bags/crowbar/migrate/keystone/114_remove_updated_password.rb index 1ab52963ad..9d532eac72 100644 --- a/chef/data_bags/crowbar/migrate/keystone/113_remove_updated_password.rb +++ b/chef/data_bags/crowbar/migrate/keystone/114_remove_updated_password.rb @@ -2,7 +2,9 @@ def upgrade(ta, td, a, d) a["admin"].delete("updated_password") nodes = NodeObject.find("roles:keystone-server") nodes.each do |node| - node[:keystone][:admin][:old_password] = node[:keystone][:admin][:password] + unless node[:keystone][:admin].key?("old_password") + node[:keystone][:admin][:old_password] = node[:keystone][:admin][:password] + end node.save end return a, d diff --git a/chef/data_bags/crowbar/template-keystone.json b/chef/data_bags/crowbar/template-keystone.json index f6c852ccfd..14ae7adf15 100644 --- a/chef/data_bags/crowbar/template-keystone.json +++ b/chef/data_bags/crowbar/template-keystone.json @@ -187,7 +187,7 @@ "keystone": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 113, + "schema-revision": 114, "element_states": { "keystone-server": [ "readying", "ready", "applying" ] }, From f33d781af67c289326964054ba449390fd743e5c Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Tue, 7 Aug 2018 17:54:26 +0200 Subject: [PATCH 087/207] Synchronize SSL in the
cluster (bsc#1081518) When Horizon in deployed via a autogenerated self certificate, the current code will generate a different one of each node in the cluster. This patch will generate a single certificate in the founder, and share it with the rest of the cluster via `rsync`. (cherry picked from commit b033efa1d34a580f1326b7ab41a75195dfb965be) --- chef/cookbooks/horizon/recipes/server.rb | 35 ++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/horizon/recipes/server.rb b/chef/cookbooks/horizon/recipes/server.rb index 1a0557dca6..e51d236564 100644 --- a/chef/cookbooks/horizon/recipes/server.rb +++ b/chef/cookbooks/horizon/recipes/server.rb @@ -494,17 +494,48 @@ include_recipe "horizon::ha" if ha_enabled +# Type 1 synchronizarion. Only one node of the cluser will create the +# certificates that will be transferred to the rest of the nodes +crowbar_pacemaker_sync_mark "wait-horizon_ssl_sync" do + # Generate the certificate is a slow process, can timeout in the + # other nodes of the cluster + timeout 60 * 5 +end if ha_enabled + if node[:horizon][:apache][:ssl] && node[:horizon][:apache][:generate_certs] package "apache2-utils" bash "Generate Apache certificate" do code <<-EOH - (umask 377 ; /usr/bin/gensslcert -C openstack-dashboard ) + (umask 377 ; /usr/bin/gensslcert -C openstack-dashboard -n openstack-dashboard) EOH - not_if { File.size?(node[:horizon][:apache][:ssl_crt_file]) } + only_if do + !File.size?(node[:horizon][:apache][:ssl_crt_file]) && ( + !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) + end + end + + if ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) + cluster_nodes = CrowbarPacemakerHelper.cluster_nodes(node, "horizon-server") + cluster_nodes.map do |n| + next if node.name == n.name + node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(n, "admin").address + bash "Synchronize SSL cetificates" do + code <<-EOH + rsync -a /etc/apache2/ssl.key/ 
#{node_address}:/etc/apache2/ssl.key/ + rsync -a /etc/apache2/ssl.crt/ #{node_address}:/etc/apache2/ssl.crt/ + rsync -a /etc/apache2/ssl.csr/ #{node_address}:/etc/apache2/ssl.csr/ + rsync -a /srv/www/htdocs/ #{node_address}:/srv/www/htdocs/ + EOH + timeout 120 + ignore_failure true + end + end end end +crowbar_pacemaker_sync_mark "create-horizon_ssl_sync" if ha_enabled + template "#{node[:apache][:dir]}/sites-available/openstack-dashboard.conf" do if node[:platform_family] == "suse" path "#{node[:apache][:dir]}/vhosts.d/openstack-dashboard.conf" From 5e43833dd493e523954260a1d55e2c767a197b05 Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Tue, 21 Aug 2018 16:23:42 +0200 Subject: [PATCH 088/207] Do not share ports Apache2 and HAProxy (bsc#1105086) If HAProxy is serving :80 or :443 (because Horizon), we do not want that Apache2 can also be listening on those ports. HAProxy can be configured to resuse sockets, so that means that the tuple IP:Port can be reused for different services, produncing bugs like bsc#1105086, where the same IPv4 and the same port (:80) is being listening connections from Apache and for HAProxy. This patch remove the default :listen_ports from Apache if HAProxy is deployed for Horizon, and reapply the Apache2 recipe from Crownar Core. (cherry picked from commit 312e458e3fb3cd686b2c224a7882a413f67eb6fe) --- chef/cookbooks/horizon/recipes/ha.rb | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/chef/cookbooks/horizon/recipes/ha.rb b/chef/cookbooks/horizon/recipes/ha.rb index 2de52b7754..a9f5ce91d0 100644 --- a/chef/cookbooks/horizon/recipes/ha.rb +++ b/chef/cookbooks/horizon/recipes/ha.rb @@ -33,6 +33,15 @@ end.run_action(:create) end +# Once HAProxy is taking care of :80 and :443 we need to remove this +# from Apache realm. 
This requires update the node information from +# Apache, and maybe the listen.conf file +if node[:apache][:listen_ports].include?("80") || node[:apache][:listen_ports].include?("443") + node.set[:apache][:listen_ports] = [] + node.save + include_recipe "apache2::default" +end + # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages installed before we create the pacemaker # resources From cf5eaeaaba07e8138df2caf3cd79fd0ae32ef7ca Mon Sep 17 00:00:00 2001 From: Matthew Oliver Date: Fri, 17 Aug 2018 15:43:07 +1000 Subject: [PATCH 089/207] Heat: Make non-founder HA nodes do less work There is an intermittent update and HA ci failure that is originates from the heat::server recipe. It has been observed in manual upgrade tests and in ci. The problem goes something like this. The founder set's a wait pacemaker sync mark: crowbar_pacemaker_sync_mark "wait-heat_register" if ha_enabled And then goes off and registers heat with keystone. The non-founders happily sit and wait for the founder to finish and set the create mark. When the non-founders finally start, none of the register jobs that the founder has just completed has "only_if's" attached so the non-founders happily go and re-run them. The problem is, these register jobs can take time. In the failures I've seen the `register heat domain` block take up to 40-50 seconds. This places the non-founders well behind the founder. The intermittent failure happens when we get later in the chef run, where any more delays on one of the non-founders can cause later sync marks to timeout. Which usually fails in the ceilometer recipe where the founder times out after waiting a minute for the other nodes to catch up, resulting in: FATAL: RuntimeError: crowbar-pacemaker_sync_mark[sync-ceilometer_central_before_ha] (ceilometer::central_ha line 19) had an error: \ RuntimeError: Some cluster nodes didn't set ceilometer_central_before_ha! 
This patch goes as adds an: only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } to each keystone register block between the wait and create so once the non-founders start again they don't go re-do that the founder just completed. (cherry picked from commit a980679aebbb61471cfcba41a79a4417b878a996) --- chef/cookbooks/heat/recipes/server.rb | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/chef/cookbooks/heat/recipes/server.rb b/chef/cookbooks/heat/recipes/server.rb index b21ff4eeb2..846923b9d2 100644 --- a/chef/cookbooks/heat/recipes/server.rb +++ b/chef/cookbooks/heat/recipes/server.rb @@ -125,6 +125,7 @@ port keystone_settings["admin_port"] auth register_auth_hash action :wakeup + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "register heat user" do @@ -137,6 +138,7 @@ user_password keystone_settings["service_password"] tenant_name keystone_settings["service_tenant"] action :add_user + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "give heat user access" do @@ -149,6 +151,7 @@ tenant_name keystone_settings["service_tenant"] role_name "admin" action :add_access + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "add heat stack user role" do @@ -161,6 +164,7 @@ tenant_name keystone_settings["service_tenant"] role_name "heat_stack_user" action :add_role + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end node[:heat][:trusts_delegated_roles].each do |role| @@ -174,6 +178,7 @@ tenant_name keystone_settings["service_tenant"] role_name role action :add_role + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "give admin access to stack owner role #{role}" do @@ -186,6 +191,7 @@ tenant_name keystone_settings["default_tenant"] role_name role action :add_access + only_if { !ha_enabled || 
CrowbarPacemakerHelper.is_cluster_founder?(node) } end end @@ -286,6 +292,7 @@ "OS_REGION_NAME" => keystone_settings["endpoint_region"], "OS_IDENTITY_API_VERSION" => "3" }) + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end # Create Heat CloudFormation service @@ -299,6 +306,7 @@ service_type "cloudformation" service_description "Heat CloudFormation Service" action :add_service + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "register heat Cfn endpoint" do @@ -315,6 +323,7 @@ # endpoint_global true # endpoint_enabled true action :add_endpoint_template + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end # Create Heat service @@ -328,6 +337,7 @@ service_type "orchestration" service_description "Heat Service" action :add_service + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "register heat endpoint" do @@ -350,6 +360,7 @@ # endpoint_global true # endpoint_enabled true action :add_endpoint_template + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end crowbar_pacemaker_sync_mark "create-heat_register" if ha_enabled From eac4850e4f5bdb6e74f88291168ef17e08e789e4 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Mon, 29 Jan 2018 15:27:22 +0100 Subject: [PATCH 090/207] glance: Fix ironic related glance code 1. incomplete migration of auth data from arguments to env variables 2. missing include for `secure_password` 3. 
extra whitespace in command (cherry picked from commit 12b10648cc23d1a9a0ed26ebbd19b76b278dd480) --- chef/cookbooks/glance/recipes/api.rb | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/glance/recipes/api.rb b/chef/cookbooks/glance/recipes/api.rb index ce41cfe2b4..99f1d1f9af 100644 --- a/chef/cookbooks/glance/recipes/api.rb +++ b/chef/cookbooks/glance/recipes/api.rb @@ -95,25 +95,29 @@ # ensure swift tempurl key only if some agent_* drivers are enabled in ironic if !swift_config.empty? && node[:glance][:default_store] == "swift" && \ ironics.any? && ironics.first[:ironic][:enabled_drivers].any? { |d| d.start_with?("agent_") } - swift_command = "swift " + swift_command = "swift" swift_command << (swift_insecure ? " --insecure" : "") env = { "OS_USERNAME" => keystone_settings["service_user"], "OS_PASSWORD" => keystone_settings["service_password"], "OS_PROJECT_NAME" => keystone_settings["service_tenant"], "OS_AUTH_URL" => keystone_settings["public_auth_url"], - "OS_IDENTITY_API_VERSION" => 3 + "OS_IDENTITY_API_VERSION" => "3" } get_tempurl_key = "#{swift_command} stat | grep -m1 'Meta Temp-Url-Key:' | awk '{print $3}'" tempurl_key = Mixlib::ShellOut.new(get_tempurl_key, environment: env).run_command.stdout.chomp # no tempurl key set, set a random one if tempurl_key.empty? + # include the secure_password code + ::Chef::Recipe.send(:include, Opscode::OpenSSL::Password) + tempurl_key = secure_password execute "set-glance-tempurl-key" do command "#{swift_command} post -m 'Temp-Url-Key:#{tempurl_key}'" user node[:glance][:user] group node[:glance][:group] + environment env end end end From a8a3d203bd0142e1e622444976b3530fbfadadbc Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 22 Jun 2018 17:23:14 +0200 Subject: [PATCH 091/207] database: Split database-server role into backend specific roles This commit splits the "database-server" role into "postgresql-server" and "mysql-server". 
This is the first change in a series to allow deploying both databases from a single proposal to the same (or different) set of nodes. The current state still does only allow a single database to be deployed, follow up commits with enhance the code to allow for parallel deployments. Schema migrations were added to reassign the roles correctly on existing deployments. (cherry picked from commit 5b23644341613d296bcecb6f8320aa7cb72714f8) --- .../crowbar-openstack/libraries/helpers.rb | 14 +++++- .../database/recipes/role_database_server.rb | 6 +-- .../{server.rb => role_mysql_server.rb} | 16 ++----- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- .../migrate/database/109_separate_db_roles.rb | 47 +++++++++++++++++++ chef/data_bags/crowbar/template-database.json | 10 ++-- chef/roles/database-server.rb | 3 +- chef/roles/mysql-server.rb | 5 ++ .../app/models/database_service.rb | 39 +++++++++++---- 9 files changed, 111 insertions(+), 31 deletions(-) rename chef/cookbooks/database/recipes/{server.rb => role_mysql_server.rb} (64%) create mode 100644 chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb create mode 100644 chef/roles/mysql-server.rb diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 73c76c14ee..1e3b8431e3 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -79,7 +79,19 @@ def self.database_settings(node, barclamp) Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") else @database_settings ||= Hash.new - database = get_node(node, "database-server", "database", instance) + db_roles, = Chef::Search::Query.new.search( + :role, + "name:database-config-#{instance}" + ) + db_proposal_role = db_roles.first unless db_roles.empty? 
+ sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] + db_role = if sql_engine == "postgresql" + "database-server" + else + "mysql-server" + end + + database = get_node(node, db_role, "database", instance) if database.nil? Chef::Log.warn("No database server found!") diff --git a/chef/cookbooks/database/recipes/role_database_server.rb b/chef/cookbooks/database/recipes/role_database_server.rb index 65a6817a08..a71952ff35 100644 --- a/chef/cookbooks/database/recipes/role_database_server.rb +++ b/chef/cookbooks/database/recipes/role_database_server.rb @@ -1,5 +1,5 @@ # -# Copyright 2016, SUSE LINUX GmbH +# Copyright 2018, SUSE LINUX GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # - if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "database-server") include_recipe "database::crowbar" - include_recipe "database::server" + Chef::Log.info("Running database::server for PostgreSQL") + include_recipe "postgresql::server" end diff --git a/chef/cookbooks/database/recipes/server.rb b/chef/cookbooks/database/recipes/role_mysql_server.rb similarity index 64% rename from chef/cookbooks/database/recipes/server.rb rename to chef/cookbooks/database/recipes/role_mysql_server.rb index 97ea5c1ce0..ef6113fe87 100644 --- a/chef/cookbooks/database/recipes/server.rb +++ b/chef/cookbooks/database/recipes/role_mysql_server.rb @@ -1,14 +1,11 @@ # -# Cookbook Name:: database -# Recipe:: server -# -# Copyright 2012, SUSE Linux Products GmbH +# Copyright 2018, SUSE LINUX GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -16,9 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - -backend = node[:database][:sql_engine] - -Chef::Log.info("Running database::server for #{backend}") - -include_recipe "#{backend}::server" +if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "mysql-server") + include_recipe "mysql::server" +end diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index fce618833e..095ea536ab 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -330,7 +330,7 @@ include_recipe "crowbar-pacemaker::haproxy" ha_servers = CrowbarPacemakerHelper.haproxy_servers_for_service( - node, "mysql", "database-server", "admin_port" + node, "mysql", "mysql-server", "admin_port" ) # Let all nodes but one act as backup (standby) servers. diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb new file mode 100644 index 0000000000..4837975fbf --- /dev/null +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb @@ -0,0 +1,47 @@ +def upgrade(ta, td, a, d) + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + + if a["sql_engine"] == "mysql" + d["elements"]["mysql-server"] = d["elements"]["database-server"] + d["elements"]["database-server"] = [] + if d.fetch("elements_expanded", {}).key?
"database-server" + d["elements_expanded"]["mysql-server"] = d["elements_expanded"]["database-server"] + d["elements_expanded"].delete("database-server") + end + + chef_order = BarclampCatalog.chef_order("database") + nodes = NodeObject.find("run_list_map:database-server") + nodes.each do |node| + node.add_to_run_list("mysql-server", chef_order, + td["element_states"]["mysql-server"]) + node.delete_from_run_list("database-server") + node.save + end + end + return a, d +end + +def downgrade(ta, td, a, d) + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + + if a["sql_engine"] == "mysql" + d["elements"]["database-server"] = d["elements"]["mysql-server"] + d["elements"].delete("mysql-server") + if d.fetch("elements_expanded", {}).key? "mysql-server" + d["elements_expanded"]["database-server"] = d["elements_expanded"]["mysql-server"] + d["elements_expanded"].delete("mysql-server") + end + + chef_order = BarclampCatalog.chef_order("database") + nodes = NodeObject.find("run_list_map:mysql-server") + nodes.each do |node| + node.add_to_run_list("database-server", chef_order, + td["element_states"]["database-server"]) + node.delete_from_run_list("mysql-server") + node.save + end + end + return a, d +end diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 599a55474d..6132bf41e2 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -83,15 +83,17 @@ "database": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 108, + "schema-revision": 109, "element_states": { - "database-server": [ "readying", "ready", "applying" ] + "database-server": [ "readying", "ready", "applying" ], + "mysql-server": [ "readying", "ready", "applying" ] }, "elements": { - "database-server": [] + "database-server": [], + "mysql-server": [] }, "element_order": [ - [ "database-server" ] + [ "database-server", "mysql-server" ] ], 
"config": { "environment": "database-base-config", diff --git a/chef/roles/database-server.rb b/chef/roles/database-server.rb index 0651705110..ad9b8ca79c 100644 --- a/chef/roles/database-server.rb +++ b/chef/roles/database-server.rb @@ -1,6 +1,5 @@ name "database-server" -description "Database Server Role" +description "PostgreSQL Server Role" run_list("recipe[database::role_database_server]") default_attributes() override_attributes() - diff --git a/chef/roles/mysql-server.rb b/chef/roles/mysql-server.rb new file mode 100644 index 0000000000..d6d5b9e9c3 --- /dev/null +++ b/chef/roles/mysql-server.rb @@ -0,0 +1,5 @@ +name "mysql-server" +description "MySQL/MariaDB Server Role" +run_list("recipe[database::role_mysql_server]") +default_attributes +override_attributes diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index ae5b17d230..a8927cf42f 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -38,6 +38,16 @@ def role_constraints "suse" => "< 12.2", "windows" => "/.*/" } + }, + "mysql-server" => { + "unique" => false, + "count" => 1, + "cluster" => true, + "admin" => false, + "exclude_platform" => { + "suse" => "< 12.2", + "windows" => "/.*/" + } } } end @@ -111,17 +121,22 @@ def validate_ha_attributes(attributes, cluster) end def validate_proposal_after_save(proposal) - validate_one_for_role proposal, "database-server" - attributes = proposal["attributes"][@bc_name] - db_engine = attributes["sql_engine"] + sql_engine = attributes["sql_engine"] + db_role = if sql_engine == "postgresql" + "database-server" + else + "mysql-server" + end + validate_one_for_role proposal, db_role + validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_db_engine", - db_engine: db_engine - ) unless %w(mysql postgresql).include?(db_engine) + db_engine: sql_engine + ) unless ["mysql", "postgresql"].include?(sql_engine) # HA validation - servers = 
proposal["deployment"][@bc_name]["elements"]["database-server"] + servers = proposal["deployment"][@bc_name]["elements"][db_role] unless servers.nil? || servers.first.nil? || !is_cluster?(servers.first) cluster = servers.first validate_ha_attributes(attributes, cluster) @@ -134,10 +149,18 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Database apply_role_pre_chef_call: entering #{all_nodes.inspect}") return if all_nodes.empty? - database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, "database-server") + sql_engine = role.default_attributes["database"]["sql_engine"] + db_role = if engine == "postgresql" + "database-server" + else + "mysql-server" + end + + database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled vip_networks = ["admin"] + dirty = prepare_role_for_ha_with_haproxy(role, ["database", "ha", "enabled"], database_ha_enabled, database_elements, @@ -146,8 +169,6 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) reset_sync_marks_on_clusters_founders(database_elements) - sql_engine = role.default_attributes["database"]["sql_engine"] - if database_ha_enabled net_svc = NetworkService.new @logger case sql_engine From c1ae1d1613aa1b46a1bb1a45e37aaa17650c0699 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 22 Jun 2018 17:29:22 +0200 Subject: [PATCH 092/207] database: Allow parallel HA deployment of PostgreSQL and MariaDB This fixes the Crowbar Model and cookbooks to allow for the mysql- and postgresql-server roles being applied to the same cluster. The schema migration is update to put the "ha" attributes subtree in a per backend specific location to avoid overlap. (cherry picked from commit 86a1893c7a415a0d2c8c20fe36562367ec30c092) Fix migrations so the old HA settings are correctly copied into new ones. 
--- .../crowbar-openstack/libraries/helpers.rb | 13 ++-- chef/cookbooks/database/attributes/default.rb | 6 +- chef/cookbooks/database/libraries/crowbar.rb | 14 ++-- .../database/libraries/database_library.rb | 9 +-- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- chef/cookbooks/mysql/recipes/server.rb | 6 +- chef/cookbooks/postgresql/recipes/ha.rb | 6 +- .../postgresql/recipes/ha_storage.rb | 18 ++--- chef/cookbooks/postgresql/recipes/server.rb | 4 +- .../postgresql/recipes/server_debian.rb | 2 +- .../postgresql/recipes/server_redhat.rb | 2 +- .../migrate/database/109_separate_db_roles.rb | 20 +++++- chef/data_bags/crowbar/template-database.json | 24 +++---- .../crowbar/template-database.schema | 40 +++++------ .../app/models/database_service.rb | 67 +++++++++++-------- 15 files changed, 132 insertions(+), 101 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 1e3b8431e3..495f1928ec 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -75,7 +75,7 @@ def self.database_settings(node, barclamp) @database_settings_cache_time = node[:ohai_time] end - if @database_settings && @database_settings.include?(instance) + if @database_settings && @database_settings.include?(instance) && false Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") else @database_settings ||= Hash.new @@ -85,6 +85,9 @@ def self.database_settings(node, barclamp) ) db_proposal_role = db_roles.first unless db_roles.empty? 
sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] + if barclamp == "mysql" || barclamp == "postgresql" + sql_engine = barclamp + end db_role = if sql_engine == "postgresql" "database-server" else @@ -97,7 +100,7 @@ def self.database_settings(node, barclamp) Chef::Log.warn("No database server found!") else address = CrowbarDatabaseHelper.get_listen_address(database) - backend_name = DatabaseLibrary::Database::Util.get_backend_name(database) + backend_name = sql_engine ssl_opts = {} if backend_name == "mysql" @@ -112,9 +115,9 @@ def self.database_settings(node, barclamp) address: address, url_scheme: backend_name, backend_name: backend_name, - provider: DatabaseLibrary::Database::Util.get_database_provider(database), - user_provider: DatabaseLibrary::Database::Util.get_user_provider(database), - privs: DatabaseLibrary::Database::Util.get_default_priviledges(database), + provider: DatabaseLibrary::Database::Util.get_database_provider(database, backend_name), + user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, backend_name), + privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, backend_name), connection: { host: address, username: "db_maker", diff --git a/chef/cookbooks/database/attributes/default.rb b/chef/cookbooks/database/attributes/default.rb index 32f3e09f79..0440382476 100644 --- a/chef/cookbooks/database/attributes/default.rb +++ b/chef/cookbooks/database/attributes/default.rb @@ -18,5 +18,7 @@ # # ha -default[:database][:ha][:enabled] = false -default[:database][:ha][:storage][:mode] = nil +default[:database][:postgresql][:ha][:enabled] = false +default[:database][:postgresql][:ha][:storage][:mode] = nil + +default[:database][:mysql][:ha][:enabled] = false diff --git a/chef/cookbooks/database/libraries/crowbar.rb b/chef/cookbooks/database/libraries/crowbar.rb index ed13e9e4fd..07512d9558 100644 --- a/chef/cookbooks/database/libraries/crowbar.rb +++ 
b/chef/cookbooks/database/libraries/crowbar.rb @@ -1,10 +1,10 @@ module CrowbarDatabaseHelper - def self.get_ha_vhostname(node) - if node[:database][:ha][:enabled] + def self.get_ha_vhostname(node, sql_engine = node[:database][:sql_engine]) + if node["database"][sql_engine]["ha"]["enabled"] cluster_name = CrowbarPacemakerHelper.cluster_name(node) # Any change in the generation of the vhostname here must be reflected in # apply_role_pre_chef_call of the database barclamp model - if node[:database][:sql_engine] == "postgresql" + if sql_engine == "postgresql" "#{node[:database][:config][:environment].gsub("-config", "")}-#{cluster_name}".tr("_", "-") else "cluster-#{cluster_name}".tr("_", "-") @@ -14,11 +14,11 @@ def self.get_ha_vhostname(node) end end - def self.get_listen_address(node) + def self.get_listen_address(node, sql_engine = node[:database][:sql_engine]) # For SSL we prefer a cluster hostname (for certificate validation) - use_ssl = node[:database][:sql_engine] == "mysql" && node[:database][:mysql][:ssl][:enabled] - if node[:database][:ha][:enabled] - vhostname = get_ha_vhostname(node) + use_ssl = sql_engine == "mysql" && node[:database][:mysql][:ssl][:enabled] + if node["database"][sql_engine]["ha"]["enabled"] + vhostname = get_ha_vhostname(node, sql_engine) use_ssl ? "#{vhostname}.#{node[:domain]}" : CrowbarPacemakerHelper.cluster_vip(node, "admin", vhostname) else use_ssl ? 
node[:fqdn] : Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "admin").address diff --git a/chef/cookbooks/database/libraries/database_library.rb b/chef/cookbooks/database/libraries/database_library.rb index 2e343db085..8164f42373 100644 --- a/chef/cookbooks/database/libraries/database_library.rb +++ b/chef/cookbooks/database/libraries/database_library.rb @@ -19,8 +19,7 @@ module DatabaseLibrary class Database class Util - def self.get_database_provider(node) - backend = node[:database][:sql_engine] + def self.get_database_provider(node, backend = node[:database][:sql_engine]) db_provider = nil case backend when "postgresql" @@ -33,8 +32,7 @@ def self.get_database_provider(node) db_provider end - def self.get_user_provider(node) - backend = node[:database][:sql_engine] + def self.get_user_provider(node, backend = node[:database][:sql_engine]) db_provider = nil case backend when "postgresql" @@ -51,8 +49,7 @@ def self.get_backend_name(node) node[:database][:sql_engine] end - def self.get_default_priviledges(node) - backend = node[:database][:sql_engine] + def self.get_default_priviledges(node, backend = node[:database][:sql_engine]) privs = nil case backend when "postgresql" diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 095ea536ab..2fcc98d835 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -71,7 +71,7 @@ # unauthenticated root user is later removed in server.rb after the # bootstraping. Once the cluster has started other nodes will pick up on # the sstuser and we are able to use these credentails. 
- db_settings = fetch_database_settings + db_settings = fetch_database_settings(@cookbook_name) db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index c107ff2332..f19219514e 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -20,7 +20,7 @@ include_recipe "mysql::client" include_recipe "database::client" -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:mysql][:ha][:enabled] # For Crowbar, we need to set the address to bind - default to admin node. addr = node[:database][:mysql][:bind_address] || "" @@ -91,7 +91,7 @@ node[:database][:mysql][:ssl][:generate_certs] || node[:database][:mysql][:ssl][:insecure]) group "mysql" - fqdn CrowbarDatabaseHelper.get_listen_address(node) + fqdn CrowbarDatabaseHelper.get_listen_address(node, "mysql") end end @@ -183,7 +183,7 @@ only_if "/usr/bin/mysql -u root -e 'show databases;'" end -db_settings = fetch_database_settings +db_settings = fetch_database_settings(@cookbook_name) db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" diff --git a/chef/cookbooks/postgresql/recipes/ha.rb b/chef/cookbooks/postgresql/recipes/ha.rb index 313759f993..0b9305133f 100644 --- a/chef/cookbooks/postgresql/recipes/ha.rb +++ b/chef/cookbooks/postgresql/recipes/ha.rb @@ -22,14 +22,14 @@ # # This is the second step. 
-vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node)}" +vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node, "postgresql")}" service_name = "postgresql" fs_primitive = "fs-#{service_name}" group_name = "g-#{service_name}" agent_name = "ocf:heartbeat:pgsql" -ip_addr = CrowbarDatabaseHelper.get_listen_address(node) +ip_addr = CrowbarDatabaseHelper.get_listen_address(node, "postgresql") postgres_op = {} postgres_op["monitor"] = {} @@ -85,7 +85,7 @@ end transaction_objects << "pacemaker_primitive[#{service_name}]" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{service_name}" pacemaker_colocation colocation_constraint do diff --git a/chef/cookbooks/postgresql/recipes/ha_storage.rb b/chef/cookbooks/postgresql/recipes/ha_storage.rb index 837b4f52bb..2e13291cc0 100644 --- a/chef/cookbooks/postgresql/recipes/ha_storage.rb +++ b/chef/cookbooks/postgresql/recipes/ha_storage.rb @@ -37,21 +37,21 @@ fs_params = {} fs_params["directory"] = "/var/lib/pgsql" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" include_recipe "crowbar-pacemaker::drbd" crowbar_pacemaker_drbd drbd_resource do - size "#{node[:database][:ha][:storage][:drbd][:size]}G" + size "#{node[:database][:postgresql][:ha][:storage][:drbd][:size]}G" action :nothing end.run_action(:create) fs_params["device"] = node["drbd"]["rsc"][drbd_resource]["device"] fs_params["fstype"] = "xfs" -elsif node[:database][:ha][:storage][:mode] == "shared" - fs_params["device"] = node[:database][:ha][:storage][:shared][:device] - fs_params["fstype"] = node[:database][:ha][:storage][:shared][:fstype] - unless node[:database][:ha][:storage][:shared][:options].empty? 
- fs_params["options"] = node[:database][:ha][:storage][:shared][:options] +elsif node[:database][:postgresql][:ha][:storage][:mode] == "shared" + fs_params["device"] = node[:database][:postgresql][:ha][:storage][:shared][:device] + fs_params["fstype"] = node[:database][:postgresql][:ha][:storage][:shared][:fstype] + unless node[:database][:postgresql][:ha][:storage][:shared][:options].empty? + fs_params["options"] = node[:database][:postgresql][:ha][:storage][:shared][:options] end else raise "Invalid mode for HA storage!" @@ -71,7 +71,7 @@ transaction_objects = [] -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" drbd_params = {} drbd_params["drbd_resource"] = drbd_resource @@ -120,7 +120,7 @@ location_name = openstack_pacemaker_controller_only_location_for fs_primitive transaction_objects << "pacemaker_location[#{location_name}]" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{fs_primitive}" pacemaker_colocation colocation_constraint do score "inf" diff --git a/chef/cookbooks/postgresql/recipes/server.rb b/chef/cookbooks/postgresql/recipes/server.rb index 9d7d272df7..3cd4285ad6 100644 --- a/chef/cookbooks/postgresql/recipes/server.rb +++ b/chef/cookbooks/postgresql/recipes/server.rb @@ -28,7 +28,7 @@ dirty = false # For Crowbar, we need to set the address to bind - default to admin node. 
-newaddr = CrowbarDatabaseHelper.get_listen_address(node) +newaddr = CrowbarDatabaseHelper.get_listen_address(node, "postgresql") if node["postgresql"]["config"]["listen_addresses"] != newaddr node.set["postgresql"]["config"]["listen_addresses"] = newaddr dirty = true @@ -121,7 +121,7 @@ notifies change_notify, "service[postgresql]", :immediately end -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:postgresql][:ha][:enabled] if ha_enabled log "HA support for postgresql is enabled" diff --git a/chef/cookbooks/postgresql/recipes/server_debian.rb b/chef/cookbooks/postgresql/recipes/server_debian.rb index 78da2a6c27..4258923b17 100644 --- a/chef/cookbooks/postgresql/recipes/server_debian.rb +++ b/chef/cookbooks/postgresql/recipes/server_debian.rb @@ -28,7 +28,7 @@ # We need to include the HA recipe early, before the config files are # generated, but after the postgresql packages are installed since they live in # the directory that will be mounted for HA -if node[:database][:ha][:enabled] +if node[:database][:postgresql][:ha][:enabled] include_recipe "postgresql::ha_storage" end diff --git a/chef/cookbooks/postgresql/recipes/server_redhat.rb b/chef/cookbooks/postgresql/recipes/server_redhat.rb index f2139a5c10..5c3a26a671 100644 --- a/chef/cookbooks/postgresql/recipes/server_redhat.rb +++ b/chef/cookbooks/postgresql/recipes/server_redhat.rb @@ -49,7 +49,7 @@ package pg_pack end -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:postgresql][:ha][:enabled] # We need to include the HA recipe early, before the config files are # generated, but after the postgresql packages are installed since they live in diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb index 4837975fbf..c7d08ecb44 100644 --- a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb 
@@ -1,10 +1,26 @@ def upgrade(ta, td, a, d) + db_engine = a["sql_engine"] + + # 'ha' hash needs to be moved under 'postgresql' to keep it consistent with mysql + if db_engine == "postgresql" + a["postgresql"]["ha"] = a["ha"] + else + a["postgresql"]["ha"] = ta["postgresql"]["ha"] + a["mysql"]["ha"]["enabled"] = true if a["ha"]["enabled"] + end + a.delete("ha") if a.key? "ha" + d["element_states"] = td["element_states"] d["element_order"] = td["element_order"] - if a["sql_engine"] == "mysql" + if db_engine == "mysql" + # For the time of upgrade, we're adding new 'mysql-server role', while old 'database-server' + # is reserved for existing postgresql setup. + # For users that already have mysql (mariadb) deployed with 'database-server' role, we need to + # adapt the role assignments so the code that is looking for 'mysql-server' instances always + # finds correct mysql nodes. d["elements"]["mysql-server"] = d["elements"]["database-server"] - d["elements"]["atabase-server"] = [] + d["elements"]["database-server"] = [] if d.fetch("elements_expanded", {}).key? 
"database-server" d["elements_expanded"]["mysql-server"] = d["elements_expanded"]["database-server"] d["elements_expanded"].delete("database-server") diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 6132bf41e2..364d565b59 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -57,18 +57,18 @@ "log_filename": "postgresql.log-%Y%m%d%H%M", "log_truncate_on_rotation": false, "log_min_duration_statement": -1 - } - }, - "ha": { - "storage": { - "mode": "shared", - "drbd": { - "size": 50 - }, - "shared": { - "device": "", - "fstype": "", - "options": "" + }, + "ha": { + "storage": { + "mode": "shared", + "drbd": { + "size": 50 + }, + "shared": { + "device": "", + "fstype": "", + "options": "" + } } } }, diff --git a/chef/data_bags/crowbar/template-database.schema b/chef/data_bags/crowbar/template-database.schema index 401d087c31..571799ea17 100644 --- a/chef/data_bags/crowbar/template-database.schema +++ b/chef/data_bags/crowbar/template-database.schema @@ -85,32 +85,32 @@ "log_filename": {"type": "str" }, "log_min_duration_statement": { "type": "int" } } - } - } - }, - "ha" : { - "type": "map", - "required": true, - "mapping" : { - "storage": { + }, + "ha" : { "type": "map", "required": true, "mapping" : { - "mode": { "type": "str", "required": true }, - "drbd": { + "storage": { "type": "map", "required": true, "mapping" : { - "size": { "type": "int", "required": true } - } - }, - "shared": { - "type": "map", - "required": true, - "mapping" : { - "device": { "type": "str", "required": true }, - "fstype": { "type": "str", "required": true }, - "options": { "type": "str", "required": true } + "mode": { "type": "str", "required": true }, + "drbd": { + "type": "map", + "required": true, + "mapping" : { + "size": { "type": "int", "required": true } + } + }, + "shared": { + "type": "map", + "required": true, + "mapping" : { + "device": { "type": "str", 
"required": true }, + "fstype": { "type": "str", "required": true }, + "options": { "type": "str", "required": true } + } + } } } } diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index a8927cf42f..523f309148 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -71,11 +71,12 @@ def create_proposal end def validate_ha_attributes(attributes, cluster) - storage_mode = attributes["ha"]["storage"]["mode"] role = available_clusters[cluster] case attributes["sql_engine"] when "postgresql" + ha_attr = attributes["postgresql"]["ha"] + storage_mode = ha_attr["storage"]["mode"] unless ["shared", "drbd"].include?(storage_mode) validation_error I18n.t( "barclamp.#{@bc_name}.validation.unknown_mode_ha", @@ -83,12 +84,12 @@ def validate_ha_attributes(attributes, cluster) ) end if storage_mode == "shared" - if attributes["ha"]["storage"]["shared"]["device"].blank? + if ha_attr["storage"]["shared"]["device"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_device" ) end - if attributes["ha"]["storage"]["shared"]["fstype"].blank? + if ha_attr["storage"]["shared"]["fstype"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_filesystem" ) @@ -100,7 +101,7 @@ def validate_ha_attributes(attributes, cluster) cluster_name: cluster_name(cluster) ) end - if attributes["ha"]["storage"]["drbd"]["size"] <= 0 + if ha_attr["storage"]["drbd"]["size"] <= 0 validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_size_drbd" ) @@ -150,28 +151,38 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) return if all_nodes.empty? 
sql_engine = role.default_attributes["database"]["sql_engine"] - db_role = if engine == "postgresql" - "database-server" - else - "mysql-server" - end - - database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) - Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled vip_networks = ["admin"] - - dirty = prepare_role_for_ha_with_haproxy(role, ["database", "ha", "enabled"], - database_ha_enabled, - database_elements, - vip_networks) - role.save if dirty - - reset_sync_marks_on_clusters_founders(database_elements) - - if database_ha_enabled - net_svc = NetworkService.new @logger - case sql_engine + dirty = false + net_svc = NetworkService.new @logger + db_enabled = { + "mysql" => { + "enabled" => false, + "ha" => false + }, + "postgresql" => { + "enabled" => false, + "ha" => false + } + } + ["postgresql", "mysql"].each do |engine| + db_role = if engine == "postgresql" + "database-server" + else + "mysql-server" + end + database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) + db_enabled[engine]["enabled"] = true unless database_nodes.empty? 
+ db_enabled[engine]["ha"] = database_ha_enabled + Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled + dirty = prepare_role_for_ha_with_haproxy(role, + ["database", engine, "ha", "enabled"], + database_ha_enabled, + database_elements, + vip_networks) || dirty + reset_sync_marks_on_clusters_founders(database_elements) + next unless database_ha_enabled + case engine when "postgresql" unless database_elements.length == 1 && PacemakerServiceObject.is_cluster?(database_elements[0]) raise "Internal error: HA enabled, but element is not a cluster" @@ -189,17 +200,19 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) allocate_virtual_ips_for_any_cluster_in_networks(database_elements, vip_networks) end end + role.save if dirty role.default_attributes["database"][sql_engine] = {} if role.default_attributes["database"][sql_engine].nil? role.default_attributes["database"]["db_maker_password"] = (old_role && old_role.default_attributes["database"]["db_maker_password"]) || random_password - if ( sql_engine == "mysql" ) + if db_enabled["mysql"]["enabled"] role.default_attributes["database"]["mysql"]["server_root_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["server_root_password"]) || random_password - if database_ha_enabled + if db_enabled["mysql"]["ha"] role.default_attributes["database"]["mysql"]["sstuser_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["sstuser_password"]) || random_password end @logger.debug("setting mysql specific attributes") - elsif ( sql_engine == "postgresql" ) + end + if db_enabled["postgresql"]["enabled"] # Attribute is not living in "database" namespace, but that's because # it's for the postgresql cookbook. 
We're not using default_attributes # because the upstream cookbook use node.set_unless which would override From bbde7716bea4a670b7872dc21c00eb870c96342a Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 26 Jun 2018 15:21:54 +0200 Subject: [PATCH 093/207] database: Fix "Attributes" UI after role renaming (cherry picked from commit aa10d7d89cf54f1525bbb8ece42f006c3850841b) --- .../app/helpers/barclamp/database_helper.rb | 4 +-- .../database/_edit_attributes.html.haml | 12 ++++----- .../config/locales/database/en.yml | 26 +++++++++---------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/crowbar_framework/app/helpers/barclamp/database_helper.rb b/crowbar_framework/app/helpers/barclamp/database_helper.rb index f69020ff70..dbe1a2b295 100644 --- a/crowbar_framework/app/helpers/barclamp/database_helper.rb +++ b/crowbar_framework/app/helpers/barclamp/database_helper.rb @@ -30,8 +30,8 @@ def engines_for_database(selected) def ha_storage_mode_for_database(selected) options_for_select( [ - [t(".ha.storage.modes.drbd"), "drbd"], - [t(".ha.storage.modes.shared"), "shared"] + [t(".postgresql.ha.storage.modes.drbd"), "drbd"], + [t(".postgresql.ha.storage.modes.shared"), "shared"] ], selected.to_s ) diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index 3cd7af0441..8f2fd10ad0 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -40,14 +40,14 @@ %legend = t('.ha_header') - = select_field %w(ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" + = select_field %w(postgresql ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", 
"shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" #drbd_storage_container .alert.alert-info - = t('.ha.storage.drbd_info') - = integer_field %w(ha storage drbd size) + = t('.postgresql.ha.storage.drbd_info') + = integer_field %w(postgresql ha storage drbd size) #shared_storage_container - = string_field %w(ha storage shared device) - = string_field %w(ha storage shared fstype) - = string_field %w(ha storage shared options) + = string_field %w(postgresql ha storage shared device) + = string_field %w(postgresql ha storage shared fstype) + = string_field %w(postgresql ha storage shared options) diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index be5868d852..df661f1666 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -40,20 +40,20 @@ en: postgresql: config: max_connections: 'Global Connection Limit (max_connections)' + ha: + storage: + mode: 'Storage Mode' + modes: + drbd: 'DRBD' + shared: 'Shared Storage' + drbd_info: 'The cluster must have been setup for DRBD.' + drbd: + size: 'Size to Allocate for DRBD Device (in Gigabytes)' + shared: + device: 'Name of Block Device or NFS Mount Specification' + fstype: 'Filesystem Type' + options: 'Mount Options' ha_header: 'High Availability' - ha: - storage: - mode: 'Storage Mode' - modes: - drbd: 'DRBD' - shared: 'Shared Storage' - drbd_info: 'The cluster must have been setup for DRBD.' - drbd: - size: 'Size to Allocate for DRBD Device (in Gigabytes)' - shared: - device: 'Name of Block Device or NFS Mount Specification' - fstype: 'Filesystem Type' - options: 'Mount Options' validation: invalid_db_engine: 'Invalid database engine: %{db_engine}.' unknown_mode_ha: 'Unknown mode for HA storage: %{storage_mode}.' 
From a9e2009a040f6cc98cf8e2d59f256e0a88354086 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 26 Jun 2018 15:22:40 +0200 Subject: [PATCH 094/207] monasca: Fix check for mysql after it got moved to a separate role (cherry picked from commit de3a03b5ea8368e9b6c9a7e291474e35b26103be) --- crowbar_framework/app/models/monasca_service.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crowbar_framework/app/models/monasca_service.rb b/crowbar_framework/app/models/monasca_service.rb index fcb9ac4c4c..a268da9c1d 100644 --- a/crowbar_framework/app/models/monasca_service.rb +++ b/crowbar_framework/app/models/monasca_service.rb @@ -93,7 +93,7 @@ def create_proposal nodes = NodeObject.all non_db_nodes = nodes.reject do |n| # Do not deploy monasca-server to the node running database cluster (already running mariadb) - n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" + n.roles.include?("mysql-server") end monasca_server = select_nodes_for_role(non_db_nodes, "monasca-server", "monitoring") || [] @@ -141,7 +141,7 @@ def validate_proposal_after_save(proposal) nodes = proposal["deployment"][@bc_name]["elements"] nodes["monasca-server"].each do |node| n = NodeObject.find_node_by_name(node) - if n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" + if n.roles.include?("mysql-server") validation_error( "monasca-server role cannot be deployed to the node with other MariaDB instance." ) From b1439c15f26dcb64f31b3899f742f7394fb8bc3e Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Wed, 18 Jul 2018 22:38:54 +1000 Subject: [PATCH 095/207] Restore caching of db_settings We need to cache settings for each combination of database barclamp instances and sql engines. To do this, we first check (and cache) the sql_engine selected by the role. The exception is when running the cookbooks for the engines themselves. 
(cherry picked from commit 824658f002ea5311fb6db885f9406df43ad6d864) --- .../crowbar-openstack/libraries/helpers.rb | 54 +++++++++++-------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 495f1928ec..69bfc64007 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -72,38 +72,45 @@ def self.database_settings(node, barclamp) "on behalf of #{barclamp}") end @database_settings = nil + @sql_engine_cache = nil @database_settings_cache_time = node[:ohai_time] end - if @database_settings && @database_settings.include?(instance) && false - Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") + if ["mysql", "postgresql"].include? barclamp + sql_engine = barclamp + elsif @sql_engine_cache && @sql_engine_cache.include?(instance) + sql_engine = @sql_engine_cache[instance] else - @database_settings ||= Hash.new db_roles, = Chef::Search::Query.new.search( - :role, - "name:database-config-#{instance}" - ) + :role, + "name:database-config-#{instance}" + ) db_proposal_role = db_roles.first unless db_roles.empty? + # TODO(jhesketh): What if db_roles is empty here? 
sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] - if barclamp == "mysql" || barclamp == "postgresql" - sql_engine = barclamp - end - db_role = if sql_engine == "postgresql" - "database-server" - else - "mysql-server" - end + @sql_engine_cache ||= {} + @sql_engine_cache[instance] = sql_engine + end + + if @database_settings && @database_settings.include?(instance) && @database_settings[instance].include?(sql_engine) + Chef::Log.info("Database server found at #{@database_settings[instance][sql_engine][:address]} [cached]") + else + @database_settings ||= Hash.new + db_role = if sql_engine == "postgresql" + "database-server" + else + "mysql-server" + end database = get_node(node, db_role, "database", instance) if database.nil? Chef::Log.warn("No database server found!") else address = CrowbarDatabaseHelper.get_listen_address(database) - backend_name = sql_engine ssl_opts = {} - if backend_name == "mysql" + if sql_engine == "mysql" ssl_opts = { enabled: database["database"]["mysql"]["ssl"]["enabled"], ca_certs: database["database"]["mysql"]["ssl"]["ca_certs"], @@ -111,13 +118,14 @@ def self.database_settings(node, barclamp) database["database"]["mysql"]["ssl"]["insecure"] } end - @database_settings[instance] = { + @database_settings[instance] ||= {} + @database_settings[instance][sql_engine] = { address: address, - url_scheme: backend_name, - backend_name: backend_name, - provider: DatabaseLibrary::Database::Util.get_database_provider(database, backend_name), - user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, backend_name), - privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, backend_name), + url_scheme: sql_engine, + backend_name: sql_engine, + provider: DatabaseLibrary::Database::Util.get_database_provider(database, sql_engine), + user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, sql_engine), + privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, 
sql_engine), connection: { host: address, username: "db_maker", @@ -130,7 +138,7 @@ def self.database_settings(node, barclamp) end end - @database_settings[instance] + @database_settings[instance][sql_engine] end def self.database_connection_string(db_settings, db_auth_attr) From b45eb131637c5d68d798d39a5b98b37ee7819015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Tue, 28 Aug 2018 12:52:43 +0200 Subject: [PATCH 096/207] database: Set mysql as default SQL engine for new deployments Pick correct role when creating proposal (mkcloud might have set sql_engine already). --- chef/data_bags/crowbar/template-database.json | 2 +- crowbar_framework/app/models/database_service.rb | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 364d565b59..7cd5868a9d 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -3,7 +3,7 @@ "description": "Installation for Database", "attributes": { "database": { - "sql_engine": "postgresql", + "sql_engine": "mysql", "mysql": { "datadir": "/var/lib/mysql", "slow_query_logging": true, diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 523f309148..40887d7982 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -57,12 +57,18 @@ def create_proposal @logger.debug("Database create_proposal: entering") base = super + db_role = if base["attributes"]["sql_engine"] == "postgresql" + "database-server" + else + "mysql-server" + end + nodes = NodeObject.all nodes.delete_if { |n| n.nil? or n.admin? 
} if nodes.size >= 1 controller = nodes.find { |n| n.intended_role == "controller" } || nodes.first base["deployment"]["database"]["elements"] = { - "database-server" => [controller[:fqdn]] + db_role => [controller[:fqdn]] } end From 7858b826b0350930c26e71fd373b1f459b6a6db2 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 24 Aug 2018 15:58:25 +0200 Subject: [PATCH 097/207] [4.0] neutron: Increase wait_neutron-agents_ha_resources timeout The cloud-mkcloud8-job-upgrade-nondisruptive-ha-mariadb-x86_64 job has failed on multiple occasions due to the default timeout of 60 seconds timing out for the `wait-neutron-agents_ha_resources` pacemaker sync mark. Runs 188 and 185 are 2 such examples. Looking at the chef-agent logs on the crowbar node we see the following: node1 - founder [2018-08-28T06:29:53+00:00] INFO: Processing crowbar-pacemaker_sync_mark[wait-neutron-agents_ha_resources] action guess (neutron::network_agents_ha line 125) .... [2018-08-28T06:31:02+00:00] INFO: Processing crowbar-pacemaker_sync_mark[create-neutron-agents_ha_resources] action guess (neutron::network_agents_ha line 302) node 2 [2018-08-28T06:29:57+00:00] INFO: Processing crowbar-pacemaker_sync_mark[wait-neutron-agents_ha_resources] action guess (neutron::network_agents_ha line 125) [2018-08-28T06:29:57+00:00] INFO: Checking if cluster founder has set neutron-agents_ha_resources... [2018-08-28T06:30:57+00:00] FATAL: Cluster founder didn't set neutron-agents_ha_resources! NOTE: The founder sets the mark 5 seconds later. node 3 [2018-08-28T06:29:53+00:00] INFO: Processing crowbar-pacemaker_sync_mark[wait-neutron-agents_ha_resources] action guess (neutron::network_agents_ha line 125) [2018-08-28T06:29:53+00:00] INFO: Checking if cluster founder has set neutron-agents_ha_resources... [2018-08-28T06:30:54+00:00] FATAL: Cluster founder didn't set neutron-agents_ha_resources! NOTE: The founder sets the mark 8 seconds later. 
When looking at what's happening on the founder there is no obvious time hole. There are simply many [0]: INFO: Processing pacemaker_.*[.*] action update ... Messages each taking around 2 - 3 seconds which seems to sometimes push the founder past the 60 second mark. This patch increases the timeout to 90 seconds to give the updates more time. This timeout also occurs sometimes when adding a brand new node to a pacemaker cluster. As we've discovered in the upgrade squad, this commit was pulled from a chain that Jacek is currently working on to fix the increase cluster size issues we've found over here [1]. Pushing this up as a separate change to get the CI job green while we still work on the cluster increase case. I have only seen this happen in SOC7 before an upgrade to SOC8, so this will need to be backported. However, I haven't looked closely at other CI jobs to know if other versions are affected. [0] - http://pastebin.nue.suse.com/18759/src [1] - https://github.com/crowbar/crowbar-openstack/pull/1741 (cherry picked from commit 06a9cdd631849dde760dfe4f177c5e55256d3af2) --- chef/cookbooks/neutron/recipes/network_agents_ha.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index b6f2fdd673..06c2767d5f 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -122,7 +122,9 @@ crowbar_pacemaker_sync_mark "sync-neutron-agents_before_ha" # Avoid races when creating pacemaker resources -crowbar_pacemaker_sync_mark "wait-neutron-agents_ha_resources" +crowbar_pacemaker_sync_mark "wait-neutron-agents_ha_resources" do + timeout 90 +end if node[:pacemaker][:clone_stateless_services] transaction_objects = [] From 901d3bd4e00e438fa31d2e64dfa49c75f6cf8ddd Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 22 Aug 2018 13:12:03 +0200 Subject: [PATCH 098/207] keystone: Always 
try to rsync keys to new nodes When adding new node to existing cluster, fernet keys need to be pushed from the founder to keep all keystone nodes in sync. Before this change the keys were pushed to all nodes when cluster was initially set up and then rotated periodically. This commit introduces internal node attribute which indicates that the keys are already in place. Founder pushes the fernet keys to all nodes which don't have this attribute set. The same code covers initial cluster setup as well as adding new node to existing cluster. In addition the lookup of cluster member nodes was changed. Helper function from crowbar-pacemaker cookbook which was based on Chef search was replaced with direct lookup of `elements` attribute (based on deployment part of proposal). This way, new soon-to-be-cluster-member node will be included in the list even before Chef search index is updated and/or chef-client is run on that node. This guarantees that fernet keys are pushed to the new node as soon as possible. --- chef/cookbooks/keystone/recipes/server.rb | 38 +++++++++++++++++------ 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index d728ec9aaf..625621bb20 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -490,12 +490,23 @@ end rsync_command = "" + initial_rsync_command = "" if ha_enabled - cluster_nodes = CrowbarPacemakerHelper.cluster_nodes(node) - cluster_nodes.map do |n| + # can't use CrowbarPacemakerHelper.cluster_nodes() here as it will sometimes not return + # nodes which will be added to the cluster in current chef-client run. + cluster_nodes = node[:pacemaker][:elements]["pacemaker-cluster-member"] + cluster_nodes = cluster_nodes.map { |n| Chef::Node.load(n) } + cluster_nodes.sort_by! 
{ |n| n[:hostname] } + cluster_nodes.each do |n| next if node.name == n.name node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(n, "admin").address - rsync_command += "/usr/bin/keystone-fernet-keys-push.sh #{node_address}; " + node_rsync_command = "/usr/bin/keystone-fernet-keys-push.sh #{node_address}; " + rsync_command += node_rsync_command + # initial rsync only for (new) nodes which didn't get the keys yet + next if n.include?(:keystone) && + n[:keystone].include?(:signing) && + n[:keystone][:signing][:initial_keys_sync] + initial_rsync_command += node_rsync_command end raise "No other cluster members found" if rsync_command.empty? end @@ -519,7 +530,13 @@ crowbar_pacemaker_sync_mark "wait-keystone_fernet_rotate" if ha_enabled - unless File.exist?("/etc/keystone/fernet-keys/0") + if File.exist?("/etc/keystone/fernet-keys/0") + # Mark node to avoid unneeded future rsyncs + unless node[:keystone][:signing][:initial_keys_sync] + node[:keystone][:signing][:initial_keys_sync] = true + node.save + end + else # Setup a key repository for fernet tokens execute "keystone-manage fernet_setup" do command "keystone-manage fernet_setup \ @@ -528,12 +545,15 @@ action :run only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end + end - # We would like to propagate fernet keys to all nodes in the cluster - execute "propagate fernet keys to all nodes in the cluster" do - command rsync_command - action :run - only_if { ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) } + # We would like to propagate fernet keys to all (new) nodes in the cluster + execute "propagate fernet keys to all nodes in the cluster" do + command initial_rsync_command + action :run + only_if do + ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) && + !initial_rsync_command.empty? 
end end From 7eaf4773b62c5454530ccd2d9aa553bc5058f65a Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 24 Aug 2018 21:04:00 +0200 Subject: [PATCH 099/207] nova: Increase HA resources creation timeout When new node is added to existing cluster, all cloned resources need to be extended to include the new node. This operation takes more time than initial creation of resources as it is implemented as a series of "update" calls rather than single transaction. Updated timeout was based on timing stats collected from test runs. --- chef/cookbooks/nova/recipes/controller_ha.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/nova/recipes/controller_ha.rb b/chef/cookbooks/nova/recipes/controller_ha.rb index 92ef398545..2e8ff490fa 100644 --- a/chef/cookbooks/nova/recipes/controller_ha.rb +++ b/chef/cookbooks/nova/recipes/controller_ha.rb @@ -81,7 +81,9 @@ crowbar_pacemaker_sync_mark "sync-nova_before_ha" # Avoid races when creating pacemaker resources - crowbar_pacemaker_sync_mark "wait-nova_ha_resources" + crowbar_pacemaker_sync_mark "wait-nova_ha_resources" do + timeout 120 + end rabbit_settings = fetch_rabbitmq_settings transaction_objects = [] From 890237ed7e870b5acaf83aec5557c8d7db497273 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Sat, 25 Aug 2018 06:49:15 +0200 Subject: [PATCH 100/207] horizon: Increase HA timeout When new node is added to existing cluster, Horizon needs to be installed and configured on that node. Because "old" nodes have most of required packages and configurations already in place, this step caused the new node to fall behind so much that the default sync window was breached. Updated timeout was based on timing stats collected from test runs. 
--- chef/cookbooks/horizon/recipes/ha.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/horizon/recipes/ha.rb b/chef/cookbooks/horizon/recipes/ha.rb index 2de52b7754..02ab61b3d7 100644 --- a/chef/cookbooks/horizon/recipes/ha.rb +++ b/chef/cookbooks/horizon/recipes/ha.rb @@ -36,7 +36,9 @@ # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages installed before we create the pacemaker # resources -crowbar_pacemaker_sync_mark "sync-horizon_before_ha" +crowbar_pacemaker_sync_mark "sync-horizon_before_ha" do + timeout 150 +end # no wait/create sync mark as it's done in crowbar-pacemaker itself From a8e90974a65cda7f33cf5df0e071617ae94ea429 Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Mon, 23 Jul 2018 13:34:58 +0200 Subject: [PATCH 101/207] /etc/sysctl.d/99-sysctl.conf is a symlink to /etc/sysctl.conf We should really use the direct file instead of relying on a symlink. (cherry picked from commit 84cf3a56db72011bc98a82c6b677176834e61e1c) --- chef/cookbooks/neutron/recipes/network_agents.rb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chef/cookbooks/neutron/recipes/network_agents.rb b/chef/cookbooks/neutron/recipes/network_agents.rb index ca9acf95ac..df0e00bf6a 100644 --- a/chef/cookbooks/neutron/recipes/network_agents.rb +++ b/chef/cookbooks/neutron/recipes/network_agents.rb @@ -39,9 +39,9 @@ end # Enable ip forwarding on network node for new SUSE platforms -ruby_block "edit /etc/sysctl.d/99-sysctl.conf for net.ipv4.ip_forward" do +ruby_block "edit /etc/sysctl.conf for net.ipv4.ip_forward" do block do - rc = Chef::Util::FileEdit.new("/etc/sysctl.d/99-sysctl.conf") + rc = Chef::Util::FileEdit.new("/etc/sysctl.conf") rc.search_file_replace_line(/^net.ipv4.ip_forward =/, "net.ipv4.ip_forward = 1") rc.write_file end @@ -51,7 +51,7 @@ # The rest of this logic will be compatible for all the platforms. 
# There is an overlap here, but will not cause inferference (the # variable `net.ipv4.ip_forward` is set to 1 in two files, -# 99-sysctl.conf and 50-neutron-enable-ip_forward.conf) +# sysctl.conf and 50-neutron-enable-ip_forward.conf) directory "create /etc/sysctl.d for enable-ip_forward" do path "/etc/sysctl.d" From a5511c309f37447f78e448a0972c4c57357010df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Mon, 3 Sep 2018 17:33:58 +0200 Subject: [PATCH 102/207] crowbar-openstack: Add a location constraint for DRBD nodes For the cases of DRBD cluster that actually has more than 2 nodes we need to make sure that DRBD resources are only allowed to run on the nodes that have DRBD setup. The purpose of the potential extra node is when migrating to galera cluster which needs 3 nodes. --- ...maker_drbd_controller_only_location_for.rb | 37 +++++++++++++++++++ .../crowbar-openstack/libraries/ha_helpers.rb | 5 +++ 2 files changed, 42 insertions(+) create mode 100644 chef/cookbooks/crowbar-openstack/definitions/openstack_pacemaker_drbd_controller_only_location_for.rb diff --git a/chef/cookbooks/crowbar-openstack/definitions/openstack_pacemaker_drbd_controller_only_location_for.rb b/chef/cookbooks/crowbar-openstack/definitions/openstack_pacemaker_drbd_controller_only_location_for.rb new file mode 100644 index 0000000000..754d590e43 --- /dev/null +++ b/chef/cookbooks/crowbar-openstack/definitions/openstack_pacemaker_drbd_controller_only_location_for.rb @@ -0,0 +1,37 @@ +# +# Copyright 2016, SUSE +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +define :openstack_pacemaker_drbd_controller_only_location_for do + # ensure attributes are set + include_recipe "crowbar-pacemaker::attributes" + + resource = params[:name] + location_name = "l-#{resource}-controller" + + # Make sure drbd nodes are known so that drbd-controller constraint makes sense + location_def = if node[:pacemaker][:drbd].fetch("nodes", []).any? + OpenStackHAHelper.drbd_controller_only_location(location_name, resource) + else + OpenStackHAHelper.controller_only_location(location_name, resource) + end + + pacemaker_location location_name do + definition location_def + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + location_name +end diff --git a/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb index 413a515e73..394f47a6a8 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb @@ -40,6 +40,11 @@ def self.controller_only_location_ignoring_upgrade(location, service) "rule 0: OpenStack-role eq controller" end + def self.drbd_controller_only_location(location, service) + "location #{location} #{service} resource-discovery=exclusive " \ + "rule 0: OpenStack-role eq controller and drbd-controller eq true" + end + def self.no_compute_location(location, service) "location #{location} #{service} resource-discovery=exclusive " \ "rule 0: OpenStack-role ne compute" From 9708afc27c9231f0e95738e4b3aafa9534246372 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Mon, 3 Sep 2018 17:35:35 +0200 Subject: [PATCH 103/207] postgresql: Use extra location constraint for DRBD nodes DRBD resources need to run only on the nodes with DRBD setup. 
--- chef/cookbooks/postgresql/recipes/ha.rb | 2 +- chef/cookbooks/postgresql/recipes/ha_storage.rb | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/chef/cookbooks/postgresql/recipes/ha.rb b/chef/cookbooks/postgresql/recipes/ha.rb index 0b9305133f..2e5ffd90ef 100644 --- a/chef/cookbooks/postgresql/recipes/ha.rb +++ b/chef/cookbooks/postgresql/recipes/ha.rb @@ -108,7 +108,7 @@ vip_location_name = openstack_pacemaker_controller_only_location_for vip_primitive transaction_objects << "pacemaker_location[#{vip_location_name}]" - location_name = openstack_pacemaker_controller_only_location_for service_name + location_name = openstack_pacemaker_drbd_controller_only_location_for service_name transaction_objects << "pacemaker_location[#{location_name}]" else diff --git a/chef/cookbooks/postgresql/recipes/ha_storage.rb b/chef/cookbooks/postgresql/recipes/ha_storage.rb index 2e13291cc0..6c8be1a411 100644 --- a/chef/cookbooks/postgresql/recipes/ha_storage.rb +++ b/chef/cookbooks/postgresql/recipes/ha_storage.rb @@ -38,7 +38,10 @@ fs_params["directory"] = "/var/lib/pgsql" if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" - include_recipe "crowbar-pacemaker::drbd" + + if CrowbarPacemakerHelper.drbd_node?(node) + include_recipe "crowbar-pacemaker::drbd" + end crowbar_pacemaker_drbd drbd_resource do size "#{node[:database][:postgresql][:ha][:storage][:drbd][:size]}G" @@ -104,7 +107,7 @@ end transaction_objects << "pacemaker_ms[#{ms_name}]" - location_name = openstack_pacemaker_controller_location_ignoring_upgrade_for ms_name + location_name = openstack_pacemaker_drbd_controller_only_location_for ms_name transaction_objects << "pacemaker_location[#{location_name}]" end @@ -117,7 +120,11 @@ end transaction_objects << "pacemaker_primitive[#{fs_primitive}]" -location_name = openstack_pacemaker_controller_only_location_for fs_primitive +location_name = if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" + 
openstack_pacemaker_drbd_controller_only_location_for fs_primitive +else + openstack_pacemaker_controller_only_location_for fs_primitive +end transaction_objects << "pacemaker_location[#{location_name}]" if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" From 5efeeaa140cc2d4988595b588c068dd6ef64533a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Tue, 4 Sep 2018 11:10:51 +0200 Subject: [PATCH 104/207] rabbitmq: Use extra location constraint for DRBD nodes DRBD resources need to run only on the nodes with DRBD setup. It is possible that pacemaker cluster contains some extra node so we must avoid starting DRBD resources on it. --- chef/cookbooks/rabbitmq/recipes/ha.rb | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/chef/cookbooks/rabbitmq/recipes/ha.rb b/chef/cookbooks/rabbitmq/recipes/ha.rb index 3c24ee52aa..1a8230ab6c 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha.rb @@ -33,7 +33,10 @@ fs_params = {} fs_params["directory"] = "/var/lib/rabbitmq" if node[:rabbitmq][:ha][:storage][:mode] == "drbd" - include_recipe "crowbar-pacemaker::drbd" + + if CrowbarPacemakerHelper.drbd_node?(node) + include_recipe "crowbar-pacemaker::drbd" + end crowbar_pacemaker_drbd drbd_resource do size "#{node[:rabbitmq][:ha][:storage][:drbd][:size]}G" @@ -133,7 +136,7 @@ end storage_transaction_objects << "pacemaker_ms[#{ms_name}]" - ms_location_name = openstack_pacemaker_controller_location_ignoring_upgrade_for ms_name + ms_location_name = openstack_pacemaker_drbd_controller_only_location_for ms_name storage_transaction_objects << "pacemaker_location[#{ms_location_name}]" end @@ -146,7 +149,12 @@ end storage_transaction_objects << "pacemaker_primitive[#{fs_primitive}]" -fs_location_name = openstack_pacemaker_controller_only_location_for fs_primitive +fs_location_name = if node[:rabbitmq][:ha][:storage][:mode] == "drbd" + 
openstack_pacemaker_drbd_controller_only_location_for fs_primitive +else + openstack_pacemaker_controller_only_location_for fs_primitive +end + + storage_transaction_objects << "pacemaker_location[#{fs_location_name}]" if node[:rabbitmq][:ha][:storage][:mode] == "drbd" @@ -318,7 +326,7 @@ service_transaction_objects << "pacemaker_location[#{public_vip_location_name}]" end - location_name = openstack_pacemaker_controller_only_location_for service_name + location_name = openstack_pacemaker_drbd_controller_only_location_for service_name service_transaction_objects << "pacemaker_location[#{location_name}]" else From acc993611455711cd49b3a4a7b4ef2002d3004ab Mon Sep 17 00:00:00 2001 From: Alberto Planas Date: Fri, 20 Jul 2018 15:33:15 +0200 Subject: [PATCH 105/207] Set a proper ACL for shared certificates (bsc#1101154) If the user is sharing a certificate between different OpenStack services, this needs to be readable by different system users. For example, if the client stores the certificate in /etc/apache2/ssl.key, this directory and all the parents need to be readable by all the OpenStack service users, and also the certificates contained there. To address that we use the ACL from the operating system, via the `setfacl` command. (cherry picked from commit 75fe195ad7875fd650bb842d8127c930d87d5b6b) --- .../libraries/provider_ssl_setup.rb | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb b/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb index 45f2fe2970..13cabb6802 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb @@ -72,6 +72,15 @@ def action_setup end # We do not check for existence of keyfile, as the private key is # allowed to be in the certfile + + # If we do not generate the certificate, we need to be sure + # that it is readable by the user.
In some configurations we + # need to share the same certificate for multiple services, + # so needs to be readable for multiple different users and + # groups (for example, if we share the apache certificate + # for Nova and the Dashboard) + _fix_acl @current_resource.certfile, @current_resource.group + _fix_acl @current_resource.keyfile, @current_resource.group end # if generate_certs if @current_resource.cert_required && ! ::File.size?(@current_resource.ca_certs) @@ -80,6 +89,33 @@ def action_setup raise message end end + + def _fix_acl(certificate, group) + partial = "/" + directory.split(File::SEPARATOR).each do |entry| + next if entry.empty? + + partial = File.join(partial, entry) + # If the file is readable by all users, and the directory is + # readable and executable (we can list the contents) we can + # avoid an ACL modification + if File.world_readable?(partial) + next if File.file?(partial) + next if _world_executable?(partial) && File.directory?(partial) + end + + mask = if File.directory?(partial) + "group:#{group}:r-x" + else + "group:#{group}:r--" + end + system "setfacl -m #{mask} #{partial}" + end + end + + def _world_executable(path) + File.stat(path).mode & 1 == 1 + end end end end From 8df86463d039a2cbb6a86dfd155dfbb286d393e1 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 11 Sep 2018 09:51:23 +0200 Subject: [PATCH 106/207] database: Hide SSL options from database UI This is a replacement for package patch to simplify future modifications to the sources without causing conflicts. 
--- .../app/views/barclamp/database/_edit_attributes.html.haml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index 8f2fd10ad0..e363b48414 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -14,7 +14,7 @@ = integer_field %w(mysql expire_logs_days) = boolean_field %w(mysql slow_query_logging) - %fieldset + %fieldset{ "style" => "display:none" } %legend = t(".mysql.ssl_header") From 15124af74bc3b192e39546860d39e33e54a997c9 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 18 Jul 2018 14:54:52 +0200 Subject: [PATCH 107/207] database: UI for mysql and/or postgresql Barclamp UI was modified to show custom view of MySQL and/or PostgreSQL depending on assigned roles. --- .../barclamps/database/application.js | 47 ++++++++++++------- .../database/_edit_attributes.html.haml | 2 - 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js index 23d49f55cb..7accac280a 100644 --- a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js +++ b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js @@ -1,6 +1,6 @@ /** * Copyright 2011-2013, Dell - * Copyright 2013-2014, SUSE LINUX Products GmbH + * Copyright 2013-2018, SUSE LINUX Products GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,23 +16,38 @@ */ $(document).ready(function($) { - $('#sql_engine').on('change', function() { - var value = $(this).val(); + function updateDBEngines() { + // defer update of selected engines to make sure roles assignment + // is updated by event handlers from NodeList. + setTimeout(function() { + var nodes = { + postgresql: $('ul#database-server li').length, + mysql: $('ul#mysql-server li').length + }; - var types = [ - 'mysql', - 'postgresql' - ]; + var selector = $.map(nodes, function(val, index) { + return '#{0}_container'.format(index); + }).join(', '); - var selector = $.map(types, function(val, index) { - return '#{0}_container'.format(val); - }).join(', '); + var current = $.map($.grep(Object.keys(nodes), function(val) { + return nodes[val] > 0; + }), function(val, index) { + return '#{0}_container'.format(val); + }).join(', '); - var current = '#{0}_container'.format( - value - ); + $(selector).hide(100).attr('disabled', 'disabled'); + $(current).show(100).removeAttr('disabled'); - $(selector).hide(100).attr('disabled', 'disabled'); - $(current).show(100).removeAttr('disabled'); - }).trigger('change'); + // make sure all items have handlers attached + setupEventHandlers(); + }, 0); + } + + function setupEventHandlers() { + $('[data-droppable=true]').off('drop', updateDBEngines).on('drop', updateDBEngines); + $('.dropzone .delete').off('click', updateDBEngines).on('click', updateDBEngines); + $('.dropzone .unassign').off('click', updateDBEngines).on('click', updateDBEngines); + } + + updateDBEngines(); }); diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index e363b48414..d659d23cab 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -3,8 +3,6 @@ = header show_raw_deployment?, true .panel-body - = select_field :sql_engine, 
:collection => :engines_for_database - #mysql_container %fieldset %legend From 7ec12e395004fed737188a54add5c8d20bb37e16 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 24 Jul 2018 08:45:13 +0200 Subject: [PATCH 108/207] database: Automatic sql_engine assignment Added static display of active sql engine. Added automatic assignment of sql engine based on assigned roles: if only one of database roles is used, it changes the active sql engine. In addition if no roles are assigned, sql engine is switched to 'mysql' to highlight preferred option. --- .../barclamps/database/application.js | 16 +++++++++++++--- .../barclamp/database/_edit_attributes.html.haml | 2 ++ crowbar_framework/config/locales/database/en.yml | 2 +- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js index 7accac280a..8efcd53ae9 100644 --- a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js +++ b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js @@ -29,15 +29,25 @@ $(document).ready(function($) { return '#{0}_container'.format(index); }).join(', '); - var current = $.map($.grep(Object.keys(nodes), function(val) { - return nodes[val] > 0; - }), function(val, index) { + var currentEngines = $.grep(Object.keys(nodes), function(val) { return nodes[val] > 0; }); + + var current = $.map(currentEngines, function(val, index) { return '#{0}_container'.format(val); }).join(', '); $(selector).hide(100).attr('disabled', 'disabled'); $(current).show(100).removeAttr('disabled'); + // update sql_engine if only one engine was selected and default to mysql if no roles are assigned + var activeEngine = $('#sql_engine').val(); + if (currentEngines.length === 1) { + activeEngine = currentEngines[0]; + } else if (currentEngines.length === 0) { + activeEngine = 'mysql'; + } + 
$('#sql_engine').val(activeEngine); + $('#proposal_attributes').writeJsonAttribute('sql_engine', activeEngine); + // make sure all items have handlers attached setupEventHandlers(); }, 0); diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index d659d23cab..dc80ed02c5 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -3,6 +3,8 @@ = header show_raw_deployment?, true .panel-body + = string_field :sql_engine, disabled: true + #mysql_container %fieldset %legend diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index df661f1666..906250da45 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -21,7 +21,7 @@ en: barclamp: database: edit_attributes: - sql_engine: 'SQL Engine' + sql_engine: 'Active SQL Engine' mysql_attributes: 'MariaDB Options' mysql: datadir: 'Datadir' From cc4e25281b25e2e536a52d21c300ae3b0146ee88 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 24 Jul 2018 14:06:16 +0200 Subject: [PATCH 109/207] database: Add warning about multiple DB engines Multiple DB engines are supported only during upgrade. Added warning to make sure users are aware of that. 
--- .../app/views/barclamp/database/_edit_attributes.html.haml | 3 +++ crowbar_framework/config/locales/database/en.yml | 1 + 2 files changed, 4 insertions(+) diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index dc80ed02c5..e06f9674ec 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -3,6 +3,9 @@ = header show_raw_deployment?, true .panel-body + .alert.alert-warning + = t(".engine_upgrade") + = string_field :sql_engine, disabled: true #mysql_container diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index 906250da45..80e97299c3 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -21,6 +21,7 @@ en: barclamp: database: edit_attributes: + engine_upgrade: 'Deployment of multiple database engines at the same time is only supported before SUSE OpenStack Cloud 8 upgrade to allow the migration from PostgreSQL to MariaDB. Please refer to the SUSE OpenStack Cloud documentation for more information about the upgrade procedure.' 
sql_engine: 'Active SQL Engine' mysql_attributes: 'MariaDB Options' mysql: From 8917d960170cccfa01cfaeff165af7a3d38039d7 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 24 Jul 2018 15:18:18 +0200 Subject: [PATCH 110/207] database: Validate multiple DB proposals Added validation rules for multiple DB roles: - if multiple roles are assigned, attrs for all selected engines are validated - active sql engine needs to match assigned roles - PostgreSQL can be deployed only as first engine --- .../app/models/database_service.rb | 60 ++++++++++++------- .../config/locales/database/en.yml | 2 + 2 files changed, 40 insertions(+), 22 deletions(-) diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 40887d7982..15b9291a73 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -76,10 +76,18 @@ def create_proposal base end - def validate_ha_attributes(attributes, cluster) + def role_for_engine(engine) + if engine == "postgresql" + "database-server" + else + "mysql-server" + end + end + + def validate_ha_attributes(attributes, cluster, sql_engine) role = available_clusters[cluster] - case attributes["sql_engine"] + case sql_engine when "postgresql" ha_attr = attributes["postgresql"]["ha"] storage_mode = ha_attr["storage"]["mode"] @@ -129,26 +137,38 @@ def validate_ha_attributes(attributes, cluster) def validate_proposal_after_save(proposal) attributes = proposal["attributes"][@bc_name] - sql_engine = attributes["sql_engine"] - db_role = if sql_engine == "postgresql" - "database-server" - else - "mysql-server" - end - validate_one_for_role proposal, db_role + active_engine = attributes["sql_engine"] validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_db_engine", - db_engine: sql_engine - ) unless ["mysql", "postgresql"].include?(sql_engine) - - # HA validation - servers = proposal["deployment"][@bc_name]["elements"][db_role] - 
unless servers.nil? || servers.first.nil? || !is_cluster?(servers.first) - cluster = servers.first - validate_ha_attributes(attributes, cluster) + db_engine: active_engine + ) unless ["mysql", "postgresql"].include?(active_engine) + + selected_engines = ["postgresql", "mysql"].select do |engine| + nodes = proposal["deployment"][@bc_name]["elements"][role_for_engine engine] + !nodes.nil? && !nodes.first.nil? end + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.engine_roles_mismatch", + db_engine: active_engine + ) unless selected_engines.include?(active_engine) + + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.secondary_psql" + ) if selected_engines.length > 1 && active_engine == "mysql" + + selected_engines.each do |engine| + db_role = role_for_engine engine + validate_one_for_role proposal, db_role + + # HA validation + servers = proposal["deployment"][@bc_name]["elements"][db_role] + unless servers.nil? || servers.first.nil? || !is_cluster?(servers.first) + cluster = servers.first + validate_ha_attributes(attributes, cluster, engine) + end + end super end @@ -172,11 +192,7 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) } } ["postgresql", "mysql"].each do |engine| - db_role = if engine == "postgresql" - "database-server" - else - "mysql-server" - end + db_role = role_for_engine engine database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) db_enabled[engine]["enabled"] = true unless database_nodes.empty? db_enabled[engine]["ha"] = database_ha_enabled diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index 80e97299c3..4be6b6f235 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -64,3 +64,5 @@ en: invalid_size_drbd: 'Invalid size for DRBD device.' cluster_size_one: 'The Galera cluster needs more than one cluster member.' 
cluster_size_even: 'The Galera cluster needs an odd number of cluster members and at least three of them.' + engine_roles_mismatch: 'Assigned roles do not match selected database engine: %{db_engine}.' + secondary_psql: 'PostgreSQL can only be deployed as first SQL engine. Migration from MariaDB to PostgreSQL is not supported.' From 1ff76f9c8637d6385c1d5983dc77524b78c3ffc8 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 25 Jul 2018 16:01:56 +0200 Subject: [PATCH 111/207] database: Auto-select SQL engine only for new proposals Auto-changing active engine for existing/edited proposals can be confusing. --- .../barclamps/database/application.js | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js index 8efcd53ae9..a3b0db4916 100644 --- a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js +++ b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js @@ -38,15 +38,20 @@ $(document).ready(function($) { $(selector).hide(100).attr('disabled', 'disabled'); $(current).show(100).removeAttr('disabled'); - // update sql_engine if only one engine was selected and default to mysql if no roles are assigned - var activeEngine = $('#sql_engine').val(); - if (currentEngines.length === 1) { - activeEngine = currentEngines[0]; - } else if (currentEngines.length === 0) { - activeEngine = 'mysql'; + // automatically select active engine only for new proposals + // note that this check is not perfect and will trigger autoselect also for saved but not applied + // proposals (even old ones). 
+ if ($('#proposal_deployment').readJsonAttribute('crowbar-applied') === false) { + // update sql_engine if only one engine was selected and default to mysql if no roles are assigned + var activeEngine = $('#sql_engine').val(); + if (currentEngines.length === 1) { + activeEngine = currentEngines[0]; + } else if (currentEngines.length === 0) { + activeEngine = 'mysql'; + } + $('#sql_engine').val(activeEngine); + $('#proposal_attributes').writeJsonAttribute('sql_engine', activeEngine); } - $('#sql_engine').val(activeEngine); - $('#proposal_attributes').writeJsonAttribute('sql_engine', activeEngine); // make sure all items have handlers attached setupEventHandlers(); From 8c49548581b8cf47532e840f80a6721c97467b75 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Thu, 26 Jul 2018 14:38:26 +0200 Subject: [PATCH 112/207] database: Disallow two engines for new deployments Second engine is supported only as a part of migration to MariaDB. For new proposals only one will be accepted. --- crowbar_framework/app/models/database_service.rb | 4 ++++ crowbar_framework/config/locales/database/en.yml | 1 + 2 files changed, 5 insertions(+) diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 15b9291a73..49e4b03ae1 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -149,6 +149,10 @@ def validate_proposal_after_save(proposal) !nodes.nil? && !nodes.first.nil? 
end + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.new_proposal_multi_engine" + ) if selected_engines.length > 1 && !proposal["deployment"]["crowbar-applied"] + validation_error I18n.t( "barclamp.#{@bc_name}.validation.engine_roles_mismatch", db_engine: active_engine diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index 4be6b6f235..46e844354d 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -66,3 +66,4 @@ en: cluster_size_even: 'The Galera cluster needs an odd number of cluster members and at least three of them.' engine_roles_mismatch: 'Assigned roles do not match selected database engine: %{db_engine}.' secondary_psql: 'PostgreSQL can only be deployed as first SQL engine. Migration from MariaDB to PostgreSQL is not supported.' + new_proposal_multi_engine: 'Second SQL engine can only be added to an existing database deployment.' From 29c605f021d3eb50f5d95c4783db767438658fe1 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 5 Sep 2018 11:28:28 +0200 Subject: [PATCH 113/207] database: Fix detection of already applied proposals Checking "crowbar-applied" is not enough as it is reset when doing "save" without "apply". --- crowbar_framework/app/models/database_service.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 49e4b03ae1..13a6d9a8da 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -84,6 +84,10 @@ def role_for_engine(engine) end end + def already_applied?(proposal_name="default") + !RoleObject.find_role_by_name("#{@bc_name}-config-#{proposal_name}").nil? 
+ end + def validate_ha_attributes(attributes, cluster, sql_engine) role = available_clusters[cluster] @@ -151,7 +155,7 @@ def validate_proposal_after_save(proposal) validation_error I18n.t( "barclamp.#{@bc_name}.validation.new_proposal_multi_engine" - ) if selected_engines.length > 1 && !proposal["deployment"]["crowbar-applied"] + ) if selected_engines.length > 1 && !already_applied? validation_error I18n.t( "barclamp.#{@bc_name}.validation.engine_roles_mismatch", From 3a1db0c569a330c2e4578df544161b1c92836611 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 5 Sep 2018 11:31:34 +0200 Subject: [PATCH 114/207] database: Disallow secondary psql (revisited) Better check for blocking psql deployment on top of existing mysql. --- .../app/models/database_service.rb | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 13a6d9a8da..54f78476a2 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -162,10 +162,6 @@ def validate_proposal_after_save(proposal) db_engine: active_engine ) unless selected_engines.include?(active_engine) - validation_error I18n.t( - "barclamp.#{@bc_name}.validation.secondary_psql" - ) if selected_engines.length > 1 && active_engine == "mysql" - selected_engines.each do |engine| db_role = role_for_engine engine validate_one_for_role proposal, db_role @@ -180,6 +176,25 @@ def validate_proposal_after_save(proposal) super end + def validate_proposal_elements(proposal_elements) + old_proposal = proposals_raw.first + + return super if old_proposal.nil? 
+ + # disallow adding psql when mysql is already deployed + old_psql_nodes = old_proposal.elements["database-server"] || [] + old_mysql_nodes = old_proposal.elements["mysql-server"] || [] + new_psql_nodes = proposal_elements["database-server"] || [] + raise I18n.t( + "barclamp.#{@bc_name}.validation.secondary_psql" + ) if already_applied? && + !old_mysql_nodes.empty? && + old_psql_nodes.empty? && + !new_psql_nodes.empty? + + super + end + def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Database apply_role_pre_chef_call: entering #{all_nodes.inspect}") return if all_nodes.empty? From 5c8b8c37fb0679076a4d40180ee2f1e8080fd172 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 5 Sep 2018 12:32:52 +0200 Subject: [PATCH 115/207] database: Improve engine parameters display With flat UI structure it was not clear which parameters belong to which engine. Additional borders were added to highlight the split. --- .../database/_edit_attributes.html.haml | 81 ++++++++++--------- 1 file changed, 43 insertions(+), 38 deletions(-) diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index e06f9674ec..ce5caece27 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -8,49 +8,54 @@ = string_field :sql_engine, disabled: true - #mysql_container - %fieldset - %legend + %ul.list-group#mysql_container + %li.list-group-item.active + %h3.list-group-item-heading = t('.mysql_attributes') - = integer_field %w(mysql max_connections) - = integer_field %w(mysql expire_logs_days) - = boolean_field %w(mysql slow_query_logging) - - %fieldset{ "style" => "display:none" } - %legend - = t(".mysql.ssl_header") - - = boolean_field %w(mysql ssl enabled), - "data-sslprefix" => "ssl" - - #ssl_container - = boolean_field %w(mysql ssl generate_certs) - = 
string_field %w(mysql ssl certfile) - = string_field %w(mysql ssl keyfile) - = boolean_field %w(mysql ssl insecure) - = string_field %w(mysql ssl ca_certs) - - #postgresql_container - %fieldset - %legend + %li.list-group-item + %fieldset + = integer_field %w(mysql max_connections) + = integer_field %w(mysql expire_logs_days) + = boolean_field %w(mysql slow_query_logging) + + %fieldset{ "style" => "display:none" } + %legend + = t(".mysql.ssl_header") + + = boolean_field %w(mysql ssl enabled), + "data-sslprefix" => "ssl" + + #ssl_container + = boolean_field %w(mysql ssl generate_certs) + = string_field %w(mysql ssl certfile) + = string_field %w(mysql ssl keyfile) + = boolean_field %w(mysql ssl insecure) + = string_field %w(mysql ssl ca_certs) + + %ul.list-group#postgresql_container + %li.list-group-item.active + %h3.list-group-item-heading = t('.postgresql_attributes') - = integer_field %w(postgresql config max_connections) + %li.list-group-item + %fieldset + = integer_field %w(postgresql config max_connections) + + -# As HA is only supported for postgresql, we put this section in #postgresql_container + %fieldset#ha-setup{ "data-show-for-clusters-only" => "true", "data-elements-path" => "database-server" } + %legend + = t('.ha_header') - -# As HA is only supported for postgresql, we put this section in #postgresql_container - %fieldset#ha-setup{ "data-show-for-clusters-only" => "true", "data-elements-path" => "database-server" } - %legend - = t('.ha_header') + = select_field %w(postgresql ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" - = select_field %w(postgresql ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true" + 
#drbd_storage_container + .alert.alert-info + = t('.postgresql.ha.storage.drbd_info') + = integer_field %w(postgresql ha storage drbd size) - #drbd_storage_container - .alert.alert-info - = t('.postgresql.ha.storage.drbd_info') - = integer_field %w(postgresql ha storage drbd size) + #shared_storage_container + = string_field %w(postgresql ha storage shared device) + = string_field %w(postgresql ha storage shared fstype) + = string_field %w(postgresql ha storage shared options) - #shared_storage_container - = string_field %w(postgresql ha storage shared device) - = string_field %w(postgresql ha storage shared fstype) - = string_field %w(postgresql ha storage shared options) From 3e285f548fd18cc83260b8f0dc51cf18696758a8 Mon Sep 17 00:00:00 2001 From: Madhu Mohan Nelemane Date: Thu, 5 Jul 2018 23:07:07 +0200 Subject: [PATCH 116/207] neutron [cisco-aci]: Add cisco-aci role for neutron. So far, the implementation assumed that only compute nodes would be attached to the ACI leaf ports and the controller nodes are independently attached to the network not connected to the ACI fabric itself. The only form of communication from the controller to the ACI is through the agents. However, recent customer POCs have changed our assumption since these setups expect all or most of the nodes attached the ACI leaf as the ACI fabric forms the central network infrastructure of the entire deployment. In such scenarios, we need a mechanism where we can know which of the nodes are attached to the ACI leaf and which nodes are not. The agents should be enabled and run on each node attached to the ACI leaf ports. This commit tries to achieve exactly this result, so that future deployments can flexibly allow any node attached to the fabric. The commit also validates that at least one node is assigned with neutron-sdn-cisco-aci-agents role if the mechanism drivers for apic_ml2 or apic_gbp is chosen. 
(cherry picked from commit a63b7346eea507024fa8308da5785ba72f6294cd) --- .../neutron/recipes/cisco_apic_agents.rb | 150 +++++++++--------- .../cookbooks/neutron/recipes/common_agent.rb | 6 +- .../role_neutron_sdn_cisco_aci_agents.rb | 19 +++ .../migrate/neutron/119_add_cisco_aci_role.rb | 39 +++++ chef/data_bags/crowbar/template-neutron.json | 11 +- chef/roles/neutron-sdn-cisco-aci-agents.rb | 4 + .../app/models/neutron_service.rb | 91 +++++++---- 7 files changed, 201 insertions(+), 119 deletions(-) create mode 100644 chef/cookbooks/neutron/recipes/role_neutron_sdn_cisco_aci_agents.rb create mode 100644 chef/data_bags/crowbar/migrate/neutron/119_add_cisco_aci_role.rb create mode 100644 chef/roles/neutron-sdn-cisco-aci-agents.rb diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb index 231bcb5103..5351655984 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb @@ -57,85 +57,83 @@ end # apply configurations to compute node -if node.roles.include?("nova-compute-kvm") - node[:neutron][:platform][:cisco_opflex_pkgs].each { |p| package p } +node[:neutron][:platform][:cisco_opflex_pkgs].each { |p| package p } - service "lldpd" do - action [:enable, :start] - end - utils_systemd_service_restart "lldpd" - - # include neutron::common_config only now, after we've installed packages - include_recipe "neutron::common_config" - - # Agent configurations for Cisco APIC driver - # The ACI setup for OpenStack releases before Pike use "of_interface" options - # set to "ovs-ofctl". This option has been deprecated in Pike and removed - # from this config file for Pike. 
It is still included in Newton (Cloud7) - agent_config_path = "/etc/neutron/plugins/ml2/openvswitch_agent.ini" - template agent_config_path do - cookbook "neutron" - source "openvswitch_agent.ini.erb" - owner "root" - group node[:neutron][:platform][:group] - mode "0640" - variables( - ml2_type_drivers: ml2_type_drivers, - ml2_mech_drivers: ml2_mech_drivers, - tunnel_types: "", - enable_tunneling: false, - use_l2pop: false, - dvr_enabled: false, - of_interface: "ovs-ofctl", - ovsdb_interface: neutron[:neutron][:ovs][:ovsdb_interface], - bridge_mappings: "" - ) - end +service "lldpd" do + action [:enable, :start] +end +utils_systemd_service_restart "lldpd" + +# include neutron::common_config only now, after we've installed packages +include_recipe "neutron::common_config" + +# Agent configurations for Cisco APIC driver +# The ACI setup for OpenStack releases before Pike use "of_interface" options +# set to "ovs-ofctl". This option has been deprecated in Pike and removed +# from this config file for Pike. It is still included in Newton (Cloud7) +agent_config_path = "/etc/neutron/plugins/ml2/openvswitch_agent.ini" +template agent_config_path do + cookbook "neutron" + source "openvswitch_agent.ini.erb" + owner "root" + group node[:neutron][:platform][:group] + mode "0640" + variables( + ml2_type_drivers: ml2_type_drivers, + ml2_mech_drivers: ml2_mech_drivers, + tunnel_types: "", + enable_tunneling: false, + use_l2pop: false, + dvr_enabled: false, + of_interface: "ovs-ofctl", + ovsdb_interface: neutron[:neutron][:ovs][:ovsdb_interface], + bridge_mappings: "" + ) +end - # Update config file from template - opflex_agent_conf = "/etc/opflex-agent-ovs/conf.d/10-opflex-agent-ovs.conf" - apic = neutron[:neutron][:apic] - opflex_list = apic[:opflex].select { |i| i[:nodes].include? node[:hostname] } - opflex_list.any? || raise("Opflex instance not found for node '#{node[:hostname]}'") - opflex_list.one? 
|| raise("Multiple opflex instances found for node '#{node[:hostname]}'") - opflex = opflex_list.first - template opflex_agent_conf do - cookbook "neutron" - source "10-opflex-agent-ovs.conf.erb" - mode "0755" - owner "root" - group neutron[:neutron][:platform][:group] - variables( - opflex_apic_domain_name: neutron[:neutron][:apic][:system_id], - hostname: node[:hostname], - socketgroup: neutron[:neutron][:platform][:group], - opflex_peer_ip: opflex[:peer_ip], - opflex_peer_port: opflex[:peer_port], - opflex_vxlan_encap_iface: opflex[:vxlan][:encap_iface], - opflex_vxlan_uplink_iface: opflex[:vxlan][:uplink_iface], - opflex_vxlan_uplink_vlan: opflex[:vxlan][:uplink_vlan], - opflex_vxlan_remote_ip: opflex[:vxlan][:remote_ip], - opflex_vxlan_remote_port: opflex[:vxlan][:remote_port], - # TODO(mmnelemane) : update VLAN encapsulation config when it works. - # Currently set to VXLAN by default but can be modified from proposal. - ml2_type_drivers: ml2_type_drivers - ) - end +# Update config file from template +opflex_agent_conf = "/etc/opflex-agent-ovs/conf.d/10-opflex-agent-ovs.conf" +apic = neutron[:neutron][:apic] +opflex_list = apic[:opflex].select { |i| i[:nodes].include? node[:hostname] } +opflex_list.any? || raise("Opflex instance not found for node '#{node[:hostname]}'") +opflex_list.one? 
|| raise("Multiple opflex instances found for node '#{node[:hostname]}'") +opflex = opflex_list.first +template opflex_agent_conf do + cookbook "neutron" + source "10-opflex-agent-ovs.conf.erb" + mode "0755" + owner "root" + group neutron[:neutron][:platform][:group] + variables( + opflex_apic_domain_name: neutron[:neutron][:apic][:system_id], + hostname: node[:hostname], + socketgroup: neutron[:neutron][:platform][:group], + opflex_peer_ip: opflex[:peer_ip], + opflex_peer_port: opflex[:peer_port], + opflex_vxlan_encap_iface: opflex[:vxlan][:encap_iface], + opflex_vxlan_uplink_iface: opflex[:vxlan][:uplink_iface], + opflex_vxlan_uplink_vlan: opflex[:vxlan][:uplink_vlan], + opflex_vxlan_remote_ip: opflex[:vxlan][:remote_ip], + opflex_vxlan_remote_port: opflex[:vxlan][:remote_port], + # TODO(mmnelemane) : update VLAN encapsulation config when it works. + # Currently set to VXLAN by default but can be modified from proposal. + ml2_type_drivers: ml2_type_drivers + ) +end - neutron_metadata do - use_cisco_apic_ml2_driver true - neutron_node_object neutron - end +neutron_metadata do + use_cisco_apic_ml2_driver true + neutron_node_object neutron +end - service "neutron-opflex-agent" do - action [:enable, :start] - subscribes :restart, resources("template[#{agent_config_path}]") - end - utils_systemd_service_restart "neutron-opflex-agent" +service "neutron-opflex-agent" do + action [:enable, :start] + subscribes :restart, resources("template[#{agent_config_path}]") +end +utils_systemd_service_restart "neutron-opflex-agent" - service "agent-ovs" do - action [:enable, :start] - subscribes :restart, resources("template[#{opflex_agent_conf}]") - end - utils_systemd_service_restart "agent-ovs" +service "agent-ovs" do + action [:enable, :start] + subscribes :restart, resources("template[#{opflex_agent_conf}]") end +utils_systemd_service_restart "agent-ovs" diff --git a/chef/cookbooks/neutron/recipes/common_agent.rb b/chef/cookbooks/neutron/recipes/common_agent.rb index 
020af2f733..4ffbb449de 100644 --- a/chef/cookbooks/neutron/recipes/common_agent.rb +++ b/chef/cookbooks/neutron/recipes/common_agent.rb @@ -81,12 +81,10 @@ end end -if neutron[:neutron][:networking_plugin] == "ml2" && +# Skip working with regular agents if Cisco ACI is used +return if neutron[:neutron][:networking_plugin] == "ml2" && (neutron[:neutron][:ml2_mechanism_drivers].include?("cisco_apic_ml2") || neutron[:neutron][:ml2_mechanism_drivers].include?("apic_gbp")) - include_recipe "neutron::cisco_apic_agents" - return # skip anything else in this recipe -end multiple_external_networks = !neutron[:neutron][:additional_external_networks].empty? diff --git a/chef/cookbooks/neutron/recipes/role_neutron_sdn_cisco_aci_agents.rb b/chef/cookbooks/neutron/recipes/role_neutron_sdn_cisco_aci_agents.rb new file mode 100644 index 0000000000..eddfd28ad9 --- /dev/null +++ b/chef/cookbooks/neutron/recipes/role_neutron_sdn_cisco_aci_agents.rb @@ -0,0 +1,19 @@ +# +# Copyright 2018, SUSE LINUX GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +if CrowbarRoleRecipe.node_state_valid_for_role?(node, "neutron", "neutron-sdn-cisco-aci-agents") + include_recipe "neutron::cisco_apic_agents" +end diff --git a/chef/data_bags/crowbar/migrate/neutron/119_add_cisco_aci_role.rb b/chef/data_bags/crowbar/migrate/neutron/119_add_cisco_aci_role.rb new file mode 100644 index 0000000000..4e297a78d1 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/119_add_cisco_aci_role.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +def upgrade(ta, td, a, d) + unless d["element_states"].key?("neutron-sdn-cisco-aci-agents") + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + d["element_run_list_order"] = td["element_run_list_order"] + + if a["networking_plugin"] == "ml2" && + (a["ml2_mechanism_drivers"].include?("cisco_apic_ml2") || + a["ml2_mechanism_drivers"].include?("apic_gbp")) + nodes = NodeObject.find("roles:nova-compute-kvm") + nodes.each do |node| + node.add_to_run_list("neutron-sdn-cisco-aci-agents", + td["element_run_list_order"]["neutron-sdn-cisco-aci-agents"], + td["element_states"]["neutron-sdn-cisco-aci-agents"]) + node.save + end + end + end + return a, d +end + +def downgrade(ta, td, a, d) + unless td["element_states"].key?("neutron-sdn-cisco-aci-agents") + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + d["element_run_list_order"] = td["element_run_list_order"] + d["elements"].delete("neutron-sdn-cisco-aci-agents") + + nodes = NodeObject.find("roles:neutron-sdn-cisco-aci-agents") + nodes.each do |node| + node.delete_from_run_list("neutron-sdn-cisco-aci-agents") + node.save + end + end + + return a, d +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 350139f85d..a6e18c465d 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -189,19 +189,22 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": 
false, - "schema-revision": 118, + "schema-revision": 119, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], - "neutron-network": [ "readying", "ready", "applying" ] + "neutron-network": [ "readying", "ready", "applying" ], + "neutron-sdn-cisco-aci-agents": [ "readying", "ready", "applying" ] }, "elements": {}, "element_order": [ ["neutron-server" ], - ["neutron-network" ] + ["neutron-network" ], + ["neutron-sdn-cisco-aci-agents" ] ], "element_run_list_order": { "neutron-server": 94, - "neutron-network": 95 + "neutron-network": 95, + "neutron-sdn-cisco-aci-agents": 96 }, "config": { "environment": "neutron-config-base", diff --git a/chef/roles/neutron-sdn-cisco-aci-agents.rb b/chef/roles/neutron-sdn-cisco-aci-agents.rb new file mode 100644 index 0000000000..cf66565834 --- /dev/null +++ b/chef/roles/neutron-sdn-cisco-aci-agents.rb @@ -0,0 +1,4 @@ +name "neutron-sdn-cisco-aci-agents" +description "Nodes attached to one of the Cisco ACI Leaf Ports" + +run_list("recipe[neutron::role_neutron_sdn_cisco_aci_agents]") diff --git a/crowbar_framework/app/models/neutron_service.rb b/crowbar_framework/app/models/neutron_service.rb index 0a122baf64..17c27dd8db 100644 --- a/crowbar_framework/app/models/neutron_service.rb +++ b/crowbar_framework/app/models/neutron_service.rb @@ -61,6 +61,16 @@ def role_constraints "windows" => "/.*/" }, "cluster" => true + }, + "neutron-sdn-cisco-aci-agents" => { + "unique" => false, + "count" => -1, + "admin" => false, + "exclude_platform" => { + "suse" => "< 12.2", + "windows" => "/.*/" + }, + "cluster" => true } } end @@ -109,8 +119,9 @@ def create_proposal base["deployment"]["neutron"]["elements"] = { "neutron-server" => [controller_node[:fqdn]], - "neutron-network" => network_nodes.map { |x| x[:fqdn] } - } unless nodes.nil? or nodes.length ==0 + "neutron-network" => network_nodes.map { |x| x[:fqdn] }, + "neutron-sdn-cisco-aci-agents" => nodes.map { |x| x[:fqdn] } + } unless nodes.nil? || nodes.length.zero? 
base["attributes"]["neutron"]["service_password"] = random_password base["attributes"][@bc_name][:db][:password] = random_password @@ -219,39 +230,6 @@ def validate_ml2(proposal) validation_error I18n.t("barclamp.#{@bc_name}.validation.vmware_dvs_vlan") end - # Checks for Cisco ACI ml2 driver - if ml2_mechanism_drivers.include?("cisco_apic_ml2") && - ml2_mechanism_drivers.include?("apic_gbp") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2_gbp") - end - - if ml2_mechanism_drivers.include?("cisco_apic_ml2") || - ml2_mechanism_drivers.include?("apic_gbp") - # openvswitch should not be used when cisco_apic_ml2 mechanism driver is used - if ml2_mechanism_drivers.include?("openvswitch") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2") - end - - if ml2_mechanism_drivers.include?("linuxbridge") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_linuxbridge") - end - - # cisco_apic_ml2 mechanism driver needs opflex as the type_driver - unless ml2_type_drivers.include?("opflex") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_type") - end - - # Validate if ACI configurations are provided - if proposal["attributes"]["neutron"]["apic"].nil? || - proposal["attributes"]["neutron"]["apic"].empty? 
- validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_no_config") - end - - # Cisco APIC already distributes neutron services not needing DVR - if proposal["attributes"]["neutron"]["use_dvr"] - validation_error I18n.t("barcalmp.#{@bc_name}.validation.cisco_apic_dvr") - end - end # for now, openvswitch and linuxbrige can't be used in parallel if ml2_mechanism_drivers.include?("openvswitch") && @@ -332,6 +310,48 @@ def validate_dvr(proposal) end end + def validate_cisco_aci(proposal) + # Checks for Cisco ACI ml2 driver + ml2_mechanism_drivers = proposal["attributes"]["neutron"]["ml2_mechanism_drivers"] + ml2_type_drivers = proposal["attributes"]["neutron"]["ml2_type_drivers"] + + if ml2_mechanism_drivers.include?("cisco_apic_ml2") && + ml2_mechanism_drivers.include?("apic_gbp") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2_gbp") + end + + if ml2_mechanism_drivers.include?("cisco_apic_ml2") || + ml2_mechanism_drivers.include?("apic_gbp") + + validate_at_least_n_for_role proposal, "neutron-sdn-cisco-aci-agents", 1 + + # openvswitch should not be used when cisco_apic_ml2 mechanism driver is used + if ml2_mechanism_drivers.include?("openvswitch") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2") + end + + if ml2_mechanism_drivers.include?("linuxbridge") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_linuxbridge") + end + + # cisco_apic_ml2 mechanism driver needs opflex as the type_driver + unless ml2_type_drivers.include?("opflex") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_type") + end + + # Validate if ACI configurations are provided + if proposal["attributes"]["neutron"]["apic"].nil? || + proposal["attributes"]["neutron"]["apic"].empty? 
+ validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_no_config") + end + + # Cisco APIC already distributes neutron services not needing DVR + if proposal["attributes"]["neutron"]["use_dvr"] + validation_error I18n.t("barcalmp.#{@bc_name}.validation.cisco_apic_dvr") + end + end + end + def validate_external_networks(external_networks) net_svc = NetworkService.new @logger network_proposal = Proposal.find_by(barclamp: net_svc.bc_name, name: "default") @@ -374,6 +394,7 @@ def validate_proposal_after_save(proposal) validate_ml2(proposal) if plugin == "ml2" validate_l2pop(proposal) validate_dvr(proposal) + validate_cisco_aci(proposal) if proposal[:attributes][:neutron][:use_infoblox] validate_infoblox(proposal) end From 11d7ce8d4fb038b966a1a0acea49171f6c696822 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Thu, 13 Sep 2018 14:30:39 +0200 Subject: [PATCH 117/207] helpers: Pass sql_engine into get_listen_address When fetch_database_settings is called by one of the database specific cookbooks it is supposed to lookup the settings for the respective backend instead of for the currently selected default backend. --- chef/cookbooks/crowbar-openstack/libraries/helpers.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 69bfc64007..27eff1c7ba 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -107,7 +107,7 @@ def self.database_settings(node, barclamp) if database.nil? 
Chef::Log.warn("No database server found!") else - address = CrowbarDatabaseHelper.get_listen_address(database) + address = CrowbarDatabaseHelper.get_listen_address(database, sql_engine) ssl_opts = {} if sql_engine == "mysql" From 6aea2d575a0d6983695f4249f9e4bab09d7cf68f Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Wed, 9 May 2018 17:05:28 +0200 Subject: [PATCH 118/207] database: Allow bootstrap openstack dbs on a separate node The "pg2mariadb_preparation" can be inserted into the run list of a controller node (in the HA case only one node per controller-cluster). It will create all databases and -users for the deployed openstack services on an existing mariadb database (while the cloud continues to use postgresql). It will also create temporary override config files for all the services for use with the respective "db_sync" tools of the services. Currently it doesn't call the db_sync tools by itself (to be addressed in a follow up). FIXME: * Avoid hardcoded name for the mariadb deployment * Add support for missing barclamps * call the correct db_sync tools * cleanup files after success (based on commit a798efac834e33e1309fe966d2039b8c6e3016fb) --- .../recipes/pg2mariadb_preparation.rb | 150 ++++++++++++++++++ .../default/mariadb-override.conf.erb | 5 + 2 files changed, 155 insertions(+) create mode 100644 chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb create mode 100644 chef/cookbooks/crowbar-openstack/templates/default/mariadb-override.conf.erb diff --git a/chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb b/chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb new file mode 100644 index 0000000000..b9f39ad7c9 --- /dev/null +++ b/chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb @@ -0,0 +1,150 @@ +# FIXME this is still missing a couple of openstack barclamps +roles_using_database = { + "keystone-server" => { + "barclamp" => "keystone", + "db_sync_cmd" => "keystone-manage --config-dir 
/etc/keystone/keystone.conf.d/ --config-dir %{db_override_conf} db_sync" + }, + "glance-server" => { + "barclamp" => "glance", + "db_sync_cmd" => "glance-manage --config-dir /etc/glance/glance.conf.d/ --config-dir %{db_override_conf} db_sync" + }, + "cinder-controller" => { + "barclamp" => "cinder", + "db_sync_cmd" => "cinder-manage --config-dir /etc/cinder/cinder.conf.d/ --config-dir %{db_override_conf} db sync" + }, + "manila-server" => { + "barclamp" => "manila", + "db_sync_cmd" => "manila-manage --config-dir /etc/manila/manila.conf.d/ --config-dir %{db_override_conf} db sync" + }, + "neutron-server" => { + "barclamp" => "neutron", + "db_sync_cmd" => "neutron-db-manage --config-dir /etc/neutron/neutron.conf.d/ --config-dir %{db_override_conf} upgrade head" + }, + "nova-controller" => { + "barclamp" => "nova", + "db_sync_cmd" => [ + "nova-manage --config-dir /etc/nova/nova.conf.d/ --config-dir %{db_override_conf} db sync", + "nova-manage --config-dir /etc/nova/nova.conf.d/ --config-dir %{db_override_conf} api_db sync" + ] + }, + # ec2 is special in that it's attributes are part of the nova barclamp + "ec2-api" => { + "barclamp" => "nova", + "ec2-api-manage --config-dir /etc/ec2api/ec2api.conf.d/ --config-dir %{db_override_conf} db_sync", + }, + "horizon-server" => { + "barclamp" => "horizon", + "db_sync_cmd" => "--config-file %{db_override_conf}" + }, + "ceilometer-server" => { + "barclamp" => "ceilometer", + "db_sync_cmd" => "--config-file %{db_override_conf}" + }, + "heat-server" => { + "barclamp" => "heat", + "db_sync_cmd" => "--config-file %{db_override_conf}" + }, + "aodh-server" => { + "barclamp" => "aodh", + "db_sync_cmd" => "--config-file %{db_override_conf}" + } +} + +databases = [] +# The "barclamp" parameter doesn't really matter here, we want to use the same +# instance for all databases. 
And we specify that instance my name (currently +# hard-coded to "maria" +db_settings = CrowbarOpenStackHelper.database_settings(node, "keystone", "maria") +roles_using_database.keys.each do |role| + if node.roles.include? role + barclamp = roles_using_database[role]["barclamp"] + + db = if role == "ec2-api" + node[barclamp]["ec2-api"]["db"] + else + node[barclamp]["db"] + end + databases << db + db_conf_sections = {} + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, db ) + Chef::Log.info("connection string: #{connection}") + db_conf_sections["database"] = connection + + # The nova-controller role creates more than one database + if role == "nova-controller" + databases << node[barclamp]["api_db"] + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, node[barclamp]["api_db"] ) + Chef::Log.info("connection string: #{connection}") + db_conf_sections["api_database"] = connection + databases << node[barclamp]["placement_db"] + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, node[barclamp]["placement_db"] ) + Chef::Log.info("connection string: #{connection}") + db_conf_sections["placement_database"] = connection + end + + db_override_conf = "/etc/pg2mysql/#{role}.mariadb-conf.d/" + directory "/etc/pg2mysql/" do + mode 0750 + owner "root" + group "root" + end + + directory db_override_conf do + mode 0750 + owner "root" + group "root" + end + + template "#{db_override_conf}/999-db.conf" do + source "mariadb-override.conf.erb" + mode 0640 + owner "root" + group "root" + variables( + db_conf_sections: db_conf_sections + ) + end + end +end + +include_recipe "database::client" +include_recipe "#{db_settings[:backend_name]}::client" +include_recipe "#{db_settings[:backend_name]}::python-client" + +databases.each do |db| + Chef::Log.info("creating database #{db["database"]}") + Chef::Log.info("creating database user #{db["user"]} with password #{db["password"]}") + Chef::Log.info("db settings: 
#{db_settings.inspect}") + + database "create #{db[:database]} database (pg2my)" do + connection db_settings[:connection] + database_name db[:database] + provider db_settings[:provider] + action :create +# only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + database_user "create #{db[:user]} database user (pg2my)" do + connection db_settings[:connection] + username db[:user] + password db[:password] + host "%" + provider db_settings[:user_provider] + action :create +# only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + database_user "grant database access for #{db[:user]} database user (pg2my)" do + connection db_settings[:connection] + username db[:user] + password db[:password] + database_name db[:database] + host "%" + privileges db_settings[:privs] + provider db_settings[:user_provider] + require_ssl db_settings[:connection][:ssl][:enabled] + action :grant +# only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + +end diff --git a/chef/cookbooks/crowbar-openstack/templates/default/mariadb-override.conf.erb b/chef/cookbooks/crowbar-openstack/templates/default/mariadb-override.conf.erb new file mode 100644 index 0000000000..9999e18895 --- /dev/null +++ b/chef/cookbooks/crowbar-openstack/templates/default/mariadb-override.conf.erb @@ -0,0 +1,5 @@ +<% @db_conf_sections.keys.each do |section| -%> +[<%= section -%>] +connection = <%= @db_conf_sections[section] -%> + +<% end %> From 1b35320ef54c9d8112c04c0119f877fcbc97a20a Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 7 Sep 2018 09:34:12 +0200 Subject: [PATCH 119/207] database: Prepare MariaDB for migration Before data could be migrated from PostgreSQL databases and users for all services need to be created in target MariaDB. Also table structures need to be deployed (using OpenStack manage commands). New script (prepare-mariadb) was added to perform the whole process. 
Role specific data was extracted from existing pg2mariadb_preparation recipe so that it can be used in both recipe and the prepare script. The script looks up nodes which host services using the database and prepares each of them. First step is execution of pg2mariadb_preparation chef recipe which creates all needed databases and users. It also writes set of config override files and per-role scripts which are later used to execute dbsync commands. The recipe is executed by temporary modification of node-role run_list and running chef-client on the target node. Second step is execution of all dbsync scripts on the target node. The script should be executed on the admin server. Stdout and stderr of all steps are written to log files under /var/log/crowbar/db-prepare*.log --- bin/prepare-mariadb | 145 +++++++++++++++++ .../recipes/pg2mariadb_preparation.rb | 150 ------------------ chef/cookbooks/database/libraries/crowbar.rb | 88 ++++++++++ .../recipes/pg2mariadb_preparation.rb | 125 +++++++++++++++ .../templates/default/mariadb-db_sync.sh.erb | 4 + .../default/mariadb-override.conf.erb | 2 +- 6 files changed, 363 insertions(+), 151 deletions(-) create mode 100755 bin/prepare-mariadb delete mode 100644 chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb create mode 100644 chef/cookbooks/database/recipes/pg2mariadb_preparation.rb create mode 100644 chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb rename chef/cookbooks/{crowbar-openstack => database}/templates/default/mariadb-override.conf.erb (54%) diff --git a/bin/prepare-mariadb b/bin/prepare-mariadb new file mode 100755 index 0000000000..0010a7d178 --- /dev/null +++ b/bin/prepare-mariadb @@ -0,0 +1,145 @@ +#!/usr/bin/env ruby +# +# Copyright 2018, SUSE +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +$LOAD_PATH.unshift(File.expand_path("../../crowbar_framework/lib", __FILE__)) +$LOAD_PATH.unshift(File.expand_path("../../chef/cookbooks/database/libraries", __FILE__)) + +require "chef" +require "erb" +require "crowbar" + +RECIPE = "recipe[database::pg2mariadb_preparation]" +LOGDIR = "/var/log/crowbar" + +def chef_init + Chef::Config.node_name "crowbar" + Chef::Config.client_key "/opt/dell/crowbar_framework/config/client.pem" + Chef::Config.chef_server_url "http://localhost:4000" +end + +def node_for_role(role) + nodes = [] + Chef::Search::Query.new.search "node", "roles:#{role}" do |n| + nodes << n + end + nodes.sort_by! { |n| n.name } + nodes.empty? ? nil : nodes.first +end + +def mysql_node + node_for_role "mysql-server" +end + +# Select nodes which represent all services which use database +# they might be standalone nodes or cluster members. In most simple +# case there will be only one node used for all services. +def selected_nodes + nodes_and_roles = {} + CrowbarDatabaseHelper.roles_using_database.each do |role| + node = node_for_role(role) + next if node.nil? + if nodes_and_roles.include? 
node.name + nodes_and_roles[node.name][:roles] << role + else + nodes_and_roles[node.name] = { node: node, roles: [role] } + end + end + nodes_and_roles +end + +def node_role(node) + Chef::Role.load("crowbar-"+node.name.gsub(".", "_")) +end + +def add_recipe(node) + role = node_role(node) + role.run_list << RECIPE + role.save +end + +def remove_recipe(node) + role = node_role(node) + role.run_list.remove(RECIPE) + role.save +end + +# based on code from crowbar_framework/app/models/node.rb +def run_ssh_cmd(node, cmd, timeout = "15s", kill_after = "5s") + start_time = Time.now + args = ["sudo", "-i", "-u", "root", "--", "timeout", "-k", kill_after, timeout, + "ssh", "-o", "ConnectTimeout=10", + "root@#{node.name}", + %("#{cmd.gsub('"', '\\"')}") + ].join(" ") + Open3.popen2e(args) do |stdin, stdout_and_stderr, wait_thr| + { + stdout_and_stderr: stdout_and_stderr.gets(nil), + exit_code: wait_thr.value.exitstatus, + run_time: Time.now - start_time + } + end +end + +def log(msg) + print "#{msg}\n" +end + +def write_log(part, stdout_and_stderr) + outfile = File.join(LOGDIR, "db-prepare.#{part}.log") + open(outfile, "w") { |f| f << stdout_and_stderr } + log "Stdout/Stderr written to: #{outfile}" +end + +def prepare_node(node, roles) + log "Preparing node #{node.name}" + log "Adding #{RECIPE} to run_list" + add_recipe node + log "Running chef-client on #{node.name}..." + res = run_ssh_cmd(node, "chef-client", "40m") + write_log("chef-client", res[:stdout_and_stderr]) + log "Run time: #{res[:run_time]}s" + log "Removing #{RECIPE} from run_list" + remove_recipe node + unless res[:exit_code].zero? + log "ERROR: Chef-client failed with code: #{res[:exit_code]}" + return + end + log "Processing db_sync commands on #{node.name}" + roles.each do |role| + cmd = "/etc/pg2mysql/scripts/#{role}-db_sync.sh" + log "Running db_sync script #{cmd} for role #{role}" + res = run_ssh_cmd(node, cmd, "5m") + log "ERROR: Failed with code: #{res[:exit_code]}" unless res[:exit_code].zero? 
+ write_log("role-#{role}", res[:stdout_and_stderr]) + log "Run time: #{res[:run_time]}s" + end + log "Prepare completed for #{node.name}" +end + +def main + chef_init + if mysql_node.nil? + log "ERROR: MySQL server not found. Please assign mysql-server role " + "to some node or cluster and re-apply database proposal." + return + end + selected_nodes.values.each do |node_data| + prepare_node(node_data[:node], node_data[:roles]) + end +end + +main diff --git a/chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb b/chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb deleted file mode 100644 index b9f39ad7c9..0000000000 --- a/chef/cookbooks/crowbar-openstack/recipes/pg2mariadb_preparation.rb +++ /dev/null @@ -1,150 +0,0 @@ -# FIXME this is still missing a couple of openstack barclamps -roles_using_database = { - "keystone-server" => { - "barclamp" => "keystone", - "db_sync_cmd" => "keystone-manage --config-dir /etc/keystone/keystone.conf.d/ --config-dir %{db_override_conf} db_sync" - }, - "glance-server" => { - "barclamp" => "glance", - "db_sync_cmd" => "glance-manage --config-dir /etc/glance/glance.conf.d/ --config-dir %{db_override_conf} db_sync" - }, - "cinder-controller" => { - "barclamp" => "cinder", - "db_sync_cmd" => "cinder-manage --config-dir /etc/cinder/cinder.conf.d/ --config-dir %{db_override_conf} db sync" - }, - "manila-server" => { - "barclamp" => "manila", - "db_sync_cmd" => "manila-manage --config-dir /etc/manila/manila.conf.d/ --config-dir %{db_override_conf} db sync" - }, - "neutron-server" => { - "barclamp" => "neutron", - "db_sync_cmd" => "neutron-db-manage --config-dir /etc/neutron/neutron.conf.d/ --config-dir %{db_override_conf} upgrade head" - }, - "nova-controller" => { - "barclamp" => "nova", - "db_sync_cmd" => [ - "nova-manage --config-dir /etc/nova/nova.conf.d/ --config-dir %{db_override_conf} db sync", - "nova-manage --config-dir /etc/nova/nova.conf.d/ --config-dir %{db_override_conf} api_db sync" - ] - }, - # 
ec2 is special in that it's attributes are part of the nova barclamp - "ec2-api" => { - "barclamp" => "nova", - "ec2-api-manage --config-dir /etc/ec2api/ec2api.conf.d/ --config-dir %{db_override_conf} db_sync", - }, - "horizon-server" => { - "barclamp" => "horizon", - "db_sync_cmd" => "--config-file %{db_override_conf}" - }, - "ceilometer-server" => { - "barclamp" => "ceilometer", - "db_sync_cmd" => "--config-file %{db_override_conf}" - }, - "heat-server" => { - "barclamp" => "heat", - "db_sync_cmd" => "--config-file %{db_override_conf}" - }, - "aodh-server" => { - "barclamp" => "aodh", - "db_sync_cmd" => "--config-file %{db_override_conf}" - } -} - -databases = [] -# The "barclamp" parameter doesn't really matter here, we want to use the same -# instance for all databases. And we specify that instance my name (currently -# hard-coded to "maria" -db_settings = CrowbarOpenStackHelper.database_settings(node, "keystone", "maria") -roles_using_database.keys.each do |role| - if node.roles.include? 
role - barclamp = roles_using_database[role]["barclamp"] - - db = if role == "ec2-api" - node[barclamp]["ec2-api"]["db"] - else - node[barclamp]["db"] - end - databases << db - db_conf_sections = {} - connection = CrowbarOpenStackHelper.database_connection_string(db_settings, db ) - Chef::Log.info("connection string: #{connection}") - db_conf_sections["database"] = connection - - # The nova-controller role creates more than one database - if role == "nova-controller" - databases << node[barclamp]["api_db"] - connection = CrowbarOpenStackHelper.database_connection_string(db_settings, node[barclamp]["api_db"] ) - Chef::Log.info("connection string: #{connection}") - db_conf_sections["api_database"] = connection - databases << node[barclamp]["placement_db"] - connection = CrowbarOpenStackHelper.database_connection_string(db_settings, node[barclamp]["placement_db"] ) - Chef::Log.info("connection string: #{connection}") - db_conf_sections["placement_database"] = connection - end - - db_override_conf = "/etc/pg2mysql/#{role}.mariadb-conf.d/" - directory "/etc/pg2mysql/" do - mode 0750 - owner "root" - group "root" - end - - directory db_override_conf do - mode 0750 - owner "root" - group "root" - end - - template "#{db_override_conf}/999-db.conf" do - source "mariadb-override.conf.erb" - mode 0640 - owner "root" - group "root" - variables( - db_conf_sections: db_conf_sections - ) - end - end -end - -include_recipe "database::client" -include_recipe "#{db_settings[:backend_name]}::client" -include_recipe "#{db_settings[:backend_name]}::python-client" - -databases.each do |db| - Chef::Log.info("creating database #{db["database"]}") - Chef::Log.info("creating database user #{db["user"]} with password #{db["password"]}") - Chef::Log.info("db settings: #{db_settings.inspect}") - - database "create #{db[:database]} database (pg2my)" do - connection db_settings[:connection] - database_name db[:database] - provider db_settings[:provider] - action :create -# only_if { !ha_enabled 
|| CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - - database_user "create #{db[:user]} database user (pg2my)" do - connection db_settings[:connection] - username db[:user] - password db[:password] - host "%" - provider db_settings[:user_provider] - action :create -# only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - - database_user "grant database access for #{db[:user]} database user (pg2my)" do - connection db_settings[:connection] - username db[:user] - password db[:password] - database_name db[:database] - host "%" - privileges db_settings[:privs] - provider db_settings[:user_provider] - require_ssl db_settings[:connection][:ssl][:enabled] - action :grant -# only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - -end diff --git a/chef/cookbooks/database/libraries/crowbar.rb b/chef/cookbooks/database/libraries/crowbar.rb index 07512d9558..08a3b87bee 100644 --- a/chef/cookbooks/database/libraries/crowbar.rb +++ b/chef/cookbooks/database/libraries/crowbar.rb @@ -24,4 +24,92 @@ def self.get_listen_address(node, sql_engine = node[:database][:sql_engine]) use_ssl ? 
node[:fqdn] : Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "admin").address end end + + def self.roles_using_database + migration_data.keys + end + + def self.role_migration_data(role) + migration_data[role] + end + + def self.migration_data + { + "keystone-server" => { + "barclamp" => "keystone", + "db_sync_cmd" => "keystone-manage --config-dir /etc/keystone/keystone.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db_sync" + }, + "glance-server" => { + "barclamp" => "glance", + "db_sync_cmd" => "glance-manage --config-dir /etc/glance/glance.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db_sync" + }, + "cinder-controller" => { + "barclamp" => "cinder", + "db_sync_cmd" => "cinder-manage --config-dir /etc/cinder/cinder.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db sync" + }, + "manila-server" => { + "barclamp" => "manila", + "db_sync_cmd" => "manila-manage --config-dir /etc/manila/manila.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db sync" + }, + "neutron-server" => { + "barclamp" => "neutron", + "db_sync_cmd" => "neutron-db-manage --config-dir /etc/neutron/neutron.conf.d/ " \ + "--config-dir <%=@db_override_conf%> upgrade head" + }, + "nova-controller" => { + "barclamp" => "nova", + "db_sync_cmd" => [ + "nova-manage --config-dir /etc/nova/nova.conf.d/ " \ + "--config-dir <%=@db_override_conf%> api_db sync", + "nova-manage --config-dir /etc/nova/nova.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db sync" + ] + }, + # ec2 is special in that it's attributes are part of the nova barclamp + "ec2-api" => { + "barclamp" => "nova", + "db_sync_cmd" => "ec2-api-manage --config-dir /etc/ec2api/ec2api.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db_sync" + }, + # django migration tool uses db settings from + # /srv/www/openstack-dashboard/openstack_dashboard/local/local.settings.d/_100_local_settings.py + "horizon-server" => { + "barclamp" => "horizon", + "db_sync_cmd" => "python /srv/www/openstack-dashboard/manage.py migrate 
--database mysql" + }, + "ceilometer-server" => { + "barclamp" => "ceilometer", + "db_sync_cmd" => "ceilometer-dbsync --config-dir /etc/ceilometer/ceilometer.conf.d/ " \ + "--config-dir <%=@db_override_conf%>" + }, + "heat-server" => { + "barclamp" => "heat", + "db_sync_cmd" => "heat-manage --config-dir /etc/heat/heat.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db_sync" + }, + "aodh-server" => { + "barclamp" => "aodh", + "db_sync_cmd" => "aodh-dbsync --config-dir /etc/aodh/aodh.conf.d/ " \ + "--config-dir <%=@db_override_conf%>" + }, + "barbican-controller" => { + "barclamp" => "barbican", + # this doesn't work because of a bug in barbican-manage handling of oslo_config + # "db_sync_cmd" => "barbican-manage --config-dir /etc/barbican/barbican.conf.d/ " \ + # "--config-dir <%=@db_override_conf%> db upgrade" + "db_sync_cmd" => "barbican-manage db upgrade --db-url <%=@db_conf_sections['DEFAULT']%>" + }, + "trove-server" => { + "barclamp" => "trove", + "db_sync_cmd" => "trove-manage --config-dir /etc/trove/trove.conf.d/ " \ + "--config-dir <%=@db_override_conf%> db_sync" + } + } + end + + private_class_method :migration_data end diff --git a/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb new file mode 100644 index 0000000000..cbd29d82f5 --- /dev/null +++ b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb @@ -0,0 +1,125 @@ +databases = [] +# The "barclamp" parameter doesn't really matter here, we want to use the same +# instance for all databases. +db_settings = CrowbarOpenStackHelper.database_settings(node, "mysql") +CrowbarDatabaseHelper.roles_using_database.each do |role| + next unless node.roles.include? 
role + + role_migration_data = CrowbarDatabaseHelper.role_migration_data(role) + barclamp = role_migration_data["barclamp"] + + db = if role == "ec2-api" + node[barclamp]["ec2-api"]["db"] + else + node[barclamp]["db"] + end + databases << db + db_conf_sections = {} + db_connection_key = "connection" + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, db) + Chef::Log.info("connection string: #{connection}") + db_conf_sections["database"] = connection + + # The nova-controller role creates more than one database + if role == "nova-controller" + databases << node[barclamp]["api_db"] + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, + node[barclamp]["api_db"]) + Chef::Log.info("connection string: #{connection}") + db_conf_sections["api_database"] = connection + databases << node[barclamp]["placement_db"] + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, + node[barclamp]["placement_db"]) + Chef::Log.info("connection string: #{connection}") + db_conf_sections["placement_database"] = connection + end + # Barbican uses non-standard db config structure + if role == "barbican-controller" + db_conf_sections = { "DEFAULT" => connection } + db_connection_key = "sql_connection" + end + + db_override_conf = "/etc/pg2mysql/#{role}.mariadb-conf.d/" + directory "/etc/pg2mysql/" do + mode 0750 + owner "root" + group "root" + end + + directory "/etc/pg2mysql/scripts" do + mode 0750 + owner "root" + group "root" + end + + cmds = role_migration_data["db_sync_cmd"] + cmds = [cmds] unless cmds.is_a?(Array) + + template "/etc/pg2mysql/scripts/#{role}-db_sync.sh" do + source "mariadb-db_sync.sh.erb" + mode 0750 + owner "root" + group "root" + variables( + db_sync_cmds: cmds, + db_conf_sections: db_conf_sections, + db_override_conf: db_override_conf + ) + end + + directory db_override_conf do + mode 0750 + owner "root" + group "root" + end + + template "#{db_override_conf}/999-db.conf" do + source 
"mariadb-override.conf.erb" + mode 0640 + owner "root" + group "root" + variables( + db_conf_sections: db_conf_sections, + db_connection_key: db_connection_key + ) + end +end + +include_recipe "database::client" +include_recipe "#{db_settings[:backend_name]}::client" +include_recipe "#{db_settings[:backend_name]}::python-client" + +databases.each do |db| + Chef::Log.info("creating database #{db["database"]}") + Chef::Log.info("creating database user #{db["user"]} with password #{db["password"]}") + Chef::Log.info("db settings: #{db_settings.inspect}") + + database "create #{db[:database]} database (pg2my)" do + connection db_settings[:connection] + database_name db[:database] + provider db_settings[:provider] + action :create + end + + database_user "create #{db[:user]} database user (pg2my)" do + connection db_settings[:connection] + username db[:user] + password db[:password] + host "%" + provider db_settings[:user_provider] + action :create + end + + database_user "grant database access for #{db[:user]} database user (pg2my)" do + connection db_settings[:connection] + username db[:user] + password db[:password] + database_name db[:database] + host "%" + privileges db_settings[:privs] + provider db_settings[:user_provider] + require_ssl db_settings[:connection][:ssl][:enabled] + action :grant + end + +end diff --git a/chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb b/chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb new file mode 100644 index 0000000000..a509d65557 --- /dev/null +++ b/chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb @@ -0,0 +1,4 @@ +#!/bin/sh +<% @db_sync_cmds.each do |cmd| -%> +<%= ERB.new(cmd).result(binding) %> +<% end %> diff --git a/chef/cookbooks/crowbar-openstack/templates/default/mariadb-override.conf.erb b/chef/cookbooks/database/templates/default/mariadb-override.conf.erb similarity index 54% rename from chef/cookbooks/crowbar-openstack/templates/default/mariadb-override.conf.erb rename to 
chef/cookbooks/database/templates/default/mariadb-override.conf.erb index 9999e18895..e787748c8a 100644 --- a/chef/cookbooks/crowbar-openstack/templates/default/mariadb-override.conf.erb +++ b/chef/cookbooks/database/templates/default/mariadb-override.conf.erb @@ -1,5 +1,5 @@ <% @db_conf_sections.keys.each do |section| -%> [<%= section -%>] -connection = <%= @db_conf_sections[section] -%> +<%= @db_connection_key -%> = <%= @db_conf_sections[section] -%> <% end %> From 78f6fcd943a5f4319718fae068a1a191adf0abe5 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 12 Sep 2018 11:58:22 +0200 Subject: [PATCH 120/207] horizon: Add database config for migration Django DB manage tool uses configuration from local_settings.d. To prepare target MySQL database for migration, additional entry is added to the configuration. It always points to MySQL database and is not present if MySQL is not deployed. --- chef/cookbooks/horizon/recipes/server.rb | 14 ++++++++++++++ .../templates/default/local_settings.py.erb | 7 +++++++ 2 files changed, 21 insertions(+) diff --git a/chef/cookbooks/horizon/recipes/server.rb b/chef/cookbooks/horizon/recipes/server.rb index e51d236564..a5e19837c7 100644 --- a/chef/cookbooks/horizon/recipes/server.rb +++ b/chef/cookbooks/horizon/recipes/server.rb @@ -335,6 +335,19 @@ "default-character-set" => "'utf8'" } +mysql_settings = fetch_database_settings "mysql" +if mysql_settings + package "python-mysql" + django_mysql_settings = { + "ENGINE" => "'django.db.backends.mysql'", + "NAME" => "'#{node[:horizon][:db][:database]}'", + "USER" => "'#{node[:horizon][:db][:user]}'", + "PASSWORD" => "'#{node[:horizon][:db][:password]}'", + "HOST" => "'#{mysql_settings[:address]}'", + "default-character-set" => "'utf8'" + } +end + db_ca_certs = database_ssl ? 
db_settings[:connection][:ssl][:ca_certs] : "" glance_insecure = CrowbarOpenStackHelper.insecure(Barclamp::Config.load("openstack", "glance")) @@ -454,6 +467,7 @@ || manila_insecure \ || ceilometer_insecure, db_settings: django_db_settings, + mysql_settings: django_mysql_settings, db_ca_certs: db_ca_certs, enable_lb: neutron_use_lbaas, enable_vpn: neutron_use_vpnaas, diff --git a/chef/cookbooks/horizon/templates/default/local_settings.py.erb b/chef/cookbooks/horizon/templates/default/local_settings.py.erb index ab77b33aee..b14619b449 100644 --- a/chef/cookbooks/horizon/templates/default/local_settings.py.erb +++ b/chef/cookbooks/horizon/templates/default/local_settings.py.erb @@ -253,6 +253,13 @@ DATABASES = { } <% end %> }, +<% if @mysql_settings -%> + 'mysql': { + <% @mysql_settings.sort_by { |key, value| key }.each do |key,value| -%> + '<%= key %>': <%= value %>, + <% end -%> + }, +<% end -%> } SITE_BRANDING = "<%= @site_branding %>" From 412640f8ea4a1ac612ced46e7ffbbf80323cd470 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 14 Sep 2018 21:49:03 +0200 Subject: [PATCH 121/207] database: Add missing roles to migration data --- chef/cookbooks/database/libraries/crowbar.rb | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/chef/cookbooks/database/libraries/crowbar.rb b/chef/cookbooks/database/libraries/crowbar.rb index 08a3b87bee..f546d6a40f 100644 --- a/chef/cookbooks/database/libraries/crowbar.rb +++ b/chef/cookbooks/database/libraries/crowbar.rb @@ -103,6 +103,16 @@ def self.migration_data # "--config-dir <%=@db_override_conf%> db upgrade" "db_sync_cmd" => "barbican-manage db upgrade --db-url <%=@db_conf_sections['DEFAULT']%>" }, + "magnum-server" => { + "barclamp" => "magnum", + "db_sync_cmd" => "magnum-db-manage --config-dir /etc/magnum/magnum.conf.d/ " \ + "--config-dir <%=@db_override_conf%> upgrade" + }, + "sahara-server" => { + "barclamp" => "sahara", + "db_sync_cmd" => "sahara-db-manage --config-dir /etc/sahara/sahara.conf.d/ " \ + 
"--config-dir <%=@db_override_conf%> upgrade head" + }, "trove-server" => { "barclamp" => "trove", "db_sync_cmd" => "trove-manage --config-dir /etc/trove/trove.conf.d/ " \ From c1b89062a77feea529e0397a88f74446a18a5f96 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 14 Sep 2018 21:49:29 +0200 Subject: [PATCH 122/207] database: Remote logging in prepare script Switched prepare script to log stdout/stderr on remote node instead of admin server. Benefit is that the log is written live while the command is running not after the execution is finished. --- bin/prepare-mariadb | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/bin/prepare-mariadb b/bin/prepare-mariadb index 0010a7d178..45721cbc78 100755 --- a/bin/prepare-mariadb +++ b/bin/prepare-mariadb @@ -78,13 +78,15 @@ def remove_recipe(node) end # based on code from crowbar_framework/app/models/node.rb -def run_ssh_cmd(node, cmd, timeout = "15s", kill_after = "5s") +def run_ssh_cmd(node, cmd, log_suffix, timeout = "15s", kill_after = "5s") + log_file = "/var/log/crowbar/db-prepare.#{log_suffix}.log" if log_suffix start_time = Time.now args = ["sudo", "-i", "-u", "root", "--", "timeout", "-k", kill_after, timeout, "ssh", "-o", "ConnectTimeout=10", "root@#{node.name}", - %("#{cmd.gsub('"', '\\"')}") + %("#{cmd.gsub('"', '\\"')} > #{log_file} 2>&1") ].join(" ") + log "Log: #{log_file} on #{node.name}" Open3.popen2e(args) do |stdin, stdout_and_stderr, wait_thr| { stdout_and_stderr: stdout_and_stderr.gets(nil), @@ -98,19 +100,12 @@ def log(msg) print "#{msg}\n" end -def write_log(part, stdout_and_stderr) - outfile = File.join(LOGDIR, "db-prepare.#{part}.log") - open(outfile, "w") { |f| f << stdout_and_stderr } - log "Stdout/Stderr written to: #{outfile}" -end - def prepare_node(node, roles) log "Preparing node #{node.name}" log "Adding #{RECIPE} to run_list" add_recipe node log "Running chef-client on #{node.name}..." 
- res = run_ssh_cmd(node, "chef-client", "40m") - write_log("chef-client", res[:stdout_and_stderr]) + res = run_ssh_cmd(node, "chef-client", "chef-client", "30m") log "Run time: #{res[:run_time]}s" log "Removing #{RECIPE} from run_list" remove_recipe node @@ -122,9 +117,8 @@ def prepare_node(node, roles) roles.each do |role| cmd = "/etc/pg2mysql/scripts/#{role}-db_sync.sh" log "Running db_sync script #{cmd} for role #{role}" - res = run_ssh_cmd(node, cmd, "5m") + res = run_ssh_cmd(node, cmd, role, "5m") log "ERROR: Failed with code: #{res[:exit_code]}" unless res[:exit_code].zero? - write_log("role-#{role}", res[:stdout_and_stderr]) log "Run time: #{res[:run_time]}s" end log "Prepare completed for #{node.name}" From 8c0be66ffe824b2bbde698716b8af3afcf272d46 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Sat, 15 Sep 2018 09:16:44 +0200 Subject: [PATCH 123/207] database: Create index of prepared DBs To simplify data migration additional file which lists all prepared databases with respective source and target connection strings. The files are stored on the prepared nodes. Later they are collected, merged and stored on the admin server as one file. 
--- bin/prepare-mariadb | 15 +++++++++---- .../recipes/pg2mariadb_preparation.rb | 22 +++++++++++++++---- .../default/mariadb-databases.txt.erb | 3 +++ 3 files changed, 32 insertions(+), 8 deletions(-) create mode 100644 chef/cookbooks/database/templates/default/mariadb-databases.txt.erb diff --git a/bin/prepare-mariadb b/bin/prepare-mariadb index 45721cbc78..7edb66d94b 100755 --- a/bin/prepare-mariadb +++ b/bin/prepare-mariadb @@ -78,13 +78,14 @@ def remove_recipe(node) end # based on code from crowbar_framework/app/models/node.rb -def run_ssh_cmd(node, cmd, log_suffix, timeout = "15s", kill_after = "5s") +def run_ssh_cmd(node, cmd, log_suffix = nil, timeout = "15s", kill_after = "5s") log_file = "/var/log/crowbar/db-prepare.#{log_suffix}.log" if log_suffix + log_redirect = "> #{log_file} 2>&1" if log_file start_time = Time.now args = ["sudo", "-i", "-u", "root", "--", "timeout", "-k", kill_after, timeout, "ssh", "-o", "ConnectTimeout=10", "root@#{node.name}", - %("#{cmd.gsub('"', '\\"')} > #{log_file} 2>&1") + %("#{cmd.gsub('"', '\\"')} #{log_redirect}") ].join(" ") log "Log: #{log_file} on #{node.name}" Open3.popen2e(args) do |stdin, stdout_and_stderr, wait_thr| @@ -111,7 +112,7 @@ def prepare_node(node, roles) remove_recipe node unless res[:exit_code].zero? log "ERROR: Chef-client failed with code: #{res[:exit_code]}" - return + return [] end log "Processing db_sync commands on #{node.name}" roles.each do |role| @@ -121,7 +122,9 @@ def prepare_node(node, roles) log "ERROR: Failed with code: #{res[:exit_code]}" unless res[:exit_code].zero? log "Run time: #{res[:run_time]}s" end + summary = run_ssh_cmd(node, "cat /etc/pg2mysql/databases.txt")[:stdout_and_stderr].lines log "Prepare completed for #{node.name}" + summary end def main @@ -131,9 +134,13 @@ def main "to some node or cluster and re-apply database proposal." 
return end + summary = Set.new selected_nodes.values.each do |node_data| - prepare_node(node_data[:node], node_data[:roles]) + summary |= prepare_node(node_data[:node], node_data[:roles]) end + Dir.mkdir "/etc/pg2mysql" unless File.exists? "/etc/pg2mysql" + open("/etc/pg2mysql/databases.txt", "w") { |f| f << summary.to_a.join } + log "Summary of used databases: /etc/pg2mysql/databases.txt" end main diff --git a/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb index cbd29d82f5..fb501d72b3 100644 --- a/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb +++ b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb @@ -2,6 +2,7 @@ # The "barclamp" parameter doesn't really matter here, we want to use the same # instance for all databases. db_settings = CrowbarOpenStackHelper.database_settings(node, "mysql") +psql_settings = CrowbarOpenStackHelper.database_settings(node, "postgresql") CrowbarDatabaseHelper.roles_using_database.each do |role| next unless node.roles.include? 
role @@ -13,23 +14,23 @@ else node[barclamp]["db"] end - databases << db db_conf_sections = {} db_connection_key = "connection" connection = CrowbarOpenStackHelper.database_connection_string(db_settings, db) + databases << { db: db, url: connection } Chef::Log.info("connection string: #{connection}") db_conf_sections["database"] = connection # The nova-controller role creates more than one database if role == "nova-controller" - databases << node[barclamp]["api_db"] connection = CrowbarOpenStackHelper.database_connection_string(db_settings, node[barclamp]["api_db"]) + databases << { db: node[barclamp]["api_db"], url: connection } Chef::Log.info("connection string: #{connection}") db_conf_sections["api_database"] = connection - databases << node[barclamp]["placement_db"] connection = CrowbarOpenStackHelper.database_connection_string(db_settings, node[barclamp]["placement_db"]) + databases << { db: node[barclamp]["placement_db"], url: connection } Chef::Log.info("connection string: #{connection}") db_conf_sections["placement_database"] = connection end @@ -89,7 +90,10 @@ include_recipe "#{db_settings[:backend_name]}::client" include_recipe "#{db_settings[:backend_name]}::python-client" -databases.each do |db| +databases.each do |dbdata| + db = dbdata[:db] + # fill psql url for databases.txt + dbdata[:psql_url] = CrowbarOpenStackHelper.database_connection_string(psql_settings, db) Chef::Log.info("creating database #{db["database"]}") Chef::Log.info("creating database user #{db["user"]} with password #{db["password"]}") Chef::Log.info("db settings: #{db_settings.inspect}") @@ -123,3 +127,13 @@ end end + +template "/etc/pg2mysql/databases.txt" do + source "mariadb-databases.txt.erb" + mode 0640 + owner "root" + group "root" + variables( + databases: databases + ) +end diff --git a/chef/cookbooks/database/templates/default/mariadb-databases.txt.erb b/chef/cookbooks/database/templates/default/mariadb-databases.txt.erb new file mode 100644 index 0000000000..4fa2c10cf5 --- 
/dev/null +++ b/chef/cookbooks/database/templates/default/mariadb-databases.txt.erb @@ -0,0 +1,3 @@ +<% @databases.each do |dbdata| -%> +<%= dbdata[:db][:database] -%> <%= dbdata[:psql_url] -%> <%= dbdata[:url] %> +<% end %> From 369e98049306486141768b3826ea2aed89a90418 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 19 Sep 2018 09:25:10 +0200 Subject: [PATCH 124/207] database: Exit codes in prepare script Added system exit codes for error conditions. --- bin/prepare-mariadb | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/bin/prepare-mariadb b/bin/prepare-mariadb index 7edb66d94b..4535c44241 100755 --- a/bin/prepare-mariadb +++ b/bin/prepare-mariadb @@ -112,7 +112,7 @@ def prepare_node(node, roles) remove_recipe node unless res[:exit_code].zero? log "ERROR: Chef-client failed with code: #{res[:exit_code]}" - return [] + return end log "Processing db_sync commands on #{node.name}" roles.each do |role| @@ -128,19 +128,26 @@ def prepare_node(node, roles) end def main + ret = 0 chef_init if mysql_node.nil? log "ERROR: MySQL server not found. Please assign mysql-server role " "to some node or cluster and re-apply database proposal." - return + return -1 end summary = Set.new selected_nodes.values.each do |node_data| - summary |= prepare_node(node_data[:node], node_data[:roles]) + node_summary = prepare_node(node_data[:node], node_data[:roles]) + if node_summary.nil? + ret = -2 + else + summary |= node_summary + end end Dir.mkdir "/etc/pg2mysql" unless File.exists? "/etc/pg2mysql" open("/etc/pg2mysql/databases.txt", "w") { |f| f << summary.to_a.join } log "Summary of used databases: /etc/pg2mysql/databases.txt" + return ret end -main +exit(main) From 3d4dc67106157bc4e313e893ffaa108e8426b196 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 19 Sep 2018 14:01:19 +0200 Subject: [PATCH 125/207] crowbar-openstack: Expose get_node helper function CrowbarOpenStack.get_node should work fine outside of the helper class. 
It can be used to ensure a node for given role/barclamp by checking given node and falling back to Chef search if it's not matching the requirements. --- chef/cookbooks/crowbar-openstack/libraries/helpers.rb | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 27eff1c7ba..8ef8af26d9 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -303,8 +303,6 @@ def self.insecure(attributes) use_ssl && attributes["ssl"]["insecure"] end - private - def self.get_node(node, role, barclamp, instance) result = nil @@ -322,12 +320,13 @@ def self.get_node(node, role, barclamp, instance) result end + private + def self.get_nodes(node, role, barclamp, instance) nodes, = Chef::Search::Query.new.search(:node, "roles:#{role} AND " \ "#{barclamp}_config_environment:#{barclamp}-config-#{instance}") nodes end - private_class_method :get_node private_class_method :get_nodes end From eee6a8bcbc4ec27a4000bdc6994893af4ebec17b Mon Sep 17 00:00:00 2001 From: Abel Navarro Date: Tue, 7 Aug 2018 13:41:28 +0200 Subject: [PATCH 126/207] mysql: ha galera needs op monitor for slaves In a Galera HA environment the pacemaker primitive op monitor is only set for master mode. This is not a problem because all galera resources are in master mode. The problem comes when changing the pacemaker primitive and the masters can become slaves, i.e. when shrinking the cluster. In such case the slaves never get promoted to masters because they are not monitored. Adding the monitor operation for slaves solves this situation. 
(cherry picked from commit 67fa21f8b086c862163ac044a788226ce159edcd) --- chef/cookbooks/mysql/attributes/server.rb | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb index 93c7b39ef1..a0cf51696b 100644 --- a/chef/cookbooks/mysql/attributes/server.rb +++ b/chef/cookbooks/mysql/attributes/server.rb @@ -31,8 +31,9 @@ # Default operation setting for the galera resource # in pacemamker -default[:mysql][:ha][:op][:monitor][:interval] = "20s" -default[:mysql][:ha][:op][:monitor][:role] = "Master" +default[:mysql][:ha][:op][:monitor] = [ + { interval: "23s" }, { interval: "20s", role: "Master" } +] # If needed we can enhance this to set the mariadb version # depeding on "platform" and "platform_version". But currently From 0290b20674c580fc34fa199cc94e2c9a956706db Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 21 Sep 2018 10:41:10 +0200 Subject: [PATCH 127/207] neutron: Don't assign the aci role to any node by default This needs be assigned manually when the ACI/opflex drivers are enabled (cherry picked from commit 2581217e072c0f415be410e80a266ba028637e1c) --- crowbar_framework/app/models/neutron_service.rb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crowbar_framework/app/models/neutron_service.rb b/crowbar_framework/app/models/neutron_service.rb index 17c27dd8db..ab863d17df 100644 --- a/crowbar_framework/app/models/neutron_service.rb +++ b/crowbar_framework/app/models/neutron_service.rb @@ -119,8 +119,7 @@ def create_proposal base["deployment"]["neutron"]["elements"] = { "neutron-server" => [controller_node[:fqdn]], - "neutron-network" => network_nodes.map { |x| x[:fqdn] }, - "neutron-sdn-cisco-aci-agents" => nodes.map { |x| x[:fqdn] } + "neutron-network" => network_nodes.map { |x| x[:fqdn] } } unless nodes.nil? || nodes.length.zero? 
base["attributes"]["neutron"]["service_password"] = random_password From e5c3dde90906ee5c008ff67803634b5cc97db705 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 19 Sep 2018 14:04:37 +0200 Subject: [PATCH 128/207] database: Execute db_sync commands via Chef The execution of db_sync commands was moved from wrapper script to the recipe. This way additional ssh calls could be removed and the wrapper script is simplified. In addition, the recipe is executed on first database node to create one common index file which could be used as input for the migration tool. --- bin/prepare-mariadb | 42 +++++-------- chef/cookbooks/database/libraries/crowbar.rb | 32 +++++----- .../recipes/pg2mariadb_preparation.rb | 63 +++++++++++-------- .../default/mariadb-databases.txt.erb | 3 - .../default/mariadb-databases.yaml.erb | 5 ++ .../templates/default/mariadb-db_sync.sh.erb | 4 -- 6 files changed, 73 insertions(+), 76 deletions(-) delete mode 100644 chef/cookbooks/database/templates/default/mariadb-databases.txt.erb create mode 100644 chef/cookbooks/database/templates/default/mariadb-databases.yaml.erb delete mode 100644 chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb diff --git a/bin/prepare-mariadb b/bin/prepare-mariadb index 4535c44241..e0d55bc0b0 100755 --- a/bin/prepare-mariadb +++ b/bin/prepare-mariadb @@ -47,8 +47,12 @@ end # Select nodes which represent all services which use database # they might be standalone nodes or cluster members. In most simple # case there will be only one node used for all services. -def selected_nodes - nodes_and_roles = {} +def selected_nodes(first_mysql_node) + nodes_and_roles = { + first_mysql_node.name => { + node: first_mysql_node, roles: ["mysql-server"] + } + } CrowbarDatabaseHelper.roles_using_database.each do |role| node = node_for_role(role) next if node.nil? @@ -112,41 +116,27 @@ def prepare_node(node, roles) remove_recipe node unless res[:exit_code].zero? 
log "ERROR: Chef-client failed with code: #{res[:exit_code]}" - return - end - log "Processing db_sync commands on #{node.name}" - roles.each do |role| - cmd = "/etc/pg2mysql/scripts/#{role}-db_sync.sh" - log "Running db_sync script #{cmd} for role #{role}" - res = run_ssh_cmd(node, cmd, role, "5m") - log "ERROR: Failed with code: #{res[:exit_code]}" unless res[:exit_code].zero? - log "Run time: #{res[:run_time]}s" + return -2 end - summary = run_ssh_cmd(node, "cat /etc/pg2mysql/databases.txt")[:stdout_and_stderr].lines log "Prepare completed for #{node.name}" - summary + 0 end def main ret = 0 chef_init - if mysql_node.nil? - log "ERROR: MySQL server not found. Please assign mysql-server role " + first_mysql_node = mysql_node + if first_mysql_node.nil? + log "ERROR: MySQL server not found. Please assign mysql-server role " \ "to some node or cluster and re-apply database proposal." return -1 end - summary = Set.new - selected_nodes.values.each do |node_data| - node_summary = prepare_node(node_data[:node], node_data[:roles]) - if node_summary.nil? - ret = -2 - else - summary |= node_summary - end + selected_nodes(first_mysql_node).values.each do |node_data| + node_ret = prepare_node(node_data[:node], node_data[:roles]) + ret = node_ret if ret.zero? && !node_ret.zero? + log "Summary of used databases: /etc/pg2mysql/databases.yaml on " \ + "#{first_mysql_node.name}" if node_data[:node] == first_mysql_node && ret.zero? end - Dir.mkdir "/etc/pg2mysql" unless File.exists? 
"/etc/pg2mysql" - open("/etc/pg2mysql/databases.txt", "w") { |f| f << summary.to_a.join } - log "Summary of used databases: /etc/pg2mysql/databases.txt" return ret end diff --git a/chef/cookbooks/database/libraries/crowbar.rb b/chef/cookbooks/database/libraries/crowbar.rb index f546d6a40f..2120c3f495 100644 --- a/chef/cookbooks/database/libraries/crowbar.rb +++ b/chef/cookbooks/database/libraries/crowbar.rb @@ -38,42 +38,42 @@ def self.migration_data "keystone-server" => { "barclamp" => "keystone", "db_sync_cmd" => "keystone-manage --config-dir /etc/keystone/keystone.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db_sync" + "--config-dir <%=db_override_conf%> db_sync" }, "glance-server" => { "barclamp" => "glance", "db_sync_cmd" => "glance-manage --config-dir /etc/glance/glance.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db_sync" + "--config-dir <%=db_override_conf%> db_sync" }, "cinder-controller" => { "barclamp" => "cinder", "db_sync_cmd" => "cinder-manage --config-dir /etc/cinder/cinder.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db sync" + "--config-dir <%=db_override_conf%> db sync" }, "manila-server" => { "barclamp" => "manila", "db_sync_cmd" => "manila-manage --config-dir /etc/manila/manila.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db sync" + "--config-dir <%=db_override_conf%> db sync" }, "neutron-server" => { "barclamp" => "neutron", "db_sync_cmd" => "neutron-db-manage --config-dir /etc/neutron/neutron.conf.d/ " \ - "--config-dir <%=@db_override_conf%> upgrade head" + "--config-dir <%=db_override_conf%> upgrade head" }, "nova-controller" => { "barclamp" => "nova", "db_sync_cmd" => [ "nova-manage --config-dir /etc/nova/nova.conf.d/ " \ - "--config-dir <%=@db_override_conf%> api_db sync", + "--config-dir <%=db_override_conf%> api_db sync", "nova-manage --config-dir /etc/nova/nova.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db sync" + "--config-dir <%=db_override_conf%> db sync" ] }, # ec2 is special in that it's attributes 
are part of the nova barclamp "ec2-api" => { "barclamp" => "nova", "db_sync_cmd" => "ec2-api-manage --config-dir /etc/ec2api/ec2api.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db_sync" + "--config-dir <%=db_override_conf%> db_sync" }, # django migration tool uses db settings from # /srv/www/openstack-dashboard/openstack_dashboard/local/local.settings.d/_100_local_settings.py @@ -84,39 +84,39 @@ def self.migration_data "ceilometer-server" => { "barclamp" => "ceilometer", "db_sync_cmd" => "ceilometer-dbsync --config-dir /etc/ceilometer/ceilometer.conf.d/ " \ - "--config-dir <%=@db_override_conf%>" + "--config-dir <%=db_override_conf%>" }, "heat-server" => { "barclamp" => "heat", "db_sync_cmd" => "heat-manage --config-dir /etc/heat/heat.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db_sync" + "--config-dir <%=db_override_conf%> db_sync" }, "aodh-server" => { "barclamp" => "aodh", "db_sync_cmd" => "aodh-dbsync --config-dir /etc/aodh/aodh.conf.d/ " \ - "--config-dir <%=@db_override_conf%>" + "--config-dir <%=db_override_conf%>" }, "barbican-controller" => { "barclamp" => "barbican", # this doesn't work because of a bug in barbican-manage handling of oslo_config # "db_sync_cmd" => "barbican-manage --config-dir /etc/barbican/barbican.conf.d/ " \ - # "--config-dir <%=@db_override_conf%> db upgrade" - "db_sync_cmd" => "barbican-manage db upgrade --db-url <%=@db_conf_sections['DEFAULT']%>" + # "--config-dir <%=db_override_conf%> db upgrade" + "db_sync_cmd" => "barbican-manage db upgrade --db-url <%=db_conf_sections['DEFAULT']%>" }, "magnum-server" => { "barclamp" => "magnum", "db_sync_cmd" => "magnum-db-manage --config-dir /etc/magnum/magnum.conf.d/ " \ - "--config-dir <%=@db_override_conf%> upgrade" + "--config-dir <%=db_override_conf%> upgrade" }, "sahara-server" => { "barclamp" => "sahara", "db_sync_cmd" => "sahara-db-manage --config-dir /etc/sahara/sahara.conf.d/ " \ - "--config-dir <%=@db_override_conf%> upgrade head" + "--config-dir <%=db_override_conf%> 
upgrade head" }, "trove-server" => { "barclamp" => "trove", "db_sync_cmd" => "trove-manage --config-dir /etc/trove/trove.conf.d/ " \ - "--config-dir <%=@db_override_conf%> db_sync" + "--config-dir <%=db_override_conf%> db_sync" } } end diff --git a/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb index fb501d72b3..73d14ac5e8 100644 --- a/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb +++ b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb @@ -1,18 +1,24 @@ databases = [] +commands = [] # The "barclamp" parameter doesn't really matter here, we want to use the same # instance for all databases. db_settings = CrowbarOpenStackHelper.database_settings(node, "mysql") psql_settings = CrowbarOpenStackHelper.database_settings(node, "postgresql") CrowbarDatabaseHelper.roles_using_database.each do |role| - next unless node.roles.include? role - role_migration_data = CrowbarDatabaseHelper.role_migration_data(role) barclamp = role_migration_data["barclamp"] + # Find a node with this role even if the recipe was executed from another one + # e.g. one of the database nodes. + role_node = CrowbarOpenStackHelper.get_node(node, role, barclamp, "default") + + # Role not found on any node? Skip it completely. + next if role_node.nil? 
+ db = if role == "ec2-api" - node[barclamp]["ec2-api"]["db"] + role_node[barclamp]["ec2-api"]["db"] else - node[barclamp]["db"] + role_node[barclamp]["db"] end db_conf_sections = {} db_connection_key = "connection" @@ -24,13 +30,13 @@ # The nova-controller role creates more than one database if role == "nova-controller" connection = CrowbarOpenStackHelper.database_connection_string(db_settings, - node[barclamp]["api_db"]) - databases << { db: node[barclamp]["api_db"], url: connection } + role_node[barclamp]["api_db"]) + databases << { db: role_node[barclamp]["api_db"], url: connection } Chef::Log.info("connection string: #{connection}") db_conf_sections["api_database"] = connection connection = CrowbarOpenStackHelper.database_connection_string(db_settings, - node[barclamp]["placement_db"]) - databases << { db: node[barclamp]["placement_db"], url: connection } + role_node[barclamp]["placement_db"]) + databases << { db: role_node[barclamp]["placement_db"], url: connection } Chef::Log.info("connection string: #{connection}") db_conf_sections["placement_database"] = connection end @@ -40,32 +46,27 @@ db_connection_key = "sql_connection" end - db_override_conf = "/etc/pg2mysql/#{role}.mariadb-conf.d/" directory "/etc/pg2mysql/" do mode 0750 owner "root" group "root" end - directory "/etc/pg2mysql/scripts" do - mode 0750 - owner "root" - group "root" - end + # Remaining part of the loop should only be executed on the controller node with this role + next unless node.roles.include? role + + db_override_conf = "/etc/pg2mysql/#{role}.mariadb-conf.d/" cmds = role_migration_data["db_sync_cmd"] cmds = [cmds] unless cmds.is_a?(Array) - template "/etc/pg2mysql/scripts/#{role}-db_sync.sh" do - source "mariadb-db_sync.sh.erb" - mode 0750 - owner "root" - group "root" - variables( - db_sync_cmds: cmds, - db_conf_sections: db_conf_sections, - db_override_conf: db_override_conf - ) + idx = 0 + cmds.each do |cmd| + suffix = idx.zero? ? 
"" : "-#{idx}" + log_file = "/var/log/crowbar/db-prepare.#{role}#{suffix}.log" + log_redirect = "> #{log_file} 2>&1" + commands << { cmd: ERB.new("#{cmd} #{log_redirect}").result(binding), role: role + suffix } + idx += 1 end directory db_override_conf do @@ -92,7 +93,7 @@ databases.each do |dbdata| db = dbdata[:db] - # fill psql url for databases.txt + # fill psql url for databases.yaml dbdata[:psql_url] = CrowbarOpenStackHelper.database_connection_string(psql_settings, db) Chef::Log.info("creating database #{db["database"]}") Chef::Log.info("creating database user #{db["user"]} with password #{db["password"]}") @@ -128,12 +129,20 @@ end -template "/etc/pg2mysql/databases.txt" do - source "mariadb-databases.txt.erb" +commands.each do |command| + execute "dbsync-role-#{command[:role]}" do + command command[:cmd] + end +end + +# Write the index only on database node +template "/etc/pg2mysql/databases.yaml" do + source "mariadb-databases.yaml.erb" mode 0640 owner "root" group "root" variables( databases: databases ) + only_if { node.roles.include? 
"mysql-server" } end diff --git a/chef/cookbooks/database/templates/default/mariadb-databases.txt.erb b/chef/cookbooks/database/templates/default/mariadb-databases.txt.erb deleted file mode 100644 index 4fa2c10cf5..0000000000 --- a/chef/cookbooks/database/templates/default/mariadb-databases.txt.erb +++ /dev/null @@ -1,3 +0,0 @@ -<% @databases.each do |dbdata| -%> -<%= dbdata[:db][:database] -%> <%= dbdata[:psql_url] -%> <%= dbdata[:url] %> -<% end %> diff --git a/chef/cookbooks/database/templates/default/mariadb-databases.yaml.erb b/chef/cookbooks/database/templates/default/mariadb-databases.yaml.erb new file mode 100644 index 0000000000..51effc79c7 --- /dev/null +++ b/chef/cookbooks/database/templates/default/mariadb-databases.yaml.erb @@ -0,0 +1,5 @@ +<% @databases.each do |dbdata| -%> +<%= dbdata[:db][:database] %>: + source: <%= dbdata[:psql_url] %> + target: <%= dbdata[:url] %> +<% end %> diff --git a/chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb b/chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb deleted file mode 100644 index a509d65557..0000000000 --- a/chef/cookbooks/database/templates/default/mariadb-db_sync.sh.erb +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -<% @db_sync_cmds.each do |cmd| -%> -<%= ERB.new(cmd).result(binding) %> -<% end %> From 2e5db8b4b56bd3afcb03f7bd687bddf02f5b679f Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 25 Sep 2018 09:28:35 +0200 Subject: [PATCH 129/207] keystone: Fix retry behavior on password update With commit 9ee935f86f we added a retry loop to _get_token() to avoid issues with service restarts during chef-client runs. The retrys currently happen on any non-success response code including the 4XX ones. However the code for updating the password relies on the get_token to return 401. The current retry behavior will trigger unnecessary retrys. Which will often cause the HA syncmark on the admin password update code in keystone/recipe/server.rb to run into a timeout. 
This change update the retry loop to only retry on 5XX errors, which caused the original problem that the retry loop was trying to address. Everything else will be handled as a success (2XX) or hard (non-retryable) error. (cherry picked from commit 5a6a79eb27d3fe752ddea2b1360f1cd331e50c4a) --- chef/cookbooks/keystone/providers/register.rb | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/keystone/providers/register.rb b/chef/cookbooks/keystone/providers/register.rb index 67eb4a491a..e850473f78 100644 --- a/chef/cookbooks/keystone/providers/register.rb +++ b/chef/cookbooks/keystone/providers/register.rb @@ -558,8 +558,9 @@ def _get_token(http, user_name, password, tenant = "") count += 1 Chef::Log.debug "Trying to get keystone token for user '#{user_name}' (try #{count})" resp = http.send_request("POST", path, JSON.generate(body), headers) - error = !(resp.is_a?(Net::HTTPCreated) || resp.is_a?(Net::HTTPOK)) - sleep 5 if error + error = !resp.is_a?(Net::HTTPSuccess) + # retry on any 5XX (server error) error code but not on 4XX (client error) + sleep 5 if resp.is_a?(Net::HTTPServerError) end if error From 7821cd296b926bfabe5ed1248c7345f3dd01c844 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 10 Jan 2018 14:59:48 +0100 Subject: [PATCH 130/207] keystone: Install fernet-keys-sync when needed Added checks to skip installation of fernet-keys-sync script and sudoers configuration if fernet keys are not used. Also added comment describing the syncmarks assumptions for this part. 
(cherry picked from commit 872c7142b67f7234e67acfd9bb1ac157f817cf3d) --- chef/cookbooks/keystone/recipes/ha.rb | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/chef/cookbooks/keystone/recipes/ha.rb b/chef/cookbooks/keystone/recipes/ha.rb index e6feae1e5f..094286487f 100644 --- a/chef/cookbooks/keystone/recipes/ha.rb +++ b/chef/cookbooks/keystone/recipes/ha.rb @@ -72,11 +72,18 @@ crowbar_pacemaker_sync_mark "create-keystone_ha_resources" end +# note(jtomasiak): We don't need new syncmarks for the fernet-keys-sync part. +# This is because the deployment and configuration of this feature will be done +# once during keystone installation and it will not be used until some keystone +# node is reinstalled. We assume that time between keystone installation and +# possible node reinstallation is high enough to run this safely without +# syncmarks. template "/usr/bin/keystone-fernet-keys-sync.sh" do source "keystone-fernet-keys-sync.sh" owner "root" group "root" mode "0755" + action node[:keystone][:signing][:token_format] == "fernet" ? :create : :delete end # handler scripts are run by hacluster user so sudo configuration is needed @@ -86,6 +93,7 @@ owner "root" group "root" mode "0440" + action node[:keystone][:signing][:token_format] == "fernet" ? :create : :delete end # on founder: create/delete pacemaker alert From e64db0c3b5ce2021ee9bff0c7eabae66fdb1c105 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 12 Jan 2018 11:16:51 +0100 Subject: [PATCH 131/207] keystone: More fernet-keys handling cleanup Moved `keystone-fernet-keys-push.sh` deployment to `ha.rb` as it is used only for HA cases. Added small optimization for action needed for chef resources related to the fernet-keys handling. 
(cherry picked from commit e03c9f5a7233651ce69d0047743d76769d52147d) --- chef/cookbooks/keystone/recipes/ha.rb | 16 +++++++++++++--- chef/cookbooks/keystone/recipes/server.rb | 7 ------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/chef/cookbooks/keystone/recipes/ha.rb b/chef/cookbooks/keystone/recipes/ha.rb index 094286487f..4968034f02 100644 --- a/chef/cookbooks/keystone/recipes/ha.rb +++ b/chef/cookbooks/keystone/recipes/ha.rb @@ -78,12 +78,22 @@ # node is reinstalled. We assume that time between keystone installation and # possible node reinstallation is high enough to run this safely without # syncmarks. +fernet_resources_action = node[:keystone][:signing][:token_format] == "fernet" ? :create : :delete + +template "/usr/bin/keystone-fernet-keys-push.sh" do + source "keystone-fernet-keys-push.sh" + owner "root" + group "root" + mode "0755" + action fernet_resources_action +end + template "/usr/bin/keystone-fernet-keys-sync.sh" do source "keystone-fernet-keys-sync.sh" owner "root" group "root" mode "0755" - action node[:keystone][:signing][:token_format] == "fernet" ? :create : :delete + action fernet_resources_action end # handler scripts are run by hacluster user so sudo configuration is needed @@ -93,12 +103,12 @@ owner "root" group "root" mode "0440" - action node[:keystone][:signing][:token_format] == "fernet" ? :create : :delete + action fernet_resources_action end # on founder: create/delete pacemaker alert pacemaker_alert "keystone-fernet-keys-sync" do handler "/usr/bin/keystone-fernet-keys-sync.sh" - action node[:keystone][:signing][:token_format] == "fernet" ? 
:create : :delete + action fernet_resources_action only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } end diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index 625621bb20..8b0da6fd26 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -482,13 +482,6 @@ package "rsync" crowbar_pacemaker_sync_mark "sync-keystone_install_rsync" if ha_enabled - template "/usr/bin/keystone-fernet-keys-push.sh" do - source "keystone-fernet-keys-push.sh" - owner "root" - group "root" - mode "0755" - end - rsync_command = "" initial_rsync_command = "" if ha_enabled From f6f4233a2cfb754803ec3ae8038c7e3d55279542 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 25 Sep 2018 00:22:31 +0200 Subject: [PATCH 132/207] neutron: use messaging driver for notifications --- chef/cookbooks/neutron/templates/default/neutron.conf.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/templates/default/neutron.conf.erb b/chef/cookbooks/neutron/templates/default/neutron.conf.erb index 35a7b4d9fd..b39637cb13 100644 --- a/chef/cookbooks/neutron/templates/default/neutron.conf.erb +++ b/chef/cookbooks/neutron/templates/default/neutron.conf.erb @@ -80,7 +80,7 @@ username = <%= @keystone_settings['service_user'] %> lock_path = /var/run/neutron [oslo_messaging_notifications] -driver = neutron.openstack.common.notifier.rpc_notifier +driver = messaging [oslo_messaging_rabbit] rabbit_use_ssl = <%= @rabbit_settings[:use_ssl] %> From 81f090893168dc8db13a6886ac1029ec26184c68 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 25 Sep 2018 00:25:20 +0200 Subject: [PATCH 133/207] cinder: enable sending notifications using messaging --- chef/cookbooks/cinder/templates/default/cinder.conf.erb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/chef/cookbooks/cinder/templates/default/cinder.conf.erb b/chef/cookbooks/cinder/templates/default/cinder.conf.erb index 
127e72d4f2..ba2d01ecdb 100644 --- a/chef/cookbooks/cinder/templates/default/cinder.conf.erb +++ b/chef/cookbooks/cinder/templates/default/cinder.conf.erb @@ -313,6 +313,9 @@ lock_path = /var/run/openstack lock_path = /var/run/cinder <% end -%> +[oslo_messaging_notifications] +driver = messaging + [oslo_messaging_rabbit] rabbit_use_ssl = <%= @rabbit_settings[:use_ssl] %> <% if @rabbit_settings[:cluster] -%> From c06b426e76248ad3428116a2db112904b1f417d3 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 25 Sep 2018 00:39:41 +0200 Subject: [PATCH 134/207] rabbitmq: make notification queues optional If notifications are enabled but ceilometer (or any other consumer like monasca-ceilometer) isn't deployed, there isn't any consumer for those messages and since they're configured to not expire, they will just eternally pile up in the mnesia database. This is a problem because in nontrivial environments this can be multiple gigabytes a day of data that just keeps piling up slowing everything down. We could set a expiry policy on those notifications so that if there isn't a consumer they disappear, but that still causes the overhead and would be highly undesirable in case billing/telemetry is actually intended to be used (those events must not ever get lost in such a case even if the consumer is unavailable for some time). For stable/ branch the backport was changed to not change the default but just add an option to turn it off. 
(cherry picked from commit 966090fac188bb9081e1a9cdc4a45ca30e2079ed) --- .../cinder/templates/default/cinder.conf.erb | 2 ++ .../cookbooks/crowbar-openstack/libraries/helpers.rb | 1 + .../glance/templates/default/glance-api.conf.erb | 2 ++ .../templates/default/glance-registry.conf.erb | 2 ++ .../magnum/templates/default/magnum.conf.erb | 2 ++ .../neutron/templates/default/neutron.conf.erb | 2 ++ chef/cookbooks/nova/templates/default/nova.conf.erb | 2 ++ chef/cookbooks/tempest/recipes/config.rb | 10 ++++++++++ .../migrate/rabbitmq/106_add_notifications.rb | 12 ++++++++++++ chef/data_bags/crowbar/template-rabbitmq.json | 5 +++-- chef/data_bags/crowbar/template-rabbitmq.schema | 3 ++- .../barclamp/rabbitmq/_edit_attributes.html.haml | 1 + crowbar_framework/config/locales/rabbitmq/en.yml | 2 ++ 13 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/rabbitmq/106_add_notifications.rb diff --git a/chef/cookbooks/cinder/templates/default/cinder.conf.erb b/chef/cookbooks/cinder/templates/default/cinder.conf.erb index ba2d01ecdb..663521a7b9 100644 --- a/chef/cookbooks/cinder/templates/default/cinder.conf.erb +++ b/chef/cookbooks/cinder/templates/default/cinder.conf.erb @@ -313,8 +313,10 @@ lock_path = /var/run/openstack lock_path = /var/run/cinder <% end -%> +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] rabbit_use_ssl = <%= @rabbit_settings[:use_ssl] %> diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 27eff1c7ba..eb69aa4f2e 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -224,6 +224,7 @@ def self.rabbitmq_settings(node, barclamp) "#{rabbit[:rabbitmq][:trove][:vhost]}", cluster: false, durable_queues: false, + enable_notifications: rabbit[:rabbitmq][:client][:enable_notifications], 
ha_queues: false, heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout], pacemaker_resource: "rabbitmq" diff --git a/chef/cookbooks/glance/templates/default/glance-api.conf.erb b/chef/cookbooks/glance/templates/default/glance-api.conf.erb index 047ba4e5f5..47aa247786 100644 --- a/chef/cookbooks/glance/templates/default/glance-api.conf.erb +++ b/chef/cookbooks/glance/templates/default/glance-api.conf.erb @@ -81,8 +81,10 @@ auth_type = password [oslo_concurrency] lock_path = /var/run/glance +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/glance/templates/default/glance-registry.conf.erb b/chef/cookbooks/glance/templates/default/glance-registry.conf.erb index 496f461966..031ab8145f 100644 --- a/chef/cookbooks/glance/templates/default/glance-registry.conf.erb +++ b/chef/cookbooks/glance/templates/default/glance-registry.conf.erb @@ -33,8 +33,10 @@ user_domain_name = <%= @keystone_settings["admin_domain"] %> auth_url = <%= @keystone_settings['admin_auth_url'] %> auth_type = password +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/magnum/templates/default/magnum.conf.erb b/chef/cookbooks/magnum/templates/default/magnum.conf.erb index 2aef6fe4bb..5108220acb 100644 --- a/chef/cookbooks/magnum/templates/default/magnum.conf.erb +++ b/chef/cookbooks/magnum/templates/default/magnum.conf.erb @@ -72,8 +72,10 @@ insecure = <%= @keystone_settings['insecure'] %> [oslo_concurrency] lock_path = /var/run/magnum +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/neutron/templates/default/neutron.conf.erb 
b/chef/cookbooks/neutron/templates/default/neutron.conf.erb index b39637cb13..94a3131439 100644 --- a/chef/cookbooks/neutron/templates/default/neutron.conf.erb +++ b/chef/cookbooks/neutron/templates/default/neutron.conf.erb @@ -79,8 +79,10 @@ username = <%= @keystone_settings['service_user'] %> [oslo_concurrency] lock_path = /var/run/neutron +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] rabbit_use_ssl = <%= @rabbit_settings[:use_ssl] %> diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index 421ce7f64d..3c628c393f 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -246,8 +246,10 @@ lock_path = /var/run/openstack lock_path = /var/run/nova <% end -%> +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messagingv2 +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/tempest/recipes/config.rb b/chef/cookbooks/tempest/recipes/config.rb index ef08589380..378b8a4b65 100644 --- a/chef/cookbooks/tempest/recipes/config.rb +++ b/chef/cookbooks/tempest/recipes/config.rb @@ -95,6 +95,16 @@ roles = [ 'anotherrole' ] +if enabled_services.include?("metering") + rabbitmq_settings = fetch_rabbitmq_settings + + unless rabbitmq_settings[:enable_notifications] + # without rabbitmq notification clients configured the ceilometer + # tempest tests will fail so skip them + enabled_services = enabled_services - ["metering"] + end +end + heat_server = search(:node, "roles:heat-server").first if enabled_services.include?("orchestration") && !heat_server.nil? 
heat_trusts_delegated_roles = heat_server[:heat][:trusts_delegated_roles] diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/106_add_notifications.rb b/chef/data_bags/crowbar/migrate/rabbitmq/106_add_notifications.rb new file mode 100644 index 0000000000..8e123ec321 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/rabbitmq/106_add_notifications.rb @@ -0,0 +1,12 @@ +def upgrade(ta, td, a, d) + unless a["client"].key?("enable_notifications") + # keep it always enabled on upgrade for compat + a["client"]["enable_notifications"] = true + end + return a, d +end + +def downgrade(ta, td, a, d) + a["client"].delete("enable_notifications") unless ta["client"].key?("enable_notifications") + return a, d +end diff --git a/chef/data_bags/crowbar/template-rabbitmq.json b/chef/data_bags/crowbar/template-rabbitmq.json index 88f8b1db05..7f7edcb0a7 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.json +++ b/chef/data_bags/crowbar/template-rabbitmq.json @@ -21,7 +21,8 @@ "client_ca_certs": "/etc/ssl/certs/rabbitca.pem" }, "client": { - "heartbeat_timeout": 60 + "heartbeat_timeout": 60, + "enable_notifications": false }, "cluster": false, "ha": { @@ -58,7 +59,7 @@ "rabbitmq": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 105, + "schema-revision": 106, "element_states": { "rabbitmq-server": [ "readying", "ready", "applying" ] }, diff --git a/chef/data_bags/crowbar/template-rabbitmq.schema b/chef/data_bags/crowbar/template-rabbitmq.schema index 12dfe6f113..337fd8bb69 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.schema +++ b/chef/data_bags/crowbar/template-rabbitmq.schema @@ -48,7 +48,8 @@ "type": "map", "required": true, "mapping" : { - "heartbeat_timeout": { "type": "int", "required": true } + "heartbeat_timeout": { "type": "int", "required": true }, + "enable_notifications": { "type": "bool", "required": true } } }, "cluster": { "type": "bool", "required": true }, diff --git 
a/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml index a389eff252..108699c3e9 100644 --- a/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml @@ -6,6 +6,7 @@ = string_field :vhost = integer_field :port = string_field :user + = boolean_field %w(client enable_notifications) %fieldset %legend diff --git a/crowbar_framework/config/locales/rabbitmq/en.yml b/crowbar_framework/config/locales/rabbitmq/en.yml index e3d6081a41..399db3b6ea 100644 --- a/crowbar_framework/config/locales/rabbitmq/en.yml +++ b/crowbar_framework/config/locales/rabbitmq/en.yml @@ -21,6 +21,8 @@ en: edit_attributes: vhost: 'Virtual host' user: 'User' + client: + enable_notifications: 'Configure Clients to send notifications' extra_users: username: 'Username' permissions: 'Permissions (3 comma separated items for configure, write, read; e.g. ".*,.*,.*")' From 491571517cf395464580789e0682bdfd1b449cdb Mon Sep 17 00:00:00 2001 From: Itxaka Date: Fri, 5 Jan 2018 13:42:28 +0100 Subject: [PATCH 135/207] nova: move flavor creation to converge phase Instead of creating the flavor list during the compilation phase, do it on the converge phase as we will get the most up-to-date list of flavors. 
Also, separate the command that obtains the flavor list so it is resilient to temporary failures of the nova API while obtaining the list of flavors, so as not to break the flavor creation that follows (cherry picked from commit 457a94b39708e20ef1242becc96bef8204bcff4d) --- chef/cookbooks/nova/recipes/flavors.rb | 96 +++++++++++++++++--------- 1 file changed, 62 insertions(+), 34 deletions(-) diff --git a/chef/cookbooks/nova/recipes/flavors.rb b/chef/cookbooks/nova/recipes/flavors.rb index 35b0cf1acb..3cab896821 100644 --- a/chef/cookbooks/nova/recipes/flavors.rb +++ b/chef/cookbooks/nova/recipes/flavors.rb @@ -96,44 +96,72 @@ trusted_flavors = flavors.select{ |key, value| value["name"].match(/\.trusted\./) } default_flavors = flavors.select{ |key, value| !value["name"].match(/\.trusted\./) } -flavorlist = `#{openstack} flavor list -f value -c Name`.split("\n") -# create the trusted flavors -if node[:nova][:trusted_flavors] - trusted_flavors.keys.each do |id| - next if flavorlist.include?(flavors[id]["name"]) - execute "register_#{flavors[id]["name"]}_flavor" do - retries 5 - command <<-EOF - #{novacmd} flavor-create #{flavors[id]["name"]} #{id} #{flavors[id]["mem"]} \ - #{flavors[id]["disk"]} #{flavors[id]["vcpu"]} - #{novacmd} flavor-key #{flavors[id]["name"]} set trust:trusted_host=trusted - EOF - action :nothing - subscribes :run, "execute[trigger-flavor-creation]", :delayed - end - end +execute "delay-flavor-creation" do + command "true" + action :nothing end -# create the default flavors -if node[:nova][:create_default_flavors] - default_flavors.keys.each do |id| - next if flavorlist.include?(flavors[id]["name"]) - execute "register_#{flavors[id]["name"]}_flavor" do - retries 5 - command <<-EOF - #{novacmd} flavor-create #{flavors[id]["name"]} #{id} #{flavors[id]["mem"]} \ - #{flavors[id]["disk"]} #{flavors[id]["vcpu"]} - EOF - action :nothing - subscribes :run, "execute[trigger-flavor-creation]", :delayed - end +ruby_block "Get current flavors" do + block do + cmd = 
Mixlib::ShellOut.new("#{openstack} flavor list -f value -c Name").run_command + raise "Flavor list not obtained, is the nova-api down?" unless cmd.exitstatus.zero? + node.run_state["flavorlist"] = cmd.stdout.split("\n") end + retries 5 end -# This is to trigger all the above "execute" resources to run :delayed, so that -# they run at the end of the chef-client run, after the nova service has been -# restarted (in case of a config change) -execute "trigger-flavor-creation" do - command "true" +ruby_block "Flavor creation" do + block do + flavorlist = node.run_state["flavorlist"] + + if node[:nova][:create_default_flavors] + default_flavors.each do |id, flavor| + next if flavorlist.include?(flavor["name"]) + command = "#{novacmd} flavor-create #{flavor["name"]} #{id} #{flavor["mem"]} " + command << "#{flavor["disk"]} #{flavor["vcpu"]}" + run_context.resource_collection << flavor_create = Chef::Resource::Execute.new( + "Create flavor #{flavor["name"]}", run_context + ) + flavor_create.command command + flavor_create.retries 5 + + # delay the run of this resource until the end of the run + run_context.notifies_delayed( + Chef::Resource::Notification.new(flavor_create, :run, "delay-flavor-creation") + ) + end + end + + if node[:nova][:trusted_flavors] + trusted_flavors.each do |id, flavor| + next if flavorlist.include?(flavor["name"]) + command = "#{novacmd} flavor-create #{flavor["name"]} " + command << "#{id} #{flavor["mem"]} #{flavor["disk"]} #{flavor["vcpu"]} " + run_context.resource_collection << flavor_create = Chef::Resource::Execute.new( + "Create trusted flavor #{flavor["name"]}", run_context + ) + flavor_create.command command + flavor_create.retries 5 + + # delay the run of this resource until the end of the run + run_context.notifies_delayed( + Chef::Resource::Notification.new(flavor_create, :run, "delay-flavor-creation") + ) + + # set flavors to trusted + command = "#{novacmd} flavor-key #{flavor["name"]} set trust:trusted_host=trusted" + 
run_context.resource_collection << flavor_trusted = Chef::Resource::Execute.new( + "Set flavor #{flavor["name"]} to trusted", run_context + ) + flavor_trusted.command command + flavor_trusted.retries 5 + + # delay the run of this resource until the end of the run + run_context.notifies_delayed( + Chef::Resource::Notification.new(flavor_trusted, :run, "delay-flavor-creation") + ) + end + end + end end From b01669e0b5d2463538faa3791662118d256590dd Mon Sep 17 00:00:00 2001 From: Ivan Lausuch Date: Fri, 7 Sep 2018 09:37:41 +0200 Subject: [PATCH 136/207] neutron: disable metadata agent service if not necessary In order to free some connections to rabbitmq the neutron metadata agent is disabled in computer nodes if the force_metadata flag is set to true. The pacemaker primitive is removed as well if exists. (cherry picked from commit 098b3a478bd9e78bb8c4bf112afd46f2f568846a) --- .../neutron/definitions/neutron_metadata.rb | 20 ++++++++++++++----- .../neutron/recipes/network_agents_ha.rb | 12 +++++++++-- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/chef/cookbooks/neutron/definitions/neutron_metadata.rb b/chef/cookbooks/neutron/definitions/neutron_metadata.rb index b955fed479..aeea152d37 100644 --- a/chef/cookbooks/neutron/definitions/neutron_metadata.rb +++ b/chef/cookbooks/neutron/definitions/neutron_metadata.rb @@ -99,13 +99,19 @@ use_crowbar_pacemaker_service = \ (neutron_network_ha && node[:pacemaker][:clone_stateless_services]) || nova_compute_ha_enabled + enable_metadata = node.roles.include?("neutron-network") || !node[:neutron][:metadata][:force] + # In case of Cisco ACI driver, supervisord takes care of starting up # the metadata agent. 
service node[:neutron][:platform][:metadata_agent_name] do - action [:enable, :start] - subscribes :restart, resources(template: node[:neutron][:config_file]) - subscribes :restart, resources(template: node[:neutron][:metadata_agent_config_file]) - subscribes :restart, resources(file: "/etc/neutron/metadata_agent.ini") + if enable_metadata + action [:enable, :start] + subscribes :restart, resources(template: node[:neutron][:config_file]) + subscribes :restart, resources(template: node[:neutron][:metadata_agent_config_file]) + subscribes :restart, resources(file: "/etc/neutron/metadata_agent.ini") + else + action [:disable, :stop] + end provider Chef::Provider::CrowbarPacemakerService if use_crowbar_pacemaker_service if nova_compute_ha_enabled supports no_crm_maintenance_mode: true @@ -114,7 +120,11 @@ end end utils_systemd_service_restart node[:neutron][:platform][:metadata_agent_name] do - action use_crowbar_pacemaker_service ? :disable : :enable + if enable_metadata + action use_crowbar_pacemaker_service ? 
:disable : :enable + else + action :disable + end end end end diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index 06c2767d5f..86ebd22a2d 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -182,13 +182,21 @@ l3_agent_clone = "cl-#{l3_agent_primitive}" end - if use_metadata_agent - metadata_agent_primitive = "neutron-metadata-agent" + enable_metadata = node.roles.include?("neutron-network") || !node[:neutron][:metadata][:force] + + metadata_agent_primitive = "neutron-metadata-agent" + if use_metadata_agent && enable_metadata objects = openstack_pacemaker_controller_clone_for_transaction metadata_agent_primitive do agent node[:neutron][:ha][:network][:metadata_ra] op node[:neutron][:ha][:network][:op] end transaction_objects.push(objects) + else + pacemaker_primitive metadata_agent_primitive do + agent node[:neutron][:ha][:network][:metadata_ra] + action [:stop, :delete] + only_if "crm configure show #{metadata_agent_primitive}" + end end metering_agent_primitive = "neutron-metering-agent" From 33b7c6e664d5c62e44937fc893a0b1812a4dbc0e Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Mon, 24 Sep 2018 15:42:47 +0200 Subject: [PATCH 137/207] neutron: Increase agents_ha_resources timeout again Increased in 7858b826b0350930c26e71fd373b1f459b6a6db2 the timeout was working correctly for smaller scenarios (with few barclamps applied). To properly handle bigger deployments it had to be increased even more. 
--- chef/cookbooks/neutron/recipes/network_agents_ha.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index 06c2767d5f..30e1671331 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -123,7 +123,7 @@ # Avoid races when creating pacemaker resources crowbar_pacemaker_sync_mark "wait-neutron-agents_ha_resources" do - timeout 90 + timeout 150 end if node[:pacemaker][:clone_stateless_services] From 50d3e25a486b15a362adb8958d989986bdc50676 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Mon, 24 Sep 2018 15:42:29 +0200 Subject: [PATCH 138/207] nova: Increase HA resources timeout again Increased in 7eaf4773b62c5454530ccd2d9aa553bc5058f65a the timeout was working correctly for smaller scenarios (with few barclamps applied). To properly handle bigger deployments it had to be increased even more. --- chef/cookbooks/nova/recipes/controller_ha.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/nova/recipes/controller_ha.rb b/chef/cookbooks/nova/recipes/controller_ha.rb index 2e8ff490fa..65c2e2e07e 100644 --- a/chef/cookbooks/nova/recipes/controller_ha.rb +++ b/chef/cookbooks/nova/recipes/controller_ha.rb @@ -82,7 +82,7 @@ # Avoid races when creating pacemaker resources crowbar_pacemaker_sync_mark "wait-nova_ha_resources" do - timeout 120 + timeout 160 end rabbit_settings = fetch_rabbitmq_settings From 2d9e6a7e2d52b718fbc4044ea7307b1a3e3d9dcd Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 18 Sep 2018 14:01:02 +0200 Subject: [PATCH 139/207] keystone: Reorganize HA/fernet code Moved most of the fernet setup to ha.rb. Some redundancy was added but it is now easier to work with the (more-complex) HA case. The main goal was to reorder step so that fernet keys are configured and distributed to cluster nodes before apache is (re)started. 
This way, there is no time when apache would be configured to use fernet keys but no actual keys would be present. The problem was detected in cluster extension scenario where new node was added to an already existing cluster. --- chef/cookbooks/keystone/recipes/ha.rb | 162 +++++++++++++++++----- chef/cookbooks/keystone/recipes/server.rb | 94 ++----------- 2 files changed, 138 insertions(+), 118 deletions(-) diff --git a/chef/cookbooks/keystone/recipes/ha.rb b/chef/cookbooks/keystone/recipes/ha.rb index 4968034f02..3536f65711 100644 --- a/chef/cookbooks/keystone/recipes/ha.rb +++ b/chef/cookbooks/keystone/recipes/ha.rb @@ -31,45 +31,107 @@ action :nothing end.run_action(:create) -if node[:keystone][:frontend] == "apache" && node[:pacemaker][:clone_stateless_services] - include_recipe "crowbar-pacemaker::apache" - - # Wait for all nodes to reach this point so we know that all nodes will have - # all the required packages installed before we create the pacemaker - # resources - crowbar_pacemaker_sync_mark "sync-keystone_before_ha" +# Configure Keystone token fernet backend provider +if node[:keystone][:signing][:token_format] == "fernet" + template "/usr/bin/keystone-fernet-keys-push.sh" do + source "keystone-fernet-keys-push.sh" + owner "root" + group "root" + mode "0755" + end - # Avoid races when creating pacemaker resources - crowbar_pacemaker_sync_mark "wait-keystone_ha_resources" + # To be sure that rsync package is installed + package "rsync" + crowbar_pacemaker_sync_mark "sync-keystone_install_rsync" + + rsync_command = "" + initial_rsync_command = "" + + # can't use CrowbarPacemakerHelper.cluster_nodes() here as it will sometimes not return + # nodes which will be added to the cluster in current chef-client run. + cluster_nodes = node[:pacemaker][:elements]["pacemaker-cluster-member"] + cluster_nodes = cluster_nodes.map { |n| Chef::Node.load(n) } + cluster_nodes.sort_by! 
{ |n| n[:hostname] } + cluster_nodes.each do |n| + next if node.name == n.name + node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(n, "admin").address + node_rsync_command = "/usr/bin/keystone-fernet-keys-push.sh #{node_address}; " + rsync_command += node_rsync_command + # initial rsync only for (new) nodes which didn't get the keys yet + next if n.include?(:keystone) && + n[:keystone].include?(:signing) && + n[:keystone][:signing][:initial_keys_sync] + initial_rsync_command += node_rsync_command + end + raise "No other cluster members found" if rsync_command.empty? + + # Rotate primary key, which is used for new tokens + template "/var/lib/keystone/keystone-fernet-rotate" do + source "keystone-fernet-rotate.erb" + owner "root" + group node[:keystone][:group] + mode "0750" + variables( + rsync_command: rsync_command + ) + end - rabbit_settings = fetch_rabbitmq_settings - transaction_objects = [] + crowbar_pacemaker_sync_mark "wait-keystone_fernet_rotate" + + if File.exist?("/etc/keystone/fernet-keys/0") + # Mark node to avoid unneeded future rsyncs + unless node[:keystone][:signing][:initial_keys_sync] + node[:keystone][:signing][:initial_keys_sync] = true + node.save + end + else + # Setup a key repository for fernet tokens + execute "keystone-manage fernet_setup" do + command "keystone-manage fernet_setup \ + --keystone-user #{node[:keystone][:user]} \ + --keystone-group #{node[:keystone][:group]}" + action :run + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + end - # let's create a dummy resource for keystone, that can be used for ordering - # constraints (as the apache2 resource is too vague) - objects = openstack_pacemaker_controller_clone_for_transaction "keystone" do - agent "ocf:pacemaker:Dummy" - order_only_existing "( postgresql #{rabbit_settings[:pacemaker_resource]} )" + # We would like to propagate fernet keys to all (new) nodes in the cluster + execute "propagate fernet keys to all nodes in the cluster" do + 
command initial_rsync_command + action :run + only_if do + CrowbarPacemakerHelper.is_cluster_founder?(node) && + !initial_rsync_command.empty? + end end - transaction_objects.push(objects) - order_name = "o-cl-apache2-keystone" - pacemaker_order order_name do - ordering "cl-apache2 cl-keystone" - score "Mandatory" + service_transaction_objects = [] + + keystone_fernet_primitive = "keystone-fernet-rotate" + pacemaker_primitive keystone_fernet_primitive do + agent node[:keystone][:ha][:fernet][:agent] + params( + "target" => "/var/lib/keystone/keystone-fernet-rotate", + "link" => "/etc/cron.hourly/openstack-keystone-fernet", + "backup_suffix" => ".orig" + ) + op node[:keystone][:ha][:fernet][:op] action :update only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } end - transaction_objects << "pacemaker_order[#{order_name}]" + service_transaction_objects << "pacemaker_primitive[#{keystone_fernet_primitive}]" - pacemaker_transaction "keystone server" do - cib_objects transaction_objects.flatten + fernet_rotate_loc = openstack_pacemaker_controller_only_location_for keystone_fernet_primitive + service_transaction_objects << "pacemaker_location[#{fernet_rotate_loc}]" + + pacemaker_transaction "keystone-fernet-rotate cron" do + cib_objects service_transaction_objects # note that this will also automatically start the resources action :commit_new only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } end - crowbar_pacemaker_sync_mark "create-keystone_ha_resources" + crowbar_pacemaker_sync_mark "create-keystone_fernet_rotate" end # note(jtomasiak): We don't need new syncmarks for the fernet-keys-sync part. @@ -80,14 +142,6 @@ # syncmarks. fernet_resources_action = node[:keystone][:signing][:token_format] == "fernet" ? 
:create : :delete -template "/usr/bin/keystone-fernet-keys-push.sh" do - source "keystone-fernet-keys-push.sh" - owner "root" - group "root" - mode "0755" - action fernet_resources_action -end - template "/usr/bin/keystone-fernet-keys-sync.sh" do source "keystone-fernet-keys-sync.sh" owner "root" @@ -112,3 +166,45 @@ action fernet_resources_action only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } end + +# Create/update apache resources after fernet keys setup to make sure everything is ready. +if node[:keystone][:frontend] == "apache" && node[:pacemaker][:clone_stateless_services] + include_recipe "crowbar-pacemaker::apache" + + # Wait for all nodes to reach this point so we know that all nodes will have + # all the required packages installed before we create the pacemaker + # resources + crowbar_pacemaker_sync_mark "sync-keystone_before_ha" + + # Avoid races when creating pacemaker resources + crowbar_pacemaker_sync_mark "wait-keystone_ha_resources" + + rabbit_settings = fetch_rabbitmq_settings + transaction_objects = [] + + # let's create a dummy resource for keystone, that can be used for ordering + # constraints (as the apache2 resource is too vague) + objects = openstack_pacemaker_controller_clone_for_transaction "keystone" do + agent "ocf:pacemaker:Dummy" + order_only_existing "( postgresql #{rabbit_settings[:pacemaker_resource]} )" + end + transaction_objects.push(objects) + + order_name = "o-cl-apache2-keystone" + pacemaker_order order_name do + ordering "cl-apache2 cl-keystone" + score "Mandatory" + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + transaction_objects << "pacemaker_order[#{order_name}]" + + pacemaker_transaction "keystone server" do + cib_objects transaction_objects.flatten + # note that this will also automatically start the resources + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + crowbar_pacemaker_sync_mark "create-keystone_ha_resources" +end 
diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index 8b0da6fd26..850c018ba0 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -472,38 +472,8 @@ end end -if ha_enabled - include_recipe "keystone::ha" -end - -# Configure Keystone token fernet backend provider -if node[:keystone][:signing][:token_format] == "fernet" - # To be sure that rsync package is installed - package "rsync" - crowbar_pacemaker_sync_mark "sync-keystone_install_rsync" if ha_enabled - - rsync_command = "" - initial_rsync_command = "" - if ha_enabled - # can't use CrowbarPacemakerHelper.cluster_nodes() here as it will sometimes not return - # nodes which will be added to the cluster in current chef-client run. - cluster_nodes = node[:pacemaker][:elements]["pacemaker-cluster-member"] - cluster_nodes = cluster_nodes.map { |n| Chef::Node.load(n) } - cluster_nodes.sort_by! { |n| n[:hostname] } - cluster_nodes.each do |n| - next if node.name == n.name - node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(n, "admin").address - node_rsync_command = "/usr/bin/keystone-fernet-keys-push.sh #{node_address}; " - rsync_command += node_rsync_command - # initial rsync only for (new) nodes which didn't get the keys yet - next if n.include?(:keystone) && - n[:keystone].include?(:signing) && - n[:keystone][:signing][:initial_keys_sync] - initial_rsync_command += node_rsync_command - end - raise "No other cluster members found" if rsync_command.empty? 
- end - +# Configure Keystone token fernet backend provider (non-HA case) +if !ha_enabled && node[:keystone][:signing][:token_format] == "fernet" # Rotate primary key, which is used for new tokens template "/var/lib/keystone/keystone-fernet-rotate" do source "keystone-fernet-rotate.erb" @@ -511,74 +481,28 @@ group node[:keystone][:group] mode "0750" variables( - rsync_command: rsync_command + rsync_command: "" ) end - unless ha_enabled - link "/etc/cron.hourly/openstack-keystone-fernet" do - to "/var/lib/keystone/keystone-fernet-rotate" - end + link "/etc/cron.hourly/openstack-keystone-fernet" do + to "/var/lib/keystone/keystone-fernet-rotate" end - crowbar_pacemaker_sync_mark "wait-keystone_fernet_rotate" if ha_enabled - - if File.exist?("/etc/keystone/fernet-keys/0") - # Mark node to avoid unneeded future rsyncs - unless node[:keystone][:signing][:initial_keys_sync] - node[:keystone][:signing][:initial_keys_sync] = true - node.save - end - else + unless File.exist?("/etc/keystone/fernet-keys/0") # Setup a key repository for fernet tokens execute "keystone-manage fernet_setup" do command "keystone-manage fernet_setup \ --keystone-user #{node[:keystone][:user]} \ --keystone-group #{node[:keystone][:group]}" action :run - only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - end - - # We would like to propagate fernet keys to all (new) nodes in the cluster - execute "propagate fernet keys to all nodes in the cluster" do - command initial_rsync_command - action :run - only_if do - ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) && - !initial_rsync_command.empty? 
end end - - service_transaction_objects = [] - - keystone_fernet_primitive = "keystone-fernet-rotate" - pacemaker_primitive keystone_fernet_primitive do - agent node[:keystone][:ha][:fernet][:agent] - params({ - "target" => "/var/lib/keystone/keystone-fernet-rotate", - "link" => "/etc/cron.hourly/openstack-keystone-fernet", - "backup_suffix" => ".orig" - }) - op node[:keystone][:ha][:fernet][:op] - action :update - only_if { ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - service_transaction_objects << "pacemaker_primitive[#{keystone_fernet_primitive}]" - - fernet_rotate_loc = openstack_pacemaker_controller_only_location_for keystone_fernet_primitive - service_transaction_objects << "pacemaker_location[#{fernet_rotate_loc}]" - - pacemaker_transaction "keystone-fernet-rotate cron" do - cib_objects service_transaction_objects - # note that this will also automatically start the resources - action :commit_new - only_if { ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - - crowbar_pacemaker_sync_mark "create-keystone_fernet_rotate" if ha_enabled end +# This also includes fernet setup for HA case +include_recipe "keystone::ha" if ha_enabled + # Wait for all nodes to reach this point so we know that all nodes will have # all the required services correctly configured and running before we create # the keystone resources From 8e68d08841a89b48f928ce75ccd2ab32137006d6 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 28 Sep 2018 11:59:40 +0200 Subject: [PATCH 140/207] nova: Increase retries for listing flavors The command getting the list of flavors is executed pretty close to starting the nova ha resource, which can take bit more time (especially on slow clusters). So give the flavor list command a few more tries before erroring out. Note: this is only an issue in setups where the stateless resources are still managed by pacemaker (clone-stateless-service=true). 
In the alternative setup (managed by systemd) the services are started a bit earlier during the chef-client run. (cherry picked from commit b2131f057a7b6c1b0839b1d48e1e3e148f273550) --- chef/cookbooks/nova/recipes/flavors.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/nova/recipes/flavors.rb b/chef/cookbooks/nova/recipes/flavors.rb index 3cab896821..d54b1bf9d5 100644 --- a/chef/cookbooks/nova/recipes/flavors.rb +++ b/chef/cookbooks/nova/recipes/flavors.rb @@ -108,7 +108,7 @@ raise "Flavor list not obtained, is the nova-api down?" unless cmd.exitstatus.zero? node.run_state["flavorlist"] = cmd.stdout.split("\n") end - retries 5 + retries 10 end ruby_block "Flavor creation" do From 4158b48345c75ada56fd6f0a98e0983e00054ca3 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 26 Sep 2018 04:25:18 +0200 Subject: [PATCH 141/207] ceilometer: add validation that notifications are enabled client side Telemetry collection is based on clients configured to send notifications. With the change to make the client notification sending optional, we now should warn the user that the notifications have to be enabled for Ceilometer/Telemetry to work. 
--- crowbar_framework/app/models/ceilometer_service.rb | 13 +++++++++++++ crowbar_framework/config/locales/ceilometer/en.yml | 1 + 2 files changed, 14 insertions(+) diff --git a/crowbar_framework/app/models/ceilometer_service.rb b/crowbar_framework/app/models/ceilometer_service.rb index 4e8de79380..8719e68a62 100644 --- a/crowbar_framework/app/models/ceilometer_service.rb +++ b/crowbar_framework/app/models/ceilometer_service.rb @@ -142,6 +142,19 @@ def validate_proposal_after_save(proposal) end end end + + rabbitmq_proposal = Proposal.find_by( + barclamp: "rabbitmq", + name: proposal["attributes"][@bc_name]["rabbitmq_instance"] + ) + + unless rabbitmq_proposal && + rabbitmq_proposal["attributes"]["rabbitmq"]["client"]["enable_notifications"] + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.notifications_enabled" + ) + end + super end diff --git a/crowbar_framework/config/locales/ceilometer/en.yml b/crowbar_framework/config/locales/ceilometer/en.yml index 00a9317ecf..be0d10a7de 100644 --- a/crowbar_framework/config/locales/ceilometer/en.yml +++ b/crowbar_framework/config/locales/ceilometer/en.yml @@ -48,6 +48,7 @@ en: cert_required: 'Require Client Certificate' ca_certs: 'SSL CA Certificates File' validation: + notifications_enabled: 'Sending notifications has to be enabled in the RabbitMQ proposal first.' hyper_v_support: 'Hyper-V support is not available.' swift_proxy: 'Nodes with the ceilometer-swift-proxy-middleware role must also have the swift-proxy role.' nodes_count: 'The cluster assigned to the ceilometer-server role should have at least 3 nodes, but it only has %{nodes_count}.' From b10f10f0df69281f4a087869539ca67640ccb05b Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 25 Sep 2018 13:27:56 +0200 Subject: [PATCH 142/207] keystone: Move redundant code to custom resource Redundancy caused by previous changes was removed by extracting common code into a custom resource. 
--- chef/cookbooks/keystone/providers/fernet.rb | 21 +++++++++++++++++++ chef/cookbooks/keystone/recipes/ha.rb | 19 +++++------------ chef/cookbooks/keystone/recipes/server.rb | 17 ++++----------- chef/cookbooks/keystone/resources/fernet.rb | 23 +++++++++++++++++++++ 4 files changed, 53 insertions(+), 27 deletions(-) create mode 100644 chef/cookbooks/keystone/providers/fernet.rb create mode 100644 chef/cookbooks/keystone/resources/fernet.rb diff --git a/chef/cookbooks/keystone/providers/fernet.rb b/chef/cookbooks/keystone/providers/fernet.rb new file mode 100644 index 0000000000..19baee3f34 --- /dev/null +++ b/chef/cookbooks/keystone/providers/fernet.rb @@ -0,0 +1,21 @@ +action :setup do + execute "keystone-manage fernet_setup" do + command "keystone-manage fernet_setup \ + --keystone-user #{node[:keystone][:user]} \ + --keystone-group #{node[:keystone][:group]}" + action :run + end +end + +# attribute :rsync_command, kind_of: String, default: "" +action :rotate_script do + template "/var/lib/keystone/keystone-fernet-rotate" do + source "keystone-fernet-rotate.erb" + owner "root" + group node[:keystone][:group] + mode "0750" + variables( + rsync_command: new_resource.rsync_command + ) + end +end diff --git a/chef/cookbooks/keystone/recipes/ha.rb b/chef/cookbooks/keystone/recipes/ha.rb index 3536f65711..7290a0c742 100644 --- a/chef/cookbooks/keystone/recipes/ha.rb +++ b/chef/cookbooks/keystone/recipes/ha.rb @@ -66,14 +66,9 @@ raise "No other cluster members found" if rsync_command.empty? 
# Rotate primary key, which is used for new tokens - template "/var/lib/keystone/keystone-fernet-rotate" do - source "keystone-fernet-rotate.erb" - owner "root" - group node[:keystone][:group] - mode "0750" - variables( - rsync_command: rsync_command - ) + keystone_fernet "keystone-fernet-rotate-ha" do + action :rotate_script + rsync_command rsync_command end crowbar_pacemaker_sync_mark "wait-keystone_fernet_rotate" @@ -85,12 +80,8 @@ node.save end else - # Setup a key repository for fernet tokens - execute "keystone-manage fernet_setup" do - command "keystone-manage fernet_setup \ - --keystone-user #{node[:keystone][:user]} \ - --keystone-group #{node[:keystone][:group]}" - action :run + keystone_fernet "keystone-fernet-setup-ha" do + action :setup only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } end end diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index 850c018ba0..764219f5a6 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -475,14 +475,8 @@ # Configure Keystone token fernet backend provider (non-HA case) if !ha_enabled && node[:keystone][:signing][:token_format] == "fernet" # Rotate primary key, which is used for new tokens - template "/var/lib/keystone/keystone-fernet-rotate" do - source "keystone-fernet-rotate.erb" - owner "root" - group node[:keystone][:group] - mode "0750" - variables( - rsync_command: "" - ) + keystone_fernet "keystone-fernet-rotate-non-ha" do + action :rotate_script end link "/etc/cron.hourly/openstack-keystone-fernet" do @@ -491,11 +485,8 @@ unless File.exist?("/etc/keystone/fernet-keys/0") # Setup a key repository for fernet tokens - execute "keystone-manage fernet_setup" do - command "keystone-manage fernet_setup \ - --keystone-user #{node[:keystone][:user]} \ - --keystone-group #{node[:keystone][:group]}" - action :run + keystone_fernet "keystone-fernet-setup-non-ha" do + action :setup end end end diff --git 
a/chef/cookbooks/keystone/resources/fernet.rb b/chef/cookbooks/keystone/resources/fernet.rb new file mode 100644 index 0000000000..ba6f434807 --- /dev/null +++ b/chef/cookbooks/keystone/resources/fernet.rb @@ -0,0 +1,23 @@ +# +# Cookbook Name:: keystone +# Resource:: fernet +# +# Copyright:: 2018, SUSE +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +actions :setup, :rotate_script + +# :rotate_script specific attributes +attribute :rsync_command, kind_of: String, default: "" From c98bed76b7020e4d1c1764257350780693d97b4a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 1 Oct 2018 22:51:28 +0200 Subject: [PATCH 143/207] neutron: define sql_max_pool_size for config template expansion Without that change, the previous patch that tried to make this parameter configurable and is expanded to the (raised) default of 50 did not work as it was never set. (cherry picked from commit 3951bae99b7293ed50b42aad2dc358a6a5e6c6ca) --- chef/cookbooks/neutron/recipes/common_config.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/chef/cookbooks/neutron/recipes/common_config.rb b/chef/cookbooks/neutron/recipes/common_config.rb index 81ccb4b387..de753eb2e5 100644 --- a/chef/cookbooks/neutron/recipes/common_config.rb +++ b/chef/cookbooks/neutron/recipes/common_config.rb @@ -120,6 +120,7 @@ variables( sql_connection: is_neutron_server ? 
neutron[:neutron][:db][:sql_connection] : nil, sql_min_pool_size: neutron[:neutron][:sql][:min_pool_size], + sql_max_pool_size: neutron[:neutron][:sql][:max_pool_size], sql_max_pool_overflow: neutron[:neutron][:sql][:max_pool_overflow], sql_pool_timeout: neutron[:neutron][:sql][:pool_timeout], debug: neutron[:neutron][:debug], From 5ca31ff68b05598c1742f63f77743063ffd95c76 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Mon, 8 Oct 2018 13:54:18 +0200 Subject: [PATCH 144/207] database: Fix log line "Database server found at" line had wrong reference to address variable. --- chef/cookbooks/crowbar-openstack/libraries/helpers.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index e16e634334..2f6d31b2fa 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -134,7 +134,7 @@ def self.database_settings(node, barclamp) } } - Chef::Log.info("Database server found at #{@database_settings[instance][:address]}") + Chef::Log.info("Database server found at #{@database_settings[instance][sql_engine][:address]}") end end From 2f21575a37113e5740b47fe2285a663a81a62b96 Mon Sep 17 00:00:00 2001 From: Ivan Lausuch Date: Fri, 5 Oct 2018 15:21:11 +0200 Subject: [PATCH 145/207] neutron: Fix condition to enable metadata agent In commit 098b3a4 an option was introduced to disable the metadata agent on compute nodes.
In some cases, the condition to check if metadata is enabled fails because these compute nodes don't have the information of node[:neutron][:metadata][:force]. This commit fixes this problem using the information from the neutron proposal (cherry picked from commit fa92cfd41a212eb1eed25e9bd3afd7c33a811776) --- chef/cookbooks/neutron/definitions/neutron_metadata.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/definitions/neutron_metadata.rb b/chef/cookbooks/neutron/definitions/neutron_metadata.rb index aeea152d37..f6d56b9fe4 100644 --- a/chef/cookbooks/neutron/definitions/neutron_metadata.rb +++ b/chef/cookbooks/neutron/definitions/neutron_metadata.rb @@ -99,7 +99,7 @@ use_crowbar_pacemaker_service = \ (neutron_network_ha && node[:pacemaker][:clone_stateless_services]) || nova_compute_ha_enabled - enable_metadata = node.roles.include?("neutron-network") || !node[:neutron][:metadata][:force] + enable_metadata = node.roles.include?("neutron-network") || !neutron[:neutron][:metadata][:force] # In case of Cisco ACI driver, supervisord takes care of starting up # the metadata agent. From d8446d930550411a34e1c2d57a52aa23fc90a4d7 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 9 Oct 2018 16:24:10 +0200 Subject: [PATCH 146/207] database: Increase galera write set limits For some reason the default configuration shipped with the mariadb-galera package configures non-default limits for wsrep_max_ws_rows and wsrep_max_ws_size. This commit changes the limits back to the maximum supported values (which also happen to be the defaults used by MariaDB if no configuration is present). This is to reduce the likelihood of failing DB transactions (with e.g. "wsrep_max_ws_rows exceeded") when e.g. ceilometer-expirer needs to DELETE a large set of database rows.
(cherry picked from commit 8a2877cd9c3084670c6dcc4230661cd31405dffb) --- chef/cookbooks/mysql/templates/default/galera.cnf.erb | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/chef/cookbooks/mysql/templates/default/galera.cnf.erb b/chef/cookbooks/mysql/templates/default/galera.cnf.erb index a8aa1d9ca5..365dd0f519 100644 --- a/chef/cookbooks/mysql/templates/default/galera.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/galera.cnf.erb @@ -6,6 +6,16 @@ wsrep_cluster_address = "<%= @cluster_addresses %>" wsrep_provider_options = "gmcast.listen_addr=tcp://<%= @node_address %>:4567;gcs.fc_limit = <%= @wsrep_slave_threads * 5 %>;gcs.fc_factor = 0.8" wsrep_slave_threads = <%= @wsrep_slave_threads %> +# Maximum number of rows in write set +# "0" (unlimited) is the upstream default, but the default configuration in the +# rpm package overwrites that +wsrep_max_ws_rows=0 + +# Maximum size of write set +# "2147483647" (2GB) is the upstream default, but the default configuration in +# the rpm package overwrites that +wsrep_max_ws_size=2147483647 + # to enable debug level logging, set this to 1 wsrep_debug = 0 From 64c49e52a9acc33257224a667ba8c3bdf0e88035 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 10 Oct 2018 11:28:18 +0200 Subject: [PATCH 147/207] rabbitmq: Remove addresses from node attributes These are used only to populate rabbitmq templates. There's no need to store them in the node. 
--- chef/cookbooks/rabbitmq/attributes/default.rb | 2 -- chef/cookbooks/rabbitmq/recipes/default.rb | 6 ++++++ chef/cookbooks/rabbitmq/recipes/rabbit.rb | 9 --------- .../rabbitmq/templates/default/rabbitmq.config.erb | 4 ++-- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/chef/cookbooks/rabbitmq/attributes/default.rb b/chef/cookbooks/rabbitmq/attributes/default.rb index f819d98235..372f51f0b4 100644 --- a/chef/cookbooks/rabbitmq/attributes/default.rb +++ b/chef/cookbooks/rabbitmq/attributes/default.rb @@ -29,8 +29,6 @@ default[:rabbitmq][:nodename] = "rabbit@#{node[:hostname]}" # This is the address for internal usage default[:rabbitmq][:address] = nil -# These are all the addresses, possibly including public one -default[:rabbitmq][:addresses] = [] default[:rabbitmq][:port] = 5672 default[:rabbitmq][:management_port] = 15672 default[:rabbitmq][:management_address] = nil diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index 3e981c38dc..766c98735b 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -18,6 +18,11 @@ # limitations under the License. 
# +addresses = [CrowbarRabbitmqHelper.get_listen_address(node)] +if node[:rabbitmq][:listen_public] + addresses << CrowbarRabbitmqHelper.get_public_listen_address(node) +end + ha_enabled = node[:rabbitmq][:ha][:enabled] # we only do cluster if we do HA cluster_enabled = node[:rabbitmq][:cluster] && ha_enabled @@ -107,6 +112,7 @@ variables( cluster_enabled: cluster_enabled, cluster_partition_handling: cluster_partition_handling, + addresses: addresses, hipe_compile: hipe_compile ) notifies :restart, "service[rabbitmq-server]" diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index 829f102fd0..c7705ef408 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -34,15 +34,6 @@ dirty = true end -addresses = [node[:rabbitmq][:address]] -if node[:rabbitmq][:listen_public] - addresses << CrowbarRabbitmqHelper.get_public_listen_address(node) -end -if node[:rabbitmq][:addresses] != addresses - node.set[:rabbitmq][:addresses] = addresses - dirty = true -end - nodename = "rabbit@#{CrowbarRabbitmqHelper.get_ha_vhostname(node)}" if cluster_enabled diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb index 99437a9fcc..a3a2b9e44b 100644 --- a/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb @@ -8,11 +8,11 @@ {rabbit, [ {tcp_listeners, [ - <%= node[:rabbitmq][:addresses].map { |address| "{\"#{address}\", #{node[:rabbitmq][:port]}}" }.join(", ") %> + <%= @addresses.map { |address| "{\"#{address}\", #{node[:rabbitmq][:port]}}" }.join(", ") %> ]}, <% if node[:rabbitmq][:ssl][:enabled] -%> {ssl_listeners, [ - <%= node[:rabbitmq][:addresses].map { |address| "{\"#{address}\", #{node[:rabbitmq][:ssl][:port]}}" }.join(", ") %> + <%= @addresses.map { |address| "{\"#{address}\", #{node[:rabbitmq][:ssl][:port]}}" }.join(", ") %> 
]}, {ssl_options, [ <% if node[:rabbitmq][:ssl][:cert_required] -%> From 3b3d0683fa218a70c0eaacac765260c1ff9ffe88 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 10 Oct 2018 11:36:22 +0200 Subject: [PATCH 148/207] rabbitmq: Remove management_address from node attributes It is used internally by rabbitmq cookbook. There's no need to store it in the node. --- chef/cookbooks/rabbitmq/attributes/default.rb | 1 - chef/cookbooks/rabbitmq/libraries/crowbar.rb | 4 ++++ chef/cookbooks/rabbitmq/recipes/default.rb | 1 + chef/cookbooks/rabbitmq/recipes/rabbit.rb | 14 ++++++-------- .../rabbitmq/templates/default/rabbitmq.config.erb | 2 +- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/chef/cookbooks/rabbitmq/attributes/default.rb b/chef/cookbooks/rabbitmq/attributes/default.rb index 372f51f0b4..34a272d7a5 100644 --- a/chef/cookbooks/rabbitmq/attributes/default.rb +++ b/chef/cookbooks/rabbitmq/attributes/default.rb @@ -31,7 +31,6 @@ default[:rabbitmq][:address] = nil default[:rabbitmq][:port] = 5672 default[:rabbitmq][:management_port] = 15672 -default[:rabbitmq][:management_address] = nil default[:rabbitmq][:configfile] = nil default[:rabbitmq][:logdir] = nil default[:rabbitmq][:mnesiadir] = nil diff --git a/chef/cookbooks/rabbitmq/libraries/crowbar.rb b/chef/cookbooks/rabbitmq/libraries/crowbar.rb index 1e0fe643a3..faaad1e220 100644 --- a/chef/cookbooks/rabbitmq/libraries/crowbar.rb +++ b/chef/cookbooks/rabbitmq/libraries/crowbar.rb @@ -24,4 +24,8 @@ def self.get_public_listen_address(node) Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "public").address end end + + def self.get_management_address(node) + get_listen_address(node) + end end diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index 766c98735b..f2ef25974d 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -113,6 +113,7 @@ cluster_enabled: cluster_enabled, 
cluster_partition_handling: cluster_partition_handling, addresses: addresses, + management_address: CrowbarRabbitmqHelper.get_management_address(node), hipe_compile: hipe_compile ) notifies :restart, "service[rabbitmq-server]" diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index c7705ef408..a937b40a80 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -24,15 +24,13 @@ dirty = false +management_address = CrowbarRabbitmqHelper.get_management_address(node) + listen_address = CrowbarRabbitmqHelper.get_listen_address(node) if node[:rabbitmq][:address] != listen_address node.set[:rabbitmq][:address] = listen_address dirty = true end -if node[:rabbitmq][:management_address] != listen_address - node.set[:rabbitmq][:management_address] = listen_address - dirty = true -end nodename = "rabbit@#{CrowbarRabbitmqHelper.get_ha_vhostname(node)}" @@ -103,7 +101,7 @@ rabbitmq_user "adding user #{node[:rabbitmq][:user]}" do user node[:rabbitmq][:user] password node[:rabbitmq][:password] - address node[:rabbitmq][:management_address] + address management_address port node[:rabbitmq][:management_port] action :add only_if only_if_command if ha_enabled @@ -130,7 +128,7 @@ rabbitmq_user "adding user #{user[:username]}" do user user[:username] password user[:password] - address node[:rabbitmq][:management_address] + address management_address port node[:rabbitmq][:management_port] action :add only_if only_if_command if ha_enabled @@ -186,7 +184,7 @@ rabbitmq_user "adding user #{node[:rabbitmq][:trove][:user]}" do user node[:rabbitmq][:trove][:user] password node[:rabbitmq][:trove][:password] - address node[:rabbitmq][:management_address] + address management_address port node[:rabbitmq][:management_port] action :add only_if only_if_command if ha_enabled @@ -204,7 +202,7 @@ else rabbitmq_user "deleting user #{node[:rabbitmq][:trove][:user]}" do user node[:rabbitmq][:trove][:user] - 
address node[:rabbitmq][:management_address] + address management_address port node[:rabbitmq][:management_port] action :delete only_if only_if_command if ha_enabled diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb index a3a2b9e44b..9b3e629faa 100644 --- a/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb @@ -37,7 +37,7 @@ ]}, {rabbitmq_management, [ - {listener, [{ip, "<%= node[:rabbitmq][:management_address] %>"}, {port, <%= node[:rabbitmq][:management_port] %>}]}, + {listener, [{ip, "<%= @management_address %>"}, {port, <%= node[:rabbitmq][:management_port] %>}]}, {load_definitions, "/etc/rabbitmq/definitions.json"} ] } From fab0a9a274ad42d8f54cc8764243f2bdd43e59fb Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 10 Oct 2018 12:08:06 +0200 Subject: [PATCH 149/207] rabbitmq: Remove address from node attributes Storing the address in node attributes in rabbitmq cookbook and reading them in other cookbooks (via CrowbarOpenStackHelper) can cause problems when recipe tries to use address from new node before it sets it in the node. Replacing this with direct address lookup based on Network barclamp attributes solves this problem. 
--- .../crowbar-openstack/libraries/helpers.rb | 59 ++++++++++--------- chef/cookbooks/crowbar-openstack/metadata.rb | 1 + chef/cookbooks/rabbitmq/attributes/default.rb | 2 - chef/cookbooks/rabbitmq/metadata.rb | 4 -- chef/cookbooks/rabbitmq/recipes/default.rb | 5 +- chef/cookbooks/rabbitmq/recipes/monitor.rb | 3 + chef/cookbooks/rabbitmq/recipes/rabbit.rb | 7 --- .../templates/default/rabbitmq_nrpe.cfg.erb | 2 +- 8 files changed, 38 insertions(+), 45 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index e16e634334..1502eb0a07 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -193,6 +193,8 @@ def self.rabbitmq_settings(node, barclamp) else rabbit = rabbits.first + address = CrowbarRabbitmqHelper.get_listen_address(rabbit) + port = if rabbit[:rabbitmq][:ssl][:enabled] rabbit[:rabbitmq][:ssl][:port] else @@ -204,34 +206,33 @@ def self.rabbitmq_settings(node, barclamp) rabbit[:rabbitmq][:ssl][:client_ca_certs] end - single_rabbit_settings = { - # backwards compatible attributes, remove in cloud8? 
- address: rabbit[:rabbitmq][:address], - port: port, - user: rabbit[:rabbitmq][:user], - password: rabbit[:rabbitmq][:password], - vhost: rabbit[:rabbitmq][:vhost], - # end backwards comatible attrs - use_ssl: rabbit[:rabbitmq][:ssl][:enabled], - client_ca_certs: client_ca_certs, - url: "rabbit://#{rabbit[:rabbitmq][:user]}:" \ - "#{rabbit[:rabbitmq][:password]}@" \ - "#{rabbit[:rabbitmq][:address]}:#{port}/" \ - "#{rabbit[:rabbitmq][:vhost]}", - trove_url: "rabbit://#{rabbit[:rabbitmq][:trove][:user]}:" \ - "#{rabbit[:rabbitmq][:trove][:password]}@" \ - "#{rabbit[:rabbitmq][:address]}:#{port}/" \ - "#{rabbit[:rabbitmq][:trove][:vhost]}", - cluster: false, - durable_queues: false, - enable_notifications: rabbit[:rabbitmq][:client][:enable_notifications], - ha_queues: false, - heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout], - pacemaker_resource: "rabbitmq" - } - if !rabbit[:rabbitmq][:cluster] - @rabbitmq_settings[instance] = single_rabbit_settings + @rabbitmq_settings[instance] = { + # backwards compatible attributes, remove in cloud8? 
+ address: address, + port: port, + user: rabbit[:rabbitmq][:user], + password: rabbit[:rabbitmq][:password], + vhost: rabbit[:rabbitmq][:vhost], + # end backwards comatible attrs + use_ssl: rabbit[:rabbitmq][:ssl][:enabled], + client_ca_certs: client_ca_certs, + url: "rabbit://#{rabbit[:rabbitmq][:user]}:" \ + "#{rabbit[:rabbitmq][:password]}@" \ + "#{address}:#{port}/" \ + "#{rabbit[:rabbitmq][:vhost]}", + trove_url: "rabbit://#{rabbit[:rabbitmq][:trove][:user]}:" \ + "#{rabbit[:rabbitmq][:trove][:password]}@" \ + "#{address}:#{port}/" \ + "#{rabbit[:rabbitmq][:trove][:vhost]}", + cluster: false, + durable_queues: false, + enable_notifications: rabbit[:rabbitmq][:client][:enable_notifications], + ha_queues: false, + heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout], + pacemaker_resource: "rabbitmq" + } + Chef::Log.info("RabbitMQ server found") else # transport_url format: @@ -244,7 +245,7 @@ def self.rabbitmq_settings(node, barclamp) end url = "#{rabbit[:rabbitmq][:user]}:" url << "#{rabbit[:rabbitmq][:password]}@" - url << "#{rabbit[:rabbitmq][:address]}:#{port}" + url << "#{CrowbarRabbitmqHelper.get_listen_address(rabbit)}:#{port}" url << "/#{rabbit[:rabbitmq][:vhost]}" if rabbit.equal? rabbits.last url.prepend("rabbit://") if rabbit.equal? rabbits.first @@ -260,7 +261,7 @@ def self.rabbitmq_settings(node, barclamp) url = "#{rabbit[:rabbitmq][:trove][:user]}:" url << "#{rabbit[:rabbitmq][:trove][:password]}@" - url << "#{rabbit[:rabbitmq][:address]}:#{port}" + url << "#{CrowbarRabbitmqHelper.get_listen_address(rabbit)}:#{port}" url << "/#{rabbit[:rabbitmq][:trove][:vhost]}" unless rabbit.equal? rabbits.first url.prepend("rabbit://") if rabbit.equal? 
rabbits.first diff --git a/chef/cookbooks/crowbar-openstack/metadata.rb b/chef/cookbooks/crowbar-openstack/metadata.rb index 87aee4dcff..6dfa9ae566 100644 --- a/chef/cookbooks/crowbar-openstack/metadata.rb +++ b/chef/cookbooks/crowbar-openstack/metadata.rb @@ -8,3 +8,4 @@ depends "crowbar-pacemaker" depends "database" +depends "rabbitmq" diff --git a/chef/cookbooks/rabbitmq/attributes/default.rb b/chef/cookbooks/rabbitmq/attributes/default.rb index 34a272d7a5..3b7ea07a81 100644 --- a/chef/cookbooks/rabbitmq/attributes/default.rb +++ b/chef/cookbooks/rabbitmq/attributes/default.rb @@ -27,8 +27,6 @@ default[:rabbitmq][:rabbitmq_group] = "rabbitmq" default[:rabbitmq][:nodename] = "rabbit@#{node[:hostname]}" -# This is the address for internal usage -default[:rabbitmq][:address] = nil default[:rabbitmq][:port] = 5672 default[:rabbitmq][:management_port] = 15672 default[:rabbitmq][:configfile] = nil diff --git a/chef/cookbooks/rabbitmq/metadata.rb b/chef/cookbooks/rabbitmq/metadata.rb index 7695a1b4d0..4d24cea35a 100644 --- a/chef/cookbooks/rabbitmq/metadata.rb +++ b/chef/cookbooks/rabbitmq/metadata.rb @@ -25,10 +25,6 @@ description: "The Erlang node name for this server.", default: "node[:hostname]" -attribute "rabbitmq/address", - display_name: "RabbitMQ server IP address", - description: "IP address to bind." - attribute "rabbitmq/port", display_name: "RabbitMQ server port", description: "TCP port to bind." diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index f2ef25974d..8bc6be16eb 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -18,7 +18,8 @@ # limitations under the License. 
# -addresses = [CrowbarRabbitmqHelper.get_listen_address(node)] +listen_address = CrowbarRabbitmqHelper.get_listen_address(node) +addresses = [listen_address] if node[:rabbitmq][:listen_public] addresses << CrowbarRabbitmqHelper.get_public_listen_address(node) end @@ -60,7 +61,7 @@ group "root" mode 0o644 variables( - listen_address: node[:rabbitmq][:address] + listen_address: listen_address ) only_if "grep -q Requires=epmd.service /usr/lib/systemd/system/rabbitmq-server.service" end diff --git a/chef/cookbooks/rabbitmq/recipes/monitor.rb b/chef/cookbooks/rabbitmq/recipes/monitor.rb index 1d7e4d5d48..34cdacc800 100644 --- a/chef/cookbooks/rabbitmq/recipes/monitor.rb +++ b/chef/cookbooks/rabbitmq/recipes/monitor.rb @@ -28,5 +28,8 @@ mode "0644" group node[:nagios][:group] owner node[:nagios][:user] + variables( + listen_address: CrowbarRabbitmqHelper.get_listen_address(node) + ) notifies :restart, "service[nagios-nrpe-server]" end diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index a937b40a80..bb29c93307 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -25,13 +25,6 @@ dirty = false management_address = CrowbarRabbitmqHelper.get_management_address(node) - -listen_address = CrowbarRabbitmqHelper.get_listen_address(node) -if node[:rabbitmq][:address] != listen_address - node.set[:rabbitmq][:address] = listen_address - dirty = true -end - nodename = "rabbit@#{CrowbarRabbitmqHelper.get_ha_vhostname(node)}" if cluster_enabled diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb index 87295a87c8..0079c07b86 100644 --- a/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb @@ -1,4 +1,4 @@ <% unless node[:rabbitmq].nil? 
-%> -command[check_rabbit]=/usr/lib/nagios/plugins/check_rabbitmq_aliveness -H <%= node[:rabbitmq][:address] %> -u <%= node[:rabbitmq][:user] %> -p <%= node[:rabbitmq][:password] %> --vhost <%= node[:rabbitmq][:vhost] %> +command[check_rabbit]=/usr/lib/nagios/plugins/check_rabbitmq_aliveness -H <%= @listen_address %> -u <%= node[:rabbitmq][:user] %> -p <%= node[:rabbitmq][:password] %> --vhost <%= node[:rabbitmq][:vhost] %> <% end -%> From c61af6de1fa4ae63aeb5c20f948b0d863487be4a Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Mon, 15 Oct 2018 11:44:12 +0200 Subject: [PATCH 150/207] upgrade: Remove chef-client timeout from prepare-mariadb There is no need for this timeout since we simplified the script to do everything in one ssh call. In addition, timeout command only kills the ssh client and leaves chef-client running remotely. This gives the user false impression that prepare is already done but also returns false-error result (exit status 124 from timeout reported as chef-client exit code). --- bin/prepare-mariadb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/prepare-mariadb b/bin/prepare-mariadb index e0d55bc0b0..f04166bdbe 100755 --- a/bin/prepare-mariadb +++ b/bin/prepare-mariadb @@ -82,11 +82,11 @@ def remove_recipe(node) end # based on code from crowbar_framework/app/models/node.rb -def run_ssh_cmd(node, cmd, log_suffix = nil, timeout = "15s", kill_after = "5s") +def run_ssh_cmd(node, cmd, log_suffix = nil) log_file = "/var/log/crowbar/db-prepare.#{log_suffix}.log" if log_suffix log_redirect = "> #{log_file} 2>&1" if log_file start_time = Time.now - args = ["sudo", "-i", "-u", "root", "--", "timeout", "-k", kill_after, timeout, + args = ["sudo", "-i", "-u", "root", "--", "ssh", "-o", "ConnectTimeout=10", "root@#{node.name}", %("#{cmd.gsub('"', '\\"')} #{log_redirect}") @@ -110,7 +110,7 @@ def prepare_node(node, roles) log "Adding #{RECIPE} to run_list" add_recipe node log "Running chef-client on #{node.name}..." 
- res = run_ssh_cmd(node, "chef-client", "chef-client", "30m") + res = run_ssh_cmd(node, "chef-client", "chef-client") log "Run time: #{res[:run_time]}s" log "Removing #{RECIPE} from run_list" remove_recipe node From 880a66b3f9c573813fac93d71e8ea07204e6d334 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Wed, 17 Oct 2018 15:36:44 +0200 Subject: [PATCH 151/207] postgresql: Add timestamp prefix to logs Because timestamps make the logs a lot more useful (cherry picked from commit 2da5b01b2706b9d955d6b66cdd7d16d2d56f50ec) --- chef/cookbooks/postgresql/attributes/default.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/chef/cookbooks/postgresql/attributes/default.rb b/chef/cookbooks/postgresql/attributes/default.rb index 40ecfe9218..95f37ded28 100644 --- a/chef/cookbooks/postgresql/attributes/default.rb +++ b/chef/cookbooks/postgresql/attributes/default.rb @@ -219,6 +219,7 @@ default["postgresql"]["config"]["log_truncate_on_rotation"] = true default["postgresql"]["config"]["log_rotation_age"] = "1d" default["postgresql"]["config"]["log_rotation_size"] = 0 + default["postgresql"]["config"]["log_line_prefix"] = "%t " default["postgresql"]["config"]["datestyle"] = "iso, mdy" default["postgresql"]["config"]["lc_messages"] = "en_US.UTF-8" default["postgresql"]["config"]["lc_monetary"] = "en_US.UTF-8" From 97d4e244a15b772751e2c1c76ff16142ebe3bd64 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Wed, 17 Oct 2018 16:58:35 +0200 Subject: [PATCH 152/207] nova: Only emit unversioned notfications There is currently nothing consuming the version notifications. So they're piling up in rabbitmq. This is a backport of fd8e5947961350eb9fc26de2778f3bcff4d970b0 to newton, where the setting still resides in the [DEFAULT] section. 
--- chef/cookbooks/nova/templates/default/nova.conf.erb | 1 + 1 file changed, 1 insertion(+) diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index 3c628c393f..cb781aea2c 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -8,6 +8,7 @@ instance_name_template=zvm%05x my_ip = <%= node[:nova][:my_ip] %> <% unless @ironic_settings.nil? %>scheduler_host_manager = ironic_host_manager<% end %> notify_on_state_change = vm_and_task_state +notification_format = unversioned state_path = /var/lib/nova enabled_ssl_apis = <%= @ssl_enabled ? "osapi_compute,metadata" : "" %> osapi_compute_listen = <%= @bind_host %> From 54e3213201cf0831e218fce0a6c1050846874e0a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 18 Oct 2018 16:27:47 +0200 Subject: [PATCH 153/207] rabbitmq: configure notification settings also in clustered mode Accidentally when forward porting the patch the setting was only set for the native clustered mode because the hash is being assembled in two locations. Break out the common part and merge it so that we follow DRY principles. 
(cherry picked from commit a06bbb3663f311ac47b85e0fea64d6973c221d12) --- .../crowbar-openstack/libraries/helpers.rb | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index b2ceacd3a1..03ef06f575 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -206,6 +206,13 @@ def self.rabbitmq_settings(node, barclamp) rabbit[:rabbitmq][:ssl][:client_ca_certs] end + common_rabbit_settings = { + use_ssl: rabbit[:rabbitmq][:ssl][:enabled], + client_ca_certs: client_ca_certs, + enable_notifications: rabbit[:rabbitmq][:client][:enable_notifications], + heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout] + } + if !rabbit[:rabbitmq][:cluster] @rabbitmq_settings[instance] = { # backwards compatible attributes, remove in cloud8? @@ -215,8 +222,6 @@ def self.rabbitmq_settings(node, barclamp) password: rabbit[:rabbitmq][:password], vhost: rabbit[:rabbitmq][:vhost], # end backwards comatible attrs - use_ssl: rabbit[:rabbitmq][:ssl][:enabled], - client_ca_certs: client_ca_certs, url: "rabbit://#{rabbit[:rabbitmq][:user]}:" \ "#{rabbit[:rabbitmq][:password]}@" \ "#{address}:#{port}/" \ @@ -227,11 +232,9 @@ def self.rabbitmq_settings(node, barclamp) "#{rabbit[:rabbitmq][:trove][:vhost]}", cluster: false, durable_queues: false, - enable_notifications: rabbit[:rabbitmq][:client][:enable_notifications], ha_queues: false, - heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout], pacemaker_resource: "rabbitmq" - } + }.merge(common_rabbit_settings) Chef::Log.info("RabbitMQ server found") else @@ -269,16 +272,13 @@ def self.rabbitmq_settings(node, barclamp) end @rabbitmq_settings[instance] = { - use_ssl: rabbit[:rabbitmq][:ssl][:enabled], - client_ca_certs: client_ca_certs, url: rabbit_hosts.join(","), trove_url: trove_rabbit_hosts.join(","), cluster: 
true, durable_queues: true, ha_queues: true, - heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout], pacemaker_resource: "ms-rabbitmq" - } + }.merge(common_rabbit_settings) Chef::Log.info("RabbitMQ cluster found") end end From 44702ca59c912bba2951a200296eff01276d78d9 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Mon, 1 Oct 2018 21:48:32 +0200 Subject: [PATCH 154/207] rabbitmq: allow disabling queue mirroring Allows to disable queue mirroring on rabbitmq as it should not be linked to having a rabbitmq cluster. Also checks for the full policy+queue in the check_policy_command (cherry picked from commit ce87fade2402aa050b67a4377da2a475f1ee403b) --- chef/cookbooks/rabbitmq/recipes/rabbit.rb | 23 +++++++++++++++---- .../107_add_enable_queue_mirroring.rb | 11 +++++++++ chef/data_bags/crowbar/template-rabbitmq.json | 5 ++-- .../crowbar/template-rabbitmq.schema | 3 ++- 4 files changed, 34 insertions(+), 8 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/rabbitmq/107_add_enable_queue_mirroring.rb diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index 829f102fd0..ecb5865be9 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -164,12 +164,25 @@ end if cluster_enabled - quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 + if node[:rabbitmq][:enable_queue_mirroring] + quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 + else + quorum = 1 + end + + queue_regex = "^(?!amq.).*" + # policy doesnt need spaces between elements as they will be removed when listing them + # making it more difficult to check for them + policy = "{\"ha-mode\":\"exactly\",\"ha-params\":#{quorum}}" + vhost = node[:rabbitmq][:vhost] + # we need to scape the regex properly so we can use it on the grep command + queue_regex_escaped = "" + queue_regex.split("").each { |c| queue_regex_escaped << "\\" + c } - set_policy_command = "rabbitmqctl set_policy -p 
#{node[:rabbitmq][:vhost]} --apply-to queues " \ - " ha-queues '^(?!amq\.).*' '{\"ha-mode\": \"exactly\", \"ha-params\": #{quorum}}'" - check_policy_command = "rabbitmqctl list_policies -p #{node[:rabbitmq][:vhost]} | " \ - " grep -q '^#{node[:rabbitmq][:vhost]}\\s*ha-queues\\s'" + set_policy_command = "rabbitmqctl set_policy -p #{vhost} --apply-to queues " \ + " ha-queues '#{queue_regex}' '#{policy}'" + check_policy_command = "rabbitmqctl list_policies -p #{vhost} | " \ + " grep -Eq '^#{vhost}\\s*ha-queues\\s*queues\\s*#{queue_regex_escaped}\\s*#{policy}\\s*0$'" execute set_policy_command do not_if check_policy_command diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/107_add_enable_queue_mirroring.rb b/chef/data_bags/crowbar/migrate/rabbitmq/107_add_enable_queue_mirroring.rb new file mode 100644 index 0000000000..b9320f6214 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/rabbitmq/107_add_enable_queue_mirroring.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "enable_queue_mirroring" + attributes[key] = template_attributes[key] unless attributes.key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "enable_queue_mirroring" + attributes.delete(key) unless template_attributes.key? 
key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/template-rabbitmq.json b/chef/data_bags/crowbar/template-rabbitmq.json index 7f7edcb0a7..d220c88dea 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.json +++ b/chef/data_bags/crowbar/template-rabbitmq.json @@ -52,14 +52,15 @@ "mnesia": { "dump_log_write_threshold": 100, "dump_log_time_threshold": 180000 - } + }, + "enable_queue_mirroring": true } }, "deployment": { "rabbitmq": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 106, + "schema-revision": 107, "element_states": { "rabbitmq-server": [ "readying", "ready", "applying" ] }, diff --git a/chef/data_bags/crowbar/template-rabbitmq.schema b/chef/data_bags/crowbar/template-rabbitmq.schema index 337fd8bb69..46372b5fe6 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.schema +++ b/chef/data_bags/crowbar/template-rabbitmq.schema @@ -110,7 +110,8 @@ "dump_log_write_threshold": { "type": "int", "required": true}, "dump_log_time_threshold": { "type": "int", "required": true} } - } + }, + "enable_queue_mirroring": { "type": "bool", "required": true} } } } From e5755c3db35facce8827d57d2d051033c83cd246 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Wed, 3 Oct 2018 13:52:53 +0200 Subject: [PATCH 155/207] rabbitmq: disable mirroring for several queues fanout queues, reply queues, amqp default queues are not durable so it should be safe to avoid mirroring them (cherry picked from commit 341f6b059b33690e607e9a3da83cf250c97323d0) --- chef/cookbooks/rabbitmq/recipes/rabbit.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index ecb5865be9..e21796f61c 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -170,7 +170,8 @@ quorum = 1 end - queue_regex = "^(?!amq.).*" + # don't mirror queues that are 'amqp.*' or '*_fanout_*' or `reply_*` in their names + queue_regex = 
"^(?!(amqp.)|(.*_fanout_)|(reply_)).*" # policy doesnt need spaces between elements as they will be removed when listing them # making it more difficult to check for them policy = "{\"ha-mode\":\"exactly\",\"ha-params\":#{quorum}}" From ca03edb89b555d00c1e2c7477377fd392d06380a Mon Sep 17 00:00:00 2001 From: Itxaka Date: Mon, 22 Oct 2018 10:36:15 +0200 Subject: [PATCH 156/207] rabbitmq: change ha-sync-mode to automatic instead of using the default sync mode, which is manual, change to automatic which will sync any new mirrors as soon as they appear (cherry picked from commit ce32f1e5f4fc86f63cc98ff1f5c3c46dc4aa5a20) --- chef/cookbooks/rabbitmq/recipes/rabbit.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index e21796f61c..5bcb764f93 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -174,7 +174,7 @@ queue_regex = "^(?!(amqp.)|(.*_fanout_)|(reply_)).*" # policy doesnt need spaces between elements as they will be removed when listing them # making it more difficult to check for them - policy = "{\"ha-mode\":\"exactly\",\"ha-params\":#{quorum}}" + policy = "{\"ha-mode\":\"exactly\",\"ha-params\":#{quorum},\"ha-sync-mode\":\"automatic\"}" vhost = node[:rabbitmq][:vhost] # we need to scape the regex properly so we can use it on the grep command queue_regex_escaped = "" From bbde892fdd17c6fcc28be371934fabe4f9b67868 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Wed, 13 Jun 2018 13:57:47 +0200 Subject: [PATCH 157/207] rabbitmq: block client port on startup This script blocks the connection to the rabbitmq cluster in case the number of nodes decay below the half of the total. In this case the remaining rabbit nodes won't accept new connections until quorum is reached. It takes advantage of the ClusterMon agent that notifies when a rabbitmq has failed or has restored. 
All nodes rabbitmq ports will be blocked if the total number of alive nodes are below to the half of nodes of the cluster, or unblock if its over this value. The reasoning behind this patch is to allow waiting for more than one node in the cluster to be ready, which is useful for two reasons: - clients would connect to more than one node - master for queues would be spread more evenly Currently most of the connections and master queues are created on the master node, as its the first to come up when the cluster is restarted/new master promoted. co-authored-by: Ivan Lausuch (cherry picked from commit 0d26f8b119e1a3b0f6df75e554001956504d19c6) --- chef/cookbooks/rabbitmq/recipes/ha_cluster.rb | 138 ++++++++++++++++++ .../default/rabbitmq-alert-handler.erb | 7 + .../default/rabbitmq-port-blocker.erb | 43 ++++++ 3 files changed, 188 insertions(+) create mode 100644 chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb create mode 100644 chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb diff --git a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb index 9aa086cb79..fd86778b39 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb @@ -150,3 +150,141 @@ end crowbar_pacemaker_sync_mark "create-rabbitmq_ha_resources" + +clustermon_op = { "monitor" => [{ "interval" => "10s" }] } +clustermon_params = { "extra_options" => "-E /usr/bin/rabbitmq-alert-handler.sh --watch-fencing" } +name = "rabbitmq-port-blocker" +clone_name = "cl-#{name}" +location_name = "l-#{name}-controller" +node_upgrading = CrowbarPacemakerHelper.being_upgraded?(node) +clone_running = "crm resource show #{clone_name}" +primitive_running = "crm resource show #{name}" +port = node[:rabbitmq][:port] +ssl_port = node[:rabbitmq][:ssl][:port] + +crowbar_pacemaker_sync_mark "wait-rabbitmq_alert_resources" + +if CrowbarPacemakerHelper.cluster_nodes(node).size > 2 && !node_upgrading + 
template "/usr/bin/rabbitmq-alert-handler.sh" do + source "rabbitmq-alert-handler.erb" + owner "root" + group "root" + mode "0755" + variables(node: node, nodes: CrowbarPacemakerHelper.cluster_nodes(node)) + end + + template "/usr/bin/#{name}.sh" do + source "#{name}.erb" + owner "root" + group "root" + mode "0755" + variables(total_nodes: CrowbarPacemakerHelper.cluster_nodes(node).size, + port: port, ssl_port: ssl_port) + end + + pacemaker_primitive name do + agent "ocf:pacemaker:ClusterMon" + op clustermon_op + params clustermon_params + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_clone clone_name do + rsc name + meta CrowbarPacemakerHelper.clone_meta(node) + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_location location_name do + definition OpenStackHAHelper.controller_only_location(location_name, clone_name) + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_transaction name do + cib_objects [ + "pacemaker_primitive[#{name}]", + "pacemaker_clone[#{clone_name}]", + "pacemaker_location[#{location_name}]" + ] + # note that this will also automatically start the resources + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end +else + pacemaker_location location_name do + definition OpenStackHAHelper.controller_only_location(location_name, clone_name) + action :delete + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_clone "#{clone_name}_stop" do + name clone_name + rsc name + meta CrowbarPacemakerHelper.clone_meta(node) + action :stop + only_if do + running = system(clone_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running + end + end + + pacemaker_clone "#{clone_name}_delete" do + name clone_name + rsc name + meta CrowbarPacemakerHelper.clone_meta(node) + action :delete + only_if do + running = 
system(clone_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running + end + end + + pacemaker_primitive "#{name}_stop" do + agent "ocf:pacemaker:ClusterMon" + name name + op clustermon_op + params clustermon_params + action :stop + only_if do + running = system(primitive_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running + end + end + + pacemaker_primitive "#{name}_delete" do + agent "ocf:pacemaker:ClusterMon" + name name + op clustermon_op + params clustermon_params + action :delete + only_if do + running = system(primitive_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running + end + end + + file "/usr/bin/rabbitmq-alert-handler.sh" do + action :delete + end + + file "/usr/bin/#{name}.sh" do + action :delete + end + + # in case that the script was already deployed and the rule is already stored we need to clean it + # up as to not left anything around + bash "Remove existent rabbitmq blocking rules" do + code "iptables -D INPUT -p tcp --destination-port 5672 "\ + "-m comment --comment \"rabbitmq port blocker (no quorum)\" -j DROP" + only_if do + # check for the rule + cmd = "iptables -L -n | grep -F \"tcp dpt:5672 /* rabbitmq port blocker (no quorum) */\"" + system(cmd) + end + end +end + +crowbar_pacemaker_sync_mark "create-rabbitmq_alert_resources" diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb new file mode 100644 index 0000000000..4c104de656 --- /dev/null +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb @@ -0,0 +1,7 @@ +#!/bin/sh + +# exit if isn't a rabbitmq alert or is not a monitor task +[ "${CRM_notify_rsc}" = "rabbitmq" -a "${CRM_notify_task}" = "monitor" ] || exit 0 + +# launch the blocker in exclusive mode +flock /var/lock/rabbit /usr/bin/rabbitmq-port-blocker.sh \ No newline at end of file diff --git 
a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb new file mode 100644 index 0000000000..5d457b93c7 --- /dev/null +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb @@ -0,0 +1,43 @@ +#!/bin/sh + +# calcules the blocking level applying the formula +total_nodes=<%= @total_nodes %> +blocking_level=$(expr $total_nodes / 2) +comment_text="rabbitmq port blocker (no quorum)" +port=<%= @port %> +ssl_port=<%= @ssl_port %> + +# get the number of running nodes of rabbitmq in the current cluster +function running_nodes() +{ + rabbitmqctl cluster_status 2>/dev/null | tr -d "\n" | sed -e 's/running_nodes,/\nrunning_nodes/g'| grep running_nodes | cut -d "[" -f2 | cut -d "]" -f1 | tr "," "\n" | wc -l +} + +# check if exists the blocking rule for rabbitmq clients +function check_rule() +{ + iptables -L -n | grep -F "tcp dpt:$1 /* $comment_text */" | grep DROP | wc -l +} + +function create_rule(){ + if [ $(check_rule $1) -eq 0 ]; then + iptables -A INPUT -p tcp --destination-port $1 -m comment --comment "$comment_text" -j DROP + fi +} + +function delete_rule(){ + if [[ $(check_rule $1) -gt 0 ]]; then + iptables -D INPUT -p tcp --destination-port $1 -m comment --comment "$comment_text" -j DROP + fi +} + +# if the running nodes is les that the blocking level, then... +if [ $(running_nodes) -le $blocking_level ]; then + # if rule not exists the rule will be added to block the clients port + create_rule $port + create_rule $ssl_port +else + # finally if the rule exists it will be deleted. 
If there are more than one, will remove all + delete_rule $port + delete_rule $ssl_port +fi From 222179b892c4513dfb31868dccf429911c7d52a5 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sat, 6 Oct 2018 08:50:28 +0200 Subject: [PATCH 158/207] neutron: disable metadata proxy when metadata is forced If 'force_metadata' is configured as 'True' then disable the metadata proxies in the nodes do not have dhcp namespaces. They are not needed in l3 agents. And then 'enable_isolated_metadata' is neglected. So there is no need to configure it, since 'force_metadata' is a superset and configured that alone is sufficient to get the metadata from the dhcp server. Also 'enable_metadata_network' may not be required if 'enable_isolated_metadata' is enabled, so it is disabled too. (cherry picked from commit e2b408e571e302cf2574a5b9517ca9ca0220becd) --- chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb | 5 +++++ chef/cookbooks/neutron/templates/default/l3_agent.ini.erb | 3 +++ 2 files changed, 8 insertions(+) diff --git a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb index 3c9b6bebc3..a64362d700 100644 --- a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb @@ -2,8 +2,13 @@ interface_driver = <%= @interface_driver %> resync_interval = <%= @resync_interval %> dhcp_driver = <%= @dhcp_driver %> +<% if @force_metadata -%> +enable_isolated_metadata = False +enable_metadata_network = False +<% else -%> enable_isolated_metadata = <%= @enable_isolated_metadata %> enable_metadata_network = <%= @enable_metadata_network %> +<% end -%> force_metadata = <%= @force_metadata %> dns_domain = <%= @dns_domain %> <% if @nameservers -%> diff --git a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb index 88f905c33a..c4cb8b26c4 100644 --- 
a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb @@ -3,6 +3,9 @@ interface_driver = <%= @interface_driver %> <% if @dvr_enabled -%> agent_mode = <%= @dvr_mode %> <% end -%> +<% if node[:neutron][:metadata][:force] -%> +enable_metadata_proxy = False +<% end -%> metadata_port = <%= @metadata_port %> send_arp_for_ha = <%= @send_arp_for_ha %> handle_internal_only_routers = <%= @handle_internal_only_routers %> From d68a18f2af0e5059682dae9fffd805124c6a72e3 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Fri, 26 Oct 2018 16:59:27 +0200 Subject: [PATCH 159/207] Fix migrations There were a number of duplicated numbering migrations in several barclamps. Fix them by moving them to new numbers (cherry picked from commit 963c713c1dffe2a0f07ac4b0223e9c3efcc5824c) --- .../{103_remove_use_lbaasv2.rb => 120_remove_use_lbaasv2.rb} | 0 .../{105_add_back_use_l2pop.rb => 121_add_back_use_l2pop.rb} | 0 .../data_bags/crowbar/migrate/rabbitmq/{001_ha.rb => 002_ha.rb} | 0 chef/data_bags/crowbar/template-neutron.json | 2 +- 4 files changed, 1 insertion(+), 1 deletion(-) rename chef/data_bags/crowbar/migrate/neutron/{103_remove_use_lbaasv2.rb => 120_remove_use_lbaasv2.rb} (100%) rename chef/data_bags/crowbar/migrate/neutron/{105_add_back_use_l2pop.rb => 121_add_back_use_l2pop.rb} (100%) rename chef/data_bags/crowbar/migrate/rabbitmq/{001_ha.rb => 002_ha.rb} (100%) diff --git a/chef/data_bags/crowbar/migrate/neutron/103_remove_use_lbaasv2.rb b/chef/data_bags/crowbar/migrate/neutron/120_remove_use_lbaasv2.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/neutron/103_remove_use_lbaasv2.rb rename to chef/data_bags/crowbar/migrate/neutron/120_remove_use_lbaasv2.rb diff --git a/chef/data_bags/crowbar/migrate/neutron/105_add_back_use_l2pop.rb b/chef/data_bags/crowbar/migrate/neutron/121_add_back_use_l2pop.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/neutron/105_add_back_use_l2pop.rb 
rename to chef/data_bags/crowbar/migrate/neutron/121_add_back_use_l2pop.rb diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/001_ha.rb b/chef/data_bags/crowbar/migrate/rabbitmq/002_ha.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/rabbitmq/001_ha.rb rename to chef/data_bags/crowbar/migrate/rabbitmq/002_ha.rb diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index a6e18c465d..f7c2332870 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -189,7 +189,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 119, + "schema-revision": 121, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ], From 9947537e8baf0cf68a5b2121e4cda0f6f0e930a5 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Fri, 2 Nov 2018 11:24:37 +0100 Subject: [PATCH 160/207] database: fix duplicated number migration --- .../{105_add_resource_limits.rb => 110_add_resource_limits.rb} | 0 chef/data_bags/crowbar/template-database.json | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename chef/data_bags/crowbar/migrate/database/{105_add_resource_limits.rb => 110_add_resource_limits.rb} (100%) diff --git a/chef/data_bags/crowbar/migrate/database/105_add_resource_limits.rb b/chef/data_bags/crowbar/migrate/database/110_add_resource_limits.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/database/105_add_resource_limits.rb rename to chef/data_bags/crowbar/migrate/database/110_add_resource_limits.rb diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 7cd5868a9d..04f177be08 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -83,7 +83,7 @@ "database": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 109, + 
"schema-revision": 110, "element_states": { "database-server": [ "readying", "ready", "applying" ], "mysql-server": [ "readying", "ready", "applying" ] From 90c26b5cd9fab3dac4d4e18f48b0abda6e96cc83 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Tue, 16 Oct 2018 15:12:19 +0200 Subject: [PATCH 161/207] travis: tests databags by leveraging the crowbar-validate-databags gem we can now test the schemas, jsons and migrations for failures on each repo This adds the appropiate trais entry so it runs the validation on the proper dirs on each PR (cherry picked from commit f4bc97219aa45a82c7ff69fdf0598618d4a7f866) --- .travis.yml | 2 ++ Gemfile | 1 + 2 files changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index 2c97aa7603..e870d701f2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,3 +13,5 @@ matrix: - env: SPEC_TESTS script: - bundle exec rake spec + - name: "Databag testing" + script: bundle exec crowbar-validate-databags chef/data_bags/crowbar \ No newline at end of file diff --git a/Gemfile b/Gemfile index 2835337bbf..a0a64755b7 100644 --- a/Gemfile +++ b/Gemfile @@ -18,6 +18,7 @@ source "https://rubygems.org" group :development do + gem "crowbar-validate-databags", "~> 0.1" gem "rake", "< 12.0.0" gem "uglifier", "~> 2.7.2" gem "sass", "~> 3.2.19" From 660b8a52fc94bd7c1218f212b0f71f38e115cd42 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Wed, 31 Oct 2018 13:40:07 +0100 Subject: [PATCH 162/207] Gemfile: Drop crowbar-validate-databags gem version We dont need an specific version and we control the gem release versioning so we can manage it ourselves properly (cherry picked from commit 35feee300b4d6abd46dc34aad115c1e585c18d2d) --- Gemfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Gemfile b/Gemfile index a0a64755b7..137f680b63 100644 --- a/Gemfile +++ b/Gemfile @@ -18,7 +18,7 @@ source "https://rubygems.org" group :development do - gem "crowbar-validate-databags", "~> 0.1" + gem "crowbar-validate-databags" gem "rake", "< 12.0.0" gem 
"uglifier", "~> 2.7.2" gem "sass", "~> 3.2.19" From a11f1d6f76039765cdedb8a1287c3dc73370a40f Mon Sep 17 00:00:00 2001 From: Itxaka Date: Fri, 3 Aug 2018 16:03:03 +0200 Subject: [PATCH 163/207] neutron/nova: allow overriding default_log_levels Allow overriding the default_log_leves from crowbar so we can bump the log levels for several libraries like oslo_messaging or amqp This new setting accepts a list of "library=LEVEL" strings as show in the docs[0][1] [0] https://docs.openstack.org/mitaka/config-reference/compute/config-options.html#nova-logging [1] https://docs.openstack.org/mitaka/config-reference/networking/networking_options_reference.html (cherry picked from commit fbb2121d79ed4257b3e44fdda1ce39d881b2393f) --- chef/cookbooks/neutron/recipes/common_config.rb | 3 ++- .../neutron/templates/default/neutron.conf.erb | 3 +++ chef/cookbooks/nova/recipes/config.rb | 3 ++- chef/cookbooks/nova/templates/default/nova.conf.erb | 3 +++ .../migrate/neutron/122_add_default_log_levels.rb | 11 +++++++++++ .../migrate/nova/125_add_default_log_levels.rb | 11 +++++++++++ chef/data_bags/crowbar/template-neutron.json | 5 +++-- chef/data_bags/crowbar/template-neutron.schema | 5 +++++ chef/data_bags/crowbar/template-nova.json | 5 +++-- chef/data_bags/crowbar/template-nova.schema | 5 +++++ 10 files changed, 48 insertions(+), 6 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/122_add_default_log_levels.rb create mode 100644 chef/data_bags/crowbar/migrate/nova/125_add_default_log_levels.rb diff --git a/chef/cookbooks/neutron/recipes/common_config.rb b/chef/cookbooks/neutron/recipes/common_config.rb index de753eb2e5..cc452ecccf 100644 --- a/chef/cookbooks/neutron/recipes/common_config.rb +++ b/chef/cookbooks/neutron/recipes/common_config.rb @@ -148,7 +148,8 @@ infoblox: infoblox_settings, ipam_driver: ipam_driver, rpc_workers: neutron[:neutron][:rpc_workers], - use_apic_gbp: use_apic_gbp + use_apic_gbp: use_apic_gbp, + default_log_levels: 
neutron[:neutron][:default_log_levels] ) end diff --git a/chef/cookbooks/neutron/templates/default/neutron.conf.erb b/chef/cookbooks/neutron/templates/default/neutron.conf.erb index 94a3131439..fbd89e53ec 100644 --- a/chef/cookbooks/neutron/templates/default/neutron.conf.erb +++ b/chef/cookbooks/neutron/templates/default/neutron.conf.erb @@ -26,6 +26,9 @@ transport_url = <%= @rabbit_settings[:url] %> control_exchange = neutron max_header_line = <%= node[:neutron][:max_header_line] %> wsgi_keep_alive = false +<% unless @default_log_levels.length.zero? -%> +default_log_levels = <%= @default_log_levels.join(", ") %> +<% end -%> [agent] root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf diff --git a/chef/cookbooks/nova/recipes/config.rb b/chef/cookbooks/nova/recipes/config.rb index ad3bda74d8..9aa1acc44e 100644 --- a/chef/cookbooks/nova/recipes/config.rb +++ b/chef/cookbooks/nova/recipes/config.rb @@ -433,7 +433,8 @@ reserved_host_memory: reserved_host_memory, use_baremetal_filters: use_baremetal_filters, track_instance_changes: track_instance_changes, - ironic_settings: ironic_settings + ironic_settings: ironic_settings, + default_log_levels: node[:nova][:default_log_levels] ) end diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index cb781aea2c..ef45fb517b 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -105,6 +105,9 @@ control_exchange = nova <%= "zvm_user_profile=#{node[:nova][:zvm][:zvm_user_profile]}" if @libvirt_type.eql?('zvm') %> <%= "zvm_user_default_password=#{node[:nova][:zvm][:zvm_user_default_password]}" if @libvirt_type.eql?('zvm') %> <%= "zvm_user_default_privilege=#{node[:nova][:zvm][:zvm_user_default_privilege]}" if @libvirt_type.eql?('zvm') %> +<% unless @default_log_levels.length.zero? 
-%> +default_log_levels = <%= @default_log_levels.join(", ") %> +<% end -%> [api_database] <% if @api_database_connection -%> diff --git a/chef/data_bags/crowbar/migrate/neutron/122_add_default_log_levels.rb b/chef/data_bags/crowbar/migrate/neutron/122_add_default_log_levels.rb new file mode 100644 index 0000000000..4a81f81290 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/122_add_default_log_levels.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes[key] = template_attributes[key] unless attributes.key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes.delete(key) unless template_attributes.key? key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/migrate/nova/125_add_default_log_levels.rb b/chef/data_bags/crowbar/migrate/nova/125_add_default_log_levels.rb new file mode 100644 index 0000000000..4a81f81290 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/125_add_default_log_levels.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes[key] = template_attributes[key] unless attributes.key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes.delete(key) unless template_attributes.key? 
key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index f7c2332870..81fe61e9db 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -182,14 +182,15 @@ }, "metadata": { "force": false - } + }, + "default_log_levels": [] } }, "deployment": { "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 121, + "schema-revision": 122, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index ea80c2ee55..a83c620632 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -227,6 +227,11 @@ "type": "map", "required": true, "mapping": { "force": { "type": "bool", "required": true } } + }, + "default_log_levels": { + "type": "seq", + "required": false, + "sequence": [ { "type": "str" } ] } }} }}, diff --git a/chef/data_bags/crowbar/template-nova.json b/chef/data_bags/crowbar/template-nova.json index 856ac624a8..4c67ddcfa9 100644 --- a/chef/data_bags/crowbar/template-nova.json +++ b/chef/data_bags/crowbar/template-nova.json @@ -174,14 +174,15 @@ "openstack-nova-compute": { "LimitNOFILE": null } - } + }, + "default_log_levels": [] } }, "deployment": { "nova": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 124, + "schema-revision": 125, "element_states": { "nova-controller": [ "readying", "ready", "applying" ], "nova-compute-ironic": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index 52674c087b..00617a744d 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -273,6 +273,11 @@ "mapping": { 
"LimitNOFILE": { "type": "int", "required": false }} } } + }, + "default_log_levels": { + "type": "seq", + "required": false, + "sequence": [ { "type": "str" } ] } } } From 641d954a57548bdeb988a24703e392e9559612ee Mon Sep 17 00:00:00 2001 From: Tom Patzig Date: Thu, 25 Oct 2018 16:44:03 +0200 Subject: [PATCH 164/207] rabbitmq: Add list of tags comma separated for extra users otherwise the tags will be added like: `Setting user tags for user 'yarb' to ['[administrator]']` (cherry picked from commit 3034446d7511ba23ea50060b6d68d9368d447621) --- chef/cookbooks/rabbitmq/recipes/rabbit.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index be876302cd..6414100a5f 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -138,7 +138,7 @@ end # tag those users as management - execute "rabbitmqctl set_user_tags #{user[:username]} #{user[:tags]}" do + execute "rabbitmqctl set_user_tags #{user[:username]} #{user[:tags].join(",")}" do not_if "rabbitmqctl list_users | grep #{user[:username]} | grep -q #{user[:tags].join(",")}" action :run only_if only_if_command if ha_enabled From 17cfa9a5a3c97151eab9728deff519d73b8d5702 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Wed, 7 Nov 2018 12:41:12 +0100 Subject: [PATCH 165/207] neutron: Fix "enable_metadata_proxy" setting for DVR setups With DVR the l3-agent needs to be configured on the compute nodes as well. As the computes don't have the neutron proposal assigned we need to read the [:metadata][:force] attribute from a neutron-node. 
(cherry picked from commit 67824043459426e033fd770d76070fc36cb3f70a) --- chef/cookbooks/neutron/recipes/common_agent.rb | 1 + chef/cookbooks/neutron/templates/default/l3_agent.ini.erb | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/recipes/common_agent.rb b/chef/cookbooks/neutron/recipes/common_agent.rb index 4ffbb449de..2851e3b758 100644 --- a/chef/cookbooks/neutron/recipes/common_agent.rb +++ b/chef/cookbooks/neutron/recipes/common_agent.rb @@ -336,6 +336,7 @@ handle_internal_only_routers: "True", metadata_port: 9697, send_arp_for_ha: 3, + force_metadata: neutron[:neutron][:metadata][:force], periodic_interval: 40, periodic_fuzzy_delay: 5, dvr_enabled: neutron[:neutron][:use_dvr], diff --git a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb index c4cb8b26c4..adfa2cb4ec 100644 --- a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb @@ -3,7 +3,7 @@ interface_driver = <%= @interface_driver %> <% if @dvr_enabled -%> agent_mode = <%= @dvr_mode %> <% end -%> -<% if node[:neutron][:metadata][:force] -%> +<% if @force_metadata -%> enable_metadata_proxy = False <% end -%> metadata_port = <%= @metadata_port %> From 4fa46cfdad03c26e29526a5adc4017be1d910302 Mon Sep 17 00:00:00 2001 From: Itxaka Date: Thu, 25 Oct 2018 16:15:27 +0200 Subject: [PATCH 166/207] neutron: disable metering agent if no ceilometer Currently we are enabling and running the metering agent at all times in the network nodes for no reason. Instead this patch makes it so its only deployed in the case of having the ceilometer-agent role, which indicates that we are gathering metrics and we want to run it. 
Otherwise its just wasted resources (cherry picked from commit 17206688fe668ac5cba0517d4135155c9f5afd3b) --- chef/cookbooks/neutron/recipes/network_agents.rb | 6 +++++- chef/cookbooks/neutron/recipes/network_agents_ha.rb | 12 +++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/chef/cookbooks/neutron/recipes/network_agents.rb b/chef/cookbooks/neutron/recipes/network_agents.rb index df0e00bf6a..64792f2d0f 100644 --- a/chef/cookbooks/neutron/recipes/network_agents.rb +++ b/chef/cookbooks/neutron/recipes/network_agents.rb @@ -15,9 +15,10 @@ # include_recipe "neutron::common_agent" +ceilometer_agent_enabled = node.roles.include? "ceilometer-agent" package node[:neutron][:platform][:dhcp_agent_pkg] -package node[:neutron][:platform][:metering_agent_pkg] +package node[:neutron][:platform][:metering_agent_pkg] if ceilometer_agent_enabled if node[:neutron][:use_lbaas] if node[:neutron][:lbaasv2_driver] == "f5" && @@ -119,6 +120,7 @@ debug: node[:neutron][:debug], interface_driver: interface_driver, ) + only_if { ceilometer_agent_enabled } end # Delete pre-existing configuration file. @@ -223,9 +225,11 @@ subscribes :restart, resources(template: node[:neutron][:config_file]) subscribes :restart, resources("template[/etc/neutron/metering_agent.ini]") provider Chef::Provider::CrowbarPacemakerService if use_crowbar_pacemaker_service + only_if { ceilometer_agent_enabled } end utils_systemd_service_restart node[:neutron][:platform][:metering_agent_name] do action use_crowbar_pacemaker_service ? 
:disable : :enable + only_if { ceilometer_agent_enabled } end if node[:neutron][:use_lbaas] && diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index 1370e62cfc..ee63f0f1b1 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -199,12 +199,14 @@ end end - metering_agent_primitive = "neutron-metering-agent" - objects = openstack_pacemaker_controller_clone_for_transaction metering_agent_primitive do - agent node[:neutron][:ha][:network][:metering_ra] - op node[:neutron][:ha][:network][:op] + if node.roles.include? "ceilometer-agent" + metering_agent_primitive = "neutron-metering-agent" + objects = openstack_pacemaker_controller_clone_for_transaction metering_agent_primitive do + agent node[:neutron][:ha][:network][:metering_ra] + op node[:neutron][:ha][:network][:op] + end + transaction_objects.push(objects) end - transaction_objects.push(objects) if use_lbaas_agent && [nil, "", "haproxy"].include?(node[:neutron][:lbaasv2_driver]) From f552e748361703d515bd71c7fbec079066befa92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ji=C5=99=C3=AD=20Suchomel?= Date: Fri, 7 Dec 2018 14:36:50 +0100 Subject: [PATCH 167/207] database: Prevent deploying mysql-server role to monasca node Monasca has its own MariaDB instance and we can't have it conflict with the one deployed by database barclamp. See also bsc#1118759. 
--- crowbar_framework/app/models/database_service.rb | 13 +++++++++++-- crowbar_framework/config/locales/database/en.yml | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index 54f78476a2..bf2533579d 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -141,6 +141,7 @@ def validate_ha_attributes(attributes, cluster, sql_engine) def validate_proposal_after_save(proposal) attributes = proposal["attributes"][@bc_name] + deployment = proposal["deployment"][@bc_name] active_engine = attributes["sql_engine"] validation_error I18n.t( @@ -149,10 +150,18 @@ def validate_proposal_after_save(proposal) ) unless ["mysql", "postgresql"].include?(active_engine) selected_engines = ["postgresql", "mysql"].select do |engine| - nodes = proposal["deployment"][@bc_name]["elements"][role_for_engine engine] + nodes = deployment["elements"][role_for_engine engine] !nodes.nil? && !nodes.first.nil? end + expand_nodes_for_all(deployment["elements"]["mysql-server"] || []).flatten.each do |n| + node = Node.find_by_name(n) + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.monasca_deployed", + node_name: n + ) if node.roles.include?("monasca-server") + end + validation_error I18n.t( "barclamp.#{@bc_name}.validation.new_proposal_multi_engine" ) if selected_engines.length > 1 && !already_applied? @@ -167,7 +176,7 @@ def validate_proposal_after_save(proposal) validate_one_for_role proposal, db_role # HA validation - servers = proposal["deployment"][@bc_name]["elements"][db_role] + servers = deployment["elements"][db_role] unless servers.nil? || servers.first.nil? 
|| !is_cluster?(servers.first) cluster = servers.first validate_ha_attributes(attributes, cluster, engine) diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml index 46e844354d..1fd5a67423 100644 --- a/crowbar_framework/config/locales/database/en.yml +++ b/crowbar_framework/config/locales/database/en.yml @@ -67,3 +67,4 @@ en: engine_roles_mismatch: 'Assigned roles do not match selected database engine: %{db_engine}.' secondary_psql: 'PostgreSQL can only be deployed as first SQL engine. Migration from MariaDB to PostgreSQL is not supported.' new_proposal_multi_engine: 'Second SQL engine can only be added to an existing database deployment.' + monasca_deployed: 'MariaDB cannot be deployed on a node with monasca-server role: %{node_name}.' From f29fce3c6625ccfe52814b8897227617fbad5eb9 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 29 Nov 2018 13:02:06 +0100 Subject: [PATCH 168/207] galera: Use monitoring user for observing wsrep state Using the root user with empty password has the downside that once the root user no longer has an empty password, it can no longer observe the state. The monitoring user is more useful for that as it is always available, even when the cluster was already bootstrapped before. This is important in case you want to add new nodes to an existing cluster. 
(cherry picked from commit cbc475b4ab87888b4bcb0b6ee1d886fb060082b9) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index 2fcc98d835..f83eb062df 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -243,7 +243,7 @@ block do require "timeout" begin - cmd = "mysql -u '' -N -B " \ + cmd = "mysql -u 'monitoring' -N -B " \ "-e \"SHOW STATUS WHERE Variable_name='wsrep_local_state_comment';\" | cut -f 2" sync_state = "" Timeout.timeout(seconds) do From 01b1201f4bce86fccda1dcb08cee5fb526fd760d Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 29 Nov 2018 13:12:05 +0100 Subject: [PATCH 169/207] galera: Install system tables only on the founder node Non-founder nodes should get the system tables synced from the founder in the initial SST. (cherry picked from commit 707efe498b0a6338121793365a37a71e658407bc) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 28 ++++++++++++++--------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index f83eb062df..d47303671e 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -23,24 +23,30 @@ package p end -unless node[:database][:galera_bootstrapped] - directory "/var/run/mysql/" do - owner "mysql" - group "root" - mode "0755" - action :create - end +directory "/var/run/mysql/" do + owner "mysql" + group "root" + mode "0755" + action :create +end - execute "mysql_install_db" do - command "mysql_install_db" - action :run - end +directory "/var/lib/mysql/" do + owner "mysql" + group "root" + mode "0700" + action :create end node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "admin").address unless node[:database][:galera_bootstrapped] if 
CrowbarPacemakerHelper.is_cluster_founder?(node) + + execute "mysql_install_db" do + command "mysql_install_db" + action :run + end + # To bootstrap for the first time, start galera on one node # to set up the seed sst and monitoring users. From adccca79b4232a2e45f93673f66bfc23010f3e6c Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 29 Mar 2018 19:02:57 +0200 Subject: [PATCH 170/207] horizon: load monasca from databag The monasca databag config is accessing the monasca node attributes unconditionally, which causes crashes when the monasca barclamp is deactivated. Converting this to use the databag reduces unnecessary nodesearches as well as avoiding those issues (cherry picked from commit 545d3c6dcff1dde63b62be945e298c0ad9a7c691) --- chef/cookbooks/horizon/libraries/helper.rb | 4 ---- chef/cookbooks/horizon/recipes/monasca_ui.rb | 23 ++++++++------------ 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/chef/cookbooks/horizon/libraries/helper.rb b/chef/cookbooks/horizon/libraries/helper.rb index e502fddc9e..7e33d9f095 100644 --- a/chef/cookbooks/horizon/libraries/helper.rb +++ b/chef/cookbooks/horizon/libraries/helper.rb @@ -19,10 +19,6 @@ def self.monasca_public_host(node) CrowbarHelper.get_host_for_public_url(node, ssl_enabled, ha_enabled) end - def self.monasca_admin_host(node) - CrowbarHelper.get_host_for_admin_url(node, node[:monasca][:ha][:enabled]) - end - def self.api_public_url(node) host = monasca_public_host(node) # SSL is not supported at this moment diff --git a/chef/cookbooks/horizon/recipes/monasca_ui.rb b/chef/cookbooks/horizon/recipes/monasca_ui.rb index b56f953e22..141f8478c4 100644 --- a/chef/cookbooks/horizon/recipes/monasca_ui.rb +++ b/chef/cookbooks/horizon/recipes/monasca_ui.rb @@ -14,9 +14,12 @@ keystone_settings = KeystoneHelper.keystone_settings(node, @cookbook_name) monasca_server = node_search_with_cache("roles:monasca-server").first -monasca_master = node_search_with_cache("roles:monasca-master").first -monasca_host 
= MonascaUiHelper.monasca_admin_host(monasca_server) -grafana_password = monasca_master[:monasca][:master][:database_grafana_password] +if monasca_server.nil? + Chef::Log.warn("No monasca-server found.") + return +end +monasca_cfg = Barclamp::Config.load("openstack", "monasca") +grafana_password = monasca_cfg["master"]["database_grafana_password"] # Used for creating data source grafana_base_url = ::File.join(MonascaUiHelper.dashboard_local_url(node), "/grafana") @@ -26,16 +29,6 @@ ha_enabled = node[:horizon][:ha][:enabled] -if monasca_server.nil? - Chef::Log.warn("No monasca-server found.") - return -end - -if monasca_master.nil? - Chef::Log.warn("No monasca-master found.") - return -end - template "/srv/www/openstack-dashboard/openstack_dashboard/"\ "local/local_settings.d/_80_monasca_ui_settings.py" do source "_80_monasca_ui_settings.py.erb" @@ -57,7 +50,9 @@ template "/etc/grafana/grafana.ini" do source "grafana.ini.erb" variables( - database_host: monasca_host, + database_host: CrowbarHelper.get_host_for_admin_url( + monasca_server, monasca_cfg["ha"]["enabled"] + ), grafana_password: grafana_password ) owner "root" From a9af809ca9fb84cd3270eca1a7a0d42c228a03a1 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Mon, 3 Dec 2018 13:17:00 +0100 Subject: [PATCH 171/207] ssl: Fix ACL setup in ssl_setup provider In 75fe195a a method was introduced to ensure certificates that are installed by the operator were readable to OpenStack service users. This method is never tested in CI because we only ever use self-signed, crowbar-generated certs in CI. 
If an operator does generate their own certs instead of having crowbar generate them, crowbar fails with this message: ArgumentError: ssl_setup[setting up ssl for keystone] (keystone::server line 78) had an error: ArgumentError: You must supply a name when declaring a directory resource This is because the method calls an operation on an object called 'directory' without defining it first, but 'directory' is already reserved as the name of a Chef resource. Moreover, we can't directly call the ruby File module from inside this module as ruby will assume it is a relative module, not a global module. This change fixes the reference to 'directory' to use the cert path passed into the method, which must have been the intended usage, and also adds the global namespace to the File module references. Finally, it fixes the misnamed reference to the locally defined world_executable method. (cherry picked from commit 798b553ce49fe039f9c0cb72301c2c0d0175308f) --- .../libraries/provider_ssl_setup.rb | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb b/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb index 13cabb6802..6a1f4ae3a8 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb @@ -92,19 +92,19 @@ def action_setup def _fix_acl(certificate, group) partial = "/" - directory.split(File::SEPARATOR).each do |entry| + certificate.split(::File::SEPARATOR).each do |entry| next if entry.empty? 
- partial = File.join(partial, entry) + partial = ::File.join(partial, entry) # If the file is readable by all users, and the directory is # readable and executable (we can list the contents) we can # avoid an ACL modification - if File.world_readable?(partial) - next if File.file?(partial) - next if _world_executable?(partial) && File.directory?(partial) + if ::File.world_readable?(partial) + next if ::File.file?(partial) + next if _world_executable?(partial) && ::File.directory?(partial) end - mask = if File.directory?(partial) + mask = if ::File.directory?(partial) "group:#{group}:r-x" else "group:#{group}:r--" @@ -113,8 +113,8 @@ def _fix_acl(certificate, group) end end - def _world_executable(path) - File.stat(path).mode & 1 == 1 + def _world_executable?(path) + ::File.stat(path).mode & 1 == 1 end end end From b69d4e29401be047a6dec9fd4240a2886e313784 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Mon, 3 Dec 2018 13:24:28 +0100 Subject: [PATCH 172/207] keystone: Fix CA cert Apache config In 7fdbef8a the cert_required barclamp option was removed, but we still refer to it in the keystone server recipe. This means it is evaluated as nil, which causes the Apache CA path to never be set since the wsgi setup uses crowbar-openstack Apache template[1]. Remove references to the cert_required parameter, since the CA path will always be required. 
[1] https://github.com/crowbar/crowbar-openstack/blob/release/suse-openstack-cloud/8/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb (cherry picked from commit a4ecd56f87d19d422b6e59131ea5b06cc8c3a617) --- chef/cookbooks/keystone/recipes/server.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index 764219f5a6..1e933fd3f8 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -135,7 +135,7 @@ ssl_enable node[:keystone][:api][:protocol] == "https" ssl_certfile node[:keystone][:ssl][:certfile] ssl_keyfile node[:keystone][:ssl][:keyfile] - ssl_cacert node[:keystone][:ssl][:ca_certs] + ssl_cacert node[:keystone][:ssl][:ca_certs] unless node[:keystone][:ssl][:insecure] # LDAP backend can be slow.. timeout 600 end @@ -158,7 +158,7 @@ ssl_enable node[:keystone][:api][:protocol] == "https" ssl_certfile node[:keystone][:ssl][:certfile] ssl_keyfile node[:keystone][:ssl][:keyfile] - ssl_cacert node[:keystone][:ssl][:ca_certs] + ssl_cacert node[:keystone][:ssl][:ca_certs] unless node[:keystone][:ssl][:insecure] # LDAP backend can be slow.. timeout 600 end From 1f75d87e46c5fcca6c08202fdc4986a625942930 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Tue, 4 Dec 2018 12:05:19 +0100 Subject: [PATCH 173/207] keystone: Refactor keystone_register retry loop This patch refactors the retry look in the _get_token method so that it could be used for other requests if necessary. The retry loop in the :wakeup action is left alone since it is a tighter loop and is additionally used for refreshing the token after password changes. 
(cherry picked from commit 5718ae83c4090961b65e85145e36ec5e36ba0838) --- chef/cookbooks/keystone/providers/register.rb | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/chef/cookbooks/keystone/providers/register.rb b/chef/cookbooks/keystone/providers/register.rb index e850473f78..3aa6387c98 100644 --- a/chef/cookbooks/keystone/providers/register.rb +++ b/chef/cookbooks/keystone/providers/register.rb @@ -551,17 +551,8 @@ def _get_token(http, user_name, password, tenant = "") headers = _build_headers body = _build_auth(user_name, password, tenant) - resp = nil - count = 0 - error = true - while error && count < 10 - count += 1 - Chef::Log.debug "Trying to get keystone token for user '#{user_name}' (try #{count})" - resp = http.send_request("POST", path, JSON.generate(body), headers) - error = !resp.is_a?(Net::HTTPSuccess) - # retry on any 5XX (server error) error code but not on 4XX (client error) - sleep 5 if resp.is_a?(Net::HTTPServerError) - end + resp = retry_request(http, "POST", path, body, headers) + error = !resp.is_a?(Net::HTTPSuccess) if error Chef::Log.info "Failed to get token for User '#{user_name}' Tenant '#{tenant}'" @@ -673,3 +664,14 @@ def endpoint_needs_update(endpoint, new_resource) return true end end + +def retry_request(http, method, path, body, headers) + resp = nil + 10.times do |count| + resp = http.send_request(method, path, JSON.generate(body), headers) + break unless resp.is_a?(Net::HTTPServerError) + Chef::Log.debug("Retrying request #{method} #{path} : #{count}") + sleep 5 + end + resp +end From 03885ab789a5c154223143a579a91eda915aeafe Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Mon, 3 Dec 2018 17:29:54 +0100 Subject: [PATCH 174/207] keystone: Fix update endpoint for ha (bsc#1114851) Currently, changing the keystone endpoint protocol does not work in an HA deployment. The barclamp fails and leaves the deployment in a broken state with SSL only part way deployed. 
This is because the values needed to configure haproxy are determined in the compile phase, and the resources that enable SSL in haproxy and apache are out of sequence with the update to the keystone endpoints in the keystone database, so haproxy is reconfigured and reloaded with the new SSL settings before the endpoint is changed in keystone and so fall out of alignment. This commit splits the keystone endpoint update code into its own recipe, keystone::update_endpoint. It creates a new keystone_register action, :update_one_endpoint, so that we can update a single endpoint in the compile phase before haproxy is reloaded. Then a ruby block and sync marks force apache and haproxy to update their configuration and reload immediately so that the new internal endpoint can be used to update the other endpoints. This causes a small amount of downtime, which is unavoidable and expected because there is no way to change the keystone endpoints without also forcing end users to update their auth settings. While haproxy and apache are reconfiguring themselves, either haproxy or apache might cause either a gateway error or SSL error if keystone is accessed, so this adds another wakeup call before the first sync mark to give the services a chance to come up. Since haproxy is set up in a roundrobin configuration, using the wakeup call before the sync mark should in theory ensure the apache service on each controller is ready, but it's still possible the wakeup calls don't hit each controller, in which case a retry loop is added to the _update_item method keystone_register requests to give apache time to come up. 
(cherry picked from commit 8dfc0c1c1d3623d35cfb9cb0d80c060c3ad44a41) --- chef/cookbooks/keystone/providers/register.rb | 53 +++++++- chef/cookbooks/keystone/recipes/server.rb | 45 +------ .../keystone/recipes/update_endpoint.rb | 120 ++++++++++++++++++ chef/cookbooks/keystone/resources/register.rb | 6 +- 4 files changed, 178 insertions(+), 46 deletions(-) create mode 100644 chef/cookbooks/keystone/recipes/update_endpoint.rb diff --git a/chef/cookbooks/keystone/providers/register.rb b/chef/cookbooks/keystone/providers/register.rb index 3aa6387c98..17b3be9738 100644 --- a/chef/cookbooks/keystone/providers/register.rb +++ b/chef/cookbooks/keystone/providers/register.rb @@ -387,9 +387,8 @@ endpoint_template["endpoint"]["url"] = new_url endpoint_template["endpoint"]["endpoint_id"] = endpoints[interface]["id"] endpoint_template["endpoint"]["service_id"] = endpoints[interface]["service_id"] - resp = http.send_request("PATCH", - "#{path}/#{endpoints[interface]["id"]}", - JSON.generate(endpoint_template), headers) + fullpath = "#{path}/#{endpoints[interface]["id"]}" + resp = retry_request(http, "PATCH", fullpath, endpoint_template, headers) if resp.is_a?(Net::HTTPOK) Chef::Log.info("Successfully updated endpoint URL #{interface} #{new_url}") else @@ -407,6 +406,54 @@ end end +action :update_one_endpoint do + http, headers = _build_connection(new_resource) + + path = "/v3/services" + dir = "services" + my_service_id, error = _find_id(http, headers, new_resource.endpoint_service, path, dir) + unless my_service_id + msg = "Couldn't find service #{new_resource.endpoint_service} in keystone" + _raise_error(nil, msg, "update_endpoint") + end + + path = "/v3/endpoints" + + resp = http.request_get(path, headers) + if resp.is_a?(Net::HTTPOK) + data = JSON.parse(resp.read_body) + endpoints = {} + data["endpoints"].each do |endpoint| + if endpoint["service_id"].to_s == my_service_id.to_s + endpoints[endpoint["interface"]] = endpoint + end + end + interface = 
new_resource.endpoint_interface + new_url = new_resource.endpoint_url + endpoint_template = {} + endpoint_template["endpoint"] = {} + endpoint_template["endpoint"]["interface"] = interface + endpoint_template["endpoint"]["url"] = new_url + endpoint_template["endpoint"]["endpoint_id"] = endpoints[interface]["id"] + endpoint_template["endpoint"]["service_id"] = endpoints[interface]["service_id"] + fullpath = "#{path}/#{endpoints[interface]["id"]}" + resp = retry_request(http, "PATCH", fullpath, endpoint_template, headers) + if resp.is_a?(Net::HTTPOK) + Chef::Log.info("Successfully updated endpoint URL #{interface} #{new_url}") + else + Chef::Log.error("Unknown response code: #{resp.code}") + new_resource.updated_by_last_action(false) + raise "Failed to talk to keystone in update_endpoint" + end + else + Chef::Log.error "Unknown response from Keystone Server" + Chef::Log.error("Response Code: #{resp.code}") + Chef::Log.error("Response Message: #{resp.message}") + new_resource.updated_by_last_action(false) + raise "Failed to talk to keystone in update_one_endpoint" if error + end +end + # Return true on success private def _create_item(http, headers, path, body, name) diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index 1e933fd3f8..1545a5bdf6 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -235,47 +235,6 @@ password: node[:keystone][:admin][:password], tenant: node[:keystone][:admin][:tenant] } -if node[:keystone].key?(:endpoint) - endpoint_protocol = node[:keystone][:endpoint][:protocol] - endpoint_insecure = node[:keystone][:endpoint][:insecure] - # In order to update keystone's endpoints we need the old internal endpoint. 
- endpoint_port = node[:keystone][:endpoint][:port] -else - endpoint_protocol = node[:keystone][:api][:protocol] - endpoint_insecure = node[:keystone][:ssl][:insecure] - endpoint_port = node[:keystone][:api][:admin_port] -end - -endpoint_host = my_admin_host - -# Update keystone endpoints (in case we switch http/https this will update the -# endpoints to the correct ones). This needs to be done _before_ we switch -# protocols on the keystone api. -keystone_register "update keystone endpoint" do - protocol endpoint_protocol - insecure endpoint_insecure - host endpoint_host - port endpoint_port - auth register_auth_hash - endpoint_service "keystone" - endpoint_region node[:keystone][:api][:region] - endpoint_adminURL KeystoneHelper.admin_auth_url(node, my_admin_host) - endpoint_publicURL KeystoneHelper.public_auth_url(node, my_public_host) - endpoint_internalURL KeystoneHelper.internal_auth_url(node, my_admin_host) - action :update_endpoint - # Do not try to update keystone endpoint during upgrade, when keystone is not running yet - # ("done_os_upgrade" is present when first chef-client run is executed at the end of upgrade) - not_if { node["crowbar_upgrade_step"] == "done_os_upgrade" } - only_if do - node[:keystone][:bootstrap] && - (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) && - node[:keystone].key?(:endpoint) && - (node[:keystone][:endpoint][:protocol] != node[:keystone][:api][:protocol] || - node[:keystone][:endpoint][:insecure] != node[:keystone][:ssl][:insecure] || - node[:keystone][:endpoint][:port] != node[:keystone][:api][:admin_port]) - end -end - template node[:keystone][:config_file] do source "keystone.conf.erb" owner "root" @@ -491,7 +450,7 @@ end end -# This also includes fernet setup for HA case +# This also includes fernet setup for HA case. 
include_recipe "keystone::ha" if ha_enabled # Wait for all nodes to reach this point so we know that all nodes will have @@ -700,6 +659,8 @@ crowbar_pacemaker_sync_mark "create-keystone_register" if ha_enabled +include_recipe "keystone::update_endpoint" + keystone_settings = KeystoneHelper.keystone_settings(node, @cookbook_name) template "/root/.openrc" do diff --git a/chef/cookbooks/keystone/recipes/update_endpoint.rb b/chef/cookbooks/keystone/recipes/update_endpoint.rb new file mode 100644 index 0000000000..7293d9bf95 --- /dev/null +++ b/chef/cookbooks/keystone/recipes/update_endpoint.rb @@ -0,0 +1,120 @@ +# Copyright 2018 SUSE Linux GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +ha_enabled = node[:keystone][:ha][:enabled] + +if node[:keystone].key?(:endpoint) + endpoint_protocol = node[:keystone][:endpoint][:protocol] + endpoint_insecure = node[:keystone][:endpoint][:insecure] + endpoint_port = node[:keystone][:endpoint][:port] + + endpoint_changed = endpoint_protocol != node[:keystone][:api][:protocol] || + endpoint_insecure != node[:keystone][:ssl][:insecure] || + endpoint_port != node[:keystone][:api][:admin_port] + + endpoint_needs_update = endpoint_changed && + node[:keystone][:bootstrap] && + # Do not try to update keystone endpoint during upgrade, when keystone is not + # running yet ("done_os_upgrade" is present when first chef-client run is + # executed at the end of upgrade) + node["crowbar_upgrade_step"] != "done_os_upgrade" +else + endpoint_needs_update = false +end +endpoint_host = CrowbarHelper.get_host_for_admin_url(node, ha_enabled) + +use_ssl = node[:keystone][:api][:protocol] == "https" +public_host = CrowbarHelper.get_host_for_public_url(node, use_ssl, ha_enabled) +register_auth_hash = { user: node[:keystone][:admin][:username], + password: node[:keystone][:admin][:password], + tenant: node[:keystone][:admin][:tenant] } + +# In compile phase, update the internal keystone endpoint if necessary. +# Do this before the haproxy and apache configs are updated, otherwise the old +# endpoint will become invalid too early. 
+keystone_register "update keystone internal endpoint" do + protocol endpoint_protocol + insecure endpoint_insecure + host endpoint_host + port endpoint_port + auth register_auth_hash + endpoint_service "keystone" + endpoint_region node[:keystone][:api][:region] + endpoint_url KeystoneHelper.internal_auth_url(node, endpoint_host) + endpoint_interface "internal" + action :nothing + only_if do + endpoint_needs_update && + (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) + end +end.run_action(:update_one_endpoint) + +# Update variables for use in converge-phase endpoint updates +endpoint_protocol = node[:keystone][:api][:protocol] +endpoint_insecure = node[:keystone][:ssl][:insecure] +endpoint_port = node[:keystone][:api][:admin_port] + +ruby_block "Prepare haproxy and apache2 for new keystone endpoints" do + block {} + if ha_enabled + notifies :create, resources(template: node[:haproxy][:platform][:config_file]), :immediately + notifies :reload, resources(service: "haproxy"), :immediately + end + notifies :create, resources(ruby_block: "set origin for apache2 restart"), :immediately + notifies :reload, resources(service: "apache2"), :immediately + only_if { endpoint_needs_update } +end + +keystone_register "wakeup keystone after service reload" do + protocol endpoint_protocol + insecure endpoint_insecure + host endpoint_host + port endpoint_port + auth register_auth_hash + retries 10 + retry_delay 10 + action :wakeup +end + +# Wait until all nodes have refreshed haproxy and apache before trying to use +# the new internal endpoint to update the rest of the endpoints +crowbar_pacemaker_sync_mark "sync-keystone_update_endpoints" if ha_enabled + +crowbar_pacemaker_sync_mark "wait-keystone_update_endpoints" if ha_enabled + +# Update keystone endpoints (in case we switch http/https this will update the +# endpoints to the correct ones). This needs to be done _before_ we switch +# protocols on the keystone api. 
+keystone_register "update keystone endpoint" do + protocol endpoint_protocol + insecure endpoint_insecure + host endpoint_host + port endpoint_port + auth register_auth_hash + endpoint_service "keystone" + endpoint_region node[:keystone][:api][:region] + endpoint_adminURL KeystoneHelper.admin_auth_url(node, endpoint_host) + endpoint_publicURL KeystoneHelper.public_auth_url(node, public_host) + endpoint_internalURL KeystoneHelper.internal_auth_url(node, endpoint_host) + action :update_endpoint + # Do not try to update keystone endpoint during upgrade, when keystone is not running yet + # ("done_os_upgrade" is present when first chef-client run is executed at the end of upgrade) + only_if do + endpoint_needs_update && + (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) + end +end + +crowbar_pacemaker_sync_mark "create-keystone_services" if ha_enabled diff --git a/chef/cookbooks/keystone/resources/register.rb b/chef/cookbooks/keystone/resources/register.rb index 9c3d8a014f..ea75eed825 100644 --- a/chef/cookbooks/keystone/resources/register.rb +++ b/chef/cookbooks/keystone/resources/register.rb @@ -18,7 +18,7 @@ # actions :add_service, :add_endpoint_template, :add_tenant, :add_domain, :add_domain_role, :add_user, - :add_role, :add_access, :add_ec2, :wakeup, :update_endpoint + :add_role, :add_access, :add_ec2, :wakeup, :update_endpoint, :update_one_endpoint attribute :protocol, kind_of: String attribute :insecure, kind_of: [TrueClass, FalseClass], default: false @@ -43,6 +43,10 @@ attribute :endpoint_global, default: true attribute :endpoint_enabled, default: true +# :update_one_endpoint specific attributes +attribute :endpoint_interface, kind_of: String +attribute :endpoint_url, kind_of: String + # :add_tenant specific attributes attribute :tenant_name, kind_of: String From e9b9c07065198b2882d0574affb700a67af4d19f Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Mon, 10 Dec 2018 12:46:59 +0100 Subject: [PATCH 175/207] nova/neutron: Restart 
immediately on keystone changes If the keystone endpoint or keystone credentials change in nova or neutron's config file, that means the old settings must already be invalid. If we allow the config file to change but delay restarting the service, Chef may try to make nova requests, such as listing flavors, using old keystone settings and will fail. This change ensures the nova-api and neutron-server services are restarted immediately when the main config file is changed. It's possible other services should be doing the same but so far I haven't observed other services suffering ths ordering problem. There is always a risk that a chef run will fail in the middle and leave a service out of sync with its config file, and the service will need to be restarted manually. (cherry picked from commit 0aace5337525781f05eb7827d58f3d5dc82103a9) --- chef/cookbooks/keystone/recipes/update_endpoint.rb | 3 +++ chef/cookbooks/neutron/recipes/server.rb | 7 +++++++ chef/cookbooks/nova/recipes/api.rb | 8 ++++++++ 3 files changed, 18 insertions(+) diff --git a/chef/cookbooks/keystone/recipes/update_endpoint.rb b/chef/cookbooks/keystone/recipes/update_endpoint.rb index 7293d9bf95..0375c6f410 100644 --- a/chef/cookbooks/keystone/recipes/update_endpoint.rb +++ b/chef/cookbooks/keystone/recipes/update_endpoint.rb @@ -24,6 +24,9 @@ endpoint_insecure != node[:keystone][:ssl][:insecure] || endpoint_port != node[:keystone][:api][:admin_port] + # Will be reset on next chef run + node.default[:keystone][:endpoint_changed] = endpoint_changed + endpoint_needs_update = endpoint_changed && node[:keystone][:bootstrap] && # Do not try to update keystone endpoint during upgrade, when keystone is not diff --git a/chef/cookbooks/neutron/recipes/server.rb b/chef/cookbooks/neutron/recipes/server.rb index 3fcca1c6b7..35f2f8175e 100644 --- a/chef/cookbooks/neutron/recipes/server.rb +++ b/chef/cookbooks/neutron/recipes/server.rb @@ -419,6 +419,13 @@ utils_systemd_service_restart 
node[:neutron][:platform][:service_name] do action use_crowbar_pacemaker_service ? :disable : :enable end +# neutron-server must be restarted immediately if keystone settings have changed, +# otherwise neutron requests in recipes will fail +if node[:keystone][:endpoint_changed] + service node[:neutron][:platform][:service_name] do + subscribes :restart, resources(template: node[:neutron][:config_file]), :immediately + end +end if node[:neutron][:use_infoblox] service node[:neutron][:platform][:infoblox_agent_name] do diff --git a/chef/cookbooks/nova/recipes/api.rb b/chef/cookbooks/nova/recipes/api.rb index d408623c99..ed27fc1155 100644 --- a/chef/cookbooks/nova/recipes/api.rb +++ b/chef/cookbooks/nova/recipes/api.rb @@ -28,6 +28,14 @@ use_pacemaker_provider use_crowbar_pacemaker_service end +# nova-api must be restarted immediately if keystone settings have changed, +# otherwise nova requests in recipes will fail +if node[:keystone][:endpoint_changed] + service "nova-api" do + subscribes :restart, resources(template: node[:nova][:config_file]), :immediately + end +end + api_ha_enabled = node[:nova][:ha][:enabled] admin_api_host = CrowbarHelper.get_host_for_admin_url(node, api_ha_enabled) public_api_host = CrowbarHelper.get_host_for_public_url(node, node[:nova][:ssl][:enabled], api_ha_enabled) From 363c7172cd1bde3ef43bba220619d25c0b2de9ef Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Wed, 12 Dec 2018 17:27:29 +0100 Subject: [PATCH 176/207] cinder: Only set up SSL on API nodes For the sake of CI and general hygiene, only set up SSL where it's needed. 
(cherry picked from commit 6871f4cedf764dca19ab559e7f8f1844f21a0489) --- chef/cookbooks/cinder/recipes/api.rb | 12 ++++++++++++ chef/cookbooks/cinder/recipes/common.rb | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/chef/cookbooks/cinder/recipes/api.rb b/chef/cookbooks/cinder/recipes/api.rb index 867da60c69..f39dba5f30 100644 --- a/chef/cookbooks/cinder/recipes/api.rb +++ b/chef/cookbooks/cinder/recipes/api.rb @@ -31,6 +31,18 @@ my_admin_host = CrowbarHelper.get_host_for_admin_url(node, ha_enabled) my_public_host = CrowbarHelper.get_host_for_public_url(node, node[:cinder][:api][:protocol] == "https", ha_enabled) +if node[:cinder][:api][:protocol] == "https" + ssl_setup "setting up ssl for cinder" do + generate_certs node[:cinder][:ssl][:generate_certs] + certfile node[:cinder][:ssl][:certfile] + keyfile node[:cinder][:ssl][:keyfile] + group node[:cinder][:group] + fqdn node[:fqdn] + cert_required node[:cinder][:ssl][:cert_required] + ca_certs node[:cinder][:ssl][:ca_certs] + end +end + crowbar_pacemaker_sync_mark "wait-cinder_register" register_auth_hash = { user: keystone_settings["admin_user"], diff --git a/chef/cookbooks/cinder/recipes/common.rb b/chef/cookbooks/cinder/recipes/common.rb index a4f6819b2b..896b2c0a94 100644 --- a/chef/cookbooks/cinder/recipes/common.rb +++ b/chef/cookbooks/cinder/recipes/common.rb @@ -84,18 +84,6 @@ node.save if dirty -if node[:cinder][:api][:protocol] == "https" - ssl_setup "setting up ssl for cinder" do - generate_certs node[:cinder][:ssl][:generate_certs] - certfile node[:cinder][:ssl][:certfile] - keyfile node[:cinder][:ssl][:keyfile] - group node[:cinder][:group] - fqdn node[:fqdn] - cert_required node[:cinder][:ssl][:cert_required] - ca_certs node[:cinder][:ssl][:ca_certs] - end -end - availability_zone = nil unless node[:crowbar_wall].nil? or node[:crowbar_wall][:openstack].nil? 
if node[:crowbar_wall][:openstack][:availability_zone] != "" From 2de03ff1b53c8c267750fb15b65e413d1ee3aa62 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Thu, 13 Dec 2018 14:38:59 +0100 Subject: [PATCH 177/207] horizon: Fix SSL CA configuration for apache 2.4 The SSLCertificateChainFile option was obsoleted in apache 2.4.8[1]. Now if used, it invalidates the SSL configuration and causes a "no peer certificate available" error. Use the SSLCACertificateFile directive instead. [1] https://httpd.apache.org/docs/2.4/mod/mod_ssl.html#sslcertificatechainfile (cherry picked from commit c20ae2c2213bbe7f4fe89ef8f7d78de38c4b8d3c) --- .../horizon/templates/suse/openstack-dashboard.conf.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb b/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb index 01ee231930..6595a61c6c 100644 --- a/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb +++ b/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb @@ -35,7 +35,7 @@ Listen <%= @bind_host %>:<%= @bind_port_ssl %> SSLCertificateFile <%= @ssl_crt_file %> SSLCertificateKeyFile <%= @ssl_key_file %> <% unless @ssl_crt_chain_file.nil? or @ssl_crt_chain_file.empty? %> - SSLCertificateChainFile <%= @ssl_crt_chain_file %> + SSLCACertificateFile <%= @ssl_crt_chain_file %> <% end %> <% else %> From 9f6d5920930fbf758b222b851c19f95b4a8dcc76 Mon Sep 17 00:00:00 2001 From: Colleen Murphy Date: Fri, 14 Dec 2018 14:19:59 +0100 Subject: [PATCH 178/207] nova: Use internal glance and neutron endpoints If not explicitly given, the glance and neutron clients will default to making requests to the public endpoint for those services. For service-to-service communication, we always want to use the internal interface. This became clear in cases when we are using secure, non-self-signed SSL certificates for these services. 
If the certificate CN matches the name of the internal endpoint but not the public endpoint, the SSL validation will fail and nova-compute will not be able to retrieve images and floating IPs. (cherry picked from commit 6ff325dbb7e4ce00f0bb60c8f78fc4f674922b1d) --- chef/cookbooks/nova/templates/default/nova.conf.erb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index ef45fb517b..91f2abb400 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -157,6 +157,7 @@ pool_timeout = <%= node[:nova][:db][:pool_timeout] %> [glance] <%= "host = #{@glance_server_host}" unless @glance_server_host.nil? %> <%= "port = #{@glance_server_port}" unless @glance_server_host.nil? %> +endpoint_type = internal protocol = <%= @glance_server_protocol %> <%= "api_servers = #{@glance_server_protocol}://#{@glance_server_host}:#{@glance_server_port}" unless @glance_server_host.nil? %> <%= "api_insecure = #{@glance_server_insecure ? 'True' : 'False'}" unless @glance_server_host.nil? %> @@ -235,6 +236,7 @@ auth_url = <%= KeystoneHelper.versioned_service_URL(@keystone_settings["protocol @keystone_settings["internal_url_host"], @keystone_settings["service_port"], "2.0") %> +endpoint_type = internal auth_type = password insecure = <%= @neutron_insecure ? 'True' : 'False' %> password = <%= @neutron_service_password %> From de3fab7544c7e68d2e498eb5929f487d90ae5a82 Mon Sep 17 00:00:00 2001 From: Abel Navarro Date: Thu, 3 Jan 2019 15:20:16 +0100 Subject: [PATCH 179/207] Update travis config to solve bundler dependency Bundler has been updated and our travis configuration is no longer working. 
There's a proposed fix by limiting bunlder version to be less than 2.0.0: https://github.com/travis-ci/travis-ci/issues/5290 Our last working bundler version was 1.17.3 (cherry picked from commit ebc8927aada07e83cbee8ae75567df7f609618fe) --- .travis.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e870d701f2..3c9b24d3a5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,6 +5,9 @@ dist: trusty rvm: 2.1.9 +before_install: + - rvm @global do gem install bundler -v '< 2.0.0' + matrix: include: - env: SYNTAXCHECK @@ -14,4 +17,4 @@ matrix: script: - bundle exec rake spec - name: "Databag testing" - script: bundle exec crowbar-validate-databags chef/data_bags/crowbar \ No newline at end of file + script: bundle exec crowbar-validate-databags chef/data_bags/crowbar From 5666acc1ba621d924d52500adcef5957cc949ceb Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Fri, 14 Dec 2018 11:40:48 +0100 Subject: [PATCH 180/207] mariadb: Set wsrep_sst_method to mariabackup (bsc#1116686) MariaDB 10.2.16 added a new option innodb_safe_truncate which requies a the switch to mariabackup which can handle crash-safe rename operations. 
(cherry picked from commit cf6018758eca6af67f3f3c2c44c895608a0ae516) --- chef/cookbooks/mysql/templates/default/galera.cnf.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/mysql/templates/default/galera.cnf.erb b/chef/cookbooks/mysql/templates/default/galera.cnf.erb index 365dd0f519..d38f53aa40 100644 --- a/chef/cookbooks/mysql/templates/default/galera.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/galera.cnf.erb @@ -32,5 +32,5 @@ query_cache_type = 0 expire_logs_days = <%= @expire_logs_days %> # SST method -wsrep_sst_method = xtrabackup-v2 +wsrep_sst_method = mariabackup wsrep_sst_auth = <%= @sstuser %>:<%= @sstuser_password %> From b2ba183a0008d2c33b12d0c370557aa27416930e Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Fri, 8 Feb 2019 11:12:11 +0100 Subject: [PATCH 181/207] mariadb: Remove installing the xtrabackup package We switched some time ago to mariabackup and therefore xtrabackup is not needed anymore (cherry picked from commit 10f65db2dd4fa19ba4ee99c551a8634513363a29) --- chef/cookbooks/mysql/attributes/server.rb | 1 - 1 file changed, 1 deletion(-) diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb index a0cf51696b..ff48db8ed2 100644 --- a/chef/cookbooks/mysql/attributes/server.rb +++ b/chef/cookbooks/mysql/attributes/server.rb @@ -42,7 +42,6 @@ default[:mysql][:galera_packages] = [ "galera-3-wsrep-provider", "mariadb-tools", - "xtrabackup", "socat", "galera-python-clustercheck" ] From 555f7ef7b5e9f081b280ac45d8b901d85a27635c Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 11 Jul 2018 15:38:29 +0200 Subject: [PATCH 182/207] mysql: create .my.cnf in root home directory for mysql cmdline Searching for the mysql database root password can be a major pain, so just store it in ~/.my.cnf so that mysql defaults to it and everything just works when you're root on the controller. 
As you can change the password when you're root, this shouldn't be an additional security hassle. (cherry picked from commit 95d2936fd5896a6942adcbb22cfa34c02f1fbec2) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- chef/cookbooks/mysql/recipes/server.rb | 12 +++++++++++- .../mysql/templates/default/root-my.cnf.erb | 5 +++++ 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 chef/cookbooks/mysql/templates/default/root-my.cnf.erb diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index d47303671e..4aebd7707e 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -289,7 +289,7 @@ password \"#{node[:database][:mysql][:server_root_password]}\"" action :run only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - only_if "/usr/bin/mysql -u root -e 'show databases;'" + only_if "/usr/bin/mysql --no-defaults -u root -e 'select (1);'" end crowbar_pacemaker_sync_mark "sync-database_root_password" do diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index f19219514e..20f1fc4218 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -180,7 +180,7 @@ command "/usr/bin/mysqladmin -u root password \"#{server_root_password}\"" action :run not_if { ha_enabled } # password already set as part of the ha bootstrap - only_if "/usr/bin/mysql -u root -e 'show databases;'" + only_if "/usr/bin/mysql --no-defaults -u root -e 'select (1);'" end db_settings = fetch_database_settings(@cookbook_name) @@ -279,3 +279,13 @@ mode "0755" action :create end + +template "/root/.my.cnf" do + source "root-my.cnf.erb" + owner "root" + group "root" + mode "0600" + variables( + password: node[:database][:mysql][:server_root_password] + ) +end diff --git a/chef/cookbooks/mysql/templates/default/root-my.cnf.erb b/chef/cookbooks/mysql/templates/default/root-my.cnf.erb new file mode 100644 index 
0000000000..714e83417d --- /dev/null +++ b/chef/cookbooks/mysql/templates/default/root-my.cnf.erb @@ -0,0 +1,5 @@ +# Managed by Crowbar +[client] +socket = /var/run/mysql/mysql.sock +user = root +password = <%= @password %> From 1eb91c1f92ddf4f68dc7b61183bfd64885f75d43 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Thu, 14 Feb 2019 16:16:04 +0100 Subject: [PATCH 183/207] mysql: Do not set a custom logfile for mysqld (bsc#1112767) the mariadb package comes with a logrotate script (/etc/logrotate.d/mariadb) that currently only rotates the default /var/log/mysql/mysqld.log file. So let's use that file instead of a custom path to fix log rotation. Note: If "slow_query_logging" is enabled, this file is still not rotated. This will hopefully be fixed in the package itself[1] [1] https://build.opensuse.org/request/show/676156 (cherry picked from commit aabd06b7a291a2e5a3465238957f32390c8fffdd) (cherry picked from commit a0bb13aeddef74775ab15610d4b2def6cec8ba0c) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 +- chef/cookbooks/mysql/templates/default/logging.cnf.erb | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index d47303671e..2fb8fcebc4 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -201,7 +201,7 @@ "check_user" => "monitoring", "socket" => "/var/run/mysql/mysql.sock", "datadir" => node[:database][:mysql][:datadir], - "log" => "/var/log/mysql/mysql_error.log" + "log" => "/var/log/mysql/mysqld.log" }) op primitive_op action :update diff --git a/chef/cookbooks/mysql/templates/default/logging.cnf.erb b/chef/cookbooks/mysql/templates/default/logging.cnf.erb index 7f360264e6..a3ed954fcb 100644 --- a/chef/cookbooks/mysql/templates/default/logging.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/logging.cnf.erb @@ -1,6 +1,4 @@ [mysqld] -log_error=/var/log/mysql/mysql_error.log - <% if 
@slow_query_logging_enabled -%> slow_query_log = 1 slow_query_log_file = /var/log/mysql/mysql_slow.log From fc5971cbc019d1bf93723f401cd083647feae737 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Fri, 15 Feb 2019 15:59:45 +0100 Subject: [PATCH 184/207] ceilometer: Use pacemaker to handle expirer cron link (bsc#1113107) With recent openstack-ceilometer packaging changes (Newton[1], Pike[2]), the ceilometer-expirer cronjob is no longer installed on all nodes where the openstack-ceilometer-collector package is installed. Instead the cronjob is installed in /usr/share/ceilometer//openstack-ceilometer-expirer.cron . This is needed to avoid parallel runs of the cronjob from different nodes which lead to database deadlocks. Now use pacemaker to handle a symlink in /etc/cron.daily so the cronjob is executed on a single node in the cluster. [1] https://build.opensuse.org/request/show/676177 [2] https://build.opensuse.org/request/show/676174 --- .../ceilometer/attributes/default.rb | 5 +++ .../cookbooks/ceilometer/recipes/server_ha.rb | 33 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/chef/cookbooks/ceilometer/attributes/default.rb b/chef/cookbooks/ceilometer/attributes/default.rb index 016bb300dd..410f77b884 100644 --- a/chef/cookbooks/ceilometer/attributes/default.rb +++ b/chef/cookbooks/ceilometer/attributes/default.rb @@ -78,6 +78,7 @@ default[:ceilometer][:ha][:agent_notification][:agent] = "systemd:#{agent_notification_service_name}" default[:ceilometer][:ha][:agent_notification][:op][:monitor][:interval] = "10s" + default[:ceilometer][:ha][:central][:enabled] = false default[:ceilometer][:ha][:central][:agent] = "systemd:#{central_service_name}" default[:ceilometer][:ha][:central][:op][:monitor][:interval] = "10s" @@ -90,3 +91,7 @@ # this establishes which node is used for mongo client connections that # we use to initialize the replica set default[:ceilometer][:ha][:mongodb][:replica_set][:controller] = false + +# Pacemaker ceilometer 
expirer cronjob link +default[:ceilometer][:ha][:expirer][:cronjob][:agent] = "ocf:heartbeat:symlink" +default[:ceilometer][:ha][:expirer][:cronjob][:op][:monitor][:interval] = "10s" diff --git a/chef/cookbooks/ceilometer/recipes/server_ha.rb b/chef/cookbooks/ceilometer/recipes/server_ha.rb index df4257ab21..6eb5ecc867 100644 --- a/chef/cookbooks/ceilometer/recipes/server_ha.rb +++ b/chef/cookbooks/ceilometer/recipes/server_ha.rb @@ -23,6 +23,39 @@ action :nothing end.run_action(:create) +# setup the expirer cronjob only on a single node to not +# run into DB deadlocks (bsc#1113107) +crowbar_pacemaker_sync_mark "wait-ceilometer_expirer_cron" + +expirer_transaction_objects = [] + +ceilometer_expirer_cron_primitive = "ceilometer-expirer-cron" +pacemaker_primitive ceilometer_expirer_cron_primitive do + agent node[:ceilometer][:ha][:expirer][:cronjob][:agent] + params( + # target is from the RPM package openstack-ceilometer + "target" => "/usr/share/ceilometer/openstack-ceilometer-expirer.cron", + "link" => "/etc/cron.daily/openstack-ceilometer-expirer.cron", + "backup_suffix" => ".orig" + ) + op node[:ceilometer][:ha][:expirer][:cronjob][:op] + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end +expirer_transaction_objects << "pacemaker_primitive[#{ceilometer_expirer_cron_primitive}]" + +ceilometer_expirer_cron_loc = openstack_pacemaker_controller_only_location_for ceilometer_expirer_cron_primitive +expirer_transaction_objects << "pacemaker_location[#{ceilometer_expirer_cron_loc}]" + +pacemaker_transaction "ceilometer-expirer cron" do + cib_objects expirer_transaction_objects + # note that this will also automatically start the resources + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end + +crowbar_pacemaker_sync_mark "create-ceilometer_expirer_cron" + if node[:pacemaker][:clone_stateless_services] # Wait for all nodes to reach this point so we know that they will have # all the required packages 
installed and configuration files updated From aa685c74630dc268beca81462ce89d3c6a273234 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 7 Feb 2019 12:59:55 +0100 Subject: [PATCH 185/207] neutron: Added option to use L3 HA with Keepalived neutron-ha-tool helps moving routers from one node to other active nodes. This is triggered when the L3 agent on that particular network node is down. This behavior is not compatible with L3 HA. L3 HA implementation uses one master router and several others in standby. If a router/agent is down, it will be discovered through VRRP protocol [1] and another one will take the master role. Both L3 HA and neuton-ha-tool are incompatible because they both has their own way of balancing routers on fallen nodes. This implementation lets the user configure L3 HA, which will disable neutron-ha-tool. If L3 HA is deactivated, neutron-ha-tool comes back. Notice that switching between neutron-ha-tool and L3 HA is possible, but the routers created are not updated to reflect this change. If they were created as HA routers they will remain that way. The contrary is also true. It is up to the user to manually change those routers or delete them and create new ones. 
Co-Authored-By: Itxaka Co-Authored-By: Abel Navarro [1] https://wiki.openstack.org/wiki/Neutron/L3_High_Availability_VRRP (cherry picked from commit 6c2034309569d66099f49a3de8697a812c9bf176) --- chef/cookbooks/neutron/attributes/default.rb | 3 + .../cookbooks/neutron/recipes/common_agent.rb | 2 + .../neutron/recipes/common_config.rb | 1 + .../neutron/recipes/network_agents.rb | 2 + .../neutron/recipes/network_agents_ha.rb | 122 ++++++++++-------- .../templates/default/l3_agent.ini.erb | 1 + .../templates/default/neutron.conf.erb | 4 + .../migrate/neutron/123_add_use_l3_ha.rb | 22 ++++ chef/data_bags/crowbar/template-neutron.json | 6 +- .../data_bags/crowbar/template-neutron.schema | 4 + .../app/models/neutron_service.rb | 8 ++ 11 files changed, 120 insertions(+), 55 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/123_add_use_l3_ha.rb diff --git a/chef/cookbooks/neutron/attributes/default.rb b/chef/cookbooks/neutron/attributes/default.rb index 8bf57d6467..5c07fb56ba 100644 --- a/chef/cookbooks/neutron/attributes/default.rb +++ b/chef/cookbooks/neutron/attributes/default.rb @@ -118,6 +118,7 @@ l3_agent_name: "openstack-neutron-l3-agent", l3_agent_pkg: "openstack-neutron-l3-agent", ha_tool_pkg: "openstack-neutron-ha-tool", + l3_ha_pkg: "keepalived", hyperv_pkg: "python-networking-hyperv", nsx_pkgs: ["openvswitch-pki", "ruby2.1-rubygem-faraday"], @@ -164,6 +165,7 @@ l3_agent_name: "neutron-l3-agent", l3_agent_pkg: "openstack-neutron", ha_tool_pkg: "", + l3_ha_pkg: "", hyperv_pkg: "", nsx_pkgs: [""], cisco_pkgs: ["python-networking-cisco"], @@ -210,6 +212,7 @@ l3_agent_name: "neutron-l3-agent", l3_agent_pkg: "neutron-l3-agent", ha_tool_pkg: "", + l3_ha_pkg: "", hyperv_pkg: "python-networking-hyperv", nsx_pkgs: [""], cisco_pkgs: [""], diff --git a/chef/cookbooks/neutron/recipes/common_agent.rb b/chef/cookbooks/neutron/recipes/common_agent.rb index 2851e3b758..1c68b1ec5b 100644 --- a/chef/cookbooks/neutron/recipes/common_agent.rb +++ 
b/chef/cookbooks/neutron/recipes/common_agent.rb @@ -340,6 +340,8 @@ periodic_interval: 40, periodic_fuzzy_delay: 5, dvr_enabled: neutron[:neutron][:use_dvr], + l3_ha_enabled: node.roles.include?("neutron-network") && neutron[:neutron][:l3_ha][:use_l3_ha], + l3_ha_vrrp_password: neutron[:neutron][:l3_ha][:vrrp_password], dvr_mode: node.roles.include?("neutron-network") ? "dvr_snat" : "dvr" ) end diff --git a/chef/cookbooks/neutron/recipes/common_config.rb b/chef/cookbooks/neutron/recipes/common_config.rb index cc452ecccf..4ad5d4a3bf 100644 --- a/chef/cookbooks/neutron/recipes/common_config.rb +++ b/chef/cookbooks/neutron/recipes/common_config.rb @@ -142,6 +142,7 @@ service_plugins: service_plugins, allow_overlapping_ips: neutron[:neutron][:allow_overlapping_ips], dvr_enabled: neutron[:neutron][:use_dvr], + l3_ha_enabled: neutron[:neutron][:l3_ha][:use_l3_ha], network_nodes_count: network_nodes_count, dns_domain: neutron[:neutron][:dhcp_domain], mtu_value: mtu_value, diff --git a/chef/cookbooks/neutron/recipes/network_agents.rb b/chef/cookbooks/neutron/recipes/network_agents.rb index 64792f2d0f..a679435263 100644 --- a/chef/cookbooks/neutron/recipes/network_agents.rb +++ b/chef/cookbooks/neutron/recipes/network_agents.rb @@ -29,6 +29,8 @@ end end +package node[:neutron][:platform][:l3_ha_pkg] if node[:neutron][:l3_ha][:use_l3_ha] + # Enable ip forwarding on network node for SLE11 ruby_block "edit /etc/sysconfig/sysctl for IP_FORWARD" do block do diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index ee63f0f1b1..d632be2970 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -246,68 +246,82 @@ if use_l3_agent # Remove old resource ha_tool_primitive_name = "neutron-ha-tool" - pacemaker_primitive ha_tool_primitive_name do - agent node[:neutron][:ha][:network][:ha_tool_ra] - action [:stop, :delete] - only_if "crm configure show 
#{ha_tool_primitive_name}" - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + ha_service_primitive_name = "neutron-l3-ha-service" - # Remove old location - ha_tool_location_name = "l-#{ha_tool_primitive_name}-controller" - pacemaker_location ha_tool_location_name do - action :delete - only_if "crm configure show #{ha_tool_location_name}" - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + if node[:neutron][:l3_ha][:use_l3_ha] + ## Do we really need to delete it? what about routers not + # marked with --ha Enabled ?! + pacemaker_primitive ha_service_primitive_name do + agent "systemd:neutron-l3-ha-service" + op node[:neutron][:ha][:neutron_l3_ha_resource][:op] + action [:stop, :delete] + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + only_if "crm configure show #{ha_service_primitive_name}" + end + else + pacemaker_primitive ha_tool_primitive_name do + agent node[:neutron][:ha][:network][:ha_tool_ra] + action [:stop, :delete] + only_if "crm configure show #{ha_tool_primitive_name}" + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end - # Remove old ordering - ha_tool_ordering_name = "o-#{ha_tool_primitive_name}" - pacemaker_order ha_tool_ordering_name do - action :delete - only_if "crm configure show #{ha_tool_ordering_name}" - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + # Remove old location + ha_tool_location_name = "l-#{ha_tool_primitive_name}-controller" + pacemaker_location ha_tool_location_name do + action :delete + only_if "crm configure show #{ha_tool_location_name}" + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end - # Add pacemaker resource for neutron-l3-ha-service - ha_service_transaction_objects = [] - ha_service_primitive_name = "neutron-l3-ha-service" + # Remove old ordering + ha_tool_ordering_name = "o-#{ha_tool_primitive_name}" + pacemaker_order ha_tool_ordering_name do + action :delete + only_if "crm configure show 
#{ha_tool_ordering_name}" + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end - pacemaker_primitive ha_service_primitive_name do - agent "systemd:neutron-l3-ha-service" - op node[:neutron][:ha][:neutron_l3_ha_resource][:op] - action :update - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - ha_service_transaction_objects << "pacemaker_primitive[#{ha_service_primitive_name}]" + # Add pacemaker resource for neutron-l3-ha-service + # only if l3_ha is not enabled + ha_service_transaction_objects = [] - ha_service_location_name = openstack_pacemaker_controller_only_location_for( - ha_service_primitive_name - ) + pacemaker_primitive ha_service_primitive_name do + agent "systemd:neutron-l3-ha-service" + op node[:neutron][:ha][:neutron_l3_ha_resource][:op] + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + ha_service_transaction_objects << "pacemaker_primitive[#{ha_service_primitive_name}]" - ha_service_transaction_objects << "pacemaker_location[#{ha_service_location_name}]" + ha_service_location_name = openstack_pacemaker_controller_only_location_for( + ha_service_primitive_name + ) - pacemaker_transaction "neutron ha service" do - cib_objects ha_service_transaction_objects - # note that this will also automatically start the resources - action :commit_new - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + ha_service_transaction_objects << "pacemaker_location[#{ha_service_location_name}]" - rabbit_settings = fetch_rabbitmq_settings - - crowbar_pacemaker_order_only_existing "o-#{ha_service_primitive_name}" do - # While neutron-ha-tool technically doesn't directly depend on postgresql or - # rabbitmq, if these bits are not running, then neutron-server can run but - # can't do what it's being asked. 
Note that neutron-server does have a - # constraint on these services, but it's optional, not mandatory (because it - # doesn't need to be restarted when postgresql or rabbitmq are restarted). - # So explicitly depend on postgresql and rabbitmq (if they are in the cluster). - ordering "( postgresql #{rabbit_settings[:pacemaker_resource]} g-haproxy cl-neutron-server " \ - "#{l3_agent_clone} ) #{ha_service_primitive_name}" - score "Mandatory" - action :create - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + pacemaker_transaction "neutron ha service" do + cib_objects ha_service_transaction_objects + # note that this will also automatically start the resources + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + rabbit_settings = fetch_rabbitmq_settings + + crowbar_pacemaker_order_only_existing "o-#{ha_service_primitive_name}" do + # While neutron-ha-tool technically doesn't directly depend on postgresql or + # rabbitmq, if these bits are not running, then neutron-server can run but + # can't do what it's being asked. Note that neutron-server does have a + # constraint on these services, but it's optional, not mandatory (because it + # doesn't need to be restarted when postgresql or rabbitmq are restarted). + # So explicitly depend on postgresql and rabbitmq (if they are in the cluster). 
+ ordering "( postgresql #{rabbit_settings[:pacemaker_resource]} g-haproxy cl-neutron-server " \ + "#{l3_agent_clone} ) #{ha_service_primitive_name}" + score "Mandatory" + action :create + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end end end diff --git a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb index adfa2cb4ec..6c2c6d8079 100644 --- a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb @@ -1,5 +1,6 @@ [DEFAULT] interface_driver = <%= @interface_driver %> +ha_vrrp_auth_password = <%= @l3_ha_vrrp_password %> <% if @dvr_enabled -%> agent_mode = <%= @dvr_mode %> <% end -%> diff --git a/chef/cookbooks/neutron/templates/default/neutron.conf.erb b/chef/cookbooks/neutron/templates/default/neutron.conf.erb index fbd89e53ec..1de2e9042d 100644 --- a/chef/cookbooks/neutron/templates/default/neutron.conf.erb +++ b/chef/cookbooks/neutron/templates/default/neutron.conf.erb @@ -17,6 +17,10 @@ dhcp_agents_per_network = <%= @network_nodes_count %> <% if @dvr_enabled -%> router_distributed = True <% end -%> +<% if @l3_ha_enabled -%> +l3_ha = True +max_l3_agents_per_router = <%= (@network_nodes_count + 1) / 2 %> +<% end -%> debug = <%= @debug ? "True" : "False" %> verbose = <%= @verbose ? "True" : "False" %> log_dir = /var/log/neutron diff --git a/chef/data_bags/crowbar/migrate/neutron/123_add_use_l3_ha.rb b/chef/data_bags/crowbar/migrate/neutron/123_add_use_l3_ha.rb new file mode 100644 index 0000000000..ba3bc17421 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/123_add_use_l3_ha.rb @@ -0,0 +1,22 @@ +def upgrade(ta, td, a, d) + unless a.key? 
"l3_ha" + a["l3_ha"] = ta["l3_ha"] + + unless defined?(@@neutron_l3_ha_vrrp_password) + service = ServiceObject.new "fake-logger" + @@neutron_l3_ha_vrrp_password = service.random_password + end + + a["l3_ha"]["vrrp_password"] = @@neutron_l3_ha_vrrp_password + end + + return a, d +end + +def downgrade(ta, td, a, d) + unless ta.key?("l3_ha") + a.delete("l3_ha") + end + + return a, d +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 81fe61e9db..b7cc7b32d9 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -20,6 +20,10 @@ "agent_boot_time": 180 }, "use_dvr": false, + "l3_ha": { + "use_l3_ha": false, + "vrrp_password": "" + }, "additional_external_networks": [], "networking_plugin": "ml2", "ml2_mechanism_drivers": ["openvswitch"], @@ -190,7 +194,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 122, + "schema-revision": 123, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index a83c620632..6dac258e35 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -25,6 +25,10 @@ "agent_boot_time": { "type" : "int", "required" : true } }}, "use_dvr": { "type": "bool", "required": true }, + "l3_ha": { "type": "map", "required": true, "mapping": { + "use_l3_ha": { "type": "bool", "required": true }, + "vrrp_password": { "type": "str", "required": true } + }}, "additional_external_networks": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, "networking_plugin": { "type": "str", "required": true }, "ml2_mechanism_drivers": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, diff --git 
a/crowbar_framework/app/models/neutron_service.rb b/crowbar_framework/app/models/neutron_service.rb index ab863d17df..fc2d3b4d6d 100644 --- a/crowbar_framework/app/models/neutron_service.rb +++ b/crowbar_framework/app/models/neutron_service.rb @@ -124,6 +124,7 @@ def create_proposal base["attributes"]["neutron"]["service_password"] = random_password base["attributes"][@bc_name][:db][:password] = random_password + base["attributes"][@bc_name][:l3_ha][:vrrp_password] = random_password base end @@ -351,6 +352,12 @@ def validate_cisco_aci(proposal) end end + def validate_l3ha(proposal) + if proposal["attributes"]["neutron"]["l3_ha"]["enabled"] + validate_multiple_for_role_or_cluster proposal, "neutron-network" + end + end + def validate_external_networks(external_networks) net_svc = NetworkService.new @logger network_proposal = Proposal.find_by(barclamp: net_svc.bc_name, name: "default") @@ -394,6 +401,7 @@ def validate_proposal_after_save(proposal) validate_l2pop(proposal) validate_dvr(proposal) validate_cisco_aci(proposal) + validate_l3ha(proposal) if proposal[:attributes][:neutron][:use_infoblox] validate_infoblox(proposal) end From 6610d74e1bd9b91b61bffcbe45f065738c87122a Mon Sep 17 00:00:00 2001 From: Itxaka Date: Thu, 28 Feb 2019 15:02:52 +0100 Subject: [PATCH 186/207] rabbit: fix mirroring regex When the regex for mirroring queues was introduced on 341f6b059b33690e607e9a3da83cf250c97323d0 a typo was introduced which skipped some of the default queues. 
Restore it to the proper regex (cherry picked from commit 97c1e7d1814fafbcfc92a1b133d4abea4eca427a) --- chef/cookbooks/rabbitmq/recipes/rabbit.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index 6414100a5f..16a11c8115 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -152,8 +152,8 @@ quorum = 1 end - # don't mirror queues that are 'amqp.*' or '*_fanout_*' or `reply_*` in their names - queue_regex = "^(?!(amqp.)|(.*_fanout_)|(reply_)).*" + # don't mirror queues that are 'amq.*' or '*_fanout_*' or `reply_*` in their names + queue_regex = "^(?!(amq\.)|(.*_fanout_)|(reply_)).*" # policy doesnt need spaces between elements as they will be removed when listing them # making it more difficult to check for them policy = "{\"ha-mode\":\"exactly\",\"ha-params\":#{quorum},\"ha-sync-mode\":\"automatic\"}" From 57773a2b8372f79743a4ca87a10a0ec935328926 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Tue, 26 Mar 2019 09:03:28 +0100 Subject: [PATCH 187/207] ceilometer: Install package which contains cron file (bsc#1130414) Commit 2f441560b uses pacemaker to handle the cron symlink. But to work correctly, the link destination (/usr/share/ceilometer/openstack-ceilometer-expirer.cron) needs to be there and this file is in the openstack-ceilometer-collector package. So install the package to solve this. 
(cherry picked from commit a60f645f8c621ae7c57e793d0ce20a04d48db661) --- chef/cookbooks/ceilometer/recipes/server_ha.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/chef/cookbooks/ceilometer/recipes/server_ha.rb b/chef/cookbooks/ceilometer/recipes/server_ha.rb index 6eb5ecc867..493b55481b 100644 --- a/chef/cookbooks/ceilometer/recipes/server_ha.rb +++ b/chef/cookbooks/ceilometer/recipes/server_ha.rb @@ -23,6 +23,10 @@ action :nothing end.run_action(:create) +# install openstack-ceilometer-collector - the package contains the cron file +# /usr/share/ceilometer/openstack-ceilometer-expirer.cron +package "openstack-ceilometer-collector" + # setup the expirer cronjob only on a single node to not # run into DB deadlocks (bsc#1113107) crowbar_pacemaker_sync_mark "wait-ceilometer_expirer_cron" From c3394dd3b37cec13f112470766d434c1031a37d4 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 30 Jan 2018 13:44:19 +0100 Subject: [PATCH 188/207] ironic: Fix regression in helper Small fix to the env based handling of auth parameters. 
(cherry picked from commit 346606f9ef0e66b4d5d76faeb274cf6977fdf9b3) --- chef/cookbooks/ironic/libraries/helpers.rb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chef/cookbooks/ironic/libraries/helpers.rb b/chef/cookbooks/ironic/libraries/helpers.rb index d1b3d529ca..1a1c45a863 100644 --- a/chef/cookbooks/ironic/libraries/helpers.rb +++ b/chef/cookbooks/ironic/libraries/helpers.rb @@ -39,9 +39,9 @@ def swift_settings(node, glance) env = { "OS_USERNAME" => glance_keystone_settings["service_user"], - "OS_PASSWORD" => keystone_settings["service_password"], - "OS_PROJECT_NAME" => keystone_settings["service_tenant"], - "OS_AUTH_URL" => auth_url(keystone_settings), + "OS_PASSWORD" => glance_keystone_settings["service_password"], + "OS_PROJECT_NAME" => glance_keystone_settings["service_tenant"], + "OS_AUTH_URL" => auth_url(glance_keystone_settings), "OS_IDENTITY_API_VERSION" => "3" } insecure = swift[:swift][:ssl][:insecure] ? " --insecure" : "" From 8e11eca0094a1b88a9c032d7611d0cf6a90400a5 Mon Sep 17 00:00:00 2001 From: aojeagarcia Date: Fri, 15 Feb 2019 13:00:06 +0100 Subject: [PATCH 189/207] mysql: improve galera HA setup (bsc#1122875) This patch applies the option "on-marked-down shutdown-sessions" to our haproxy galera configuration to deal with the situation that happens keeping connections open when a backend is declared down by the health check but the mariadb server is still alive Depends-On: https://github.com/crowbar/crowbar-ha/pull/347 Signed-off-by: aojeagarcia (cherry picked from commit b1106b3ec54a6fc0f7dcfdf4fe9d37347e7ae344) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index b64aea4d47..ed551e4fd3 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -350,6 +350,8 @@ n["fall"] = 2 # lower the interval checking after first failure is found 
n["fastinter"] = 1000 + # shutdown connection when backend is marked down + n["on_marked_down_shutdown"] = true end haproxy_loadbalancer "galera" do From c4bd3f6549729ef0564824580fd0d7bf8c9078c6 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 17 Apr 2019 15:20:02 +0200 Subject: [PATCH 190/207] neutron: restart neutron-ha-tool when the config file changes When we toggle ssl/non-ssl in keystone, neutron-ha-tool gets a new config file but doesn't get restarted, so it just repeatedly crashes all the way until it causes a pacemaker failcount exceeded and then it's dead. We should do better. (cherry picked from commit 65bdb6c767f471cff11c0646a5bd9bf1f433fdea) --- chef/cookbooks/neutron/recipes/network_agents_ha.rb | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index d632be2970..0538e99401 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -106,6 +106,15 @@ ) end + service "neutron-l3-ha-service" do + supports status: true, restart: true + subscribes :restart, resources(file: "/etc/neutron/neutron-l3-ha-service.yaml") + subscribes :restart, resources(template: "/root/.openrc") + subscribes :restart, resources(file: "/etc/neutron/os_password") + + provider Chef::Provider::CrowbarPacemakerService + end + # Reload systemd when unit file changed bash "reload systemd after neutron-l3-ha-service update" do code "systemctl daemon-reload" From fb36f718a03bf1e11532196502ee21497e61f94e Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sat, 27 Apr 2019 10:06:10 +0200 Subject: [PATCH 191/207] neutron: use crm_resource restart for restarting neutron-l3-ha-service When we restart via systemd, it can happen that the service already crashed and then 'service neutron-l3-ha-service status" will (correctly) report that the service isn't running. 
And then the restart is not being executed due to: INFO: Ignoring restart action for neutron-l3-ha-service service since not running on this node (d52-54-77-77-01-01) which will then later cause a pacemaker failcount. the pacemaker resource restart hopefully does not have this problem. (cherry picked from commit 3615437f242b114e9ef6e958a200fef042e511e5) --- chef/cookbooks/neutron/recipes/network_agents_ha.rb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index 0538e99401..019a561191 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -107,10 +107,10 @@ end service "neutron-l3-ha-service" do - supports status: true, restart: true - subscribes :restart, resources(file: "/etc/neutron/neutron-l3-ha-service.yaml") - subscribes :restart, resources(template: "/root/.openrc") - subscribes :restart, resources(file: "/etc/neutron/os_password") + supports status: true, restart: true, restart_crm_resource: true + subscribes :restart, resources(file: "/etc/neutron/neutron-l3-ha-service.yaml"), :immediately + subscribes :restart, resources(template: "/root/.openrc"), :immediately + subscribes :restart, resources(file: "/etc/neutron/os_password"), :immediately provider Chef::Provider::CrowbarPacemakerService end From d51881759e1f4a8999e0bea263737bfe09cbd88e Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 24 Apr 2019 12:46:13 +0200 Subject: [PATCH 192/207] database: Raise and align promote/demote timeouts (bsc#1131791) Looks like 5 minutes is too short in some cases when a new node is joining. I have observed the 5 min timeout in a small cloud of 100 nodes already that has been operated for some time, so we might be better off being conservative. 
(cherry picked from commit 2f84ad631c82bf75f84b395929a2bb5780c7da61) --- chef/data_bags/crowbar/template-database.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 04f177be08..068fe86487 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -43,10 +43,10 @@ "timeout": "60s" }, "promote": { - "timeout": "300s" + "timeout": "600s" }, "demote": { - "timeout": "60s" + "timeout": "600s" } } } From 7a7cef50d3ab1032e20a9fe3561f4e2baa95ee53 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Tue, 14 May 2019 15:41:23 +0200 Subject: [PATCH 193/207] database: Make wsrep_provider_options configurable (fate#327745) It is sometimes useful to be able to add extra options (like gcache.size, gcs.fc_debug, ...) to the galera wsrep_provider_options configuration variable. This can now be done via the Crowbar RAW view. Also make (the currently hardcoded) "gcs.fc_limit" multiplier configurable. Also make (the currently hardcoded) "gcs.fc_factor" configurable. 
(cherry picked from commit 94eb55905b28e3407187f54d9ada4b7e27214704) --- chef/cookbooks/mysql/recipes/ha_galera.rb | 10 ++++++++-- .../mysql/templates/default/galera.cnf.erb | 2 +- .../111_make_wsrep_provider_options_configurable.rb | 13 +++++++++++++ chef/data_bags/crowbar/template-database.json | 5 ++++- chef/data_bags/crowbar/template-database.schema | 3 +++ 5 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/database/111_make_wsrep_provider_options_configurable.rb diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb index ed551e4fd3..5f04b62551 100644 --- a/chef/cookbooks/mysql/recipes/ha_galera.rb +++ b/chef/cookbooks/mysql/recipes/ha_galera.rb @@ -62,7 +62,10 @@ sstuser_password: "", expire_logs_days: node[:database][:mysql][:expire_logs_days], node_address: node_address, - wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads] + wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads], + gcs_fc_limit_multiplier: node[:database][:mysql][:gcs_fc_limit_multiplier], + gcs_fc_factor: node[:database][:mysql][:gcs_fc_factor], + wsrep_provider_options_custom: node[:database][:mysql][:wsrep_provider_options_custom].join(";") ) end @@ -143,7 +146,10 @@ sstuser_password: node[:database][:mysql][:sstuser_password], expire_logs_days: node[:database][:mysql][:expire_logs_days], node_address: node_address, - wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads] + wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads], + gcs_fc_limit_multiplier: node[:database][:mysql][:gcs_fc_limit_multiplier], + gcs_fc_factor: node[:database][:mysql][:gcs_fc_factor], + wsrep_provider_options_custom: node[:database][:mysql][:wsrep_provider_options_custom].join(";") ) end diff --git a/chef/cookbooks/mysql/templates/default/galera.cnf.erb b/chef/cookbooks/mysql/templates/default/galera.cnf.erb index d38f53aa40..8b282fe786 100644 --- 
a/chef/cookbooks/mysql/templates/default/galera.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/galera.cnf.erb @@ -3,7 +3,7 @@ wsrep_on = ON wsrep_provider = /usr/lib64/galera-3/libgalera_smm.so wsrep_cluster_address = "<%= @cluster_addresses %>" # values recommended by mysqltuner.pl -wsrep_provider_options = "gmcast.listen_addr=tcp://<%= @node_address %>:4567;gcs.fc_limit = <%= @wsrep_slave_threads * 5 %>;gcs.fc_factor = 0.8" +wsrep_provider_options = "gmcast.listen_addr=tcp://<%= @node_address %>:4567;gcs.fc_limit = <%= @wsrep_slave_threads * @gcs_fc_limit_multiplier %>;gcs.fc_factor = <%= @gcs_fc_factor %>;<%= @wsrep_provider_options_custom %>" wsrep_slave_threads = <%= @wsrep_slave_threads %> # Maximum number of rows in write set diff --git a/chef/data_bags/crowbar/migrate/database/111_make_wsrep_provider_options_configurable.rb b/chef/data_bags/crowbar/migrate/database/111_make_wsrep_provider_options_configurable.rb new file mode 100644 index 0000000000..d8316924dc --- /dev/null +++ b/chef/data_bags/crowbar/migrate/database/111_make_wsrep_provider_options_configurable.rb @@ -0,0 +1,13 @@ +def upgrade(template_attrs, template_deployment, attrs, deployment) + attrs["mysql"]["wsrep_provider_options_custom"] = template_attrs["mysql"]["wsrep_provider_options_custom"] unless attrs["mysql"]["wsrep_provider_options_custom"] + attrs["mysql"]["gcs_fc_limit_multiplier"] = template_attrs["mysql"]["gcs_fc_limit_multiplier"] unless attrs["mysql"]["gcs_fc_limit_multiplier"] + attrs["mysql"]["gcs_fc_factor"] = template_attrs["mysql"]["gcs_fc_factor"] unless attrs["mysql"]["gcs_fc_factor"] + return attrs, deployment +end + +def downgrade(template_attrs, template_deployment, attrs, deployment) + attrs["mysql"].delete("wsrep_provider_options_custom") unless template_attrs["mysql"].key?("wsrep_provider_options_custom") + attrs["mysql"].delete("gcs_fc_limit_multiplier") unless template_attrs["mysql"].key?("gcs_fc_limit_multiplier") + attrs["mysql"].delete("gcs_fc_factor") 
unless template_attrs["mysql"].key?("gcs_fc_factor") + return attrs, deployment +end diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 04f177be08..b7b35dac42 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -15,6 +15,9 @@ "expire_logs_days": 10, "bootstrap_timeout": 600, "wsrep_slave_threads" : 1, + "gcs_fc_limit_multiplier" : 5, + "gcs_fc_factor" : 0.8, + "wsrep_provider_options_custom" : [], "innodb_buffer_pool_size": 256, "innodb_tunings": [ "# log_file_size should be ~ 25% of buffer_pool_size", @@ -83,7 +86,7 @@ "database": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 110, + "schema-revision": 111, "element_states": { "database-server": [ "readying", "ready", "applying" ], "mysql-server": [ "readying", "ready", "applying" ] diff --git a/chef/data_bags/crowbar/template-database.schema b/chef/data_bags/crowbar/template-database.schema index 571799ea17..3c54de2980 100644 --- a/chef/data_bags/crowbar/template-database.schema +++ b/chef/data_bags/crowbar/template-database.schema @@ -32,6 +32,9 @@ "expire_logs_days": { "type": "int", "required": true }, "bootstrap_timeout": { "type": "int", "required": true }, "wsrep_slave_threads": { "type": "int", "required": true }, + "gcs_fc_limit_multiplier": { "type": "int", "required": true }, + "gcs_fc_factor": { "type": "float", "required": true }, + "wsrep_provider_options_custom": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, "ssl": { "type": "map", "required": true, "mapping": { "enabled": { "type": "bool", "required": true }, From 086216018065b9855b27bfb73d556e70f7e336b2 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Mon, 3 Jun 2019 19:39:46 +0200 Subject: [PATCH 194/207] rabbitmq: Fix ACL of SSL key after uid/gid change In shared storage based HA setup, rabbitmq uses fixed uid/gid=91. 
This user/group modification was done after (optional) SSL certificate generation. The ACLs on the SSL key were incorrect making rabbitmq unable to start with EACCES errors. --- chef/cookbooks/rabbitmq/recipes/ha.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chef/cookbooks/rabbitmq/recipes/ha.rb b/chef/cookbooks/rabbitmq/recipes/ha.rb index 1a8230ab6c..4d74d5febf 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha.rb @@ -77,6 +77,7 @@ # on anyway. static_uid = 91 static_gid = 91 +ssl_keyfile = node[:rabbitmq][:ssl][:keyfile] bash "assign static uid to rabbitmq" do code < /dev/null; @@ -86,6 +87,7 @@ chown rabbitmq:rabbitmq /var/run/rabbitmq /var/log/rabbitmq; chown rabbitmq:rabbitmq /var/run/rabbitmq/pid /var/log/rabbitmq/*.log* || :; chgrp rabbitmq /etc/rabbitmq/definitions.json; +test -e #{ssl_keyfile} && chgrp rabbitmq #{ssl_keyfile} || :; EOC # Make any error in the commands fatal flags "-e" From 375f8a8c12f3f4bf1f8bb0ef0b3f009e3b760386 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Fri, 31 May 2019 18:37:57 +0200 Subject: [PATCH 195/207] keystone: Use correct paths when syncing certs The sync failed when certs and/or keys were located in non-default paths. 
--- chef/cookbooks/keystone/recipes/server.rb | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index 1545a5bdf6..3c1de63629 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -347,9 +347,9 @@ ruby_block "synchronize signing keys for founder and remember them for non-HA case" do only_if { (!ha_enabled || (ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node))) } block do - ca = File.open("/etc/keystone/ssl/certs/ca.pem", "rb", &:read) - signing_cert = File.open("/etc/keystone/ssl/certs/signing_cert.pem", "rb", &:read) - signing_key = File.open("/etc/keystone/ssl/private/signing_key.pem", "rb", &:read) + ca = File.open(node[:keystone][:signing][:ca_certs], "rb", &:read) + signing_cert = File.open(node[:keystone][:signing][:certfile], "rb", &:read) + signing_key = File.open(node[:keystone][:signing][:keyfile], "rb", &:read) node[:keystone][:certificates] ||= {} node[:keystone][:certificates][:content] ||= {} @@ -376,9 +376,9 @@ ruby_block "synchronize signing keys for non-founder" do only_if { ha_enabled && !CrowbarPacemakerHelper.is_cluster_founder?(node) } block do - ca = File.open("/etc/keystone/ssl/certs/ca.pem", "rb", &:read) - signing_cert = File.open("/etc/keystone/ssl/certs/signing_cert.pem", "rb", &:read) - signing_key = File.open("/etc/keystone/ssl/private/signing_key.pem", "rb", &:read) + ca = File.open(node[:keystone][:signing][:ca_certs], "rb", &:read) + signing_cert = File.open(node[:keystone][:signing][:certfile], "rb", &:read) + signing_key = File.open(node[:keystone][:signing][:keyfile], "rb", &:read) founder = CrowbarPacemakerHelper.cluster_founder(node) @@ -390,19 +390,19 @@ # the code below dirty = false if ca != cluster_ca - File.open("/etc/keystone/ssl/certs/ca.pem", "w") { |f| + File.open(node[:keystone][:signing][:ca_certs], "w") { |f| f.write(cluster_ca) } 
dirty = true end if signing_cert != cluster_signing_cert - File.open("/etc/keystone/ssl/certs/signing_cert.pem", "w") { |f| + File.open(node[:keystone][:signing][:certfile], "w") { |f| f.write(cluster_signing_cert) } dirty = true end if signing_key != cluster_signing_key - File.open("/etc/keystone/ssl/private/signing_key.pem", "w") { |f| + File.open(node[:keystone][:signing][:keyfile], "w") { |f| f.write(cluster_signing_key) } dirty = true From 3ccd61987760b3b60eacb0d72bf0fdc24f10864d Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Wed, 29 May 2019 12:41:04 +0200 Subject: [PATCH 196/207] nova: Don't retry creating existing flavors In some cases the flavor create call succeeds but client still returns non-zero status. Retries of the create call fail with "Flavor already exists" and the retry loop never succeeds. Added check is executed in every loop turn and will stop retrying if the flavor already exists. Example scenario where flavor might be correctly created but client doesn't return zero is when one of HA nodes executes flavor create commands while others perform delayed restart of nova API after config files are modified. If the "create" request hits the API just before restart it could be accepted but the client might not get the correct response back. (cherry picked from commit 8085fb91bac64683ae8ee1eebd805c55d01966e7) --- chef/cookbooks/nova/recipes/flavors.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/chef/cookbooks/nova/recipes/flavors.rb b/chef/cookbooks/nova/recipes/flavors.rb index d54b1bf9d5..c61818f5ba 100644 --- a/chef/cookbooks/nova/recipes/flavors.rb +++ b/chef/cookbooks/nova/recipes/flavors.rb @@ -125,6 +125,8 @@ ) flavor_create.command command flavor_create.retries 5 + # don't retry after "Flavor with ID ... 
already exists" + flavor_create.not_if "#{openstack} flavor show #{id}" # delay the run of this resource until the end of the run run_context.notifies_delayed( @@ -143,6 +145,8 @@ ) flavor_create.command command flavor_create.retries 5 + # don't retry after "Flavor with ID ... already exists" + flavor_create.not_if "#{openstack} flavor show #{id}" # delay the run of this resource until the end of the run run_context.notifies_delayed( From 8eef6d1c6fda096062992327f0e715ce6e079b72 Mon Sep 17 00:00:00 2001 From: Jacek Tomasiak Date: Tue, 21 May 2019 11:03:02 +0200 Subject: [PATCH 197/207] neutron: Don't restart l3-ha on .openrc change l3-ha service doesn't use .openrc so it doesn't need to be restarted when that file is modified. (cherry picked from commit 56e4bed869ef6e9f610bb3b066727c3f79c0e8b5) --- chef/cookbooks/neutron/recipes/network_agents_ha.rb | 1 - 1 file changed, 1 deletion(-) diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index 019a561191..96d718ddec 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -109,7 +109,6 @@ service "neutron-l3-ha-service" do supports status: true, restart: true, restart_crm_resource: true subscribes :restart, resources(file: "/etc/neutron/neutron-l3-ha-service.yaml"), :immediately - subscribes :restart, resources(template: "/root/.openrc"), :immediately subscribes :restart, resources(file: "/etc/neutron/os_password"), :immediately provider Chef::Provider::CrowbarPacemakerService From d19ac50cace11b1940c9b1829c3f563dc283a0e6 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 22 May 2019 11:55:13 +0200 Subject: [PATCH 198/207] neutron: remove .openrc creation from neutron cookbooks This openrc should no longer be needed now that we have the neutron l3-ha-service which consumes its configuration from a yaml file. 
(cherry picked from commit aa20716d8117561af02da3c4c7e24793e1dc9f71) --- chef/cookbooks/neutron/recipes/network_agents_ha.rb | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index 96d718ddec..14880459e0 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -41,19 +41,6 @@ action :create end - # We need .openrc present at network node so the node can use neutron-ha-tool even - # when located in separate cluster - template "/root/.openrc" do - source "openrc.erb" - cookbook "keystone" - owner "root" - group "root" - mode 0o600 - variables( - keystone_settings: keystone_settings - ) - end - # skip neutron-ha-tool resource creation during upgrade unless CrowbarPacemakerHelper.being_upgraded?(node) From 5c24d2949b012c375566124198e20326d155681f Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 22 May 2019 12:01:58 +0200 Subject: [PATCH 199/207] neutron: increase interval between checks to 30s The agent status is anyway not very quickly updated, and if we retry too quickly we still might not have the underlying problem fixed. Let's be more conservative and check every 30s instead of 10s. 
(cherry picked from commit 59672c89a1845a7463a1093589b8424c63c39664) --- chef/cookbooks/neutron/attributes/default.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/attributes/default.rb b/chef/cookbooks/neutron/attributes/default.rb index 5c07fb56ba..c4c3953bfb 100644 --- a/chef/cookbooks/neutron/attributes/default.rb +++ b/chef/cookbooks/neutron/attributes/default.rb @@ -271,6 +271,6 @@ default[:neutron][:ha][:neutron_l3_ha_service][:timeouts][:router_migration][:kill] = 120 default[:neutron][:ha][:neutron_l3_ha_service][:hatool][:program] = "/usr/bin/neutron-ha-tool" default[:neutron][:ha][:neutron_l3_ha_service][:hatool][:env] = {} -default[:neutron][:ha][:neutron_l3_ha_service][:seconds_to_sleep_between_checks] = 10 +default[:neutron][:ha][:neutron_l3_ha_service][:seconds_to_sleep_between_checks] = 30 default[:neutron][:ha][:neutron_l3_ha_service][:max_errors_tolerated] = 10 default[:neutron][:ha][:neutron_l3_ha_service][:log_file] = "/var/log/neutron/neutron-l3-ha-service.log" From c0a4d9e49d042c80b118404a3472727c41403145 Mon Sep 17 00:00:00 2001 From: Zara Date: Fri, 10 May 2019 16:53:48 +0100 Subject: [PATCH 200/207] Make ovs of_inactivity_probe configurable from neutron barclamp This patch allows the user to change the ovs inactivity_probe timeout from the neutron barclamp, in the 'raw' view. Previously, this value was always set to the OVS default, 5. 
It provides crowbar support for this upstream patch: https://review.opendev.org/#/c/663024/ --- chef/cookbooks/neutron/recipes/common_agent.rb | 3 ++- .../templates/default/openvswitch_agent.ini.erb | 1 + .../neutron/124_add_ovs_of_inactivity_probe.rb | 15 +++++++++++++++ chef/data_bags/crowbar/template-neutron.json | 5 +++-- chef/data_bags/crowbar/template-neutron.schema | 3 ++- 5 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/124_add_ovs_of_inactivity_probe.rb diff --git a/chef/cookbooks/neutron/recipes/common_agent.rb b/chef/cookbooks/neutron/recipes/common_agent.rb index 1c68b1ec5b..9ae45615f7 100644 --- a/chef/cookbooks/neutron/recipes/common_agent.rb +++ b/chef/cookbooks/neutron/recipes/common_agent.rb @@ -276,7 +276,8 @@ tunnel_csum: neutron[:neutron][:ovs][:tunnel_csum], of_interface: neutron[:neutron][:ovs][:of_interface], ovsdb_interface: neutron[:neutron][:ovs][:ovsdb_interface], - bridge_mappings: bridge_mappings + bridge_mappings: bridge_mappings, + of_inactivity_probe: neutron[:neutron][:ovs][:of_inactivity_probe] ) end when ml2_mech_drivers.include?("linuxbridge") diff --git a/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb b/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb index f3e5083db1..5c074fd3d4 100644 --- a/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb @@ -26,5 +26,6 @@ of_interface = <%= @of_interface %> local_ip = <%= node.address("os_sdn").addr %> <% end -%> bridge_mappings = <%= @bridge_mappings %> +of_inactivity_probe = <%= @of_inactivity_probe %> [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver diff --git a/chef/data_bags/crowbar/migrate/neutron/124_add_ovs_of_inactivity_probe.rb b/chef/data_bags/crowbar/migrate/neutron/124_add_ovs_of_inactivity_probe.rb new file mode 100644 index 
0000000000..41af631153 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/124_add_ovs_of_inactivity_probe.rb @@ -0,0 +1,15 @@ +def upgrade(tattr, tdep, attr, dep) + unless attr["ovs"].key?("of_inactivity_probe") + attr["ovs"]["of_inactivity_probe"] = tattr["ovs"]["of_inactivity_probe"] + end + + return attr, dep +end + +def downgrade(tattr, tdep, attr, dep) + unless tattr["ovs"].key?("of_inactivity_probe") + attr["ovs"].delete("of_inactivity_probe") if attr.key?("ovs") + end + + return attr, dep +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index b7cc7b32d9..0d5a6fc4c1 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -43,7 +43,8 @@ "ovs": { "tunnel_csum": false, "of_interface": "native", - "ovsdb_interface": "native" + "ovsdb_interface": "native", + "of_inactivity_probe": 10 }, "apic": { "hosts": "", @@ -194,7 +195,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 123, + "schema-revision": 124, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index 6dac258e35..ef60e3881e 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -48,7 +48,8 @@ "ovs": { "type": "map", "required": true, "mapping": { "tunnel_csum": { "type": "bool", "required": true }, "ovsdb_interface": { "type": "str", "required": true }, - "of_interface": { "type": "str", "required": true } + "of_interface": { "type": "str", "required": true }, + "of_inactivity_probe": { "type": "int", "required": true } }}, "apic": { "type": "map", "required": true, "mapping": { "hosts": { "type" : "str", "required" : true }, From 85f4d511be55d7927bdf6c6334320e0ce3b1d16f Mon Sep 17 00:00:00 2001 From: 
Stefan Nica Date: Fri, 26 Jul 2019 20:02:55 +0200 Subject: [PATCH 201/207] magnum: retry flavor creation (SOC-9991) Creating magnum flavors is done as a delayed action, alongside all other delayed actions, such as restarting services. If one of those restarted services is apache (e.g. because it was triggered by another barclamp configuration change), then keystone and other API services might not be available right away, in which case the magnum flavor creation will fail. Re-attempting magnum flavor creation fixes this issue. --- chef/cookbooks/magnum/recipes/post_install.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chef/cookbooks/magnum/recipes/post_install.rb b/chef/cookbooks/magnum/recipes/post_install.rb index e58bed49fb..e8207ef35d 100644 --- a/chef/cookbooks/magnum/recipes/post_install.rb +++ b/chef/cookbooks/magnum/recipes/post_install.rb @@ -67,6 +67,8 @@ command "#{openstack_cmd} #{openstack_args_nova} flavor create --ram 1024 --disk 10 \ --vcpus 1 m1.magnum" not_if "#{openstack_cmd} #{openstack_args_nova} flavor list --all | grep -q m1.magnum" + retries 5 + retry_delay 10 action :nothing end From ceff8ba25452c1f0e9675247e9a6737f1dcf1c3e Mon Sep 17 00:00:00 2001 From: Stefan Nica Date: Tue, 30 Jul 2019 17:45:12 +0200 Subject: [PATCH 202/207] magnum: retry magnum image creation (SOC-10015) Creating the magnum image is done as a delayed action, alongside all other delayed actions, such as restarting services. If one of those restarted services is apache (e.g. because it was triggered by another barclamp configuration change), then keystone and other API services might not be available right away, in which case the magnum image creation will fail. Re-attempting magnum image creation fixes this issue. 
--- chef/cookbooks/magnum/recipes/post_install.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/chef/cookbooks/magnum/recipes/post_install.rb b/chef/cookbooks/magnum/recipes/post_install.rb index e8207ef35d..cbd42694f7 100644 --- a/chef/cookbooks/magnum/recipes/post_install.rb +++ b/chef/cookbooks/magnum/recipes/post_install.rb @@ -60,6 +60,8 @@ --container-format bare --public --property os_distro=opensuse \ #{service_sles_image_name}" not_if "#{openstack_cmd} #{openstack_args_glance} image list -f value -c Name | grep -q #{service_sles_image_name}" + retries 5 + retry_delay 10 action :nothing end From 39f3319fed35eacf893c3447a4a5464839819436 Mon Sep 17 00:00:00 2001 From: Johannes Grassler Date: Tue, 30 Jul 2019 16:05:15 +0200 Subject: [PATCH 203/207] nova: add max_threads_per_process tuneable (SOC-10001, bsc#1133719) In some cases, VMs may contain more threads than permissible by the systemd set default of 16000. In this case, this tuneable makes it possible to set a higher limit in qemu.conf through the Nova barclamp. (cherry picked from commit a62fd709b0a9417fc65eda773c11a1ddda47651d) Backport changes: migration renamed to 126_add_max_threads.rb and schema revision adjusted accordingly. 
--- chef/cookbooks/nova/recipes/compute.rb | 3 ++- chef/cookbooks/nova/templates/default/qemu.conf.erb | 9 +++++++++ .../crowbar/migrate/nova/126_add_max_threads.rb | 11 +++++++++++ chef/data_bags/crowbar/template-nova.json | 5 +++-- chef/data_bags/crowbar/template-nova.schema | 3 ++- 5 files changed, 27 insertions(+), 4 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/nova/126_add_max_threads.rb diff --git a/chef/cookbooks/nova/recipes/compute.rb b/chef/cookbooks/nova/recipes/compute.rb index 33c863a78f..bfec1c30a7 100644 --- a/chef/cookbooks/nova/recipes/compute.rb +++ b/chef/cookbooks/nova/recipes/compute.rb @@ -200,7 +200,8 @@ mode 0644 variables( user: libvirt_user, - group: libvirt_group + group: libvirt_group, + max_threads_per_process: node[:nova][:kvm][:max_threads_per_process] ) notifies :create, "ruby_block[restart_libvirtd]", :immediately end diff --git a/chef/cookbooks/nova/templates/default/qemu.conf.erb b/chef/cookbooks/nova/templates/default/qemu.conf.erb index d5c9cadc83..12aba019d5 100644 --- a/chef/cookbooks/nova/templates/default/qemu.conf.erb +++ b/chef/cookbooks/nova/templates/default/qemu.conf.erb @@ -400,7 +400,16 @@ group = "<%= @group %>" #max_processes = 0 #max_files = 0 +# If max_threads_per_process is set to a positive integer, libvirt +# will use it to set the maximum number of threads that can be +# created by a qemu process. Some VM configurations can result in +# qemu processes with tens of thousands of threads. systemd-based +# systems typically limit the number of threads per process to +# 16k. max_threads_per_process can be used to override default +# limits in the host OS. +# +max_threads_per_process = <%= @max_threads_per_process %> # mac_filter enables MAC addressed based filtering on bridge ports. # This currently requires ebtables to be installed. 
diff --git a/chef/data_bags/crowbar/migrate/nova/126_add_max_threads.rb b/chef/data_bags/crowbar/migrate/nova/126_add_max_threads.rb new file mode 100644 index 0000000000..a1703e730b --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/126_add_max_threads.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "max_threads_per_process" + attributes["kvm"][key] = template_attributes["kvm"][key] unless attributes["kvm"].key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "max_threads_per_process" + attributes["kvm"].delete(key) unless template_attributes["kvm"].key? key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/template-nova.json b/chef/data_bags/crowbar/template-nova.json index 4c67ddcfa9..7f1fc1c466 100644 --- a/chef/data_bags/crowbar/template-nova.json +++ b/chef/data_bags/crowbar/template-nova.json @@ -77,7 +77,8 @@ "kvm": { "nested_virt": false, "ksm_enabled": false, - "disk_cachemodes": "network=writeback" + "disk_cachemodes": "network=writeback", + "max_threads_per_process": 0 }, "vcenter": { "host": "", @@ -182,7 +183,7 @@ "nova": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 125, + "schema-revision": 126, "element_states": { "nova-controller": [ "readying", "ready", "applying" ], "nova-compute-ironic": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index 00617a744d..c4d2224d20 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -135,7 +135,8 @@ "type": "map", "required": true, "mapping": { "nested_virt": { "type": "bool", "required": false }, "ksm_enabled": { "type": "bool", "required": true }, - "disk_cachemodes": { "type": "str", "required": true } + "disk_cachemodes": { "type": "str", "required": true }, + 
"max_threads_per_process": { "type": "int", "required": true } } }, "vcenter": { From 31c88cfa42209d79b3476672a399ba4a062d66b9 Mon Sep 17 00:00:00 2001 From: Darragh O'Reilly Date: Fri, 16 Aug 2019 15:11:36 +0100 Subject: [PATCH 204/207] neutron: restore dhcp_domain in stable/4.0 (bsc#1145867) It was replaced with dns_domain in 9e96051, but newton neutron dhcp needs dhcp_domain. --- chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb index a64362d700..06afca44ad 100644 --- a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb @@ -10,7 +10,7 @@ enable_isolated_metadata = <%= @enable_isolated_metadata %> enable_metadata_network = <%= @enable_metadata_network %> <% end -%> force_metadata = <%= @force_metadata %> -dns_domain = <%= @dns_domain %> +dhcp_domain = <%= @dhcp_domain %> <% if @nameservers -%> dnsmasq_dns_servers = <%= @nameservers %> <% end -%> From bf86b2a8e8a8388ba5e27b240a83a71c62f09dca Mon Sep 17 00:00:00 2001 From: Rick Salevsky Date: Mon, 26 Aug 2019 16:14:37 +0200 Subject: [PATCH 205/207] database: Hardcode ruby version for package installation (SOC-10010) Sometimes there is a race condition and ohai didn't collect the ruby version. to_f evalutes then the version to 0.0 and zypper fails to install the rubygem `ruby0.0-rubygem-cstruct' not found in package names`. 
(cherry picked from commit 6c02091fc2ffbadfb8fa12f7537b91586e6e112b) --- chef/cookbooks/mysql/recipes/client.rb | 2 +- chef/cookbooks/postgresql/attributes/default.rb | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/chef/cookbooks/mysql/recipes/client.rb b/chef/cookbooks/mysql/recipes/client.rb index 435a6f295f..7728175d0a 100644 --- a/chef/cookbooks/mysql/recipes/client.rb +++ b/chef/cookbooks/mysql/recipes/client.rb @@ -30,7 +30,7 @@ package "mysql-ruby" do package_name value_for_platform_family( ["rhel", "fedora"] => "ruby-mysql", - "suse" => "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-mysql2", + "suse" => "ruby2.1-rubygem-mysql2", "default" => "libmysql-ruby" ) action :install diff --git a/chef/cookbooks/postgresql/attributes/default.rb b/chef/cookbooks/postgresql/attributes/default.rb index 95f37ded28..60ec5cf9e4 100644 --- a/chef/cookbooks/postgresql/attributes/default.rb +++ b/chef/cookbooks/postgresql/attributes/default.rb @@ -132,14 +132,18 @@ default["postgresql"]["contrib"]["packages"] = ["postgresql-contrib"] when node["platform_version"].to_f < 12.0 default["postgresql"]["version"] = "9.1" - default["postgresql"]["client"]["packages"] = ["postgresql91", - "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-pg"] + default["postgresql"]["client"]["packages"] = [ + "postgresql91", + "ruby2.1-rubygem-pg" + ] default["postgresql"]["server"]["packages"] = ["postgresql91-server"] default["postgresql"]["contrib"]["packages"] = ["postgresql91-contrib"] when node["platform_version"].to_f == 12.0 default["postgresql"]["version"] = "9.3" - default["postgresql"]["client"]["packages"] = ["postgresql93", - "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-pg"] + default["postgresql"]["client"]["packages"] = [ + "postgresql93", + "ruby2.1-rubygem-pg" + ] default["postgresql"]["server"]["packages"] = ["postgresql93-server"] default["postgresql"]["contrib"]["packages"] = ["postgresql93-contrib"] else @@ 
-160,7 +164,7 @@ default["postgresql"]["version"] = "9.4" default["postgresql"]["client"]["packages"] = [ "postgresql94", - "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-pg" + "ruby2.1-rubygem-pg" ] default["postgresql"]["server"]["packages"] = ["postgresql94-server"] default["postgresql"]["contrib"]["packages"] = ["postgresql94-contrib"] From 914b93cf7546a050f86ec62a2ea8e0743bebee68 Mon Sep 17 00:00:00 2001 From: Madhu Mohan Nelemane Date: Mon, 2 Sep 2019 15:47:55 +0200 Subject: [PATCH 206/207] Changes to integrate with ACI 4.1 and new packages (SOC-10403) This commit provides changes in plugin packages and config files needed for integration of SOC with ACI 4.1 and higher versions. ACI 4.1 uses a slightly different set of plugin packages and configs for integration with OpenStack. This includes: - python-gbpclient renamed to python-group-based-policy-client - ovs-bridge-name in opflex-agent-ovs.conf removed - addition of int-bridge-name and access-bridge-name in opflex-agent-ovs.conf - Renaming of agent-ovs to opflex-agent For uniformity, the template for opflex-agent-ovs.conf is now renamed from 10-opflex-agent-ovs.conf.erb to opflex-agent-ovs.conf.erb - The neutron template schema and json templates are updated to provide integration_bridge and access_bridge details with default values. The corresponding migration scripts are also updated. 
(cherry picked from commit cb5347d6a47565c2ef1aebf10ef80d980114e046) --- chef/cookbooks/neutron/attributes/default.rb | 11 ++++++---- .../neutron/recipes/cisco_apic_agents.rb | 7 ++++--- ...ovs.conf.erb => opflex-agent-ovs.conf.erb} | 3 ++- ...25_add_opflex_access_integration_bridge.rb | 21 +++++++++++++++++++ chef/data_bags/crowbar/template-neutron.json | 6 ++++-- .../data_bags/crowbar/template-neutron.schema | 2 ++ 6 files changed, 40 insertions(+), 10 deletions(-) rename chef/cookbooks/neutron/templates/default/{10-opflex-agent-ovs.conf.erb => opflex-agent-ovs.conf.erb} (93%) create mode 100644 chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb diff --git a/chef/cookbooks/neutron/attributes/default.rb b/chef/cookbooks/neutron/attributes/default.rb index c4c3953bfb..e58ecf78e8 100644 --- a/chef/cookbooks/neutron/attributes/default.rb +++ b/chef/cookbooks/neutron/attributes/default.rb @@ -32,6 +32,9 @@ default[:neutron][:metadata_agent_config_file] = "/etc/neutron/neutron-metadata-agent.conf.d/100-metadata_agent.conf" default[:neutron][:ml2_config_file] = "/etc/neutron/neutron.conf.d/110-ml2.conf" default[:neutron][:nsx_config_file] = "/etc/neutron/neutron.conf.d/110-nsx.conf" +default[:neutron][:ml2_cisco_config_file] = "/etc/neutron/neutron.conf.d/115-ml2_cisco.conf" +default[:neutron][:ml2_cisco_apic_config_file] = "/etc/neutron/neutron.conf.d/115-ml2_cisco_apic.conf" +default[:neutron][:opflex_config_file] = "/etc/opflex-agent-ovs/conf.d/10-opflex-agent-ovs.conf" default[:neutron][:rpc_workers] = 1 default[:neutron][:db][:database] = "neutron" @@ -126,8 +129,8 @@ cisco_apic_pkgs: ["python-apicapi", "python-neutron-ml2-driver-apic"], cisco_apic_gbp_pkgs: ["openstack-neutron-gbp", - "python-gbpclient"], - cisco_opflex_pkgs: ["agent-ovs", + "python-group-based-policy-client"], + cisco_opflex_pkgs: ["opflex-agent", "lldpd", "openstack-neutron-opflex-agent"], infoblox_pkgs: ["python-infoblox-client", @@ -172,8 +175,8 @@ 
cisco_apic_pkgs: ["python-apicapi", "python-neutron-ml2-driver-apic"], cisco_apic_gbp_pkgs: ["openstack-neutron-gbp", - "python-gbpclient"], - cisco_opflex_pkgs: ["agent-ovs", + "python-group-based-policy-client"], + cisco_opflex_pkgs: ["opflex-agent", "lldpd", "neutron-opflex-agent"], infoblox_pkgs: [], diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb index 5351655984..c1d8acd4cd 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb @@ -92,15 +92,14 @@ end # Update config file from template -opflex_agent_conf = "/etc/opflex-agent-ovs/conf.d/10-opflex-agent-ovs.conf" apic = neutron[:neutron][:apic] opflex_list = apic[:opflex].select { |i| i[:nodes].include? node[:hostname] } opflex_list.any? || raise("Opflex instance not found for node '#{node[:hostname]}'") opflex_list.one? || raise("Multiple opflex instances found for node '#{node[:hostname]}'") opflex = opflex_list.first -template opflex_agent_conf do +template node[:neutron][:opflex_config_file] do cookbook "neutron" - source "10-opflex-agent-ovs.conf.erb" + source "opflex-agent-ovs.conf.erb" mode "0755" owner "root" group neutron[:neutron][:platform][:group] @@ -110,6 +109,8 @@ socketgroup: neutron[:neutron][:platform][:group], opflex_peer_ip: opflex[:peer_ip], opflex_peer_port: opflex[:peer_port], + opflex_int_bridge: opflex[:integration_bridge], + opflex_access_bridge: opflex[:access_bridge], opflex_vxlan_encap_iface: opflex[:vxlan][:encap_iface], opflex_vxlan_uplink_iface: opflex[:vxlan][:uplink_iface], opflex_vxlan_uplink_vlan: opflex[:vxlan][:uplink_vlan], diff --git a/chef/cookbooks/neutron/templates/default/10-opflex-agent-ovs.conf.erb b/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb similarity index 93% rename from chef/cookbooks/neutron/templates/default/10-opflex-agent-ovs.conf.erb rename to 
chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb index 28f504218d..b03e7a3b25 100644 --- a/chef/cookbooks/neutron/templates/default/10-opflex-agent-ovs.conf.erb +++ b/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb @@ -36,7 +36,8 @@ "renderers": { "stitched-mode": { - "ovs-bridge-name": "br-int", + "int-bridge-name": "<%= @opflex_int_bridge %>", + "access-bridge-name": "<%= @opflex_access_bridge %>", "encap": { "vxlan" : { "encap-iface": "<%= @opflex_vxlan_encap_iface %>", diff --git a/chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb b/chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb new file mode 100644 index 0000000000..2e781a2005 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb @@ -0,0 +1,21 @@ +def upgrade(tattr, tdep, attr, dep) + unless attr["apic"]["opflex"].key?("integration_bridge") + attr["apic"]["opflex"]["integration_bridge"] = tattr["apic"]["opflex"]["integration_bridge"] + end + unless attr["apic"]["opflex"].key?("access_bridge") + attr["apic"]["opflex"]["access_bridge"] = tattr["apic"]["opflex"]["access_bridge"] + end + + return attr, dep +end + +def downgrade(tattr, tdep, attr, dep) + unless tattr["apic"]["opflex"].key?("integration_bridge") + attr["apic"]["opflex"].delete("integration_bridge") if attr.key?("integration_bridge") + end + unless tattr["apic"]["opflex"].key?("access_bridge") + attr["apic"]["opflex"].delete("access_bridge") if attr.key?("access_bridge") + end + + return attr, dep +end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 0d5a6fc4c1..76a425b27d 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -65,8 +65,10 @@ "peer_ip": "", "peer_port": 8009, "encap": "vxlan", + "integration_bridge": "br-int", + "access_bridge": "br-fabric", "vxlan": { - 
"encap_iface": "br-int_vxlan0", + "encap_iface": "br-fab_vxlan0", "uplink_iface": "vlan.4093", "uplink_vlan": 4093, "remote_ip": "", @@ -195,7 +197,7 @@ "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 124, + "schema-revision": 125, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], "neutron-network": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index ef60e3881e..1897bce036 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -73,6 +73,8 @@ "peer_ip": { "type": "str", "required" : true }, "peer_port": { "type": "int", "required" : true }, "encap": { "type": "str", "required": true }, + "integration_bridge": { "type": "str", "required": true }, + "access_bridge": { "type": "str", "required": true }, "vxlan": { "type": "map", "required": true, "mapping" : { "encap_iface": {"type": "str", "required": true }, "uplink_iface": { "type": "str", "required": true }, From dacd4ee28994f25e8333513b7979bbe77c8ba54f Mon Sep 17 00:00:00 2001 From: Varadhan Veerapuram Date: Thu, 1 Feb 2018 18:15:30 +0530 Subject: [PATCH 207/207] [neutron][Cisco ACI] Multi-VMM domain support (SOC - 10471) A Single ACI fabric can support multiple VMM domains. Each VMM domain can be governed by a different controller (Eg: VMWare vCenter or OpenStack or MicroSoft SCVMM). Several production data centers tend to use multiple VMM domains and expect to be able to monitor and control network policies from a single ACI fabric. Integration of OpenStack with such a setup requires crowbar to provide parameters specific to each VMM domain. This commit adds the additional parameters and logic to validate and send these to the correct config location. The changes now allow to provide "Vmware" or "OpenStack" as the VMM type. Multiple entries of either types are possible. 
- Also added "ssl_mode" as a configurable parameter which is needed to be in "encrypted" mode if ESXi is used as compute. Other use-cases may need to change it as required and hence included it as a configurable parameter within the opflex node structure. (cherry picked from commit 1f164360fda298d36f2e3ab982cd7e3f126b3a3e) --- .../neutron/recipes/cisco_apic_agents.rb | 7 ++++--- .../neutron/recipes/cisco_apic_support.rb | 7 ++++++- chef/cookbooks/neutron/recipes/server.rb | 4 ++-- .../default/ml2_conf_cisco_apic.ini.erb | 15 ++++++++++--- .../default/opflex-agent-ovs.conf.erb | 2 +- .../neutron/125_add_apic_multi_vmm_domains.rb | 15 +++++++++++++ ...25_add_opflex_access_integration_bridge.rb | 21 ------------------- chef/data_bags/crowbar/template-neutron.json | 13 +++++++++++- .../data_bags/crowbar/template-neutron.schema | 10 ++++++++- 9 files changed, 61 insertions(+), 33 deletions(-) create mode 100644 chef/data_bags/crowbar/migrate/neutron/125_add_apic_multi_vmm_domains.rb delete mode 100644 chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb index c1d8acd4cd..7a74d4a9e4 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb @@ -109,6 +109,7 @@ socketgroup: neutron[:neutron][:platform][:group], opflex_peer_ip: opflex[:peer_ip], opflex_peer_port: opflex[:peer_port], + opflex_ssl_mode: opflex[:ssl_mode], opflex_int_bridge: opflex[:integration_bridge], opflex_access_bridge: opflex[:access_bridge], opflex_vxlan_encap_iface: opflex[:vxlan][:encap_iface], @@ -133,8 +134,8 @@ end utils_systemd_service_restart "neutron-opflex-agent" -service "agent-ovs" do +service "opflex-agent" do action [:enable, :start] - subscribes :restart, resources("template[#{opflex_agent_conf}]") + subscribes :restart, 
resources("template[#{node[:neutron][:opflex_config_file]}]") end -utils_systemd_service_restart "agent-ovs" +utils_systemd_service_restart "opflex-agent" diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb index f08d9e50c4..72ea20abaf 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb @@ -21,7 +21,9 @@ end aciswitches = node[:neutron][:apic][:apic_switches].to_hash -template "/etc/neutron/neutron-server.conf.d/100-ml2_conf_cisco_apic.ini.conf" do +acivmms = node[:neutron][:apic][:apic_vmms] + +template node[:neutron][:ml2_cisco_apic_config_file] do cookbook "neutron" source "ml2_conf_cisco_apic.ini.erb" mode "0640" @@ -30,6 +32,9 @@ variables( vpc_pairs: node[:neutron][:apic][:vpc_pairs], apic_switches: aciswitches, + optimized_dhcp: node[:neutron][:apic][:optimized_dhcp], + optimized_metadata: node[:neutron][:apic][:optimized_metadata], + apic_vmms: acivmms, ml2_mechanism_drivers: node[:neutron][:ml2_mechanism_drivers], policy_drivers: "implicit_policy,apic", default_ip_pool: "192.168.0.0/16" diff --git a/chef/cookbooks/neutron/recipes/server.rb b/chef/cookbooks/neutron/recipes/server.rb index 35f2f8175e..3f156bae75 100644 --- a/chef/cookbooks/neutron/recipes/server.rb +++ b/chef/cookbooks/neutron/recipes/server.rb @@ -85,7 +85,7 @@ else cisco_nexus_link_action = "delete" end -link "/etc/neutron/neutron-server.conf.d/100-ml2_conf_cisco.ini.conf" do +link "#{node[:neutron][:platform][:ml2_cisco_config_file]}" do to "/etc/neutron/plugins/ml2/ml2_conf_cisco.ini" action cisco_nexus_link_action notifies :restart, "service[#{node[:neutron][:platform][:service_name]}]" @@ -99,7 +99,7 @@ else cisco_apic_link_action = "delete" end -link "/etc/neutron/neutron-server.conf.d/100-ml2_conf_cisco_apic.ini.conf" do +link "#{node[:neutron][:platform][:ml2_cisco_apic_config_file]}" do to "/etc/neutron/plugins/ml2/ml2_conf_cisco_apic.ini" 
action cisco_apic_link_action notifies :restart, "service[#{node[:neutron][:platform][:service_name]}]" diff --git a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb index 30e24dc022..421c8bebbc 100644 --- a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb +++ b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb @@ -2,7 +2,7 @@ apic_system_id=<%= node[:neutron][:apic][:system_id] %> [opflex] networks = * -[ml2_cisco_apic] +[apic] apic_hosts=<%= node[:neutron][:apic][:hosts] %> apic_username=<%= node[:neutron][:apic][:username] %> apic_password=<%= node[:neutron][:apic][:password] %> @@ -11,8 +11,8 @@ apic_name_mapping = use_name apic_clear_node_profiles = True enable_aci_routing = True apic_arp_flooding = True -enable_optimized_metadata = <%= node[:neutron][:apic][:optimized_metadata] %> -enable_optimized_dhcp = <%= node[:neutron][:apic][:optimized_dhcp] %> +enable_optimized_metadata = <%= @optimized_metadata %> +enable_optimized_dhcp = <%= @optimized_dhcp %> apic_provision_infra = True apic_provision_hostlinks = True <% unless @vpc_pairs.nil? 
-%> @@ -41,3 +41,12 @@ enable_nat = <%= node[:neutron][:apic][:ext_net][:nat_enabled] %> <% end -%> external_epg = <%= node[:neutron][:apic][:ext_net][:ext_epg] %> host_pool_cidr = <%= node[:neutron][:apic][:ext_net][:host_pool_cidr] %> + +<% @apic_vmms.each do |vmm_domain| -%> +[apic_vmdom:<%= vmm_domain[:vmm_name]%>] +vmm_type = <%= vmm_domain[:vmm_type]%> +<% if vmm_domain[:vlan_ranges] -%> +vlan_ranges = <%= vmm_domain[:vlan_ranges] %> +<% end -%> +<% end -%> + diff --git a/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb b/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb index b03e7a3b25..45eb74dcbb 100644 --- a/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb +++ b/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb @@ -10,7 +10,7 @@ {"hostname": "<%= @opflex_peer_ip %>", "port": "<%= @opflex_peer_port %>"} ], "ssl": { - "mode": "enabled", + "mode": "<%= @opflex_ssl_mode %>", "ca-store": "/etc/ssl/certs/" }, "inspector": { diff --git a/chef/data_bags/crowbar/migrate/neutron/125_add_apic_multi_vmm_domains.rb b/chef/data_bags/crowbar/migrate/neutron/125_add_apic_multi_vmm_domains.rb new file mode 100644 index 0000000000..ac15244d9d --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/125_add_apic_multi_vmm_domains.rb @@ -0,0 +1,15 @@ +def upgrade(tattr, tdep, attr, dep) + unless attr["apic"].key?("apic_vmms") + attr["apic"]["apic_vmms"] = tattr["apic"]["apic_vmms"] + end + + return attr, dep +end + +def downgrade(tattr, tdep, attr, dep) + unless tattr["apic"].key?("apic_vmms") + attr["apic"].delete("apic_vmms") if attr.key?("apic_vmms") + end + + return attr, dep +end diff --git a/chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb b/chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb deleted file mode 100644 index 2e781a2005..0000000000 --- a/chef/data_bags/crowbar/migrate/neutron/125_add_opflex_access_integration_bridge.rb 
+++ /dev/null @@ -1,21 +0,0 @@ -def upgrade(tattr, tdep, attr, dep) - unless attr["apic"]["opflex"].key?("integration_bridge") - attr["apic"]["opflex"]["integration_bridge"] = tattr["apic"]["opflex"]["integration_bridge"] - end - unless attr["apic"]["opflex"].key?("access_bridge") - attr["apic"]["opflex"]["access_bridge"] = tattr["apic"]["opflex"]["access_bridge"] - end - - return attr, dep -end - -def downgrade(tattr, tdep, attr, dep) - unless tattr["apic"]["opflex"].key?("integration_bridge") - attr["apic"]["opflex"].delete("integration_bridge") if attr.key?("integration_bridge") - end - unless tattr["apic"]["opflex"].key?("access_bridge") - attr["apic"]["opflex"].delete("access_bridge") if attr.key?("access_bridge") - end - - return attr, dep -end diff --git a/chef/data_bags/crowbar/template-neutron.json b/chef/data_bags/crowbar/template-neutron.json index 76a425b27d..7e30ee04ee 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -64,6 +64,7 @@ "nodes" : [], "peer_ip": "", "peer_port": 8009, + "ssl_mode": "encrypted", "encap": "vxlan", "integration_bridge": "br-int", "access_bridge": "br-fabric", @@ -99,7 +100,17 @@ } } } - } + }, + "apic_vmms": [{ + "vmm_name": "soc_kvm_domain", + "vmm_type": "openstack", + "vlan_ranges": "" + }, + { + "vmm_name": "soc_vm_domain", + "vmm_type": "vmware", + "vlan_ranges": "" + }] }, "allow_overlapping_ips": true, "use_syslog": false, diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index 1897bce036..55da58a801 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -72,6 +72,7 @@ "nodes": { "type" : "seq", "required" : true, "sequence": [ { "type": "str" } ] }, "peer_ip": { "type": "str", "required" : true }, "peer_port": { "type": "int", "required" : true }, + "ssl_mode": { "type": "str", "required": true }, "encap": { "type": "str", "required": true 
}, "integration_bridge": { "type": "str", "required": true }, "access_bridge": { "type": "str", "required": true }, @@ -95,7 +96,14 @@ }} }} }} - } + }, + "apic_vmms": { "type" : "seq", "required" : true, "sequence" : [ { + "type" : "map", "required" : true, "mapping" : { + "vmm_name": { "type": "str", "required": true }, + "vmm_type": { "type": "str", "required": true }, + "vlan_ranges": { "type": "str", "required": true } + } + } ] } }}, "allow_overlapping_ips": { "type": "bool", "required": true }, "cisco_switches": {