diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c52ae28de..45d4fc008 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,7 +53,7 @@ $ py.test --cov ./ --cov-report=html $ open htmlcov/index.html ``` -If you are running our functional tests you will need a real BIG-IP® to run +If you are running our functional tests you will need a real BIG-IP to run them against, but you can get one of those pretty easily in [Amazon EC2](https://aws.amazon.com/marketplace/pp/B00JL3UASY/ref=srh_res_product_title?ie=UTF8&sr=0-10&qid=1449332167461). ## License @@ -72,5 +72,5 @@ See the License for the specific language governing permissions and limitations under the License. ### Contributor License Agreement -Individuals or business entities who contribute to this project must have completed and submitted the [F5® Contributor License Agreement](http://f5-openstack-docs.readthedocs.org/en/latest/cla_landing.html) to Openstack_CLA@f5.com prior to their code submission being included in this project. +Individuals or business entities who contribute to this project must have completed and submitted the [F5 Contributor License Agreement](http://f5-openstack-docs.readthedocs.org/en/latest/cla_landing.html) to Openstack_CLA@f5.com prior to their code submission being included in this project. 
diff --git a/dev_install b/dev_install new file mode 100755 index 000000000..cd1e6814b --- /dev/null +++ b/dev_install @@ -0,0 +1,5 @@ +git init +python setup.py install + +dumb-init neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr1k.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini +#python /var/lib/openstack/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/neutron_lbaas.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_f5.ini --config-file /etc/neutron/plugins/ml2/ml2-conf-aci.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_manila.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_arista.ini --config-file /etc/neutron/plugins/ml2/ml2_conf_asr1k.ini --config-file /etc/neutron/plugins/cisco/cisco_device_manager_plugin.ini --config-file /etc/neutron/plugins/cisco/cisco_router_plugin.ini \ No newline at end of file diff --git a/docs/_static/f5-openstack-agent.gre.ini b/docs/_static/f5-openstack-agent.gre.ini index 506855004..d34432583 100644 --- a/docs/_static/f5-openstack-agent.gre.ini +++ b/docs/_static/f5-openstack-agent.gre.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. 
This allows you set this string. The default is # 'project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id. 
# -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -166,7 +166,7 @@ f5_external_physical_mappings = default:1.1:True # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format -# selfip addresses, one per BIG-IP® device, to use for VTEP addresses. +# selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -210,10 +210,10 @@ advertised_tunnel_types = gre # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if they BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. 
# l2_population = True @@ -222,13 +222,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -239,22 +239,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. 
The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -263,12 +263,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = False # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -318,14 +318,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -363,16 +363,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. 
The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -391,7 +391,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -411,7 +411,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -430,7 +430,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -443,17 +443,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. 
-# If order to access devices' iControl® interfaces via +# If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 @@ -503,7 +503,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/_static/f5-openstack-agent.grm.ini b/docs/_static/f5-openstack-agent.grm.ini index 45b878c8a..4d4cfedab 100644 --- a/docs/_static/f5-openstack-agent.grm.ini +++ b/docs/_static/f5-openstack-agent.grm.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. This allows you set this string. 
The default is # 'Project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id. 
# -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -165,7 +165,7 @@ f5_external_physical_mappings = default:1.1:True # # Device Tunneling (VTEP) Self IPs # -# This is the name of a BIG-IP® self IP address to use for VTEP addresses. +# This is the name of a BIG-IP self IP address to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -209,10 +209,10 @@ f5_external_physical_mappings = default:1.1:True # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if they BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. 
# l2_population = True @@ -221,13 +221,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -238,22 +238,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. 
The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -262,12 +262,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = True # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -317,14 +317,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -362,16 +362,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. 
The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -390,7 +390,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -410,7 +410,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -429,7 +429,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -442,17 +442,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. 
-# If order to access devices' iControl® interfaces via +# If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 @@ -502,7 +502,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # \ No newline at end of file diff --git a/docs/_static/f5-openstack-agent.vlan.ini b/docs/_static/f5-openstack-agent.vlan.ini index 3f2aa4d11..fb863bb89 100644 --- a/docs/_static/f5-openstack-agent.vlan.ini +++ b/docs/_static/f5-openstack-agent.vlan.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. 
This allows you set this string. The default is # 'project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id. 
# -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -166,7 +166,7 @@ f5_external_physical_mappings = default:1.1:True # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format -# selfip addresses, one per BIG-IP® device, to use for VTEP addresses. +# selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -210,10 +210,10 @@ advertised_tunnel_types = # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if they BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. 
# l2_population = True @@ -222,13 +222,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -239,22 +239,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. 
The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -263,12 +263,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = False # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -318,14 +318,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -363,16 +363,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. 
The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -391,7 +391,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -411,7 +411,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -430,7 +430,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -443,17 +443,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. 
-# If order to access devices' iControl® interfaces via +# In order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 @@ -503,7 +503,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/_static/f5-openstack-agent.vxlan.ini b/docs/_static/f5-openstack-agent.vxlan.ini index 7bfd08153..9c1d252e8 100644 --- a/docs/_static/f5-openstack-agent.vxlan.ini +++ b/docs/_static/f5-openstack-agent.vxlan.ini @@ -46,21 +46,21 @@ periodic_interval = 10 # # service_resync_interval = 500 # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. This allows you set this string.
The default is # 'project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the BIG-IP associated with the old environment will need to be managed # manually. # ############################################################################### # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # # environment_prefix = 'Project' @@ -132,10 +132,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -150,12 +150,12 @@ f5_external_physical_mappings = default:1.1:True # any string which is meaningful to a vlan_binding_driver. It can be a # switch_id and port, or it might be a neutron port_id.
# -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -166,7 +166,7 @@ f5_external_physical_mappings = default:1.1:True # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format -# selfip addresses, one per BIG-IP® device, to use for VTEP addresses. +# selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -211,10 +211,10 @@ advertised_tunnel_types = vxlan # # Device Tunneling (VTEP) selfips # -# This is a boolean entry which determines if they BIG-IP® will use +# This is a boolean entry which determines if the BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are -# setup. If the BIG-IP® agent and other tunnel agents don't match +# setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly.
# l2_population = True @@ -223,13 +223,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -240,22 +240,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. 
The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -264,12 +264,12 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = False # # Allow overlapping IP subnets across multiple tenants. -# This creates route domains on BIG-IP® in order to +# This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -319,14 +319,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -364,16 +364,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. 
The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -392,7 +392,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -412,7 +412,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collected # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -431,7 +431,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -444,17 +444,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. 
-# If order to access devices' iControl® interfaces via +# In order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.0.0 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 @@ -504,7 +504,7 @@ icontrol_password = admin # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATTED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, -# and if it does not exist on your BIG-IP® system the agent will use the default +# and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/coding-example-lbaasv2.rst b/docs/coding-example-lbaasv2.rst index b22218fb1..b8bce8492 100644 --- a/docs/coding-example-lbaasv2.rst +++ b/docs/coding-example-lbaasv2.rst @@ -3,7 +3,7 @@ Coding Example ============== -We've provided some code examples below to help you get started with the F5® OpenStack LBaaSv2 agent and driver. This series demonstrates how to configure basic load balancing via the Neutron CLI. To access the full Neutron LBaaS command set, please see the `OpenStack CLI Documentation `_.
LBaaSv2 commands all begin with ``lbaas``. +We've provided some code examples below to help you get started with the F5 OpenStack LBaaSv2 agent and driver. This series demonstrates how to configure basic load balancing via the Neutron CLI. To access the full Neutron LBaaS command set, please see the `OpenStack CLI Documentation `_. LBaaSv2 commands all begin with ``lbaas``. Create a load balancer @@ -70,9 +70,9 @@ The example command below shows how to create a listener that uses the ``TERMINA .. important:: - You must configure Barbican, Keystone, Neutron, and the F5® agent before you can create a tls load balancer. + You must configure Barbican, Keystone, Neutron, and the F5 agent before you can create a tls load balancer. See the `OpenStack LBaaS documentation `_ for further information and configuration instructions for the OpenStack pieces. - The necessary F5® agent configurations are described in :ref:`Certificate Manager / SSL Offloading`. + The necessary F5 agent configurations are described in :ref:`Certificate Manager / SSL Offloading`. diff --git a/docs/includes/ref_agent-config-file.rst b/docs/includes/ref_agent-config-file.rst index 248cb3659..3ad233e42 100644 --- a/docs/includes/ref_agent-config-file.rst +++ b/docs/includes/ref_agent-config-file.rst @@ -3,14 +3,14 @@ Agent Configuration File ======================== -The agent configuration file -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- controls how the agent interacts with your BIG-IP®(s). The file contains detailed descriptions of each available configuration option. +The agent configuration file -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- controls how the agent interacts with your BIG-IP(s). The file contains detailed descriptions of each available configuration option. For reference, we've provided here a set of 'pre-configured' agent config files. These examples can help guide you in setting up the F5 agent to work with your specific environment. 
:ref:`Global Routed Mode` ------------------------- -Can be used for :term:`standalone`, :term:`overcloud` BIG-IP® VE deployments. +Can be used for :term:`standalone`, :term:`overcloud` BIG-IP VE deployments. * :download:`f5-openstack-agent.grm.ini <../_static/f5-openstack-agent.grm.ini>` @@ -18,7 +18,7 @@ Can be used for :term:`standalone`, :term:`overcloud` BIG-IP® VE deployments. :ref:`L2 Adjacent Mode` ------------------------------- -Can be used for :term:`standalone` or :term:`clustered` :term:`undercloud` BIG-IP® hardware or VE deployments. +Can be used for :term:`standalone` or :term:`clustered` :term:`undercloud` BIG-IP hardware or VE deployments. * :download:`f5-openstack-agent.gre.ini <../_static/f5-openstack-agent.gre.ini>` diff --git a/docs/includes/ref_lbaasv2-version-compatibility.rst b/docs/includes/ref_lbaasv2-version-compatibility.rst index 3460e01dd..700be639b 100644 --- a/docs/includes/ref_lbaasv2-version-compatibility.rst +++ b/docs/includes/ref_lbaasv2-version-compatibility.rst @@ -3,5 +3,5 @@ Release ------- -Release |version| is compatible with OpenStack |openstack|. For more information, please see the F5® OpenStack `Releases, Versioning, and Support Matrix `_. +Release |version| is compatible with OpenStack |openstack|. For more information, please see the F5 OpenStack `Releases, Versioning, and Support Matrix `_. diff --git a/docs/includes/ref_neutron-to-bigip-configs-table.rst b/docs/includes/ref_neutron-to-bigip-configs-table.rst index b1cfb5cf7..3e4bdcc13 100644 --- a/docs/includes/ref_neutron-to-bigip-configs-table.rst +++ b/docs/includes/ref_neutron-to-bigip-configs-table.rst @@ -3,7 +3,7 @@ Neutron Command to BIG-IP Configuration Mapping Table ===================================================== -F5 LBaaSv2 uses the `f5-sdk `_ to communicate with BIG-IP via the iControl® REST API. The table below shows the corresponding iControl endpoint and BIG-IP object for each neutron lbaas- ‘create’ command. 
+F5 LBaaSv2 uses the `f5-sdk `_ to communicate with BIG-IP via the iControl REST API. The table below shows the corresponding iControl endpoint and BIG-IP object for each neutron lbaas- ‘create’ command. +----------------------------------------+-----------------------------------------------------------------------------------------+-----------------------------------+ | Command | URI | BIG-IP Configurations Applied | diff --git a/docs/includes/ref_prerequisites.rst b/docs/includes/ref_prerequisites.rst index b75e9ec53..66dec60ee 100644 --- a/docs/includes/ref_prerequisites.rst +++ b/docs/includes/ref_prerequisites.rst @@ -56,7 +56,7 @@ - Three (3) VLANs :ref:`configured in Neutron ` -- 'mgmt', 'control', and 'data' -- to be used for system management, high availability (if desired), and data traffic, respectively. -- At least two (2) VLANs :ref:`configured in Neutron ` -- 'mgmt' and 'data' - to be used for BIG-IP® system management and client-server data traffic, respectively. +- At least two (2) VLANs :ref:`configured in Neutron ` -- 'mgmt' and 'data' - to be used for BIG-IP system management and client-server data traffic, respectively. - VLANs :ref:`configured in Neutron ` or `on the BIG-IP `_, as appropriate for your environment. diff --git a/docs/includes/topic_agent-redundancy-scaleout.rst b/docs/includes/topic_agent-redundancy-scaleout.rst index 4ac397b66..f128c6cb7 100644 --- a/docs/includes/topic_agent-redundancy-scaleout.rst +++ b/docs/includes/topic_agent-redundancy-scaleout.rst @@ -10,7 +10,7 @@ Overview We refer to 'hosts' a lot in this document. A 'host' could be a Neutron controller, a compute node, a container, etc.; the important takeaway is that in order to run multiple agents in one environment, **each agent must have a unique** ``hostname``. [#]_ -When the Neutron LBaaS plugin loads the F5® LBaaSv2 driver, it creates a global messaging queue to be used for all callbacks and status update requests from F5 LBaaSv2 agents. 
Requests are passed from the global messaging queue to F5 LBaaSv2 drivers in a round-robin fashion, then passed on to an F5 agent as described in the :ref:`Agent-Tenant Affinity` section. +When the Neutron LBaaS plugin loads the F5 LBaaSv2 driver, it creates a global messaging queue to be used for all callbacks and status update requests from F5 LBaaSv2 agents. Requests are passed from the global messaging queue to F5 LBaaSv2 drivers in a round-robin fashion, then passed on to an F5 agent as described in the :ref:`Agent-Tenant Affinity` section. Agent-Tenant Affinity ````````````````````` @@ -83,7 +83,7 @@ To manage one BIG-IP device or device service group with multiple F5 agents, dep .. tip:: - * Be sure to provide the iControl® endpoints for all BIG-IP devices you'd like the agents to manage. + * Be sure to provide the iControl endpoints for all BIG-IP devices you'd like the agents to manage. * You can configure the F5 agent once, on the Neutron controller, then copy the agent config file (:file:`/etc/neutron/services/f5/f5-openstack-agent.ini`) over to the other hosts. #. :ref:`Start the F5 agent` on each host. @@ -102,5 +102,5 @@ Further Reading * :ref:`Multiple Agents and Differentiated Service Environments` -.. [#] **F5 Networks® does not provide support for container service deployments.** If you are already well versed in containerized environments, you can run one F5 agent per container. The neutron.conf file must be present in the container. The service provider driver does not need to run in the container; rather, it only needs to be in the container's build context. +.. [#] **F5 Networks does not provide support for container service deployments.** If you are already well versed in containerized environments, you can run one F5 agent per container. The neutron.conf file must be present in the container. The service provider driver does not need to run in the container; rather, it only needs to be in the container's build context. 
diff --git a/docs/includes/topic_basic-environment-reqs.rst b/docs/includes/topic_basic-environment-reqs.rst index 1a494b44c..bbbd66eed 100644 --- a/docs/includes/topic_basic-environment-reqs.rst +++ b/docs/includes/topic_basic-environment-reqs.rst @@ -4,7 +4,7 @@ Basic Environment Requirements for F5 LBaaSv2 ============================================= -This document provides the minimum basic requirements for using F5® LBaaSv2 in OpenStack |openstack|. +This document provides the minimum basic requirements for using F5 LBaaSv2 in OpenStack |openstack|. OpenStack Requirements ---------------------- @@ -41,7 +41,7 @@ BIG-IP Requirements .. important:: - - You must have the appropriate `license`_ for the BIG-IP® features you wish to use. + - You must have the appropriate `license`_ for the BIG-IP features you wish to use. - All numbers shown in the table below are per BIG-IP device. diff --git a/docs/includes/topic_capacity-based-scaleout.rst b/docs/includes/topic_capacity-based-scaleout.rst index 47010a2c9..dd7413986 100644 --- a/docs/includes/topic_capacity-based-scaleout.rst +++ b/docs/includes/topic_capacity-based-scaleout.rst @@ -6,7 +6,7 @@ Capacity-Based Scale Out Overview -------- -When using :ref:`differentiated service environments `, you can configure capacity metrics for the F5® agent to provide scale out across multiple BIG-IP device groups. The F5 agent :ref:`configuration parameters ` ``environment_group_number`` and ``environment_capacity_score`` allow the F5 LBaaSv2 agent scheduler to assign requests to the group that has the lowest capacity score. +When using :ref:`differentiated service environments `, you can configure capacity metrics for the F5 agent to provide scale out across multiple BIG-IP device groups. The F5 agent :ref:`configuration parameters ` ``environment_group_number`` and ``environment_capacity_score`` allow the F5 LBaaSv2 agent scheduler to assign requests to the group that has the lowest capacity score. 
Each F5 agent expected to manage a specific :term:`device group` must be configured with the same ``icontrol_endpoints``. They must also be configured with the same ``environment_group_number``; this is used by the F5 LBaaSv2 driver to map the agents to the BIG-IP device group. The ``environment_group_number`` provides a convenient way for the F5 driver to identify agents that are available to handle requests for any of the devices in a given group. diff --git a/docs/includes/topic_cert-manager.rst b/docs/includes/topic_cert-manager.rst index 157820327..2208bbec4 100644 --- a/docs/includes/topic_cert-manager.rst +++ b/docs/includes/topic_cert-manager.rst @@ -10,9 +10,9 @@ Overview OpenStack's 'Barbican' certificate manager provides a secure location where users can store sensitive information, such as SSH keys, private keys, certificates, and user passwords (referred to as "`secrets`_ " in OpenStack lingo). -The F5® agent uses Barbican certificates to perform :term:`SSL offloading` on BIG-IP®. It allows users to either create a new SSL profile, or to designate an existing `BIG-IP SSL profile`_ as the parent from which client profiles created for LBaaS objects will inherit settings. +The F5 agent uses Barbican certificates to perform :term:`SSL offloading` on BIG-IP. It allows users to either create a new SSL profile, or to designate an existing `BIG-IP SSL profile`_ as the parent from which client profiles created for LBaaS objects will inherit settings. -In general, SSL offloading frees up server and application capacity for handling traffic by shifting authentication processing from the target server to a designated authentication server. As shown in the diagram, once an admin user has added `secrets`_ to a Barbican container, he can use it to create a :ref:`TLS load balancer `. After the certificate data is validated, the F5® agent configures the load balancer on the BIG-IP. 
+In general, SSL offloading frees up server and application capacity for handling traffic by shifting authentication processing from the target server to a designated authentication server. As shown in the diagram, once an admin user has added `secrets`_ to a Barbican container, he can use it to create a :ref:`TLS load balancer `. After the certificate data is validated, the F5 agent configures the load balancer on the BIG-IP. .. figure:: ../media/LBaaS_cert-mgr_with-legend.jpg :alt: SSL Offloading with OpenStack Barbican, Neutron LBaaSv2, and BIG-IP @@ -107,7 +107,7 @@ Configuration 4. Set the BIG-IP parent SSL profile. - - ``f5_parent_ssl_profile``: The parent SSL profile on the BIG-IP® from which the agent SSL profile should inherit settings + - ``f5_parent_ssl_profile``: The parent SSL profile on the BIG-IP from which the agent SSL profile should inherit settings .. topic:: Example @@ -120,7 +120,7 @@ Configuration # protocol. You can define the parent profile for this profile by setting # f5_parent_ssl_profile. The profile created to support TERMINATED_HTTPS will # inherit settings from the parent you define. This must be an existing profile, - # and if it does not exist on your BIG-IP® system the agent will use the default + # and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. #f5_parent_ssl_profile = clientssl # diff --git a/docs/includes/topic_clustering.rst b/docs/includes/topic_clustering.rst index 0d6f5a86a..8ccb4d9e8 100644 --- a/docs/includes/topic_clustering.rst +++ b/docs/includes/topic_clustering.rst @@ -6,7 +6,7 @@ Manage BIG-IP Clusters with F5 LBaaSv2 Overview -------- -The F5® LBaaSv2 agent and driver can manage BIG-IP® :term:`device service clusters`, providing :term:`high availability`, :term:`mirroring`, and :term:`failover` services within your OpenStack cloud. 
+The F5 LBaaSv2 agent and driver can manage BIG-IP :term:`device service clusters`, providing :term:`high availability`, :term:`mirroring`, and :term:`failover` services within your OpenStack cloud. The F5 agent applies LBaaS configuration changes to each BIG-IP :term:`device` in a cluster at the same time, in real time. It is unnecessary to use BIG-IP's '`configuration synchronization`_ mode' to sync LBaaS objects managed by the agent across the devices in a cluster. @@ -100,7 +100,7 @@ Configuration # # -#. Add the IP address for each BIG-IP device, the admin username, and the admin password to the :ref:`Device Driver - iControl® Driver Setting ` section of the config file. Values must be comma-separated. +#. Add the IP address for each BIG-IP device, the admin username, and the admin password to the :ref:`Device Driver - iControl Driver Setting ` section of the config file. Values must be comma-separated. .. code-block:: text :emphasize-lines: 10 diff --git a/docs/includes/topic_configure-neutron-lbaasv2.rst b/docs/includes/topic_configure-neutron-lbaasv2.rst index 32eadecdc..03d03547a 100644 --- a/docs/includes/topic_configure-neutron-lbaasv2.rst +++ b/docs/includes/topic_configure-neutron-lbaasv2.rst @@ -5,9 +5,9 @@ Configure Neutron for LBaaSv2 ============================= -You will need to make a few configurations in your Neutron environment in order to use the F5® OpenStack LBaasv2 driver and agent. +You will need to make a few configurations in your Neutron environment in order to use the F5 OpenStack LBaasv2 driver and agent. -First, you'll need to set F5 Networks® as the Neutron LBaaSv2 service provider driver. Then, add the LBaaSv2 plugin to the list of service plugins in the Neutron configuration file. +First, you'll need to set F5 Networks as the Neutron LBaaSv2 service provider driver. Then, add the LBaaSv2 plugin to the list of service plugins in the Neutron configuration file. 
Set 'F5Networks' as the LBaaSv2 Service Provider ------------------------------------------------ @@ -25,7 +25,7 @@ Edit the ``service_providers`` section of :file:`/etc/neutron/neutron_lbaas.conf .. note:: - If there is an active entry for the F5® LBaaSv1 service provider driver, comment (#) it out. + If there is an active entry for the F5 LBaaSv1 service provider driver, comment (#) it out. Add the Neutron LBaaSv2 Service Plugin -------------------------------------- diff --git a/docs/includes/topic_device-driver-settings.rst b/docs/includes/topic_device-driver-settings.rst index a1bd5daaa..c7e3cae72 100644 --- a/docs/includes/topic_device-driver-settings.rst +++ b/docs/includes/topic_device-driver-settings.rst @@ -8,9 +8,9 @@ Device Driver Settings / iControl Driver Settings Overview -------- -The Device Driver Settings in the :ref:`Agent Configuration File` provide the means of communication between the F5® agent and BIG-IP® device(s). **Do not change this setting**. +The Device Driver Settings in the :ref:`Agent Configuration File` provide the means of communication between the F5 agent and BIG-IP device(s). **Do not change this setting**. -The iControl® Driver Settings identify the BIG-IP device(s) that you want the F5 agent to manage and record the login information the agent will use to communicate with the BIG-IP(s). +The iControl Driver Settings identify the BIG-IP device(s) that you want the F5 agent to manage and record the login information the agent will use to communicate with the BIG-IP(s). Use Case -------- @@ -29,7 +29,7 @@ Prerequisites - Administrator access to both BIG-IP device(s) and OpenStack cloud. -- Basic understanding of `BIG-IP® system configuration `_. +- Basic understanding of `BIG-IP system configuration `_. - F5 :ref:`agent ` and :ref:`service provider driver ` installed on the Neutron controller and all other hosts for which you want to provision LBaaS services. 
@@ -37,7 +37,7 @@ Prerequisites Caveats ------- -- vCMP® is unsupported in this release (v |release|). +- vCMP is unsupported in this release (v |release|). Configuration @@ -62,7 +62,7 @@ Configuration :emphasize-lines: 17, 31, 36 ############################################################################### - # Device Driver - iControl® Driver Setting + # Device Driver - iControl Driver Setting ############################################################################### # # This setting can be either a single IP address or a @@ -73,13 +73,13 @@ Configuration # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. - # In order to access devices' iControl® interfaces via + # In order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.7.232 \\ replace with the IP address(es) of your BIG-IP(s) # - # If you are using vCMP® with VLANs, you will need to configure + # If you are using vCMP with VLANs, you will need to configure # your vCMP host addresses, in addition to the guests addresses. # vCMP Host access is necessary for provisioning VLANs to a guest. # Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname diff --git a/docs/includes/topic_differentiated-services.rst b/docs/includes/topic_differentiated-services.rst index 0ea09620f..af2454c68 100644 --- a/docs/includes/topic_differentiated-services.rst +++ b/docs/includes/topic_differentiated-services.rst @@ -6,9 +6,9 @@ Differentiated Service Environments Overview -------- -The F5® LBaaSv2 driver and F5 agent can manage multiple BIG-IP environments. In a :dfn:`differentiated service environment` -- a uniquely-named environment for which dedicated F5 LBaaS services are required -- the F5 driver has its own, uniquely-named messaging queue. 
The F5 LBaaS agent scheduler for a differentiated service environment can only assign tasks to agents running in that environment. +The F5 LBaaSv2 driver and F5 agent can manage multiple BIG-IP environments. In a :dfn:`differentiated service environment` -- a uniquely-named environment for which dedicated F5 LBaaS services are required -- the F5 driver has its own, uniquely-named messaging queue. The F5 LBaaS agent scheduler for a differentiated service environment can only assign tasks to agents running in that environment. -The service environment corresponds to the ``environment_prefix`` parameter in the :ref:`agent configuration file`. when you create a new ``lbaas-loadbalancer`` in OpenStack, this prefix is prepended to the OpenStack tenant id and used to create a new partition on your BIG-IP® device(s). The default ``environment_prefix`` parameter is ``Project``. +The service environment corresponds to the ``environment_prefix`` parameter in the :ref:`agent configuration file`. When you create a new ``lbaas-loadbalancer`` in OpenStack, this prefix is prepended to the OpenStack tenant id and used to create a new partition on your BIG-IP device(s). The default ``environment_prefix`` parameter is ``Project``. Differentiated service environments can be used in conjunction with :ref:`capacity-based scale out` to provide agent redundancy and scale out across BIG-IP device groups. diff --git a/docs/includes/topic_environment-generator.rst b/docs/includes/topic_environment-generator.rst index 11b12c82d..bc88c5899 100644 --- a/docs/includes/topic_environment-generator.rst +++ b/docs/includes/topic_environment-generator.rst @@ -6,7 +6,7 @@ F5 Environment Generator Overview -------- -The F5® environment generator is a Python utility that creates new service provider drivers and adds them to the Neutron LBaaS configuration file (:file:`/etc/neutron/neutron_lbaas.conf`).
+The F5 environment generator is a Python utility that creates new service provider drivers and adds them to the Neutron LBaaS configuration file (:file:`/etc/neutron/neutron_lbaas.conf`). Use Case -------- diff --git a/docs/includes/topic_f5lbaas-l7_content_switching.rst b/docs/includes/topic_f5lbaas-l7_content_switching.rst index ce1261d1b..9edb2842d 100644 --- a/docs/includes/topic_f5lbaas-l7_content_switching.rst +++ b/docs/includes/topic_f5lbaas-l7_content_switching.rst @@ -73,13 +73,13 @@ L7 policies are ranked by a position value and are evaluated according to their Send request to default pool -OpenStack Policy/Rules Definition Versus BIG-IP® Policy/Rules: +OpenStack Policy/Rules Definition Versus BIG-IP Policy/Rules: `````````````````````````````````````````````````````````````` The Neutron L7 terminology does not directly align with the common vocabulary of BIG-IP Local Traffic Manager. In the BIG-IP LTM, policies also have a set of rules, but it is the rules that specify actions and not the policy. Also, policies attached to a virtual server on the BIG-IP are all evaluated regardless of the truth of the associated rules. In addition to this difference the BIG-IP policies have no ordinal, it is the BIG-IP rules that have this attribute. Because of these confusing differences it is useful to attempt to define the terms as they apply to each domain. +------------------+-------------------------------+ - | Neutron LBaaS L7 | BIG-IP® Local Traffic Manager | + | Neutron LBaaS L7 | BIG-IP Local Traffic Manager | +==================+===============================+ | Policy | Policy Rules (wrapper_policy) | +------------------+-------------------------------+ @@ -197,7 +197,7 @@ Configuration .. code-block:: text - # The resulting BIG-IP® LTM Policy configuration from the steps above. + # The resulting BIG-IP LTM Policy configuration from the steps above. 
ltm policy wrapper_policy { controls { forwarding } last-modified 2016-12-05:09:19:05 diff --git a/docs/includes/topic_f5lbaas-vcmp.rst b/docs/includes/topic_f5lbaas-vcmp.rst index 5b33df84a..da3a0d6a0 100644 --- a/docs/includes/topic_f5lbaas-vcmp.rst +++ b/docs/includes/topic_f5lbaas-vcmp.rst @@ -4,7 +4,7 @@ F5 LBaaSv2 and vCMP Overview -------- -Virtual Clustered Multiprocessing™ (vCMP®) is a feature of the BIG-IP® system that allows you to run multiple instances of BIG-IP software on a single hardware platform. vCMP allocates a specific share of the hardware resources to each BIG-IP® instance, or :term:`vCMP guest`. +Virtual Clustered Multiprocessing™ (vCMP) is a feature of the BIG-IP system that allows you to run multiple instances of BIG-IP software on a single hardware platform. vCMP allocates a specific share of the hardware resources to each BIG-IP instance, or :term:`vCMP guest`. A vCMP guest consists of a TMOS instance and one or more BIG-IP modules. The :term:`vCMP host` allocates a share of the hardware resources to each guest; each guest also has its own management IP address, self IP addresses, virtual servers, and so on. In this way, each guest can effectively receive and process application traffic with no knowledge of other guests on the system. @@ -42,11 +42,11 @@ Configuration .. code-block:: text :emphasize-lines: 8 - # If you are using vCMP® with VLANs, you will need to configure - # your vCMP® host addresses, in addition to the guests addresses. - # vCMP® Host access is necessary for provisioning VLANs to a guest. - # Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname - # for vCMP® hosts. The plug-in will automatically determine + # If you are using vCMP with VLANs, you will need to configure + # your vCMP host addresses, in addition to the guests addresses. + # vCMP Host access is necessary for provisioning VLANs to a guest. + # Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname + # for vCMP hosts. 
The plug-in will automatically determine # which host corresponds to each guest. # icontrol_vcmp_hostname = 192.168.1.245 @@ -58,7 +58,7 @@ Configuration :emphasize-lines: 19 ############################################################################### - # Device Driver - iControl® Driver Setting + # Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -71,7 +71,7 @@ Configuration # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. - # If order to access devices' iControl® interfaces via + # In order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # diff --git a/docs/includes/topic_global-routed-mode.rst b/docs/includes/topic_global-routed-mode.rst index 634bcf4e7..ff3e31981 100644 --- a/docs/includes/topic_global-routed-mode.rst +++ b/docs/includes/topic_global-routed-mode.rst @@ -8,7 +8,7 @@ Global Routed Mode Overview -------- -The F5® agent determines BIG-IP® devices' L2 and L3 network configurations based on the settings provided in the :ref:`L2/L3 segmentation modes ` settings in the :ref:`agent configuration file`. When configured to use global routed mode, the F5 agent makes the following assumptions: +The F5 agent determines BIG-IP devices' L2 and L3 network configurations based on the settings provided in the :ref:`L2/L3 segmentation modes ` settings in the :ref:`agent configuration file`. When configured to use global routed mode, the F5 agent makes the following assumptions: #. LBaaS objects are accessible via global L3 routes; #.
All virtual IPs are routable from clients; @@ -33,7 +33,7 @@ Global routed mode is generally used for :term:`undercloud` BIG-IP hardware depl Example BIG-IP 'undercloud' deployment -Global routed mode uses BIG-IP `secure network address translation`_ (SNAT) 'automapping' to map one or more origin IP addresses to a pool of translation addresses. The pool is created by the BIG-IP Local Traffic Manager® (LTM) from existing `self IP`_ addresses. This means that *before* you configure the F5 agent to use global routed mode, you should create enough `self IP`_ addresses on the BIG-IP(s) to handle anticipated connection loads. [#]_ You do not need to configure a SNAT pool, as one will be created automatically. +Global routed mode uses BIG-IP `secure network address translation`_ (SNAT) 'automapping' to map one or more origin IP addresses to a pool of translation addresses. The pool is created by the BIG-IP Local Traffic Manager (LTM) from existing `self IP`_ addresses. This means that *before* you configure the F5 agent to use global routed mode, you should create enough `self IP`_ addresses on the BIG-IP(s) to handle anticipated connection loads. [#]_ You do not need to configure a SNAT pool, as one will be created automatically. Prerequisites ------------- @@ -87,18 +87,18 @@ Configuration # L3 Segmentation Mode Settings ############################################################################### # - # Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® + # Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device - # L3 routes, which must be already provisioned on the BIG-IP®s. + # L3 routes, which must be already provisioned on the BIG-IPs. # ... # f5_global_routed_mode = True # # Allow overlapping IP subnets across multiple tenants. 
- # This creates route domains on BIG-IP® in order to + # This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -115,14 +115,14 @@ Configuration # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not - # be created (routed mode) and the BIG-IP® + # be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will no longer work - # if the same BIG-IP® device is not being used + # if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if diff --git a/docs/includes/topic_ha-modes.rst b/docs/includes/topic_ha-modes.rst index edb5590a6..ea84f9d39 100644 --- a/docs/includes/topic_ha-modes.rst +++ b/docs/includes/topic_ha-modes.rst @@ -6,7 +6,7 @@ HA mode Overview -------- -:term:`HA`, or, 'high availability', mode refers to high availability of the BIG-IP® device(s). The F5® agent can configure BIG-IP to operate in :term:`standalone`, :term:`pair`, or :term:`scalen` mode. The F5 agent configures LBaaS objects on HA BIG-IP devices in real time. +:term:`HA`, or, 'high availability', mode refers to high availability of the BIG-IP device(s). The F5 agent can configure BIG-IP to operate in :term:`standalone`, :term:`pair`, or :term:`scalen` mode. The F5 agent configures LBaaS objects on HA BIG-IP devices in real time. Use Case -------- @@ -41,7 +41,7 @@ Prerequisites - Basic understanding of OpenStack networking concepts. See the `OpenStack docs `_ for more information. 
-- Basic understanding of `BIG-IP® Local Traffic Management `_ +- Basic understanding of `BIG-IP Local Traffic Management `_ - F5 :ref:`agent ` and :ref:`service provider driver ` installed on the Neutron controller and all other hosts from which you want to provision LBaaS services. diff --git a/docs/includes/topic_hierarchical-port-binding.rst b/docs/includes/topic_hierarchical-port-binding.rst index 5e3cb5615..a7daee876 100644 --- a/docs/includes/topic_hierarchical-port-binding.rst +++ b/docs/includes/topic_hierarchical-port-binding.rst @@ -4,7 +4,7 @@ Hierarchical Port Binding Overview -------- -Neutron `hierarchical port binding`_ [#]_ allows software-defined networking (SDN) users to dynamically configure VLANs and VLAN tags for a physical BIG-IP® :term:`device` or :term:`device service cluster` connected to a 'top of rack' L3 switch (a network 'segment'). Telling the F5® agent what physical switch and port the BIG-IPs are connected to allows the agent to configure the BIG-IPs to process traffic for networks that are dynamically created in that segment. +Neutron `hierarchical port binding`_ [#]_ allows software-defined networking (SDN) users to dynamically configure VLANs and VLAN tags for a physical BIG-IP :term:`device` or :term:`device service cluster` connected to a 'top of rack' L3 switch (a network 'segment'). Telling the F5 agent what physical switch and port the BIG-IPs are connected to allows the agent to configure the BIG-IPs to process traffic for networks that are dynamically created in that segment. 
Disconnected Services ````````````````````` diff --git a/docs/includes/topic_l2-l3-segmentation-modes.rst b/docs/includes/topic_l2-l3-segmentation-modes.rst index 82baece8e..874042296 100644 --- a/docs/includes/topic_l2-l3-segmentation-modes.rst +++ b/docs/includes/topic_l2-l3-segmentation-modes.rst @@ -6,7 +6,7 @@ L2 Adjacent Mode Overview -------- -The F5® agent uses the L2/L3 segmentation mode settings to determine the L2/L3 network configurations for your BIG-IP® device(s). +The F5 agent uses the L2/L3 segmentation mode settings to determine the L2/L3 network configurations for your BIG-IP device(s). .. warning:: @@ -142,7 +142,7 @@ Device VLAN to interface and tag mapping VLAN device and interface to port mappings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``vlan_binding_driver``: Binds tagged VLANs to specific BIG-IP ports; it should be configured using a valid subclass of the iControl® :class:`VLANBindingBase` class. [#]_ **To use this feature, uncomment the line in the :ref:`agent configuration file`.** +- ``vlan_binding_driver``: Binds tagged VLANs to specific BIG-IP ports; it should be configured using a valid subclass of the iControl :class:`VLANBindingBase` class. [#]_ **To use this feature, uncomment the line in the :ref:`agent configuration file`.** Device Tunneling (VTEP) selfips @@ -160,7 +160,7 @@ Device Tunneling (VTEP) selfips # Device Tunneling (VTEP) selfips # # This is a single entry or comma separated list of cidr (h/m) format - # selfip addresses, one per BIG-IP® device, to use for VTEP addresses. + # selfip addresses, one per BIG-IP device, to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -228,10 +228,10 @@ Static ARP population for members on tunnel networks # f5_populate_static_arp = True # ... 
- # This is a boolean entry which determines if the BIG-IP® will use + # This is a boolean entry which determines if the BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be setup in accordance with the way the other tunnel agents are - # setup. If the BIG-IP® agent and other tunnel agents don't match + # setup. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. # l2_population = True @@ -256,7 +256,7 @@ Namespaces and Routing :emphasize-lines: 8 # Allow overlapping IP subnets across multiple tenants. - # This creates route domains on BIG-IP® in order to + # This creates route domains on BIG-IP in order to # separate the tenant networks. # # This setting is forced to False if @@ -340,14 +340,14 @@ SNAT Mode and SNAT Address Counts # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not - # be created (routed mode) and the BIG-IP® + # be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will no longer work - # if the same BIG-IP® device is not being used + # if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -396,16 +396,16 @@ Common Networks # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted - # on the BIG-IP®, but rather assumed that the value + # on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined - # on the BIG-IP® prior to LBaaS configuration. 
The + # on the BIG-IP prior to LBaaS configuration. The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # - # If your Internet VLAN on your BIG-IP® is named + # If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: diff --git a/docs/includes/topic_lbaasv2-plugin-overview.rst b/docs/includes/topic_lbaasv2-plugin-overview.rst index b0feb9daf..e15b63fd1 100644 --- a/docs/includes/topic_lbaasv2-plugin-overview.rst +++ b/docs/includes/topic_lbaasv2-plugin-overview.rst @@ -3,7 +3,7 @@ Overview -------- -The F5® OpenStack LBaaSv2 service provider driver and agent (also called, simply, 'F5 LBaaSv2') make it possible to provision F5 BIG-IP® `Local Traffic Manager `_ (LTM®) services in an OpenStack cloud. +The F5 OpenStack LBaaSv2 service provider driver and agent (also called, simply, 'F5 LBaaSv2') make it possible to provision F5 BIG-IP `Local Traffic Manager `_ (LTM) services in an OpenStack cloud. How the plugin works diff --git a/docs/includes/topic_multi-tenancy.rst b/docs/includes/topic_multi-tenancy.rst index 8ff2bc36a..2a386e333 100644 --- a/docs/includes/topic_multi-tenancy.rst +++ b/docs/includes/topic_multi-tenancy.rst @@ -6,7 +6,7 @@ Manage Multi-Tenant BIG-IP Devices with F5 LBaaSv2 Overview -------- -BIG-IP® devices allow users to create and customize partitions for which specific features that meet a tenant's needs can be enabled. This type of configuration, called multi-tenancy, allows a greater degree of flexibility in allocating network resources to multiple individual projects. [#]_ +BIG-IP devices allow users to create and customize partitions for which specific features that meet a tenant's needs can be enabled. This type of configuration, called multi-tenancy, allows a greater degree of flexibility in allocating network resources to multiple individual projects. [#]_ .. 
figure:: ../media/f5-lbaas-multi-tenancy.png :alt: Multi-tenant BIG-IP and F5 LBaaS diff --git a/docs/includes/topic_neutron-bigip-command-mapping.rst b/docs/includes/topic_neutron-bigip-command-mapping.rst index dfa5047e4..8de834afd 100644 --- a/docs/includes/topic_neutron-bigip-command-mapping.rst +++ b/docs/includes/topic_neutron-bigip-command-mapping.rst @@ -6,7 +6,7 @@ F5 LBaaSv2 to BIG-IP Configuration Mapping Overview -------- -When you issue ``neutron lbaas`` commands on your OpenStack Neutron controller or host, the F5® LBaaSv2 driver and F5 agent configure objects on your BIG-IP® device(s). Here, we've provided some insight into what exactly happens behind the scenes to configure BIG-IP objects. You can also view the actual calls made by setting the F5 agent's DEBUG level to 'True' in the :ref:`agent configuration file` and viewing the logs (:file:`/var/log/neutron/f5-openstack-agent.log`). +When you issue ``neutron lbaas`` commands on your OpenStack Neutron controller or host, the F5 LBaaSv2 driver and F5 agent configure objects on your BIG-IP device(s). Here, we've provided some insight into what exactly happens behind the scenes to configure BIG-IP objects. You can also view the actual calls made by setting the F5 agent's DEBUG level to 'True' in the :ref:`agent configuration file` and viewing the logs (:file:`/var/log/neutron/f5-openstack-agent.log`). .. 
include:: ref_neutron-to-bigip-configs-table.rst :start-line: 5 diff --git a/docs/includes/topic_supported-features-intro.rst b/docs/includes/topic_supported-features-intro.rst index 57d495889..686fa93ab 100644 --- a/docs/includes/topic_supported-features-intro.rst +++ b/docs/includes/topic_supported-features-intro.rst @@ -1,4 +1,4 @@ :orphan: true -The :ref:`agent configuration file` -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- provides the mechanism for identifying your BIG-IP® device(s) to F5 LBaaSv2 and allowing the agent to discover and configure BIG-IP network (``/net``) and Local Traffic Manager® (``/ltm``) objects. +The :ref:`agent configuration file` -- :file:`/etc/neutron/services/f5/f5-openstack-agent.ini` -- provides the mechanism for identifying your BIG-IP device(s) to F5 LBaaSv2 and allowing the agent to discover and configure BIG-IP network (``/net``) and Local Traffic Manager (``/ltm``) objects. diff --git a/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst b/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst index 7075c37f4..23866aef9 100644 --- a/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst +++ b/docs/includes/topic_upgrading-f5-lbaasv2-plugin.rst @@ -3,7 +3,7 @@ Upgrading the F5 LBaaSv2 Components =================================== -If you are upgrading from an earlier version, F5® recommends that you uninstall the current version, then install the new version. +If you are upgrading from an earlier version, F5 recommends that you uninstall the current version, then install the new version. .. warning:: diff --git a/docs/map_before-you-begin.rst b/docs/map_before-you-begin.rst index c5ceb84d1..5d5b0e1e2 100644 --- a/docs/map_before-you-begin.rst +++ b/docs/map_before-you-begin.rst @@ -1,11 +1,11 @@ Before You Begin ================ -In order to use F5® LBaaSv2 services, you will need the following: +In order to use F5 LBaaSv2 services, you will need the following: - Operational OpenStack cloud (|openstack| release). 
-- Licensed, operational BIG-IP® :term:`device` or :term:`device cluster`; can be deployed either as an OpenStack instance (BIG-IP VE) or external to the cloud (VE or hardware). +- Licensed, operational BIG-IP :term:`device` or :term:`device cluster`; can be deployed either as an OpenStack instance (BIG-IP VE) or external to the cloud (VE or hardware). .. important:: diff --git a/docs/map_f5-lbaasv2-user-guide.rst b/docs/map_f5-lbaasv2-user-guide.rst index b7440a97e..8ecfdecbf 100644 --- a/docs/map_f5-lbaasv2-user-guide.rst +++ b/docs/map_f5-lbaasv2-user-guide.rst @@ -3,7 +3,7 @@ F5 OpenStack LBaaSv2 User Guide ############################### -This guide provides instructions for installing and using the F5® OpenStack LBaaSv2 service provider driver and agent (also called, collectively, 'F5 LBaaSv2'). +This guide provides instructions for installing and using the F5 OpenStack LBaaSv2 service provider driver and agent (also called, collectively, 'F5 LBaaSv2'). .. include:: includes/ref_lbaasv2-version-compatibility.rst :start-line: 5 diff --git a/docs/map_multi-agents-in-diff-environments.rst b/docs/map_multi-agents-in-diff-environments.rst index 5126611ab..bd2f83d6c 100644 --- a/docs/map_multi-agents-in-diff-environments.rst +++ b/docs/map_multi-agents-in-diff-environments.rst @@ -4,7 +4,7 @@ Multiple Agents and Differentiated Service Environments Overview -------- -You can run :ref:`multiple F5® agents ` on separate hosts in OpenStack to provide agent redundancy and scale out. Additionally, you can set up custom :ref:`service environments ` in your OpenStack cloud to manage environments with different requirements and/or configurations. +You can run :ref:`multiple F5 agents ` on separate hosts in OpenStack to provide agent redundancy and scale out. Additionally, you can set up custom :ref:`service environments ` in your OpenStack cloud to manage environments with different requirements and/or configurations. Use Case -------- @@ -38,7 +38,7 @@ Configuration #. 
:ref:`Configure the F5 agents `. - * Each agent must be configured with the same iControl® endpoint(s). + * Each agent must be configured with the same iControl endpoint(s). * Each agent must be configured with the same ``environment_prefix``; this is the name you assigned to the new custom environment. * Each agent must run on a separate host (in other words, the hostname must be unique). diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index 456ab63ee..3f760ef42 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -12,7 +12,7 @@ Troubleshooting Set Logging Level to DEBUG -------------------------- -To troubleshoot general problems, set the Neutron and the F5® agent ``debug`` setting to ``True``. +To troubleshoot general problems, set the Neutron and the F5 agent ``debug`` setting to ``True``. Extensive logging will then appear in the ``neutron-server`` and ``f5-oslbaasv1-agent`` log files on their respective hosts. @@ -61,7 +61,7 @@ Here are a few things you can try: $ sudo service f5-oslbaasv2-agent status \\ Ubuntu -3. Make sure you can connect to the BIG-IP® and that the iControl® hostname, username, and password in the :ref:`agent configuration file` are correct. +3. Make sure you can connect to the BIG-IP and that the iControl hostname, username, and password in the :ref:`agent configuration file` are correct. 4. If you're using ``global_routed_mode``, comment out (#) the ``vtep`` lines (shown below) in the :ref:`agent configuration file`. diff --git a/f5lbaasdriver/test/tempest/services/clients/bigip_client.py b/f5lbaasdriver/test/tempest/services/clients/bigip_client.py index 50e5c4826..1191393c4 100644 --- a/f5lbaasdriver/test/tempest/services/clients/bigip_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/bigip_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 rules client for tempest tests.""" +u"""F5 Networks LBaaSv2 L7 rules client for tempest tests.""" # Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py b/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py index 44d7bf8fa..1a7b276bf 100644 --- a/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/l7policy_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 rules client for tempest tests.""" +u"""F5 Networks LBaaSv2 L7 rules client for tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py b/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py index 7fd47280e..fe039a7b5 100644 --- a/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/l7rule_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 rules client for tempest tests.""" +u"""F5 Networks LBaaSv2 L7 rules client for tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py b/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py index b4acb187f..8e9c177fb 100644 --- a/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py +++ b/f5lbaasdriver/test/tempest/services/clients/plugin_rpc_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 plugin_rpc client for tempest tests.""" +u"""F5 Networks LBaaSv2 plugin_rpc client for tempest tests.""" # Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_esd.py b/f5lbaasdriver/test/tempest/tests/api/test_esd.py index e01b86aec..e27c092ff 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_esd.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_esd.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py b/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py index 5a877d78d..b6548b214 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_l7policy.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py index 0b761553a..b77dd81eb 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_rules.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy tempest tests.""" # Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py index f547494dd..fe68f3a82 100644 --- a/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py +++ b/f5lbaasdriver/test/tempest/tests/api/test_l7policy_update.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 L7 policy rules tempest tests.""" +u"""F5 Networks LBaaSv2 L7 policy rules tempest tests.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/agent_rpc.py b/f5lbaasdriver/v2/bigip/agent_rpc.py index 4870ad348..3e6d11a60 100644 --- a/f5lbaasdriver/v2/bigip/agent_rpc.py +++ b/f5lbaasdriver/v2/bigip/agent_rpc.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""RPC Calls to Agents for F5® LBaaSv2.""" +u"""RPC Calls to Agents for F5 LBaaSv2.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/agent_scheduler.py b/f5lbaasdriver/v2/bigip/agent_scheduler.py index 712e629f3..67d44d699 100644 --- a/f5lbaasdriver/v2/bigip/agent_scheduler.py +++ b/f5lbaasdriver/v2/bigip/agent_scheduler.py @@ -33,26 +33,29 @@ def __init__(self): """Initialze with the ChanceScheduler base class.""" super(TenantScheduler, self).__init__() + # ccloud: this method should work with rebinding. 
+ # it only rebinds an agent of the same envGrp so no changes on BIGIPs will be made def get_lbaas_agent_hosting_loadbalancer(self, plugin, context, loadbalancer_id, env=None): """Return the agent that is hosting the loadbalancer.""" LOG.debug('Getting agent for loadbalancer %s with env %s' % (loadbalancer_id, env)) - lbaas_agent = None with context.session.begin(subtransactions=True): + lbaas_agent = None # returns {'agent': agent_dict} lbaas_agent = plugin.db.get_agent_hosting_loadbalancer( context, loadbalancer_id ) # if the agent bound to this loadbalancer is alive, return it - if lbaas_agent is not None: - - if not lbaas_agent['agent']['alive'] and env is not None: - # The agent bound to this loadbalancer is not live; - # find another agent in the same environment - # which environment group is the agent in + if lbaas_agent: + if (not lbaas_agent['agent']['alive'] or + not lbaas_agent['agent']['admin_state_up']) and \ + env is not None: + # The agent bound to this loadbalancer is not live + # or is not active. Find another agent in the same + # environment and environment group if possible ac = self.deserialize_agent_configurations( lbaas_agent['agent']['configurations'] ) @@ -61,25 +64,82 @@ def get_lbaas_agent_hosting_loadbalancer(self, plugin, context, gn = ac['environment_group_number'] else: gn = 1 - - # find all active agents matching the environment - # and group number. 
- env_agents = self.get_agents_in_env( - context, - plugin, - env, - group=gn, - active=True - ) - LOG.debug("Primary lbaas agent is dead, env_agents: %s", - env_agents) - if env_agents: - # return the first active agent in the - # group to process this task - lbaas_agent = {'agent': env_agents[0]} + LOG.debug("ccloud: scrubbing - Loadbalancer_id %s from EnvGroup %s will be rebound to agent %s" % + (loadbalancer_id, gn, lbaas_agent['agent'])) + reassigned_agent = self.rebind_loadbalancers( + context, plugin, env, gn, lbaas_agent['agent']) + if reassigned_agent: + lbaas_agent = {'agent': reassigned_agent} return lbaas_agent + def rebind_loadbalancers( + self, context, plugin, env, group, current_agent): + # wtn: check if this works + env_agents = self.get_agents_in_env(context, plugin, env, + group=group, active=True) + if env_agents: + reassigned_agent = env_agents[0] + bindings = \ + context.session.query( + agent_scheduler.LoadbalancerAgentBinding).filter_by( + agent_id=current_agent['id']).all() + + # wtn: disabled until tested + # for binding in bindings: + # binding.agent_id = reassigned_agent['id'] + # context.session.add(binding) + LOG.debug("ccloud: TESTRUN scrubbing: %s Loadbalancers from EnvGroup %s bound to agent %s now bound to %s" % + (len(bindings), + group, + current_agent['id'], + reassigned_agent['id'])) + + return reassigned_agent + else: + LOG.debug("ccloud: scrubbing - No active agent found for envGrp %s. 
Rebinding skipped for agent %s" % + (group, current_agent['id'])) + return None + + def get_dead_agents_in_env( + self, context, plugin, env, group=None): + return_agents = [] + all_agents = self.get_agents_in_env(context, + plugin, + env, + group, + active=None) + + for agent in all_agents: + if not plugin.db.is_eligible_agent(active=True, agent=agent): + agent_dead = plugin.db.is_agent_down( + agent['heartbeat_timestamp']) + if not agent['admin_state_up'] or agent_dead: + return_agents.append(agent) + return return_agents + + def scrub_dead_agents(self, context, plugin, env, group=None): + dead_agents = self.get_dead_agents_in_env(context, plugin, env, group) + for agent in dead_agents: + ag = None + if group is None: + LOG.info("ccloud: scrubbing agents across EnvGroups. Dead agent: {}".format(agent)) + ac = self.deserialize_agent_configurations( + agent['configurations'] + ) + if 'environment_group_number' in ac: + ag = ac['environment_group_number'] + LOG.info("ccloud: torsten found group for dead agent. EnvGroup: {}".format(ag)) + else: + LOG.info("ccloud: scrubbing agents for ONE EnvGroup number %s. Dead agent: %s" % (ag, agent)) + ag = group + if ag: + LOG.debug("ccloud: scrubbing - Dead agent found in EnvGroup %s . Agent: %s: " % (ag, agent)) + self.rebind_loadbalancers(context, plugin, env, ag, agent) + else: + LOG.debug("ccloud: scrubbing - Dead agent found without EnvGroup. Skipping scrubbing") + + def get_agents_in_env( self, context, plugin, env, group=None, active=None): """Get an active agents in the specified environment.""" @@ -150,7 +210,7 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): # There is no existing loadbalancer agent binding. # Find all active agent candidates in this env. - # We use environment_prefix to find F5® agents + # We use environment_prefix to find F5 agents # rather then map to the agent binary name. 
candidates = self.get_agents_in_env( context, @@ -159,6 +219,21 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): active=True ) + # ccloud: If no active agent can be found, get all non active ones to assign at least an agent to the LB. + # Otherwise the LB will be left in nirvana because it's already defined in neutron LB but without + # any assigned agent, so LB will never be scheduled to an agent and created on F5 + + if not candidates: + candidates = self.get_agents_in_env( + context, + plugin, + env + ) + if not candidates: + LOG.error('ccloud: No f5 lbaas agents are active. No agent could be found for env %s' % env) + else: + LOG.error('ccloud: No f5 lbaas agents are active. Using a non active one for env %s' % env) + LOG.debug("candidate agents: %s", candidates) if len(candidates) == 0: LOG.error('No f5 lbaas agents are active for env %s' % env) @@ -200,11 +275,16 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): chosen_agent = candidate break + # ccloud: schedule to an overloaded agent because we can't split partitions across F5 devicess if chosen_agent: # Does the agent which had tenants assigned # to it still have capacity? if group_capacity >= 1.0: - chosen_agent = None + LOG.error('ccloud: scheduling loadbalancer %s to an overloaded agent with capcity %s because ' + 'tenant is already assigned to this agent!' + % (loadbalancer_id, group_capacity)) + break + #chosen_agent = None else: break @@ -235,6 +315,7 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): LOG.warn('No capacity left on any agents in env: %s' % env) LOG.warn('Group capacity in environment %s were %s.' 
% (env, capacity_by_group)) + LOG.error('ccloud: Aborting loadbalancer scheduling') raise lbaas_agentschedulerv2.NoEligibleLbaasAgent( loadbalancer_id=loadbalancer.id) @@ -247,4 +328,5 @@ def schedule(self, plugin, context, loadbalancer_id, env=None): 'lbaas agent %(agent_id)s'), {'loadbalancer_id': loadbalancer.id, 'agent_id': chosen_agent['id']}) + return chosen_agent diff --git a/f5lbaasdriver/v2/bigip/constants_v2.py b/f5lbaasdriver/v2/bigip/constants_v2.py index aaa57d99a..6bdf4a916 100644 --- a/f5lbaasdriver/v2/bigip/constants_v2.py +++ b/f5lbaasdriver/v2/bigip/constants_v2.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""Constants for F5® LBaaSv2 Driver.""" +u"""Constants for F5 LBaaSv2 Driver.""" # coding=utf-8 # Copyright 2016 F5 Networks Inc. # diff --git a/f5lbaasdriver/v2/bigip/disconnected_service.py b/f5lbaasdriver/v2/bigip/disconnected_service.py index be8c58017..269576e10 100644 --- a/f5lbaasdriver/v2/bigip/disconnected_service.py +++ b/f5lbaasdriver/v2/bigip/disconnected_service.py @@ -22,7 +22,7 @@ class DisconnectedService(object): def __init__(self): - self.supported_encapsulations = ['vlan'] + self.supported_encapsulations = [u'vlan'] # Retain this method for future use in case a particular ML2 implementation # decouples network_id from physical_network name. 
The implementation in @@ -41,7 +41,6 @@ def get_network_segments(self, session): def get_network_segment(self, context, agent_configuration, network): data = None - network_segment_physical_network = \ agent_configuration.get('network_segment_physical_network', None) @@ -52,31 +51,22 @@ def get_network_segment(self, context, agent_configuration, network): # look up segment details in the ml2_network_segments table segments = db.get_network_segments(context.session, network['id'], filter_dynamic=None) - for segment in segments: - if ((network_segment_physical_network == - segment['physical_network']) and - (segment['network_type'].lower() in - supported_encapsulations)): + if (network_segment_physical_network == segment['physical_network'] and + segment['network_type'].lower() in supported_encapsulations): data = segment + LOG.debug("ccloud: Got network segment: %s" % segment) break - elif (network['provider:network_type'] == 'opflex' and + elif ('provider:network_type' in network and network['provider:network_type'] == 'opflex' and segment['network_type'] == 'vlan'): data = segment LOG.debug("Got OPFLEX segment: %s" % segment) break if not data: - LOG.debug('Using default segment for network %s' % - (network['id'])) - - # neutron is expected to provide this data immediately - data = { - 'segmentation_id': network['provider:segmentation_id'] - } - if 'provider:network_type' in network: - data['network_type'] = network['provider:network_type'] - if 'provider:physical_network' in network: - data['physical_network'] = network['provider:physical_network'] + data = {} + data['provider:network_type'] = None + data['provider:segmentation_id'] = None + data['provider:physical_network'] = None return data diff --git a/f5lbaasdriver/v2/bigip/driver_v2.py b/f5lbaasdriver/v2/bigip/driver_v2.py index c8e4b28d9..589797443 100644 --- a/f5lbaasdriver/v2/bigip/driver_v2.py +++ b/f5lbaasdriver/v2/bigip/driver_v2.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 Driver 
Implementation.""" +u"""F5 Networks LBaaSv2 Driver Implementation.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,6 +38,15 @@ from f5lbaasdriver.v2.bigip import neutron_client from f5lbaasdriver.v2.bigip import plugin_rpc +import urllib3 +import requests + +from requests.packages.urllib3.exceptions import InsecureRequestWarning + + +requests.packages.urllib3.disable_warnings(InsecureRequestWarning) +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + LOG = logging.getLogger(__name__) OPTS = [ @@ -71,10 +80,12 @@ def __str__(self): class F5DriverV2(object): - u"""F5 Networks® LBaaSv2 Driver.""" + u"""F5 Networks LBaaSv2 Driver.""" def __init__(self, plugin=None, env=None): """Driver initialization.""" + LOG.debug('F5 LBAAS driver initializing') + if not plugin: LOG.error('Required LBaaS Driver and Core Driver Missing') sys.exit(1) @@ -209,6 +220,8 @@ def create(self, context, loadbalancer): models.LoadBalancer, loadbalancer.id, plugin_constants.ERROR) + # ccloud: exit with exception in case scheduling failed + raise except Exception as e: LOG.error("Exception: loadbalancer create: %s" % e.message) raise e @@ -383,10 +396,26 @@ def update(self, context, old_pool, pool): def delete(self, context, pool): """Delete a pool.""" + if self._attached_to_policy(context,pool): + self.driver.plugin.db.update_status(context, models.PoolV2, pool.id, + plugin_constants.ACTIVE) + raise Exception("Cannot delete pool, attached to policy") + + + self.loadbalancer = pool.loadbalancer self.api_dict = self._get_pool_dict(pool) self._call_rpc(context, pool, 'delete_pool') + def _attached_to_policy(self, context, pool): + query = context.session.query(models.L7Policy) + query = query.filter((models.L7Policy).redirect_pool_id==pool.id) + + if query.count() > 0: + return True + + return False + class MemberManager(EntityManager): """MemberManager class handles Neutron LBaaS pool member CRUD.""" diff --git 
a/f5lbaasdriver/v2/bigip/exceptions.py b/f5lbaasdriver/v2/bigip/exceptions.py index ad574f25e..04f2d26a3 100644 --- a/f5lbaasdriver/v2/bigip/exceptions.py +++ b/f5lbaasdriver/v2/bigip/exceptions.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""F5 Networks® LBaaSv2 Exceptions.""" +u"""F5 Networks LBaaSv2 Exceptions.""" # Copyright 2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/neutron_client.py b/f5lbaasdriver/v2/bigip/neutron_client.py index c64bc5918..23ba172d3 100644 --- a/f5lbaasdriver/v2/bigip/neutron_client.py +++ b/f5lbaasdriver/v2/bigip/neutron_client.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""Service Module for F5® LBaaSv2.""" +u"""Service Module for F5 LBaaSv2.""" # Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/f5lbaasdriver/v2/bigip/plugin_rpc.py b/f5lbaasdriver/v2/bigip/plugin_rpc.py index 52c64e8ab..6c031f842 100644 --- a/f5lbaasdriver/v2/bigip/plugin_rpc.py +++ b/f5lbaasdriver/v2/bigip/plugin_rpc.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""RPC Callbacks for F5® LBaaSv2 Plugins.""" +u"""RPC Callbacks for F5 LBaaSv2 Plugins.""" # Copyright 2016 F5 Networks Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,6 +26,11 @@ from neutron.extensions import portbindings from neutron.plugins.common import constants as plugin_constants from neutron_lbaas.db.loadbalancer import models +from neutron_lbaas import agent_scheduler +from neutron_lbaas.services.loadbalancer import data_models +from neutron_lbaas.extensions import loadbalancerv2 +from neutron_lbaas.services.loadbalancer import constants as nlb_constant + from f5lbaasdriver.v2.bigip import constants_v2 as constants @@ -38,6 +43,7 @@ class LBaaSv2PluginCallbacksRPC(object): def __init__(self, driver=None): """LBaaSv2PluginCallbacksRPC constructor.""" self.driver = driver + self.cluster_wide_agents = {} def create_rpc_listener(self): topic = constants.TOPIC_PROCESS_ON_HOST_V2 @@ -52,6 +58,45 @@ def create_rpc_listener(self): fanout=False) self.conn.consume_in_threads() + # change the admin_state_up of the an agent + @log_helpers.log_method_call + def set_agent_admin_state(self, context, admin_state_up, host=None): + """Set the admin_up_state of an agent.""" + if not host: + LOG.error('tried to set agent admin_state_up without host') + return False + with context.session.begin(subtransactions=True): + query = context.session.query(agents_db.Agent) + query = query.filter( + agents_db.Agent.agent_type == + nlb_constant.AGENT_TYPE_LOADBALANCERV2, + agents_db.Agent.host == host) + try: + agent = query.one() + if not agent.admin_state_up == admin_state_up: + agent.admin_state_up = admin_state_up + context.session.add(agent) + except Exception as exc: + LOG.error('query for agent produced: %s' % str(exc)) + return False + return True + + # change the admin_state_up of the an agent + @log_helpers.log_method_call + def scrub_dead_agents(self, context, env, group, host=None): + """Remove all non-alive or admin down agents.""" + LOG.debug('scrubbing dead agent bindings for group %s' % group) + with context.session.begin(subtransactions=True): + try: + # don't 
set group because otherwise only agent of same group could initiate scrubbing. + # scrub method get's group out of a dead agent to find another agent inside same group + self.driver.scheduler.scrub_dead_agents( + context, self.driver.plugin, env, group=None) + except Exception as exc: + LOG.error('scub dead agents exception: %s' % str(exc)) + return False + return True + # get a list of loadbalancer ids which are active on this agent host @log_helpers.log_method_call def get_active_loadbalancers_for_agent(self, context, host=None): @@ -67,7 +112,7 @@ def get_active_loadbalancers_for_agent(self, context, host=None): return [] elif len(agents) > 1: LOG.warning('Multiple lbaas agents found on host %s' % host) - lbs = self.driver.plugin.db.list_loadbalancers_on_lbaas_agent( + lbs = self. self._list_loadbalancers_on_lbaas_agent( context, agents[0].id ) @@ -85,12 +130,22 @@ def get_active_loadbalancers_for_agent(self, context, host=None): active_lb_ids.add(lb.id) return active_lb_ids + # ccloud: get a list of loadbalancer without binding to an agent @log_helpers.log_method_call - def get_service_by_loadbalancer_id( - self, + def get_loadbalancers_without_agent_binding(self, context, env, group): + + agents = self.driver.scheduler.get_agents_in_env( context, - loadbalancer_id=None, - host=None): + self.driver.plugin, + env, + group) + + return self._list_loadbalancers_without_lbaas_agent_binding(context, agents) + + + @log_helpers.log_method_call + def get_service_by_loadbalancer_id( + self, context, loadbalancer_id=None, host=None): """Get the complete service definition by loadbalancer_id.""" service = {} with context.session.begin(subtransactions=True): @@ -109,12 +164,10 @@ def get_service_by_loadbalancer_id( # the preceeding get call returns a nested dict, unwind # one level if necessary agent = (agent['agent'] if 'agent' in agent else agent) - service = self.driver.service_builder.build(context, - lb, - agent) + service = self.driver.service_builder.build( + 
context, lb, agent) except Exception as e: - LOG.error("Exception: get_service_by_loadbalancer_id: %s", - e.message) + LOG.warning("ccloud Error in get_service_by_loadbalancer_id. ID = %s. Message %s " % (loadbalancer_id, e)) return service @@ -125,14 +178,12 @@ def get_all_loadbalancers(self, context, env, group=None, host=None): plugin = self.driver.plugin with context.session.begin(subtransactions=True): + self.driver.scheduler.scrub_dead_agents( + context, plugin, env, group) agents = self.driver.scheduler.get_agents_in_env( - context, - self.driver.plugin, - env, - group) - + context, plugin, env, group, active=None) for agent in agents: - agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -151,21 +202,17 @@ def get_all_loadbalancers(self, context, env, group=None, host=None): @log_helpers.log_method_call def get_active_loadbalancers(self, context, env, group=None, host=None): - """Get all loadbalancers for this group in this env.""" + """Get active loadbalancers for this group in this env.""" loadbalancers = [] plugin = self.driver.plugin with context.session.begin(subtransactions=True): + self.driver.scheduler.scrub_dead_agents( + context, plugin, env, group) agents = self.driver.scheduler.get_agents_in_env( - context, - self.driver.plugin, - env, - group=group, - active=True - ) - + context, plugin, env, group, active=None) for agent in agents: - agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) @@ -187,10 +234,81 @@ def get_active_loadbalancers(self, context, env, group=None, host=None): @log_helpers.log_method_call def get_pending_loadbalancers(self, context, env, group=None, host=None): + """Get pending loadbalancers for this group in this env.""" + loadbalancers = [] + plugin = self.driver.plugin + + with context.session.begin(subtransactions=True): + 
self.driver.scheduler.scrub_dead_agents( + context, plugin, env, group) + agents = self.driver.scheduler.get_agents_in_env( + context, plugin, env, group, active=None) + for agent in agents: + agent_lbs = self._list_loadbalancers_on_lbaas_agent( + context, + agent.id + ) + for lb in agent_lbs: + if (lb.provisioning_status != plugin_constants.ACTIVE and + lb.provisioning_status != plugin_constants.ERROR): + + loadbalancers.append( + { + 'agent_host': agent['host'], + 'lb_id': lb.id, + 'tenant_id': lb.tenant_id + } + ) + + if host: + return [lb for lb in loadbalancers if lb['agent_host'] == host] + else: + return loadbalancers + + @log_helpers.log_method_call + def get_errored_loadbalancers(self, context, env, group=None, host=None): + """Get pending loadbalancers for this group in this env.""" + loadbalancers = [] + plugin = self.driver.plugin + with context.session.begin(subtransactions=True): + self.driver.scheduler.scrub_dead_agents( + context, plugin, env, group) + agents = self.driver.scheduler.get_agents_in_env( + context, plugin, env, group, active=None) + for agent in agents: + agent_lbs = self._list_loadbalancers_on_lbaas_agent( + context, + agent.id + ) + for lb in agent_lbs: + if (lb.provisioning_status == plugin_constants.ERROR): + loadbalancers.append( + { + 'agent_host': agent['host'], + 'lb_id': lb.id, + 'tenant_id': lb.tenant_id + } + ) + if host: + return [lb for lb in loadbalancers if lb['agent_host'] == host] + else: + return loadbalancers + + @log_helpers.log_method_call + def get_loadbalancers_by_network(self, context, env, network_id, group=None, host=None,): """Get all loadbalancers for this group in this env.""" loadbalancers = [] plugin = self.driver.plugin + + network = self.driver.plugin.db._core_plugin.get_network( + context, + network_id) + + subnets = network['subnets'] + + + # get subnets on network and then filter based on the vip subnet. 
with context.session.begin(subtransactions=True): agents = self.driver.scheduler.get_agents_in_env( context, @@ -199,27 +317,58 @@ def get_pending_loadbalancers(self, context, env, group=None, host=None): group) for agent in agents: - agent_lbs = plugin.db.list_loadbalancers_on_lbaas_agent( + agent_lbs = self._list_loadbalancers_on_lbaas_agent( context, agent.id ) for lb in agent_lbs: - if (lb.provisioning_status != plugin_constants.ACTIVE and - lb.provisioning_status != plugin_constants.ERROR): - + if lb.vip_subnet_id in subnets : loadbalancers.append( { 'agent_host': agent['host'], 'lb_id': lb.id, - 'tenant_id': lb.tenant_id + 'tenant_id': lb.tenant_id, + 'network_id': network_id } ) - if host: return [lb for lb in loadbalancers if lb['agent_host'] == host] else: return loadbalancers + + + def _list_loadbalancers_on_lbaas_agent(self, context, id): + + query = context.session.query(agent_scheduler.LoadbalancerAgentBinding.loadbalancer_id) + query = query.filter_by(agent_id=id) + loadbalancer_ids = [item[0] for item in query] + if loadbalancer_ids: + lbs = self.driver.plugin.db._get_resources(context, models.LoadBalancer, + filters={'id': loadbalancer_ids}) + return [lb_db for lb_db in lbs] + + return [] + + def _list_loadbalancers_without_lbaas_agent_binding(self, context, agents): + + all_lbs = self.driver.plugin.db._get_resources(context, models.LoadBalancer) + all_bindings = self.driver.plugin.db._get_resources(context, agent_scheduler.LoadbalancerAgentBinding) + agent_ids = [agent.id for agent in agents] + + bound_ids = [] + for bind in all_bindings: + if bind['loadbalancer_id']: + if bind['agent_id'] and bind['agent_id'] in agent_ids: + bound_ids.append(bind['loadbalancer_id']) + + unbound_lbs = [] + for lb in all_lbs: + if lb['id'] not in bound_ids: + unbound_lbs.append(lb) + + return unbound_lbs + @log_helpers.log_method_call def update_loadbalancer_stats(self, context, @@ -714,3 +863,156 @@ def remove_allowed_address(self, context, port_id=None, 
ip_address=None): except Exception as exc: LOG.error('could not remove allowed address pair: %s' % exc.message) + + # validate a list of loadbalancer id - assure they are not deleted + @log_helpers.log_method_call + def validate_loadbalancers_state(self, context, loadbalancers, host=None): + lb_status = {} + for lbid in loadbalancers: + with context.session.begin(subtransactions=True): + try: + lb_db = self.driver.plugin.db.get_loadbalancer(context, + lbid) + lb_status[lbid] = lb_db.provisioning_status + # ccloud: distinguish betwwen REAL not founds and any other kind of errors to avoid + # treating the object as orphan in case of general db errors, ... + except loadbalancerv2.EntityNotFound as enf: + LOG.error('Exception: get_loadbalancer: %s', + enf.message) + lb_status[lbid] = 'Unknown' + except Exception as e: + LOG.error('Exception: get_loadbalancer: %s', + e.message) + lb_status[lbid] = 'Indefinite' + return lb_status + + # validate a list of pools id - assure they are not deleted + @log_helpers.log_method_call + def validate_pools_state(self, context, pools, host=None): + pool_status = {} + for poolid in pools: + with context.session.begin(subtransactions=True): + try: + pool_db = self.driver.plugin.db.get_pool(context, poolid) + pool_status[poolid] = pool_db.provisioning_status + # ccloud: distinguish betwwen REAL not founds and any other kind of errors to avoid + # treating the object as orphan in case of general db errors, ... 
+ except loadbalancerv2.EntityNotFound as enf: + LOG.error('Exception: get_pool: %s', + enf.message) + pool_status[poolid] = 'Unknown' + except Exception as e: + LOG.error('Exception: get_pool: %s', + e.message) + pool_status[poolid] = 'Indefinite' + return pool_status + + @log_helpers.log_method_call + def get_pools_members(self, context, pools, host=None): + pools_members = dict() + for poolid in pools: + members = self.driver.plugin.db.get_pool_members( + context, + filters={'pool_id': [poolid]} + ) + pools_members[poolid] = [member.to_dict(pool=False) + for member in members] + return pools_members + + # validate a list of listeners id - assure they are not deleted + @log_helpers.log_method_call + def validate_listeners_state(self, context, listeners, host=None): + listener_status = {} + for listener_id in listeners: + with context.session.begin(subtransactions=True): + try: + listener_db = \ + self.driver.plugin.db.get_listener(context, + listener_id) + listener_status[listener_id] = \ + listener_db.provisioning_status + # ccloud: distinguish betwwen REAL not founds and any other kind of errors to avoid + # treating the object as orphan in case of general db errors, ... + except loadbalancerv2.EntityNotFound as enf: + LOG.error('Exception: get_listener: %s', + enf.message) + listener_status[listener_id] = 'Unknown' + except Exception as e: + LOG.error('Exception: get_listener: %s', + e.message) + listener_status[listener_id] = 'Indefinite' + return listener_status + + # validate a list of l7policys id - assure they are not deleted + @log_helpers.log_method_call + def validate_l7policys_state_by_listener(self, context, listeners): + """Performs a validation against l7policies with a list of listeners + + This method will attempt to check the Neutron DB for a list of + l7policies that reference the given list of listener_id's. + + This will return a dict of: + {listener_id_0: bool, + ... 
+ } + The bool will indicate that true: there are l7policies here, false: + there are none on this listener. + """ + has_l7policy = {} + try: + # NOTE: neutron_lbaas has a deprecated code filter for queries + # that appears to silence filter queries for 'listener_id' + l7policy_db = self.driver.plugin.db.get_l7policies(context) + except Exception as error: + LOG.exception("Exception: plugin.db.get_l7policies({}): " + "({})".format(listeners, error)) + return {} + LOG.debug("({}) = get_l7policies({})".format(l7policy_db, context)) + for listener_id in listeners: + # Given filter limitations, double-loop iterator results + result = False + if l7policy_db: + if isinstance(l7policy_db, list): + for l7policy in l7policy_db: + if l7policy.listener_id == listener_id: + result = True + break + else: + if l7policy_db.listener_id == listener_id: + result = True + else: + result = False + has_l7policy[listener_id] = result + LOG.debug("has_l7policy: ({})".format(has_l7policy)) + return has_l7policy + + # ccloud: Not used at the moment + # + # return a single active agent to implement cluster wide changes + # which can not efficiently mapped back to a particulare agent + @log_helpers.log_method_call + def get_clusterwide_agent(self, context, env, group, host=None): + """Get an agent to perform clusterwide tasks.""" + LOG.debug('getting agent to perform clusterwide tasks') + with context.session.begin(subtransactions=True): + if (env, group) in self.cluster_wide_agents: + known_agent = self.cluster_wide_agents[(env, group)] + if self.driver.plugin.db.is_eligible_agent(active=True, + agent=known_agent): + return known_agent + else: + del(self.cluster_wide_agents[(env, group)]) + try: + agents = \ + self.driver.scheduler.get_agents_in_env(context, + self.driver.plugin, + env, group, True) + if agents: + self.cluster_wide_agents[(env, group)] = agents[0] + return agents[0] + else: + LOG.error('no active agents available for clusterwide ', + ' tasks %s group number %s' % (env, 
group)) + except Exception as exc: + LOG.error('clusterwide agent exception: %s' % str(exc)) + return {} diff --git a/f5lbaasdriver/v2/bigip/service_builder.py b/f5lbaasdriver/v2/bigip/service_builder.py index 2efb92f6c..0f5d02c20 100644 --- a/f5lbaasdriver/v2/bigip/service_builder.py +++ b/f5lbaasdriver/v2/bigip/service_builder.py @@ -1,5 +1,5 @@ # coding=utf-8 -u"""Service Module for F5® LBaaSv2.""" +u"""Service Module for F5 LBaaSv2.""" # Copyright 2014-2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +15,7 @@ # limitations under the License. # import datetime +import time import json from oslo_log import helpers as log_helpers @@ -47,7 +48,7 @@ def __init__(self, driver): self.net_cache = {} self.subnet_cache = {} - self.last_cache_update = datetime.datetime.fromtimestamp(0) + self.last_cache_update = datetime.datetime.now() #fromtimestamp(0) self.plugin = self.driver.plugin self.disconnected_service = DisconnectedService() self.q_client = q_client.F5NetworksNeutronClient(self.plugin) @@ -55,10 +56,11 @@ def __init__(self, driver): def build(self, context, loadbalancer, agent): """Get full service definition from loadbalancer ID.""" # Invalidate cache if it is too old - if ((datetime.datetime.now() - self.last_cache_update).seconds > - constants_v2.NET_CACHE_SECONDS): + if ((datetime.datetime.now() - self.last_cache_update).seconds > constants_v2.NET_CACHE_SECONDS): self.net_cache = {} self.subnet_cache = {} + self.last_cache_update = datetime.datetime.now() + LOG.debug('ccloud: Network cache regulary cleared after %s seconds' % constants_v2.NET_CACHE_SECONDS) service = {} with context.session.begin(subtransactions=True): @@ -85,68 +87,68 @@ def build(self, context, loadbalancer, agent): vip_port = service['loadbalancer']['vip_port'] network_id = vip_port['network_id'] service['loadbalancer']['network_id'] = network_id - network = self._get_network_cached( - context, - network_id - ) # Override the segmentation 
ID and network type for this network # if we are running in disconnected service mode agent_config = self.deserialize_agent_configurations( agent['configurations']) - segment_data = self.disconnected_service.get_network_segment( - context, agent_config, network) - if segment_data: - network['provider:segmentation_id'] = \ - segment_data.get('segmentation_id', None) - network['provider:network_type'] = \ - segment_data.get('network_type', None) - network['provider:physical_network'] = \ - segment_data.get('physical_network', None) - network_map[network_id] = network - - # Check if the tenant can create a loadbalancer on the network. - if (agent and not self._valid_tenant_ids(network, - loadbalancer.tenant_id, - agent)): - LOG.error("Creating a loadbalancer %s for tenant %s on a" - " non-shared network %s owned by %s." % ( - loadbalancer.id, - loadbalancer.tenant_id, - network['id'], - network['tenant_id'])) - - # Get the network VTEPs if the network provider type is - # either gre or vxlan. - if 'provider:network_type' in network: - net_type = network['provider:network_type'] - if net_type == 'vxlan' or net_type == 'gre': - self._populate_loadbalancer_network_vteps( - context, - service['loadbalancer'], - net_type - ) - - # Get listeners and pools. - service['listeners'] = self._get_listeners(context, loadbalancer) - - service['pools'], service['healthmonitors'] = \ - self._get_pools_and_healthmonitors(context, loadbalancer) - service['members'] = self._get_members( - context, service['pools'], subnet_map, network_map) - - service['subnets'] = subnet_map - service['networks'] = network_map - - service['l7policies'] = self._get_l7policies( - context, service['listeners']) - service['l7policy_rules'] = self._get_l7policy_rules( - context, service['l7policies']) + try: + network = self._get_network_cached( + context, + network_id, + agent_config + ) - return service + network_map[network_id] = network + + # Check if the tenant can create a loadbalancer on the network. 
+ if (agent and not self._valid_tenant_ids(network, + loadbalancer.tenant_id, + agent)): + LOG.error("Creating a loadbalancer %s for tenant %s on a" + " non-shared network %s owned by %s." % ( + loadbalancer.id, + loadbalancer.tenant_id, + network['id'], + network['tenant_id'])) + + # Get the network VTEPs if the network provider type is + # either gre or vxlan. + if 'provider:network_type' in network: + net_type = network['provider:network_type'] + if net_type == 'vxlan' or net_type == 'gre': + self._populate_loadbalancer_network_vteps( + context, + service['loadbalancer'], + net_type + ) + + # Get listeners and pools. + service['listeners'] = self._get_listeners(context, loadbalancer) + + service['pools'], service['healthmonitors'] = \ + self._get_pools_and_healthmonitors(context, loadbalancer) + + service['members'] = self._get_members( + context, service['pools'], subnet_map, network_map, agent_config) + + service['subnets'] = subnet_map + service['networks'] = network_map + + service['l7policies'] = self._get_l7policies( + context, service['listeners']) + service['l7policy_rules'] = self._get_l7policy_rules( + context, service['l7policies']) + + return service + + # Return nothing in case network retrieval failed + except Exception as e: + LOG.exception("ccloud: Build service for loadbalancer failed. 
Aborting with exception ", e) + raise @log_helpers.log_method_call - def _get_extended_member(self, context, member): + def _get_extended_member(self, context, member, agent_config): """Get extended member attributes and member networking.""" member_dict = member.to_dict(pool=False) subnet_id = member.subnet_id @@ -157,7 +159,8 @@ def _get_extended_member(self, context, member): network_id = subnet['network_id'] network = self._get_network_cached( context, - network_id + network_id, + agent_config ) member_dict['network_id'] = network_id @@ -206,20 +209,73 @@ def _get_subnet_cached(self, context, subnet_id): return self.subnet_cache[subnet_id] @log_helpers.log_method_call - def _get_network_cached(self, context, network_id): + def _get_network_cached(self, context, network_id, agent_config): """Retrieve network from cache or from Neutron.""" - if network_id not in self.net_cache: - network = self.plugin.db._core_plugin.get_network( - context, - network_id - ) - if 'provider:network_type' not in network: - network['provider:network_type'] = 'undefined' - if 'provider:segmentation_id' not in network: - network['provider:segmentation_id'] = 0 - self.net_cache[network_id] = network + network = None + # read network if not cached or no segment id given + if (network_id not in self.net_cache) or (network_id in self.net_cache and not self.net_cache[network_id]['provider:segmentation_id']): + LOG.debug("ccloud: Network ID %s NOT CACHED" % (network_id)) + count = 0 + # try 3 times + while count < 3: + count += 1 + try: + if not network: + network = self.plugin.db._core_plugin.get_network( + context, + network_id) + # stop if found + if network: + break + else: + LOG.error("ccloud: Network ID %s NOT FOUND. Will try again in some seconds." % network_id) + time.sleep(3) + except Exception as e: + LOG.exception("ccloud: Exception in network retrieval for Network ID %s. Will try again in some seconds." 
% network_id) + time.sleep(3) + + # abort if network not found (not sure what to do in this case) + if not network: + LOG.error("ccloud: Network ID %s NOT FOUND. Aborting with Exception." % network_id) + raise Exception("ccloud: Network ID %s NOT FOUND. Aborting with Exception." % network_id) + + # try to get segment data for network 3 times + segment_data = None + count = 0 + while count < 3: + count += 1 + try: + segment_data = self.disconnected_service.get_network_segment( + context, agent_config, network) + # stop if found (means an id is given) + if segment_data.get('segmentation_id', None): + break + else: + LOG.warning("ccloud: Segment Data for network ID %s NOT FOUND #1. Will try again in some seconds." % network_id) + time.sleep(10) + except Exception as e: + LOG.exception("ccloud: Segment Data for network ID %s NOT FOUND #2. Will try again in some seconds." % network_id) + time.sleep(3) - return self.net_cache[network_id] + + network['provider:segmentation_id'] = \ + segment_data.get('segmentation_id', None) + network['provider:network_type'] = \ + segment_data.get('network_type', None) + network['provider:physical_network'] = \ + segment_data.get('physical_network', None) + + if segment_data.get('segmentation_id', None): + self.net_cache[network_id] = network + LOG.debug("ccloud: Network ID %s and Segment %s FOUND. Added to the cache, Cache: " % (network_id, segment_data)) + else: + LOG.error("ccloud: Segment Data for network ID %s NOT FOUND. 
Returning dummy segment %s " % (network_id, segment_data)) + + else: + network = self.net_cache[network_id] + LOG.debug("ccloud: Network ID %s found and served from cache, Cache: " % (network_id)) + + return network @log_helpers.log_method_call def _get_listener(self, context, listener_id): @@ -235,18 +291,7 @@ def _populate_member_network(self, context, member, network): member['vxlan_vteps'] = [] member['gre_vteps'] = [] - agent_config = {} - segment_data = self.disconnected_service.get_network_segment( - context, agent_config, network) - if segment_data: - network['provider:segmentation_id'] = \ - segment_data.get('segmentation_id', None) - network['provider:network_type'] = \ - segment_data.get('network_type', None) - network['provider:physical_network'] = \ - segment_data.get('physical_network', None) - - net_type = network.get('provider:network_type', "undefined") + net_type = network['provider:network_type'] if net_type == 'vxlan': if 'binding:host_id' in member['port']: host = member['port']['binding:host_id'] @@ -257,10 +302,6 @@ def _populate_member_network(self, context, member, network): host = member['port']['binding:host_id'] member['gre_vteps'] = self._get_endpoints( context, 'gre', host) - if 'provider:network_type' not in network: - network['provider:network_type'] = 'undefined' - if 'provider:segmentation_id' not in network: - network['provider:segmentation_id'] = 0 @log_helpers.log_method_call def _populate_loadbalancer_network_vteps( @@ -331,27 +372,9 @@ def deserialize_agent_configurations(self, configurations): agent_conf = {} return agent_conf - @log_helpers.log_method_call - def _is_common_network(self, network, agent): - common_external_networks = False - common_networks = {} - if agent and "configurations" in agent: - agent_configs = self.deserialize_agent_configurations( - agent['configurations']) - - if 'common_networks' in agent_configs: - common_networks = agent_configs['common_networks'] - - if 'f5_common_external_networks' in 
agent_configs: - common_external_networks = ( - agent_configs['f5_common_external_networks']) - - return (network['shared'] or - (network['id'] in common_networks) or - ('router:external' in network and - network['router:external'] and - common_external_networks)) + def _is_common_network(self, network, agent): + return True def _valid_tenant_ids(self, network, lb_tenant_id, agent): if (network['tenant_id'] == lb_tenant_id): @@ -434,7 +457,7 @@ def _get_listeners(self, context, loadbalancer): l7_policies=False ) listener_dict['l7_policies'] = \ - [{'id': l7_policy.id} for l7_policy in listener.l7_policies] + [{'id': l7_policy.id,'name':l7_policy.name,'provisioning_status':l7_policy.provisioning_status} for l7_policy in listener.l7_policies] if listener.default_pool: listener_dict['default_pool_id'] = listener.default_pool.id @@ -470,7 +493,7 @@ def _get_pools_and_healthmonitors(self, context, loadbalancer): return pools, healthmonitors @log_helpers.log_method_call - def _get_members(self, context, pools, subnet_map, network_map): + def _get_members(self, context, pools, subnet_map, network_map, agent_config): pool_members = [] if pools: members = self.plugin.db.get_pool_members( @@ -481,7 +504,7 @@ def _get_members(self, context, pools, subnet_map, network_map): for member in members: # Get extended member attributes, network, and subnet. 
member_dict, subnet, network = ( - self._get_extended_member(context, member) + self._get_extended_member(context, member, agent_config) ) subnet_map[subnet['id']] = subnet @@ -509,7 +532,7 @@ def _pool_to_dict(self, pool): pool_dict['members'] = [{'id': member.id} for member in pool.members] pool_dict['listeners'] = [{'id': listener.id} for listener in pool.listeners] - pool_dict['l7_policies'] = [{'id': l7_policy.id} + pool_dict['l7_policies'] = [{'id': l7_policy.id,'name':l7_policy.name,'provisioning_status':l7_policy.provisioning_status} for l7_policy in pool.l7_policies] if pool.session_persistence: pool_dict['session_persistence'] = (