
OpenStack RDO Kilo work notes

Hi

Here you have some notes on building an HA OpenStack RDO Kilo release step by step.
There is also info on Ceph configuration and Docker integration:



#Install rdo kilo repo:

[root@openstackbox ~]# yum -y install centos-release-openstack-kilo

#Install openvswitch:

[root@openstackbox ~]# yum -y install openvswitch.x86_64 openvswitch-test.noarch python-openvswitch.noarch
[root@openstackbox system]# systemctl enable openvswitch.service
ln -s '/usr/lib/systemd/system/openvswitch.service' '/etc/systemd/system/multi-user.target.wants/openvswitch.service'
[root@openstackbox system]# systemctl start openvswitch.service
[root@openstackbox system]# ovs-vsctl show
d48f095c-a1a8-4fd8-a1cb-165e6bbcca83
    ovs_version: "2.3.1"
[root@openstackbox system]# rm /etc/openvswitch/conf.db
rm: remove regular file ‘/etc/openvswitch/conf.db’? y
[root@openstackbox system]# systemctl start openvswitch.service
[root@openstackbox system]# ovs-vsctl show
8f268ae7-badb-43bb-b25b-24d4ae0e1559
    ovs_version: "2.3.1"

#Configure the network files:

[root@openstackbox system]# cat /etc/sysconfig/network-scripts/ifcfg-br-eth0 
DEVICE=br-eth0
BOOTPROTO=static
IPADDR=94.23.214.41
NETMASK=255.255.255.0
ONBOOT=yes
GATEWAY=94.23.214.254
#IPV6INIT=yes
#IPV6_AUTOCONF=no
#IPV6ADDR=2001:41D0:2:5929::/64
[root@openstackbox system]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=none
MTU=1546




[root@openstackbox system]# ovs-vsctl add-br br-eth0
[root@openstackbox system]# ovs-vsctl add-port br-eth0 eth0 && service network restart
Restarting network (via systemctl):                        [  OK  ]



#Install kvm libvirtd:

[root@openstackbox system]# yum install libvirt-daemon-driver-interface.x86_64 libvirt-daemon-config-network.x86_64 libvirt-daemon-config-nwfilter.x86_64 libvirt-daemon-driver-storage.x86_64 libvirt-daemon-driver-qemu.x86_64
[root@openstackbox system]# yum install qemu-kvm-tools.x86_64 qemu-kvm.x86_64 qemu-kvm-common.x86_64 virt-install


#Configure networks:
[root@openstackbox ~]# ovs-vsctl add-br br-int

[root@openstackbox ~]# cat openstack-mgmt.txt
openstack-network

[root@openstackbox ~]# virsh net-autostart openstack-network
Network openstack-network marked as autostarted

[root@openstackbox ~]# virsh net-start openstack-network
Network openstack-network started
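
The contents of openstack-mgmt.txt got clipped when these notes were pasted. As a rough sketch only, not the original file, a libvirt network bound to an OVS bridge (assuming it points at the br-int bridge created above) looks like this, and is loaded with virsh net-define before the net-autostart/net-start calls above:

<network>
  <name>openstack-network</name>
  <forward mode='bridge'/>
  <bridge name='br-int'/>
  <virtualport type='openvswitch'/>
</network>

virsh net-define openstack-mgmt.txt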

[root@openstackbox ~]# ovs-vsctl show
8f268ae7-badb-43bb-b25b-24d4ae0e1559
    Bridge br-int
        Port br-int
            Interface br-int
                type: internal
    Bridge "br-eth0"
        Port "br-eth0"
            Interface "br-eth0"
                type: internal
        Port "eth0"
            Interface "eth0"
    ovs_version: "2.3.1"


[root@openstackbox os]# virsh net-list
 Name                 State      Autostart     Persistent
----------------------------------------------------------
 default              active     yes           yes
 openstack-network    active     yes           yes

[root@openstackbox os]# virsh pool-list
 Name                 State      Autostart
-------------------------------------------
 os                   active     yes

[root@openstackbox os]# virsh pool-autostart os
Pool os marked as autostarted




Configure the master template VM: NTP, EPEL, software, etc.

Then create a template:

[root@openstackbox ~]# virt-clone --original centos7 --name template --file /vm/os/template.img
Allocating 'template.img'                                                                                                                                                       |  30 GB  00:00:24     

Clone 'template' created successfully.
[root@openstackbox os]# cp /etc/libvirt/qemu/template.xml /vm/xml/

We are going to remove the centos7 VM and create the haproxy VMs. What we do is first configure the general setup on a base disk and then create a qcow2 layer for each server:

[root@openstackbox os]# virsh dumpxml centos7 > ../xml/haproxy1.xml
[root@openstackbox os]# virsh undefine centos7
Domain centos7 has been undefined
[root@openstackbox os]# pwd
/vm/os
[root@openstackbox os]# ls
haproxy.img  template.img

We first convert our raw file to qcow2:

[root@openstackbox os]# qemu-img convert -O qcow2 haproxy.img haproxy.qcow

We then modify our VM to run from the disk image haproxy.qcow (see the sketch below).
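
The exact edit isn't captured in these notes; a minimal sketch with virsh edit, assuming a virtio disk presented as vda, is to point the domain's disk element at the qcow2 file:

virsh edit haproxy1

<disk type='file' device='disk'>
  <driver name='qemu' type='qcow2'/>
  <source file='/vm/os/haproxy.qcow'/>
  <target dev='vda' bus='virtio'/>
</disk>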


Boot the server and configure IPs, NTP, SSH keys, etc.

IPs assigned:

[root@haproxy1 ~]# ip a | grep -E '(eth|inet )'
    inet 127.0.0.1/8 scope host lo
2: eth0:  mtu 1500 qdisc pfifo_fast state UP qlen 1000 ---> external, access to internet
    link/ether 52:54:00:d9:a6:a0 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.11/24 brd 192.168.122.255 scope global eth0
3: eth1:  mtu 1500 qdisc pfifo_fast state UP qlen 1000 ---> openstack public network
    link/ether 52:54:00:b7:2e:65 brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.11/24 brd 10.10.10.255 scope global eth1
4: eth2:  mtu 1500 qdisc pfifo_fast state UP qlen 1000 ---> openstack management
    link/ether 52:54:00:38:a7:65 brd ff:ff:ff:ff:ff:ff
    inet 10.10.20.11/24 brd 10.10.20.255 scope global eth2
5: eth3:  mtu 1500 qdisc pfifo_fast state UP qlen 1000 ----> storage ceph private network
    link/ether 52:54:00:84:27:1c brd ff:ff:ff:ff:ff:ff
    inet 10.10.30.11/24 brd 10.10.30.255 scope global eth3


Once we have finished with the OS config, we start with the haproxy configuration:

[root@haproxy1 sysctl.d]# pwd
/etc/sysctl.d
[root@haproxy1 sysctl.d]# cat 88-haproxy.conf
net.ipv4.ip_nonlocal_bind=1
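
To apply the setting without rebooting (plain sysctl usage):

sysctl -p /etc/sysctl.d/88-haproxy.conf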

[root@haproxy1 sysctl.d]# yum install -y keepalived haproxy

[root@haproxy1 sysctl.d]# cat /etc/keepalived/keepalived.conf
global_defs {
  router_id haproxy1
}
vrrp_script haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}
vrrp_instance 50 {
  virtual_router_id 50
  advert_int 1
  priority 101
  state MASTER
  interface eth1
  virtual_ipaddress {
    10.10.10.10 dev eth1
  }
  track_script {
    haproxy
  }
}
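
haproxy2 gets an almost identical keepalived.conf; a sketch of what the second node would look like (BACKUP state and a lower priority, exact values are an assumption):

global_defs {
  router_id haproxy2
}
vrrp_script haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}
vrrp_instance 50 {
  virtual_router_id 50
  advert_int 1
  priority 100
  state BACKUP
  interface eth1
  virtual_ipaddress {
    10.10.10.10 dev eth1
  }
  track_script {
    haproxy
  }
}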

And also haproxy basic configuration:

[root@haproxy1 sysctl.d]# cat /etc/haproxy/haproxy.cfg 
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

listen stats 10.10.10.11:80
        mode http
        stats enable
        stats uri /stats
        stats realm HAProxy\ Statistics
        stats auth admin:administrator
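
Before starting the daemons it is worth letting haproxy validate the file (check mode, no service impact):

haproxy -c -f /etc/haproxy/haproxy.cfg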


We start the daemons:

[root@haproxy1 sysctl.d]# systemctl start haproxy 
[root@haproxy1 sysctl.d]# systemctl start keepalived
[root@haproxy1 sysctl.d]# systemctl enable keepalived
[root@haproxy1 sysctl.d]# systemctl enable haproxy

We check we have the IP on the right interface:

3: eth1:  mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:b7:2e:65 brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.11/24 brd 10.10.10.255 scope global eth1
       valid_lft forever preferred_lft forever
    inet 10.10.10.10/32 scope global eth1
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:feb7:2e65/64 scope link 
       valid_lft forever preferred_lft forever

Our first haproxy server is ready; now we need to create the second one. First we use the haproxy disk as the base image and then create 2 qcow2 disks from that base:

[root@openstackbox os]# qemu-img create -b /vm/os/haproxy.qcow -f qcow2 /vm/os/haproxy1.qcow
Formatting '/vm/os/haproxy1.qcow', fmt=qcow2 size=32212254720 backing_file='/vm/os/haproxy.qcow' encryption=off cluster_size=65536 lazy_refcounts=off 
[root@openstackbox os]# qemu-img create -b /vm/os/haproxy.qcow -f qcow2 /vm/os/haproxy2.qcow
Formatting '/vm/os/haproxy2.qcow', fmt=qcow2 size=32212254720 backing_file='/vm/os/haproxy.qcow' encryption=off cluster_size=65536 lazy_refcounts=off 

Then we modify our VM and create a new one.

[root@openstackbox os]# virsh dumpxml haproxy1 | grep qco
      

[root@openstackbox os]# virt-clone -o haproxy1 -f /vm/os/haproxy2.qcow --preserve-data -n haproxy2
Clone 'haproxy2' created successfully.

This way if we need more haproxy servers we can just create another disk from the base and clone the vm config.

We modify the VM config so each one uses its own disk:

[root@openstackbox ~]# for i in 1 2; do virsh dumpxml haproxy$i |grep os; done
  

We modify the IPs in haproxy2, adjust the haproxy/keepalived config files, and we have the basic VIP and load balancer ready for our OpenStack APIs.


Now we are going to install the controller servers, where we first get MySQL up and running in an active/active configuration with Galera.

We use the OpenStack mgmt network for replication; once we have this ready we build controller2:


[root@openstackbox os]# qemu-img create -b /vm/os/controller.qcow -f qcow2 /vm/os/controller1.qcow
Formatting '/vm/os/controller1.qcow', fmt=qcow2 size=32212254720 backing_file='/vm/os/controller.qcow' encryption=off cluster_size=65536 lazy_refcounts=off
[root@openstackbox os]# qemu-img create -b /vm/os/controller.qcow -f qcow2 /vm/os/controller2.qcow
Formatting '/vm/os/controller2.qcow', fmt=qcow2 size=32212254720 backing_file='/vm/os/controller.qcow' encryption=off cluster_size=65536 lazy_refcounts=off
[root@openstackbox os]# virsh edit controller1
Domain controller1 XML configuration edited.
[root@openstackbox os]#
[root@openstackbox os]# virt-clone -o controller1  --preserve-data -f /vm/os/controller2.qcow -n controller2

Clone 'controller2' created successfully.

[root@openstackbox ~]# cp -pr haproxy.qcow controller.qcow
[root@openstackbox ~]# virt-clone -o template --preserve-data -f /vm/os/controller.qcow -n controller1

We configure IPs etc. in controller1, then we install Galera and edit the config file:

[root@controller1 sysconfig]#yum -y install mariadb-galera-server rsync xinetd
systemctl start mariadb.service
/usr/bin/mysql_secure_installation
systemctl stop mariadb.service
[root@controller1 sysconfig]# cat /etc/my.cnf.d/galera.cnf | grep -v ^# | grep -v ^$
[mysqld]
binlog_format=ROW
skip-name-resolve=1
max_connections=2048
query_cache_size=0
query_cache_type=0
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
bind-address=0.0.0.0
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="openstack_cluster"
wsrep_cluster_address="gcomm://10.10.20.31,10.10.20.32"
wsrep_node_name=controller1
wsrep_node_address=10.10.20.31
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
wsrep_sst_auth=root:adm3-.PW
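
controller2 gets the same galera.cnf except for the node-specific values; assuming the addressing above, only these two lines differ:

wsrep_node_name=controller2
wsrep_node_address=10.10.20.32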

Configure the monitor for haproxy:
[root@controller1 ~]# cat /etc/sysconfig/clustercheck
MYSQL_USERNAME="root"
MYSQL_PASSWORD=adm3-.PW
MYSQL_HOST="localhost"
MYSQL_PORT="3306"
[root@controller1 ~]# cat /etc/xinetd.d/galera-monitor
service galera-monitor
{
  port = 9200
  disable = no
  socket_type = stream
  protocol = tcp
  wait = no
  user = root
  group = root
  groups = yes
  server = /usr/bin/clustercheck
  type = UNLISTED
  per_source = UNLIMITED
  log_on_success =
  log_on_failure = HOST
  flags = REUSE
}

Copy files to node2:

[root@controller1 ~]# scp /etc/sysconfig/clustercheck controller2:/etc/sysconfig/clustercheck
clustercheck                                                                                                                                                         100%   87     0.1KB/s   00:00    
[root@controller1 ~]# scp /etc/xinetd.d/galera-monitor controller2:/etc/xinetd.d/galera-monitor
galera-monitor        


[root@controller1 ~]# systemctl daemon-reload
[root@controller1 ~]# systemctl enable xinetd
[root@controller1 ~]# systemctl start xinetd
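
With xinetd up, the monitor can be tested by hand; clustercheck answers over HTTP on port 9200 and returns a 200 while the node is synced, which is what the haproxy check below relies on:

curl -i http://localhost:9200/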


We start the DB in cluster mode:
[root@controller1 ~]# sudo -u mysql /usr/libexec/mysqld --wsrep-cluster-address='gcomm://' &

We start on node 2:
[root@controller2 ~]# systemctl start mariadb

[root@controller1 ~]# mysql -u root -p -e "show status like 'wsrep%'" | grep wsrep_local_state_comment
Enter password: 
wsrep_local_state_comment	Synced

Now that we have the cluster running we kill the mysql process and start mariadb with systemctl on node1:

[root@controller1 ~]# systemctl start mariadb
[root@controller1 ~]# systemctl enable mariadb
[root@controller2 ~]# systemctl enable mariadb

With this we have a 2-node Galera MySQL cluster running, but that is not of much help because we need quorum to keep the database running, so we are going to add an arbitrator node (we don't have memory for another controller node at the moment). The arbitrator node is going to be the ceph1 server:

[root@ceph1 ~]# yum install galera-25.3.5-7.el7.x86_64

Edit the config file:

[root@ceph1 system]# cat /etc/sysconfig/garb 
# Copyright (C) 2012 Codership Oy
# This config file is to be sourced by garb service script.

# A space-separated list of node addresses (address[:port]) in the cluster
GALERA_NODES="10.10.20.31:4567 10.10.20.32:4567 10.10.20.21:4567"

# Galera cluster name, should be the same as on the rest of the nodes.
GALERA_GROUP="openstack_cluster"

# Optional Galera internal options string (e.g. SSL settings)
# see http://www.codership.com/wiki/doku.php?id=galera_parameters
# GALERA_OPTIONS=""

# Log file for garbd. Optional, by default logs to syslog
# LOG_FILE=""

Start the garbd daemon:

[root@ceph1 system]# systemctl start garbd 
[root@ceph1 system]# systemctl status garbd 
garbd.service - Galera Arbitrator Daemon
   Loaded: loaded (/usr/lib/systemd/system/garbd.service; disabled)
   Active: active (running) since Mon 2015-11-23 14:33:51 CET; 3s ago
     Docs: http://www.codership.com/wiki/doku.php?id=galera_arbitrator
 Main PID: 18323 (garbd)
   CGroup: /system.slice/garbd.service
           └─18323 /usr/sbin/garbd -a gcomm://10.10.20.31:4567 -g openstack_cluster

Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.773  INFO: Shifting OPEN -> PRIMARY (TO: 5218156)
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.773  INFO: Sending state transfer request: 'trivial', size: 7
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.773  INFO: Member 0.0 (garb) requested state transfer from '*any*'. Selected 1.0 (controller1)(SYNCED) as donor.
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.773  INFO: Shifting PRIMARY -> JOINER (TO: 5218156)
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.774  INFO: 0.0 (garb): State transfer from 1.0 (controller1) complete.
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.774  INFO: Shifting JOINER -> JOINED (TO: 5218156)
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.774  INFO: 1.0 (controller1): State transfer to 0.0 (garb) complete.
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.774  INFO: Member 0.0 (garb) synced with group.
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.774  INFO: Shifting JOINED -> SYNCED (TO: 5218156)
Nov 23 14:33:52 ceph1 garbd-wrapper[18323]: 2015-11-23 14:33:52.774  INFO: Member 1.0 (controller1) synced with group.
[root@ceph1 system]# systemctl enable garbd 
ln -s '/usr/lib/systemd/system/garbd.service' '/etc/systemd/system/multi-user.target.wants/garbd.service'

If we check the database now we can see we have a 3 node cluster:

MariaDB [(none)]> SHOW STATUS LIKE 'wsrep%';
+------------------------------+--------------------------------------+
| Variable_name                | Value                                |
+------------------------------+--------------------------------------+
| wsrep_cluster_size           | 3                                    |
+------------------------------+--------------------------------------+
48 rows in set (0.00 sec)

MariaDB [(none)]> 

So now if we lose one of our controller nodes, the arbitrator node will maintain quorum so we can keep working with our other controller node.


Now we have to configure haproxy to use our VIP for our MySQL cluster:

listen galera_cluster
  bind 10.10.10.10:3306
  balance  source
  mode tcp
  option  httpchk
  server controller1 10.10.10.31:3306 check port 9200 inter 2000 rise 2 fall 5
  server controller2 10.10.10.32:3306 backup check port 9200 inter 2000 rise 2 fall 5

Port 9200 is the HTTP check we configured in xinetd.


Now we are going to check with a connection:

[root@openstackbox ~]# mysql -h 10.10.10.10 -u root -p
MariaDB [(none)]> show variables where Variable_name like '%host%';
+---------------+-------------+
| Variable_name | Value       |
+---------------+-------------+
| hostname      | controller2 |
| report_host   |             |
+---------------+-------------+
2 rows in set (0.00 sec)

[root@openstackbox ~]# mysql -h 10.10.10.10 -u root -p
MariaDB [(none)]> show variables where Variable_name like '%host%';
+---------------+-------------+
| Variable_name | Value       |
+---------------+-------------+
| hostname      | controller1 |
| report_host   |             |
+---------------+-------------+
2 rows in set (0.01 sec)



Now we are going to install RabbitMQ and configure an HA cluster:

[root@controller1 ~]# yum -y install rabbitmq-server.noarch
[root@controller1 ~]# systemctl start rabbitmq-server
[root@controller1 ~]# systemctl enable rabbitmq-server
ln -s '/usr/lib/systemd/system/rabbitmq-server.service' '/etc/systemd/system/multi-user.target.wants/rabbitmq-server.service'
[root@controller1 ~]# systemctl stop rabbitmq-server
[root@controller1 ~]# scp /var/lib/rabbitmq/.erlang.cookie root@controller2:/var/lib/rabbitmq/.erlang.cookie
.erlang.cookie                                                                                                                                                       100%   20     0.0KB/s   00:00    
[root@controller1 ~]# chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
[root@controller1 ~]# chmod 400 /var/lib/rabbitmq/.erlang.cookie
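
The copied cookie also needs the right owner and permissions on controller2, and rabbitmq-server has to be started there before it can join; a sketch of the controller2 side (assuming the package is already installed there):

chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie
chmod 400 /var/lib/rabbitmq/.erlang.cookie
systemctl start rabbitmq-server
systemctl enable rabbitmq-server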

We add controller2 to the rabbit@controller1 cluster:

[root@controller2 ~]# rabbitmqctl stop_app
Stopping node rabbit@controller2 ...
...done.
[root@controller2 ~]# rabbitmqctl join_cluster rabbit@controller1
Clustering node rabbit@controller2 with rabbit@controller1 ...
...done.
[root@controller2 ~]# rabbitmqctl start_app
Starting node rabbit@controller2 ...
...done.
[root@controller2 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@controller2 ...
[{nodes,[{disc,[rabbit@controller1,rabbit@controller2]}]},
 {running_nodes,[rabbit@controller1,rabbit@controller2]},
 {cluster_name,<<"rabbit@controller1">>},
 {partitions,[]}]
...done.

To ensure that all queues, except those with auto-generated names, are mirrored across all running nodes it is necessary to set the policy key ha-mode to all. Run the following command on one of the nodes:

[root@controller1 ~]# rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}'
Setting policy "ha-all" for pattern "^(?!amq\\.).*" to "{\"ha-mode\": \"all\"}" with priority "0" ...
...done.
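
The policy can be checked on either node:

rabbitmqctl list_policies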


OK, so here we start with OpenStack; first we have to get Keystone running.


We create the DB:

[root@controller1 ~]# mysql -h 10.10.10.10 -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 1907
Server version: 5.5.40-MariaDB-wsrep MariaDB Server, wsrep_25.11.r4026

Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> create database keystone;
Query OK, 1 row affected (0.06 sec)

MariaDB [(none)]> grant all on keystone.* to keystone@'%' identified by 'Amena2006';
Query OK, 0 rows affected (0.05 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.01 sec)

MariaDB [(none)]> exit ;
Bye

[root@controller1 ~]# keystone-manage db_sync

Modify the config file and copy it to the other node:


[root@controller1 ~]# cat /etc/keystone/keystone.conf | grep -v ^# | grep -v ^$
[DEFAULT]
admin_token = Amena2006
[assignment]
[auth]
[cache]
[catalog]
[credential]
[database]
connection = mysql://keystone:Amena2006@10.10.10.10/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[eventlet_server_ssl]
[federation]
[fernet_tokens]
[identity]
[identity_mapping]
[kvs]
[ldap]
[matchmaker_redis]
[matchmaker_ring]
[memcache]
[oauth1]
[os_inherit]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true
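
The copy to the second node isn't captured above; a sketch of that step (same path on controller2):

scp /etc/keystone/keystone.conf controller2:/etc/keystone/keystone.conf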

[root@controller1 ~]# systemctl restart openstack-keystone
[root@controller1 ~]# systemctl status openstack-keystone
openstack-keystone.service - OpenStack Identity Service (code-named Keystone)
   Loaded: loaded (/usr/lib/systemd/system/openstack-keystone.service; disabled)
   Active: active (running) since Mon 2015-10-05 16:56:35 CEST; 5s ago
 Main PID: 18581 (keystone-all)
   CGroup: /system.slice/openstack-keystone.service
           ├─18581 /usr/bin/python /usr/bin/keystone-all
           ├─18595 /usr/bin/python /usr/bin/keystone-all
           ├─18596 /usr/bin/python /usr/bin/keystone-all
           ├─18597 /usr/bin/python /usr/bin/keystone-all
           └─18598 /usr/bin/python /usr/bin/keystone-all

Oct 05 16:56:35 controller1 systemd[1]: Started OpenStack Identity Service (code-named Keystone).
[root@controller1 ~]# systemctl enable openstack-keystone
ln -s '/usr/lib/systemd/system/openstack-keystone.service' '/etc/systemd/system/multi-user.target.wants/openstack-keystone.service'
[root@controller2 ~]# systemctl restart openstack-keystone
[root@controller2 ~]# systemctl enable openstack-keystone
ln -s '/usr/lib/systemd/system/openstack-keystone.service' '/etc/systemd/system/multi-user.target.wants/openstack-keystone.service'

We add the keystone endpoints to haproxy config:

listen keystone_admin 10.10.10.10:35357
        balance source
        option tcpka
        option httpchk
        maxconn 10000
        server controller1 10.10.10.31:35357 check inter 2000 rise 2 fall 5
        server controller2 10.10.10.32:35357 check inter 2000 rise 2 fall 5

listen keystone_api 10.10.10.10:5000
        balance source
        option tcpka
        option httpchk
        maxconn 10000
        server controller1 10.10.10.31:5000 check inter 2000 rise 2 fall 5
        server controller2 10.10.10.32:5000 check inter 2000 rise 2 fall 5


After restarting haproxy on both nodes, we are going to test if Keystone is working.

[root@controller1 ~]# yum -y install python-openstackclient
[root@controller2 ~]# yum -y install python-openstackclient

First we use the admin token we put in the keystone config file:

[root@controller1 ~]#openstack --os-token Amena2006 --os-url http://127.0.0.1:35357/v2.0/  project list

Now against our VIP:

[root@controller1 ~]#openstack --os-token Amena2006 --os-url http://10.10.10.10:35357/v2.0/  project list

If this is working we can create our admin project and user, and assign the admin role to the admin user. We export the token and the URL via env variables:

[root@controller1 ~]#export OS_TOKEN=Amena2006
[root@controller1 ~]#export OS_URL=http://10.10.10.10:35357/v2.0
[root@controller1 ~]#openstack project create --description "Admin Project" admin
[root@controller1 ~]#openstack user create --password-prompt admin
[root@controller1 ~]#openstack role create admin
[root@controller1 ~]#openstack role add --project admin --user admin admin

Now I use a script (it's deprecated now, it uses the old python clients), but it works for what we need:

[root@controller2 ~]# cat keystone-populate.sh
#!/bin/bash

# Modify these variables as needed
ADMIN_PASSWORD=Amena2006
SERVICE_PASSWORD=Amena2006
DEMO_PASSWORD=demo
export OS_SERVICE_TOKEN=Amena2006
export OS_SERVICE_ENDPOINT="http://10.10.10.10:35357/v2.0"
SERVICE_TENANT_NAME=service
#
MYSQL_USER=keystone
MYSQL_DATABASE=keystone
MYSQL_HOST=localhost
MYSQL_PASSWORD=adm3-.PW
#
KEYSTONE_REGION=regionOne
KEYSTONE_HOST=10.10.10.10

# Shortcut function to get a newly generated ID
function get_field() {
    while read data; do
        if [ "$1" -lt 0 ]; then
            field="(\$(NF$1))"
        else
            field="\$$(($1 + 1))"
        fi
        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
    done
}

# Tenants
ADMIN_TENANT=$(keystone tenant-create --name=admin | grep " id " | get_field 2)
DEMO_TENANT=$(keystone tenant-create --name=demo | grep " id " | get_field 2)
SERVICE_TENANT=$(keystone tenant-create --name=$SERVICE_TENANT_NAME | grep " id " | get_field 2)

# Users
ADMIN_USER=$(keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com | grep " id " | get_field 2)
DEMO_USER=$(keystone user-create --name=demo --pass="$DEMO_PASSWORD" --email=demo@domain.com --tenant-id=$DEMO_TENANT | grep " id " | get_field 2)
NOVA_USER=$(keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com | grep " id " | get_field 2)
GLANCE_USER=$(keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com | grep " id " | get_field 2)
#QUANTUM_USER=$(keystone user-create --name=quantum --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=quantum@domain.com | grep " id " | get_field 2)
CINDER_USER=$(keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com | grep " id " | get_field 2)
SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=swift@domain.com | grep " id " | get_field 2)

# Roles
ADMIN_ROLE=$(keystone role-create --name=admin | grep " id " | get_field 2)
MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2)

# Add Roles to Users in Tenants
keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE
#keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $QUANTUM_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE
keystone user-role-add --tenant-id $DEMO_TENANT --user-id $DEMO_USER --role-id $MEMBER_ROLE

# Create services
COMPUTE_SERVICE=$(keystone service-create --name nova --type compute --description 'OpenStack Compute Service' | grep " id " | get_field 2)
VOLUME_SERVICE=$(keystone service-create --name cinder --type volume --description 'OpenStack Volume Service' | grep " id " | get_field 2)
OBJECT_SERVICE=$(keystone service-create --name swift --type object-store --description 'OpenStack Object Storage Service' | grep " id " | get_field 2)
IMAGE_SERVICE=$(keystone service-create --name glance --type image --description 'OpenStack Image Service' | grep " id " | get_field 2)
IDENTITY_SERVICE=$(keystone service-create --name keystone --type identity --description 'OpenStack Identity' | grep " id " | get_field 2)
EC2_SERVICE=$(keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service' | grep " id " | get_field 2)
#NETWORK_SERVICE=$(keystone service-create --name quantum --type network --description 'OpenStack Networking service' | grep " id " | get_field 2)

# Create endpoints
keystone endpoint-create --region $KEYSTONE_REGION --service-id $COMPUTE_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$KEYSTONE_HOST"':8774/v2/$(tenant_id)s'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $VOLUME_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$KEYSTONE_HOST"':8776/v1/$(tenant_id)s'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $OBJECT_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':8080/v1/AUTH_%(tenant_id)s' --adminurl 'http://'"$KEYSTONE_HOST"':8080' --internalurl 'http://'"$KEYSTONE_HOST"':8080/v1/AUTH_%(tenant_id)s'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $IMAGE_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':9292' --adminurl 'http://'"$KEYSTONE_HOST"':9292' --internalurl 'http://'"$KEYSTONE_HOST"':9292'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $IDENTITY_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':5000/v2.0' --adminurl 'http://'"$KEYSTONE_HOST"':35357/v2.0' --internalurl 'http://'"$KEYSTONE_HOST"':5000/v2.0'
keystone endpoint-create --region $KEYSTONE_REGION --service-id $EC2_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':8773/services/Cloud' --adminurl 'http://'"$KEYSTONE_HOST"':8773/services/Admin' --internalurl 'http://'"$KEYSTONE_HOST"':8773/services/Cloud'
#keystone endpoint-create --region $KEYSTONE_REGION --service-id $NETWORK_SERVICE --publicurl 'http://'"$KEYSTONE_HOST"':9696/' --adminurl 'http://'"$KEYSTONE_HOST"':9696/' --internalurl 'http://'"$KEYSTONE_HOST"':9696/


Once we run the script, we can try to auth via the auth URL API:

[root@controller2 ~]# cat admin_rc
export OS_USERNAME=admin
export OS_PASSWORD=Amena2006
export OS_PROJECT_NAME=admin
export OS_AUTH_URL=http://10.10.10.10:35357/
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
[root@controller2 ~]# source admin_rc
[root@controller2 ~]# openstack user list
+----------------------------------+--------+
| ID                               | Name   |
+----------------------------------+--------+
| 0760f4fa4ef14ef19c5c5194a4cf6b0b | liquid |
| 0af34f8e748344dbbcf8e8de27f236d5 | glance |
| 51ec164297524647986603e90fd50330 | demo   |
| 6996f4957ebc4dbc874d36f4177fef11 | cinder |
| 6d5479e66f1441ec9cdb685732c0b661 | admin  |
| 82ccc17f31074117841c973fd90300d5 | nova   |
| c26603ca441e45c2bf008b2f31f22fe6 | swift  |
+----------------------------------+--------+

Ok, it's working.

And now let's go for Glance:

[root@controller1 ~]# yum -y install openstack-glance
[root@controller2 ~]# yum -y install openstack-glance

[root@controller1 ~]# mysql -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 100050
Server version: 5.5.40-MariaDB-wsrep MariaDB Server, wsrep_25.11.r4026

Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE glance;
Query OK, 1 row affected (0.04 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
    ->   IDENTIFIED BY 'Amena2006'
    -> ;
Query OK, 0 rows affected (0.04 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
    ->   IDENTIFIED BY 'Amena2006' ;
Query OK, 0 rows affected (0.03 sec)

MariaDB [(none)]> exit

We configure Glance files:

[root@controller2 ~]# cat /etc/glance/glance-api.conf | grep -v ^# | grep -v ^$
[DEFAULT]
verbose=True
registry_host=10.10.10.10
auth_strategy=keystone
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true
[oslo_policy]
[database]
connection = mysql://glance:Amena2006@10.10.10.10/glance
[oslo_concurrency]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = Amena2006
[paste_deploy]
flavor=keystone
[store_type_location_strategy]
[profiler]
[task]
[taskflow_executor]
[glance_store]
default_store=file
filesystem_store_datadir=/var/lib/glance/images/

[root@controller2 ~]# cat /etc/glance/glance-registry.conf | grep -v ^# | grep -v ^$
[DEFAULT]
verbose=True
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true
[oslo_policy]
[database]
connection = mysql://glance:Amena2006@10.10.10.10/glance
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = Amena2006
[paste_deploy]
flavor=keystone
[profiler]

Provision the database:

[root@controller1 glance]# su -s /bin/sh -c "glance-manage db_sync" glance

Copy the files to the other node and start:

[root@controller1 glance]# systemctl enable openstack-glance-api.service openstack-glance-registry.service
ln -s '/usr/lib/systemd/system/openstack-glance-api.service' '/etc/systemd/system/multi-user.target.wants/openstack-glance-api.service'
ln -s '/usr/lib/systemd/system/openstack-glance-registry.service' '/etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service'
[root@controller1 glance]# systemctl start openstack-glance-api.service openstack-glance-registry.service
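
A sketch of the copy and of the controller2 side, assuming the config files are used unchanged:

scp /etc/glance/glance-api.conf /etc/glance/glance-registry.conf controller2:/etc/glance/
ssh controller2 systemctl enable openstack-glance-api.service openstack-glance-registry.service
ssh controller2 systemctl start openstack-glance-api.service openstack-glance-registry.service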

Now we have to add the API endpoints to HAProxy:

listen glance-api 10.10.10.10:9292
        balance source
        option tcpka
        option httpchk
        maxconn 10000
        server icehouse1 10.10.10.31:9292 check inter 2000 rise 2 fall 5
        server icehouse2 10.10.10.32:9292 check inter 2000 rise 2 fall 5

listen glance-registry 10.10.10.10:9191
        balance source
        option tcpka
        option httpchk
        maxconn 10000
        server icehouse1 10.10.10.31:9191 check inter 2000 rise 2 fall 5
        server icehouse2 10.10.10.32:9191 check inter 2000 rise 2 fall 5

Restart haproxy.

We also add the export OS_IMAGE_API_VERSION=2 variable to our source files:

[root@controller2 ~]# cat admin_rc 
export OS_USERNAME=admin
export OS_PASSWORD=Amena2006
export OS_PROJECT_NAME=admin
export OS_AUTH_URL=http://10.10.10.10:35357/
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_IMAGE_API_VERSION=2

Now we can test our installation:

[root@controller2 ~]# mkdir /tmp/images
[root@controller2 ~]# wget -P /tmp/images http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
--2015-10-06 23:21:15--  http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
Resolving download.cirros-cloud.net (download.cirros-cloud.net)... 69.163.241.114
Connecting to download.cirros-cloud.net (download.cirros-cloud.net)|69.163.241.114|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 13287936 (13M) [text/plain]
Saving to: ‘/tmp/images/cirros-0.3.4-x86_64-disk.img’

100%[=============================================================================================================================================================>] 13,287,936  2.89MB/s   in 4.6s   

2015-10-06 23:21:20 (2.73 MB/s) - ‘/tmp/images/cirros-0.3.4-x86_64-disk.img’ saved [13287936/13287936]

[root@controller2 ~]# glance image-create --name "cirros-0.3.4-x86_64" --file /tmp/images/cirros-0.3.4-x86_64-disk.img \
>   --disk-format qcow2 --container-format bare --visibility public --progress
[=============================>] 100%
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6     |
| container_format | bare                                 |
| created_at       | 2015-10-06T21:23:14Z                 |
| disk_format      | qcow2                                |
| id               | 6e9d91ab-339e-4cac-a667-f5344aa6e548 |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | cirros-0.3.4-x86_64                  |
| owner            | 83e7173e84e949fb9583e839a7037232     |
| protected        | False                                |
| size             | 13287936                             |
| status           | active                               |
| tags             | []                                   |
| updated_at       | 2015-10-06T21:23:15Z                 |
| virtual_size     | None                                 |
| visibility       | public                               |
+------------------+--------------------------------------+

[root@controller2 ~]# glance image-list
+--------------------------------------+---------------------+
| ID                                   | Name                |
+--------------------------------------+---------------------+
| 6e9d91ab-339e-4cac-a667-f5344aa6e548 | cirros-0.3.4-x86_64 |
+--------------------------------------+---------------------+
[root@controller2 ~]# openstack image list
+--------------------------------------+---------------------+
| ID                                   | Name                |
+--------------------------------------+---------------------+
| 6e9d91ab-339e-4cac-a667-f5344aa6e548 | cirros-0.3.4-x86_64 |
+--------------------------------------+---------------------+

Now let's install the Nova stack:

yum -y install openstack-nova-api openstack-nova-cert openstack-nova-conductor   openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler

[root@controller2 ~]# mysql -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 113880
Server version: 5.5.40-MariaDB-wsrep MariaDB Server, wsrep_25.11.r4026

Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE nova;
Query OK, 1 row affected (0.05 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
    ->   IDENTIFIED BY 'Amena2006'
    -> ;
Query OK, 0 rows affected (0.06 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
    ->   IDENTIFIED BY 'Amena2006'
    -> ;
Query OK, 0 rows affected (0.07 sec)

MariaDB [(none)]> exit
Bye

Configure the file:

[root@controller2 ~]# cat /etc/nova/nova.conf | grep -v ^# | grep -v ^$
[DEFAULT]
verbose = True
auth_strategy = keystone
rpc_backend=rabbit
my_ip=10.10.10.32
vncserver_listen=10.10.20.31
vncserver_proxyclient_address=10.10.20.31
[api_database]
connection = mysql://nova:Amena2006@10.10.10.10/nova
[barbican]
[cells]
[cinder]
[conductor]
[database]
connection = mysql://nova:Amena2006@10.10.10.10/nova
[ephemeral_storage_encryption]
[glance]
host=10.10.10.10
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = Amena2006
[libvirt]
[metrics]
[neutron]
[osapi_v3]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[workarounds]
[xenserver]
[zookeeper]
[matchmaker_redis]
[matchmaker_ring]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true

[root@controller1 glance]# su -s /bin/sh -c "nova-manage db sync" nova
[root@controller1 glance]# tail /var/log/nova/nova-manage.log 
2015-10-06 23:48:51.409 9133 INFO migrate.versioning.api [-] 275 -> 276... 
2015-10-06 23:48:51.975 9133 INFO migrate.versioning.api [-] done
2015-10-06 23:48:51.975 9133 INFO migrate.versioning.api [-] 276 -> 277... 
2015-10-06 23:48:52.225 9133 INFO migrate.versioning.api [-] done
2015-10-06 23:48:52.225 9133 INFO migrate.versioning.api [-] 277 -> 278... 
2015-10-06 23:48:53.075 9133 INFO migrate.versioning.api [-] done
2015-10-06 23:48:53.075 9133 INFO migrate.versioning.api [-] 278 -> 279... 
2015-10-06 23:48:53.576 9133 INFO migrate.versioning.api [-] done
2015-10-06 23:48:53.576 9133 INFO migrate.versioning.api [-] 279 -> 280... 
2015-10-06 23:48:54.293 9133 INFO migrate.versioning.api [-] done

[root@controller2 ~]# systemctl enable openstack-nova-api.service openstack-nova-cert.service \
>   openstack-nova-consoleauth.service openstack-nova-scheduler.service \
>   openstack-nova-conductor.service openstack-nova-novncproxy.service
ln -s '/usr/lib/systemd/system/openstack-nova-api.service' '/etc/systemd/system/multi-user.target.wants/openstack-nova-api.service'
ln -s '/usr/lib/systemd/system/openstack-nova-cert.service' '/etc/systemd/system/multi-user.target.wants/openstack-nova-cert.service'
ln -s '/usr/lib/systemd/system/openstack-nova-consoleauth.service' '/etc/systemd/system/multi-user.target.wants/openstack-nova-consoleauth.service'
ln -s '/usr/lib/systemd/system/openstack-nova-scheduler.service' '/etc/systemd/system/multi-user.target.wants/openstack-nova-scheduler.service'
ln -s '/usr/lib/systemd/system/openstack-nova-conductor.service' '/etc/systemd/system/multi-user.target.wants/openstack-nova-conductor.service'
ln -s '/usr/lib/systemd/system/openstack-nova-novncproxy.service' '/etc/systemd/system/multi-user.target.wants/openstack-nova-novncproxy.service'
[root@controller2 ~]# systemctl start openstack-nova-api.service openstack-nova-cert.service \
>   openstack-nova-consoleauth.service openstack-nova-scheduler.service \
>   openstack-nova-conductor.service openstack-nova-novncproxy.service


Now we can test if the Nova API is working:

[root@controller1 ~]# source admin_rc 
[root@controller1 ~]# nova service-list
+----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+
| Id | Binary           | Host        | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+
| 2  | nova-cert        | controller1 | internal | enabled | up    | 2015-10-06T22:00:23.000000 | -               |
| 4  | nova-conductor   | controller1 | internal | enabled | up    | 2015-10-06T22:00:23.000000 | -               |
| 6  | nova-consoleauth | controller1 | internal | enabled | up    | 2015-10-06T22:00:23.000000 | -               |
| 8  | nova-scheduler   | controller1 | internal | enabled | up    | 2015-10-06T22:00:23.000000 | -               |
| 10 | nova-conductor   | controller2 | internal | enabled | up    | 2015-10-06T22:00:19.000000 | -               |
| 12 | nova-scheduler   | controller2 | internal | enabled | up    | 2015-10-06T22:00:19.000000 | -               |
| 14 | nova-consoleauth | controller2 | internal | enabled | up    | 2015-10-06T22:00:20.000000 | -               |
| 16 | nova-cert        | controller2 | internal | enabled | up    | 2015-10-06T22:00:20.000000 | -               |
+----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+


I needed to debug problems in Nova: nova image-list didn't work. The error happened when trying to access the glance registry service, so I used curl to check:

First we get a token; we copy the token id and use it in the next commands:

 curl -d '{"auth":{"passwordCredentials":{"username": "admin", "password": "Amena2006"},"tenantName": "admin"}}' -H "Content-Type: application/json" http://10.10.10.10:5000/v2.0/tokens | python -m json.tool

for example:

[root@controller1 glance]# curl -g -i -X GET -H 'Accept-Encoding: gzip, deflate' -H 'Accept: */*' -H 'User-Agent: python-glanceclient' -H 'Connection: keep-alive' -H 'X-Auth-Token: f5c1dcfc443b4d3ca51c14ab336d46b0' -H 'Content-Type: application/octet-stream' http://10.10.10.10:9292/v1/images

How to put haproxy in debug mode:

[root@haproxy2 ~]# cat /etc/haproxy/haproxy.cfg 
global
    log         127.0.0.1 local0
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    debug

We have to configure rsyslogd:

[root@haproxy2 ~]# cat /etc/rsyslog.d/haproxy.conf 
$ModLoad imudp
$UDPServerAddress 127.0.0.1
$UDPServerRun 514
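
rsyslog has to be restarted to pick the new file up, and then the haproxy debug output can be followed in syslog (this assumes the default /var/log/messages destination):

systemctl restart rsyslog
tail -f /var/log/messages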

[root@haproxy2 ~]# systemctl restart haproxy
[root@haproxy2 ~]# 


In the end it was a haproxy httpchk problem; once fixed, it's working OK:

[root@controller2 ~]# nova image-list
+--------------------------------------+---------------------+--------+--------+
| ID                                   | Name                | Status | Server |
+--------------------------------------+---------------------+--------+--------+
| 6e9d91ab-339e-4cac-a667-f5344aa6e548 | cirros-0.3.4-x86_64 | ACTIVE |        |
+--------------------------------------+---------------------+--------+--------+
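
The exact fix isn't recorded in these notes; one plausible correction is to drop the option httpchk line from the glance listeners so haproxy falls back to a plain TCP check. A sketch, following the pattern of the other entries:

listen glance-registry 10.10.10.10:9191
        balance source
        option tcpka
        maxconn 10000
        server icehouse1 10.10.10.31:9191 check inter 2000 rise 2 fall 5
        server icehouse2 10.10.10.32:9191 check inter 2000 rise 2 fall 5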

Now we are going to go for networking: we are going to install Neutron:

[root@controller1 ~]# mysql -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 270840
Server version: 5.5.40-MariaDB-wsrep MariaDB Server, wsrep_25.11.r4026

Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE neutron;
Query OK, 1 row affected (0.40 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
    ->   IDENTIFIED BY 'Amena2006'
    -> ;
Query OK, 0 rows affected (0.21 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
    ->   IDENTIFIED BY 'Amena2006';
Query OK, 0 rows affected (0.05 sec)

MariaDB [(none)]> exit
Bye

[root@controller1 ~]# openstack user create --password-prompt neutron
User Password:
Repeat User Password:
+----------+----------------------------------+
| Field    | Value                            |
+----------+----------------------------------+
| email    | None                             |
| enabled  | True                             |
| id       | 53424b7800cc4620997eb435c17e3231 |
| name     | neutron                          |
| username | neutron                          |
+----------+----------------------------------+
[root@controller1 ~]# openstack role add --project service --user neutron admin
+-------+----------------------------------+
| Field | Value                            |
+-------+----------------------------------+
| id    | c8dbdcc276054a02bffc85c24df647cd |
| name  | admin                            |
+-------+----------------------------------+
[root@controller1 ~]# openstack service create --name neutron \
>   --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | 77644d2774304a4daccabab6de10fcc3 |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@controller1 ~]# openstack endpoint create \
>   --publicurl http://controller:9696 \
>   --adminurl http://controller:9696 \
>   --internalurl http://controller:9696 \
>   ^Cregion RegionOne \
[root@controller1 ~]# openstack endpoint create --publicurl http://10.10.10.10:9696  --adminurl http://10.10.10.10:9696 --internalurl http://10.10.10.10:9696 --region RegionOne
usage: openstack endpoint create [-h] [-f {shell,table,value}] [-c COLUMN]
                                 [--max-width ] [--prefix PREFIX]
                                 --publicurl  [--adminurl ]
                                 [--internalurl ] [--region ]
                                 
openstack endpoint create: error: too few arguments
[root@controller1 ~]# openstack endpoint create --publicurl http://10.10.10.10:9696  --adminurl http://10.10.10.10:9696 --internalurl http://10.10.10.10:9696 --region RegionOne network 
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| adminurl     | http://10.10.10.10:9696          |
| id           | d36d088cc911451da36ed7d7f53e2fdf |
| internalurl  | http://10.10.10.10:9696          |
| publicurl    | http://10.10.10.10:9696          |
| region       | RegionOne                        |
| service_id   | 77644d2774304a4daccabab6de10fcc3 |
| service_name | neutron                          |
| service_type | network                          |
+--------------+----------------------------------+
[root@controller1 ~]# yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which

[root@controller2 ~]# cat /etc/neutron/neutron.conf | grep -v ^# | grep -v ^$
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://10.10.10.10:8774/v2
auth_strategy = keystone
l3_ha = True
max_l3_agents_per_router = 3
min_l3_agents_per_router = 2
l3_ha_net_cidr = 169.254.192.0/18
rpc_backend=rabbit
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = Amena2006
[database]
connection = mysql://neutron:Amena2006@10.10.10.10/neutron
[nova]
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = Amena2006
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true

[root@controller2 ~]# cat /etc/neutron/plugins/ml2/ml2_conf.ini | grep -v ^# | grep -v ^$
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver


[root@controller1 ~]#  cat /etc/nova/nova.conf | grep -v ^# | grep -v ^$ | grep -A 6 "neutron"
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver




--
[neutron]

url = http://10.10.10.10:9696
auth_strategy = keystone
admin_auth_url = http://controller:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = Amena2006

[root@controller1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@controller1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
INFO  [alembic.migration] Context impl MySQLImpl.
INFO  [alembic.migration] Will assume non-transactional DDL.
INFO  [alembic.migration] Context impl MySQLImpl.
INFO  [alembic.migration] Will assume non-transactional DDL.
INFO  [alembic.migration] Running upgrade  -> havana, havana_initial
INFO  [alembic.migration] Running upgrade havana -> e197124d4b9, add unique constraint to members
INFO  [alembic.migration] Running upgrade e197124d4b9 -> 1fcfc149aca4, Add a unique constraint on (agent_type, host) columns to prevent a race

[root@controller1 ~]# systemctl restart openstack-nova-api.service openstack-nova-scheduler.service \
>   openstack-nova-conductor.service
[root@controller1 ~]# systemctl status openstack-nova-api.service
openstack-nova-api.service - OpenStack Nova API Server
   Loaded: loaded (/usr/lib/systemd/system/openstack-nova-api.service; enabled)
   Active: active (running) since Thu 2015-10-08 21:18:49 CEST; 5s ago
 Main PID: 1603 (nova-api)
   CGroup: /system.slice/openstack-nova-api.service
           ├─1603 /usr/bin/python /usr/bin/nova-api
           ├─1648 /usr/bin/python /usr/bin/nova-api
           ├─1649 /usr/bin/python /usr/bin/nova-api
           ├─1651 /usr/bin/python /usr/bin/nova-api
           ├─1652 /usr/bin/python /usr/bin/nova-api
           ├─1659 /usr/bin/python /usr/bin/nova-api
           └─1660 /usr/bin/python /usr/bin/nova-api

Oct 08 21:18:47 controller1 systemd[1]: Starting OpenStack Nova API Server...
Oct 08 21:18:49 controller1 sudo[1653]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf iptables-save -c
Oct 08 21:18:49 controller1 sudo[1656]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/nova-rootwrap /etc/nova/rootwrap.conf iptables-restore -c
Oct 08 21:18:49 controller1 systemd[1]: Started OpenStack Nova API Server.
[root@controller1 ~]# systemctl enable neutron-server.service
ln -s '/usr/lib/systemd/system/neutron-server.service' '/etc/systemd/system/multi-user.target.wants/neutron-server.service'
[root@controller1 ~]# systemctl start neutron-server.service

We now add the API port to the HAProxy config:

[root@haproxy1 ~]# cat /etc/haproxy/haproxy.cfg | tail -6
listen neutron-server-api 10.10.10.10:9696
        balance source
        option tcpka
        maxconn 10000
        server controller1 10.10.10.31:9696 check inter 2000 rise 2 fall 5
        server controller2 10.10.10.32:9696 check inter 2000 rise 2 fall 5


Just to check the API is working:

[root@controller1 ~]# neutron ext-list
+-----------------------+-----------------------------------------------+
| alias                 | name                                          |
+-----------------------+-----------------------------------------------+
| security-group        | security-group                                |
| l3_agent_scheduler    | L3 Agent Scheduler                            |
| net-mtu               | Network MTU                                   |
| ext-gw-mode           | Neutron L3 Configurable external gateway mode |
| binding               | Port Binding                                  |
| provider              | Provider Network                              |
| agent                 | agent                                         |
| quotas                | Quota management support                      |
| subnet_allocation     | Subnet Allocation                             |
| dhcp_agent_scheduler  | DHCP Agent Scheduler                          |
| l3-ha                 | HA Router extension                           |
| multi-provider        | Multi Provider Network                        |
| external-net          | Neutron external network                      |
| router                | Neutron L3 Router                             |
| allowed-address-pairs | Allowed Address Pairs                         |
| extraroute            | Neutron Extra Route                           |
| extra_dhcp_opt        | Neutron Extra DHCP opts                       |
| dvr                   | Distributed Virtual Router                    |
+-----------------------+-----------------------------------------------+

With this we have neutron-server running and the API working. Now we can install the Neutron agents; in our case we are going to install the agents on the controller nodes alongside the server:

[root@controller2 ~]# vi /etc/sysctl.conf 
[root@controller2 ~]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
[root@controller2 ~]# yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
Loaded plugins: fastestmirror


On our controller nodes we need to put the external network adapter eth0 under the control of OVS, so we modify the network-scripts:
[root@controller1 network-scripts]# cat ifcfg-br-ex
DEVICE=br-ex
BOOTPROTO=static
IPADDR=192.168.122.31
PREFIX=24
GATEWAY=192.168.122.1
DEFROUTE=yes
IPV6INIT=no
DNS1=8.8.8.8
DEVICETYPE=ovs
TYPE=OVSBridge
USERCTL=no
[root@controller1 network-scripts]# cat ifcfg-eth0
# Generated by dracut initrd
DEVICE=eth0
ONBOOT=yes
BOOTPROTO=none
TYPE=OVSPort
DEVICETYPE=ovs
OVS_BRIDGE=br-ex

[root@controller1 system]# systemctl disable NetworkManager.service
rm '/etc/systemd/system/multi-user.target.wants/NetworkManager.service'
rm '/etc/systemd/system/dbus-org.freedesktop.NetworkManager.service'
rm '/etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service'
[root@controller1 system]# systemctl stop NetworkManager.service ;
[root@controller1 system]# systemctl enable openvswitch.service
ln -s '/usr/lib/systemd/system/openvswitch.service' '/etc/systemd/system/multi-user.target.wants/openvswitch.service'
[root@controller1 system]# systemctl start openvswitch.service
[root@controller1 system]# ovs-vsctl show
8a98b71f-0edd-43d3-914f-35d631ed5ba7
    ovs_version: "2.3.1"
[root@controller1 system]# ovs-vsctl add-br br-ex
[root@controller1 system]# ovs-vsctl add-port br-ex eth0 ; systemctl restart network


Now we modify the configuration files:

[root@controller1 ~]# cat /etc/neutron/neutron.conf | grep -v ^# | grep -v ^$
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://10.10.10.10:8774/v2
auth_strategy = keystone
l3_ha = True
max_l3_agents_per_router = 3
min_l3_agents_per_router = 2
l3_ha_net_cidr = 169.254.192.0/18
rpc_backend=rabbit
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = Amena2006
[database]
connection = mysql://neutron:Amena2006@10.10.10.10/neutron
[nova]
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = Amena2006
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true

[root@controller1 ~]#  cat /etc/neutron/metadata_agent.ini | grep -v ^# | grep -v ^$
[DEFAULT]
debug = True
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = Amena2006
nova_metadata_ip = 10.10.10.10
metadata_proxy_shared_secret = Amena2006


[root@controller1 ~]# cat /etc/neutron/l3_agent.ini | grep -v ^# | grep -v ^$
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge =
router_delete_namespaces = True
verbose = True
[root@controller1 ~]# cat /etc/neutron/dhcp_agent.ini | grep -v ^# | grep -v ^$
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
dhcp_delete_namespaces = True
verbose = True
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
[root@controller1 ~]# cat /etc/neutron/dnsmasq-neutron.conf | grep -v ^# | grep -v ^$
dhcp-option-force=26,1454

(DHCP option 26 is the interface MTU pushed to the instances; 1454 leaves headroom for the tunnel encapsulation overhead on a 1500-byte underlay.)

[root@controller1 openvswitch]# cat ovs_neutron_plugin.ini | grep -v ^# | grep -v ^$
[ovs]
local_ip = 10.10.20.31
bridge_mappings = external:br-ex
[agent]
tunnel_types = vxlan
[securitygroup]

[root@controller1 openvswitch]# cat /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini | grep -v ^# | grep -v ^$
[ovs]
local_ip = 10.10.20.31
bridge_mappings = external:br-ex
[agent]
tunnel_types = vxlan
[securitygroup]
[root@controller1 openvswitch]# cat /etc/neutron/plugins/ml2/ml2_conf.ini | grep -v ^# | grep -v ^$
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch
[ml2_type_flat]
flat_networks = external
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
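
If the neutron database has not been populated yet, it is synced from the ML2 config before starting the agents; roughly, the standard Kilo step (shown here in case it was done off-screen):

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron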



And then enable and start services:


[root@controller1 neutron]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
ln: failed to create symbolic link ‘/etc/neutron/plugin.ini’: File exists
[root@controller1 neutron]# cp /usr/lib/systemd/system/neutron-openvswitch-agent.service \
>   /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
[root@controller1 neutron]# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' \
>   /usr/lib/systemd/system/neutron-openvswitch-agent.service
[root@controller1 neutron]# systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service \
>   neutron-dhcp-agent.service neutron-metadata-agent.service \
>   neutron-ovs-cleanup.service
ln -s '/usr/lib/systemd/system/neutron-openvswitch-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-openvswitch-agent.service'
ln -s '/usr/lib/systemd/system/neutron-l3-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-l3-agent.service'
ln -s '/usr/lib/systemd/system/neutron-dhcp-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-dhcp-agent.service'
ln -s '/usr/lib/systemd/system/neutron-metadata-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-metadata-agent.service'
ln -s '/usr/lib/systemd/system/neutron-ovs-cleanup.service' '/etc/systemd/system/multi-user.target.wants/neutron-ovs-cleanup.service'
[root@controller1 neutron]# systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service \
>   neutron-dhcp-agent.service neutron-metadata-agent.service

We can now check it's working:

[root@controller1 openvswitch]# neutron agent-list
+--------------------------------------+--------------------+-------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host        | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------+-------+----------------+---------------------------+
| 4f094c3a-bf7b-4fcf-8655-cde05425a07a | DHCP agent         | controller2 | :-)   | True           | neutron-dhcp-agent        |
| 4f48383e-1be9-41d7-bcd0-beb75357a07b | Open vSwitch agent | controller2 | :-)   | True           | neutron-openvswitch-agent |
| 51697db4-b846-428f-8657-5e706fc9bf2d | DHCP agent         | controller1 | :-)   | True           | neutron-dhcp-agent        |
| 5ae9b22c-8d61-400f-8a40-8eaf9734345d | Open vSwitch agent | controller1 | :-)   | True           | neutron-openvswitch-agent |
| a40c7721-130d-4568-9545-537d6e1301cc | L3 agent           | controller2 | :-)   | True           | neutron-l3-agent          |
| a61c551c-573f-4ef1-b554-61eff441be1c | Metadata agent     | controller2 | :-)   | True           | neutron-metadata-agent    |
| a6ac7789-d6dd-4329-85ad-729c73f99120 | Metadata agent     | controller1 | :-)   | True           | neutron-metadata-agent    |
| e220bdbd-2616-466c-929f-48e70bb7c564 | L3 agent           | controller1 | :-)   | True           | neutron-l3-agent          |
+--------------------------------------+--------------------+-------------+-------+----------------+---------------------------+

Now we are going to build a compute node with the KVM hypervisor:

[root@openstackbox os]# virt-clone -o template -n compute1 --preserve-data -f /vm/os/compute.qcow

Clone 'compute1' created successfully.

We change the IPs, hostname, etc.

[root@compute1 ~]# yum -y install openstack-nova-compute sysfsutils
Loaded plugins: fastestmirror


If we want better performance than plain qemu emulation we could use nested hardware virtualization (kvm); for now we use qemu. The /etc/nova/nova.conf on the compute node:


[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.10.20.41
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.10.20.41
novncproxy_base_url = http://10.10.10.10:6080/vnc_auto.html
verbose = True
[api_database]
[barbican]
[cells]
[cinder]
[conductor]
[database]
[ephemeral_storage_encryption]
[glance]
host = 10.10.10.10
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = Amena2006
[libvirt]
virt_type=qemu
inject_password=true
inject_key=true
[metrics]
[neutron]
[osapi_v3]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[workarounds]
[xenserver]
[zookeeper]
[matchmaker_redis]
[matchmaker_ring]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true

systemctl enable libvirtd.service openstack-nova-compute.service
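
The services presumably also get started at this point (only the enable is captured above); e.g.:

systemctl start libvirtd.service openstack-nova-compute.service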

We are now going to create compute node 2; once created, we change the config files and move on to the neutron part of the compute node configuration:

[root@openstackbox os]# qemu-img create -b /vm/os/compute.qcow -f qcow2 /vm/os/compute2.qcow
Formatting '/vm/os/compute2.qcow', fmt=qcow2 size=32212254720 backing_file='/vm/os/compute.qcow' encryption=off cluster_size=65536 lazy_refcounts=off 
[root@openstackbox os]# qemu-img create -b /vm/os/compute.qcow -f qcow2 /vm/os/compute1.qcow
Formatting '/vm/os/compute1.qcow', fmt=qcow2 size=32212254720 backing_file='/vm/os/compute.qcow' encryption=off cluster_size=65536 lazy_refcounts=off 
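
A quick way to sanity-check the backing chain of the cloned disks (not captured in the original notes) is qemu-img:

qemu-img info --backing-chain /vm/os/compute2.qcow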

[root@controller1 ~]# nova service-list
+----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+
| Id | Binary           | Host        | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+
| 2  | nova-cert        | controller1 | internal | enabled | up    | 2015-10-10T18:21:33.000000 | -               |
| 4  | nova-conductor   | controller1 | internal | enabled | up    | 2015-10-10T18:21:34.000000 | -               |
| 6  | nova-consoleauth | controller1 | internal | enabled | up    | 2015-10-10T18:21:29.000000 | -               |
| 8  | nova-scheduler   | controller1 | internal | enabled | up    | 2015-10-10T18:21:27.000000 | -               |
| 10 | nova-conductor   | controller2 | internal | enabled | up    | 2015-10-10T18:21:35.000000 | -               |
| 12 | nova-scheduler   | controller2 | internal | enabled | up    | 2015-10-10T18:21:29.000000 | -               |
| 14 | nova-consoleauth | controller2 | internal | enabled | up    | 2015-10-10T18:21:35.000000 | -               |
| 16 | nova-cert        | controller2 | internal | enabled | up    | 2015-10-10T18:21:35.000000 | -               |
| 18 | nova-compute     | compute1    | nova     | enabled | up    | 2015-10-10T18:21:28.000000 | -               |
| 20 | nova-compute     | compute2    | nova     | enabled | up    | 2015-10-10T18:21:32.000000 | -               |
+----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+

We have our two compute nodes ready; next we need to configure the neutron agent on them:

[root@compute1 sysctl.d]# cat /etc/sysctl.d/88-neutron.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
[root@compute1 sysctl.d]# sysctl -p
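
Note that a plain sysctl -p only re-reads /etc/sysctl.conf; to load the drop-in file above, something like this is needed:

sysctl -p /etc/sysctl.d/88-neutron.conf
# or
sysctl --system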


yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch

[root@compute1 sysctl.d]# cat /etc/neutron/neutron.conf | grep -v ^$ | grep -v ^#
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
[matchmaker_redis]
[matchmaker_ring]
[quotas]
[agent]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = Amena2006
[database]
[nova]
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_policy]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true


[root@compute1 sysctl.d]# cat /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini | grep -v ^$ | grep -v ^# 
[ovs]
local_ip = 10.10.20.41
[agent]
tunnel_types = vxlan
[securitygroup]

[root@compute1 sysctl.d]# cat /etc/neutron/plugins/ml2/ml2_conf.ini | grep -v ^$ | grep -v ^# 
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver



ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service

Now we connect nova to neutron by editing /etc/nova/nova.conf on the compute node:

[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.10.20.41
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.10.20.41
novncproxy_base_url = http://10.10.10.10:6080/vnc_auto.html
verbose = True
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
[barbican]
[cells]
[cinder]
[conductor]
[database]
[ephemeral_storage_encryption]
[glance]
host = 10.10.10.10
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = Amena2006
[libvirt]
virt_type=qemu
inject_password=true
inject_key=true
[metrics]
[neutron]
url = http://10.10.10.10:9696
auth_strategy = keystone
admin_auth_url = http://10.10.10.10:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = Amena2006
[osapi_v3]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[workarounds]
[xenserver]
[zookeeper]
[matchmaker_redis]
[matchmaker_ring]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true


start the services:

systemctl restart openstack-nova-compute.service
systemctl enable neutron-openvswitch-agent.service
systemctl start neutron-openvswitch-agent.service


Check if we have the OVS agents working on the compute nodes:

[root@controller2 ~]# neutron agent-list
+--------------------------------------+--------------------+-------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host        | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+-------------+-------+----------------+---------------------------+
| 4f094c3a-bf7b-4fcf-8655-cde05425a07a | DHCP agent         | controller2 | :-)   | True           | neutron-dhcp-agent        |
| 4f48383e-1be9-41d7-bcd0-beb75357a07b | Open vSwitch agent | controller2 | :-)   | True           | neutron-openvswitch-agent |
| 51697db4-b846-428f-8657-5e706fc9bf2d | DHCP agent         | controller1 | :-)   | True           | neutron-dhcp-agent        |
| 5ae9b22c-8d61-400f-8a40-8eaf9734345d | Open vSwitch agent | controller1 | :-)   | True           | neutron-openvswitch-agent |
| 938756b5-2f87-4ada-926d-99eb1d02218b | Open vSwitch agent | compute1    | :-)   | True           | neutron-openvswitch-agent |
| a40c7721-130d-4568-9545-537d6e1301cc | L3 agent           | controller2 | :-)   | True           | neutron-l3-agent          |
| a61c551c-573f-4ef1-b554-61eff441be1c | Metadata agent     | controller2 | :-)   | True           | neutron-metadata-agent    |
| a6ac7789-d6dd-4329-85ad-729c73f99120 | Metadata agent     | controller1 | :-)   | True           | neutron-metadata-agent    |
| e220bdbd-2616-466c-929f-48e70bb7c564 | L3 agent           | controller1 | :-)   | True           | neutron-l3-agent          |
| fea9f0ad-1d93-4e00-a6a4-dc6a91f33bb4 | Open vSwitch agent | compute2    | :-)   | True           | neutron-openvswitch-agent |
+--------------------------------------+--------------------+-------------+-------+----------------+---------------------------+


Now we are going to create our first networks, starting with the external network:

[root@controller1 ~]# neutron net-create ext-net --router:external \
>   --provider:physical_network external --provider:network_type flat
Created a new network:
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | e905eb4c-0f11-454a-8042-6cbb9f6b6396 |
| mtu                       | 0                                    |
| name                      | ext-net                              |
| provider:network_type     | flat                                 |
| provider:physical_network | external                             |
| provider:segmentation_id  |                                      |
| router:external           | True                                 |
| shared                    | False                                |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tenant_id                 | f42a6212d7584cb1bc6997b2049daff5     |
+---------------------------+--------------------------------------+
[root@controller1 ~]# neutron net-list
+--------------------------------------+---------+---------+
| id                                   | name    | subnets |
+--------------------------------------+---------+---------+
| e905eb4c-0f11-454a-8042-6cbb9f6b6396 | ext-net |         |
+--------------------------------------+---------+---------+

[root@controller1 ~]# neutron subnet-create ext-net 192.168.122.0/24 --name ext-subnet --allocation-pool start=192.168.122.200,end=192.168.122.249 --disable-dhcp --gateway 192.168.122.1
Created a new subnet:
+-------------------+--------------------------------------------------------+
| Field             | Value                                                  |
+-------------------+--------------------------------------------------------+
| allocation_pools  | {"start": "192.168.122.200", "end": "192.168.122.249"} |
| cidr              | 192.168.122.0/24                                       |
| dns_nameservers   |                                                        |
| enable_dhcp       | False                                                  |
| gateway_ip        | 192.168.122.1                                          |
| host_routes       |                                                        |
| id                | b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6                   |
| ip_version        | 4                                                      |
| ipv6_address_mode |                                                        |
| ipv6_ra_mode      |                                                        |
| name              | ext-subnet                                             |
| network_id        | e905eb4c-0f11-454a-8042-6cbb9f6b6396                   |
| subnetpool_id     |                                                        |
| tenant_id         | f42a6212d7584cb1bc6997b2049daff5                       |
+-------------------+--------------------------------------------------------+

[root@controller1 ~]# source liquid_rc 
[root@controller1 ~]# neutron net-create liquid-net
Created a new network:
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| id                        | cc3baa04-78e9-4ba1-838d-b38607099002 |
| mtu                       | 0                                    |
| name                      | liquid-net                           |
| provider:network_type     | vxlan                                |
| provider:physical_network |                                      |
| provider:segmentation_id  | 1                                    |
| router:external           | False                                |
| shared                    | False                                |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tenant_id                 | f42a6212d7584cb1bc6997b2049daff5     |
+---------------------------+--------------------------------------+
[root@controller1 ~]# neutron subnet-create liquid-net 192.168.0.0/24 --name liquid-subnet --gateway 192.168.0.1
Created a new subnet:
+-------------------+--------------------------------------------------+
| Field             | Value                                            |
+-------------------+--------------------------------------------------+
| allocation_pools  | {"start": "192.168.0.2", "end": "192.168.0.254"} |
| cidr              | 192.168.0.0/24                                   |
| dns_nameservers   |                                                  |
| enable_dhcp       | True                                             |
| gateway_ip        | 192.168.0.1                                      |
| host_routes       |                                                  |
| id                | 0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a             |
| ip_version        | 4                                                |
| ipv6_address_mode |                                                  |
| ipv6_ra_mode      |                                                  |
| name              | liquid-subnet                                    |
| network_id        | cc3baa04-78e9-4ba1-838d-b38607099002             |
| subnetpool_id     |                                                  |
| tenant_id         | f42a6212d7584cb1bc6997b2049daff5                 |
+-------------------+--------------------------------------------------+

[root@controller1 ~]# neutron router-create liquid-router
Created a new router:
+-----------------------+--------------------------------------+
| Field                 | Value                                |
+-----------------------+--------------------------------------+
| admin_state_up        | True                                 |
| distributed           | False                                |
| external_gateway_info |                                      |
| ha                    | True                                 |
| id                    | d7de500b-2c4b-4fa4-8ca5-bd3c1e5360c1 |
| name                  | liquid-router                        |
| routes                |                                      |
| status                | ACTIVE                               |
| tenant_id             | f42a6212d7584cb1bc6997b2049daff5     |
+-----------------------+--------------------------------------+
[root@controller1 ~]# neutron router-interface-add liquid-router liquid-subnet
Added interface 01aca54e-306a-476f-8d1f-24c3a515bf0c to router liquid-router.
[root@controller1 ~]# neutron router-gateway-set liquid-router ext-net
Set gateway for router liquid-router
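
To see which L3 agents ended up hosting the HA router, the neutron client can report it (a quick check, assuming the admin credentials are sourced):

neutron l3-agent-list-hosting-router liquid-router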


Finally, to get neutron working and the VXLAN overlay up, the [ovs] and [agent] settings we had in the /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini file have to be added at the end of ml2_conf.ini:


[root@controller2 ml2]# cat ml2_conf.ini | grep -vE '(^#|^$)'
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = vxlan
mechanism_drivers = openvswitch
[ml2_type_flat]
flat_networks = external
[ml2_type_vlan]
[ml2_type_gre]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 10.10.20.32
bridge_mappings = external:br-ex
[agent]
tunnel_types = vxlan


Once we have made these changes, the VXLAN overlay tunnel gets created:

[root@controller2 ml2]# ovs-vsctl show
3108e993-e1f3-48c5-bb42-dea613f91c50
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port "eth0"
            Interface "eth0"
        Port phy-br-ex
            Interface phy-br-ex
                type: patch
                options: {peer=int-br-ex}
    Bridge br-tun
        fail_mode: secure
        Port "vxlan-0a0a141f"
            Interface "vxlan-0a0a141f"
                type: vxlan
                options: {df_default="true", in_key=flow, local_ip="10.10.20.32", out_key=flow, remote_ip="10.10.20.31"}
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
        Port br-tun
            Interface br-tun
                type: internal
    Bridge br-int
        fail_mode: secure
        Port "qg-d08ef10e-4f"
            tag: 1
            Interface "qg-d08ef10e-4f"
                type: internal
        Port "ha-b64be9d1-15"
            tag: 4095
            Interface "ha-b64be9d1-15"
                type: internal
        Port br-int
            Interface br-int
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
        Port "qr-01aca54e-30"
            tag: 2
            Interface "qr-01aca54e-30"
                type: internal
        Port int-br-ex
            Interface int-br-ex
                type: patch
                options: {peer=phy-br-ex}
    ovs_version: "2.3.1"


Now we can check all is working:

[root@controller1 neutron]# ip netns exec qrouter-d7de500b-2c4b-4fa4-8ca5-bd3c1e5360c1 ip a
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
11: ha-4ca8eb16-cd:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:b0:8f:78 brd ff:ff:ff:ff:ff:ff
    inet 169.254.192.1/18 brd 169.254.255.255 scope global ha-4ca8eb16-cd
       valid_lft forever preferred_lft forever
    inet 169.254.0.1/24 scope global ha-4ca8eb16-cd
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:feb0:8f78/64 scope link 
       valid_lft forever preferred_lft forever
12: qr-01aca54e-30:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:5e:90:66 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.1/24 scope global qr-01aca54e-30
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe5e:9066/64 scope link nodad 
       valid_lft forever preferred_lft forever
13: qg-d08ef10e-4f:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:df:ec:ab brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.200/24 scope global qg-d08ef10e-4f
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fedf:ecab/64 scope link nodad 
       valid_lft forever preferred_lft forever

We have to be able to ping the 192.168.122.200 router gateway from outside the controller nodes:

[root@openstackbox os]# ping 192.168.122.200
PING 192.168.122.200 (192.168.122.200) 56(84) bytes of data.
64 bytes from 192.168.122.200: icmp_seq=1 ttl=64 time=0.332 ms

We need to be able to ping the tenant network GW from inside the netns:

[root@controller1 neutron]# ip netns exec qrouter-d7de500b-2c4b-4fa4-8ca5-bd3c1e5360c1 ping 192.168.0.1
PING 192.168.0.1 (192.168.0.1) 56(84) bytes of data.
64 bytes from 192.168.0.1: icmp_seq=1 ttl=64 time=0.025 ms

We also have to be able to ping the other HA router ip address:

[root@controller1 neutron]# ip netns exec qrouter-f2580de7-ab7f-4be1-ac95-1a65588fb3fd ip a
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
18: ha-b9452ca9-51:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:6c:27:5d brd ff:ff:ff:ff:ff:ff
    inet 169.254.192.3/18 brd 169.254.255.255 scope global ha-b9452ca9-51
       valid_lft forever preferred_lft forever
    inet 169.254.0.1/24 scope global ha-b9452ca9-51
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe6c:275d/64 scope link 
       valid_lft forever preferred_lft forever
19: qr-f38a2314-f7:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:31:06:2a brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.1/24 scope global qr-f38a2314-f7
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe31:62a/64 scope link nodad 
       valid_lft forever preferred_lft forever
20: qg-41a16d4f-c6:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:74:b3:4d brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.201/24 scope global qg-41a16d4f-c6
       valid_lft forever preferred_lft forever
    inet 192.168.122.202/32 scope global qg-41a16d4f-c6
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe74:b34d/64 scope link nodad 
       valid_lft forever preferred_lft forever
[root@controller1 neutron]# ip netns exec qrouter-f2580de7-ab7f-4be1-ac95-1a65588fb3fd ping 169.254.192.4
PING 169.254.192.4 (169.254.192.4) 56(84) bytes of data.
64 bytes from 169.254.192.4: icmp_seq=1 ttl=64 time=0.711 ms
^C
--- 169.254.192.4 ping statistics ---

The passive router should only have the HA IP address, like this:

[root@controller2 ~]# ip netns exec qrouter-f2580de7-ab7f-4be1-ac95-1a65588fb3f ip a
Cannot open network namespace "qrouter-f2580de7-ab7f-4be1-ac95-1a65588fb3f": No such file or directory
[root@controller2 ~]# ip netns exec qrouter-f2580de7-ab7f-4be1-ac95-1a65588fb3fd ip a
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
14: ha-d70af1d2-74:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:65:e4:68 brd ff:ff:ff:ff:ff:ff
    inet 169.254.192.4/18 brd 169.254.255.255 scope global ha-d70af1d2-74
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe65:e468/64 scope link 
       valid_lft forever preferred_lft forever
15: qr-f38a2314-f7:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:31:06:2a brd ff:ff:ff:ff:ff:ff
16: qg-41a16d4f-c6:  mtu 1500 qdisc noqueue state UNKNOWN 
    link/ether fa:16:3e:74:b3:4d brd ff:ff:ff:ff:ff:ff



IMPORTANT:
To get HA working we need VXLAN multicast configured in the ml2_conf.ini file:

[ml2_type_vxlan]
# (ListOpt) Comma-separated list of : tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
vni_ranges = 1:1000
vxlan_group = 239.1.1.1


Also, to get our DHCP instances resolving external names, we need to configure a DNS server for dnsmasq to forward requests to:
[root@controller1 neutron]# cat /etc/neutron/dhcp_agent.ini | grep 8.8.8
dnsmasq_dns_servers = 8.8.8.8
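
For the dnsmasq forwarder to take effect the DHCP agents need a restart, roughly:

systemctl restart neutron-dhcp-agent.service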

Now we are going to start an instance and see if the network is OK:

[root@controller2 ~]# nova boot  --flavor m1.tiny --image cirros-0.3.4-x86_64 --key-name liquid-key --security-groups default --nic net-id=cc3baa04-78e9-4ba1-838d-b38607099002 cirros1
[root@controller2 ~]# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
[root@controller2 ~]# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
[root@controller2 ~]# openstack ip floating create ext-net
[root@controller2 ~]# openstack ip floating add 192.168.122.202 cirros1
[root@controller1 neutron]# nova list
+--------------------------------------+---------+--------+------------+-------------+-----------------------------------------+
| ID                                   | Name    | Status | Task State | Power State | Networks                                |
+--------------------------------------+---------+--------+------------+-------------+-----------------------------------------+
| ee51acbd-672d-492e-8d6b-707f60f4bf5f | cirros1 | ACTIVE | -          | Running     | liquid-net=192.168.0.6, 192.168.122.202 |
+--------------------------------------+---------+--------+------------+-------------+-----------------------------------------+
[root@controller1 neutron]# ping 192.168.122.202
PING 192.168.122.202 (192.168.122.202) 56(84) bytes of data.
64 bytes from 192.168.122.202: icmp_seq=1 ttl=63 time=1.26 ms
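
With ICMP and SSH opened in the default security group we should also be able to log in over the floating IP; a quick check, assuming the liquid-key private key was saved as liquid-key.pem (the filename is an assumption):

ssh -i liquid-key.pem cirros@192.168.122.202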


Now we are going to configure block storage using cinder:

[root@controller2 ~]# yum install openstack-cinder python-cinderclient python-oslo-db
Loaded plugins: fastestmirror, priorities
Ceph                                                                                                                                                                            |  951 B  00:00:00     
Ceph-noarch                                                                                                                                                                     |  951 B  00:00:00     
base                                                                                                                                                                            | 3.6 kB  00:00:00     
centos-openstack-kilo                                                                                                                                                           | 2.9 kB  00:00:00     
ceph-source                                                                                                                                                                     |  951 B  00:00:00     
epel/x86_64/metalink                                                                                                                                                            |  26 kB  00:00:00     
epel                                                                                                                                                                            | 4.3 kB  00:00:00     


[root@controller1 mapper]# mysql -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 354428
Server version: 5.5.40-MariaDB-wsrep MariaDB Server, wsrep_25.11.r4026

Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE cinder;
Query OK, 1 row affected (0.04 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
    ->   IDENTIFIED BY 'Amena2006';
Query OK, 0 rows affected (0.03 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
    ->   IDENTIFIED BY 'Amena2006';
Query OK, 0 rows affected (0.05 sec)

MariaDB [(none)]> exit
Bye
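
Not captured in these notes: cinder also needs a keystone user, the admin role and the v1 volume service/endpoint; roughly, following the same pattern used for the other services in this setup:

openstack user create --password-prompt cinder
openstack role add --project service --user cinder admin
openstack service create --name cinder --description "OpenStack Block Storage" volume
openstack endpoint create --publicurl http://10.10.10.10:8776/v1/%\(tenant_id\)s --internalurl http://10.10.10.10:8776/v1/%\(tenant_id\)s --adminurl http://10.10.10.10:8776/v1/%\(tenant_id\)s --region RegionOne volume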


[root@controller1 ~]# cat /etc/cinder/cinder.conf | grep -v ^# | grep -v ^$
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.10.20.31
verbose = True
glance_host=10.10.10.10
glance_port=9292
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[database]
connection = mysql://cinder:Amena2006@10.10.10.10/cinder
[fc-zone-manager]
[keymgr]
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000
auth_url = http://10.10.10.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = Amena2006
[matchmaker_redis]
[matchmaker_ring]
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true
[profiler]

[root@controller1 ~]#su -s /bin/sh -c "cinder-manage db sync" cinder


HA proxy config:

listen cinder_api 10.10.10.10:8776
  balance  source
  option  tcpka
  option  httpchk
  option  tcplog
  server icehouse1 10.10.10.31:8776  check inter 2000 rise 2 fall 5
  server icehouse2 10.10.10.32:8776  check inter 2000 rise 2 fall 5

Now we start the daemons in the controllers:

[root@controller2 system]# systemctl enable openstack-cinder-volume.service
ln -s '/usr/lib/systemd/system/openstack-cinder-volume.service' '/etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service'
[root@controller2 system]# systemctl enable openstack-cinder-scheduler.service
ln -s '/usr/lib/systemd/system/openstack-cinder-scheduler.service' '/etc/systemd/system/multi-user.target.wants/openstack-cinder-scheduler.service'
[root@controller2 system]# systemctl enable openstack-cinder-api.service
ln -s '/usr/lib/systemd/system/openstack-cinder-api.service' '/etc/systemd/system/multi-user.target.wants/openstack-cinder-api.service'
[root@controller2 system]# systemctl start openstack-cinder-volume.service
[root@controller2 system]# systemctl start openstack-cinder-scheduler.service
[root@controller2 system]# systemctl start openstack-cinder-api.service


We had problems with the cinder python client: it needed the v2 API endpoint, so we created it:

[root@controller2 ~]# openstack service create --name cinderv2 \
>   --description "OpenStack Block Storage" volumev2
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Block Storage          |
| enabled     | True                             |
| id          | 146afd63cddd4419bc8ed99affc253c7 |
| name        | cinderv2                         |
| type        | volumev2                         |
+-------------+----------------------------------+
[root@controller2 ~]# openstack endpoint create --publicurl http://10.10.10.10:8776/v2/%\(tenant_id\)s --internalurl http://10.10.10.10:8776/v2/%\(tenant_id\)s --adminurl http://10.10.10.10:8776/v2/%\(tenant_id\)s --region RegionOne volumev2
+--------------+------------------------------------------+
| Field        | Value                                    |
+--------------+------------------------------------------+
| adminurl     | http://10.10.10.10:8776/v2/%(tenant_id)s |
| id           | a3bfcba3ccc849af932bebcb58ff43ef         |
| internalurl  | http://10.10.10.10:8776/v2/%(tenant_id)s |
| publicurl    | http://10.10.10.10:8776/v2/%(tenant_id)s |
| region       | RegionOne                                |
| service_id   | 146afd63cddd4419bc8ed99affc253c7         |
| service_name | cinderv2                                 |
| service_type | volumev2                                 |
+--------------+------------------------------------------+
[root@controller2 ~]# cinder service-list
+------------------+-------------+------+---------+-------+----------------------------+-----------------+
|      Binary      |     Host    | Zone |  Status | State |         Updated_at         | Disabled Reason |
+------------------+-------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | controller1 | nova | enabled |   up  | 2015-10-15T14:19:37.000000 |       None      |
| cinder-scheduler | controller2 | nova | enabled |   up  | 2015-10-15T14:19:44.000000 |       None      |
|  cinder-volume   | controller1 | nova | enabled |   up  | 2015-10-15T14:19:40.000000 |       None      |
|  cinder-volume   | controller2 | nova | enabled |   up  | 2015-10-15T14:19:43.000000 |       None      |
+------------------+-------------+------+---------+-------+----------------------------+-----------------+

We also have to modify the rc files we source:

[root@controller2 ~]# cat admin_rc 
export OS_USERNAME=admin
export OS_PASSWORD=Amena2006
export OS_PROJECT_NAME=admin
export OS_AUTH_URL=http://10.10.10.10:35357/
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_IMAGE_API_VERSION=2
export OS_VOLUME_API_VERSION=2
export OS_TENANT_ID=83e7173e84e949fb9583e839a7037232


Now we are going to configure cinder to access ceph:

In cinder.conf we add the RBD settings. You should create a specific ceph user for cinder with fewer privileges than admin, but here, just for the demo, we are going to work with the admin ceph user:

[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.10.20.31
verbose = True
glance_host=10.10.10.10
glance_port=9292
rbd_pool=datastore
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot=false
rbd_max_clone_depth=5
rbd_user=admin
glance_api_version=2
volume_driver=cinder.volume.drivers.rbd.RBDDriver
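
As noted above, a dedicated ceph user for cinder would be better than client.admin; creating one restricted to the volumes pool looks roughly like this (the client.cinder name is an assumption, these notes keep using admin):

ceph auth get-or-create client.cinder mon 'allow r' \
  osd 'allow class-read object_prefix rbd_children, allow rwx pool=datastore'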

We need a pool called datastore in ceph:

[ceph@ceph1 ~]$ ceph osd pool create datastore 150
pool 'datastore' created
[ceph@ceph1 ~]$ ceph osd df
ID WEIGHT  REWEIGHT SIZE  USE    AVAIL %USE VAR  
 0 0.18999  1.00000  194G 37452k  194G 0.02 1.02 
 5 0.18999  1.00000  194G 35784k  194G 0.02 0.98 
 1 0.18999  1.00000  194G 36560k  194G 0.02 1.00 
 4 0.18999  1.00000  194G 36684k  194G 0.02 1.00 
 2 0.18999  1.00000  194G 36172k  194G 0.02 0.99 
 3 0.18999  1.00000  194G 36832k  194G 0.02 1.01 
              TOTAL 1169G   214M 1169G 0.02      
MIN/MAX VAR: 0.98/1.02  STDDEV: 0


Now we restart the cinder services and create a test volume

[root@controller1 ceph]# cinder create --display-name testVolume 1
+---------------------------------------+--------------------------------------+
|                Property               |                Value                 |
+---------------------------------------+--------------------------------------+
|              attachments              |                  []                  |
|           availability_zone           |                 nova                 |
|                bootable               |                false                 |
|          consistencygroup_id          |                 None                 |
|               created_at              |      2015-10-15T14:51:31.000000      |
|              description              |                 None                 |
|               encrypted               |                False                 |
|                   id                  | c648fbd4-1f4f-45fc-a081-97f778a6e64f |
|                metadata               |                  {}                  |
|              multiattach              |                False                 |
|                  name                 |              testVolume              |
|         os-vol-host-attr:host         |                 None                 |
|     os-vol-mig-status-attr:migstat    |                 None                 |
|     os-vol-mig-status-attr:name_id    |                 None                 |
|      os-vol-tenant-attr:tenant_id     |   83e7173e84e949fb9583e839a7037232   |
|   os-volume-replication:driver_data   |                 None                 |
| os-volume-replication:extended_status |                 None                 |
|           replication_status          |               disabled               |
|                  size                 |                  1                   |
|              snapshot_id              |                 None                 |
|              source_volid             |                 None                 |
|                 status                |               creating               |
|                user_id                |   6d5479e66f1441ec9cdb685732c0b661   |
|              volume_type              |                 None                 |
+---------------------------------------+--------------------------------------+
[root@controller1 ceph]# cinder list
+--------------------------------------+-----------+------------+------+-------------+----------+-------------+
|                  ID                  |   Status  |    Name    | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+------------+------+-------------+----------+-------------+
| c648fbd4-1f4f-45fc-a081-97f778a6e64f | available | testVolume |  1   |     None    |  false   |             |
+--------------------------------------+-----------+------------+------+-------------+----------+-------------+
[root@controller1 ceph]# rbd list datastore
volume-c648fbd4-1f4f-45fc-a081-97f778a6e64f

All looking good. Now we need to configure the compute nodes to be able to attach the rbd volumes via libvirt and qemu-rbd, so first we install ceph on the compute nodes and give them admin rights:

[root@compute1 system]# useradd -m ceph
[root@compute1 system]# passwd ceph
[root@compute1 system]# su - ceph
[ceph@compute1 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/ceph/.ssh/id_rsa): 
Created directory '/home/ceph/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /home/ceph/.ssh/id_rsa.
Your public key has been saved in /home/ceph/.ssh/id_rsa.pub.
The key fingerprint is:
00:84:ca:51:6a:10:15:9f:52:76:9b:fb:58:7e:2e:90 ceph@compute1
The key's randomart image is:
+--[ RSA 2048]----+
|oo+== .          |
|..o+ + o         |
|.+o o +          |
|o. .   o         |
|      ..S        |
|      E=         |
|      ..o .      |
|        .o       |
|         ..      |
+-----------------+
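
For ceph-deploy to work from the admin node, the ceph user on controller1 needs passwordless ssh and sudo on the compute nodes; the key distribution is not shown above, but it is roughly:

# as the ceph user on controller1 (the ceph-deploy admin node)
ssh-copy-id ceph@compute1
ssh-copy-id ceph@compute2
# and on each compute node, passwordless sudo for the ceph user
echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
sudo chmod 0440 /etc/sudoers.d/ceph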

[root@controller1 ~]# su - ceph
Last login: Thu Oct 15 22:15:50 CEST 2015 on pts/2
[ceph@controller1 ~]$ ls
ceph.log  my-cluster
[ceph@controller1 ~]$ cd my-cluster/
[ceph@controller1 my-cluster]$ ls
ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.bootstrap-rgw.keyring  ceph.client.admin.keyring  ceph.conf  ceph.log  ceph.mon.keyring

[ceph@controller1 my-cluster]$ ceph-deploy install compute1 compute2
[ceph@controller1 my-cluster]$ ceph-deploy admin compute1 compute2

From the compute nodes we check the ceph user has auth:

[root@compute1 nova]# su - ceph
Last login: Fri Oct 16 07:50:41 CEST 2015 on pts/1
[ceph@compute1 ceph]$ sudo chmod +r /etc/ceph/ceph.client.admin.keyring
[ceph@compute1 ceph]$ ceph status
    cluster 7e92d6e2-3c06-417b-9c23-ef86f2166393
     health HEALTH_OK
     monmap e3: 3 mons at {ceph1=10.10.10.21:6789/0,ceph2=10.10.10.22:6789/0,ceph3=10.10.10.23:6789/0}
            election epoch 8, quorum 0,1,2 ceph1,ceph2,ceph3
     osdmap e32: 6 osds: 6 up, 6 in
      pgmap v254: 150 pgs, 1 pools, 16 bytes data, 3 objects
            214 MB used, 1169 GB / 1169 GB avail
                 150 active+clean
[ceph@compute1 ceph]$ exit

Now we have to configure the secret in libvirt to enable auth between qemu and ceph:

[root@compute1 nova]# cat /tmp/local.txt
client.admin secret
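
The UUID used below belongs to a libvirt secret that has to be defined first; that step is not captured above, but with the usage name from /tmp/local.txt it looks roughly like this:

cat > /tmp/secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>07b1919e-9080-4201-b45c-5f742f45ad06</uuid>
  <usage type='ceph'>
    <name>client.admin secret</name>
  </usage>
</secret>
EOF
virsh secret-define --file /tmp/secret.xml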

[root@compute1 nova]# virsh secret-set-value --secret 07b1919e-9080-4201-b45c-5f742f45ad06 --base64 AQBKOB1WRCLYGBAAKDTLtQSJg1NlFQ7E47snVA==
[root@compute1 nova]# virsh secret-list 
 UUID                                  Usage
--------------------------------------------------------------------------------
 07b1919e-9080-4201-b45c-5f742f45ad06  ceph client.admin secret

We dump the secret config, and define it in the other compute nodes:

[root@compute1 nova]# virsh secret-dumpxml 07b1919e-9080-4201-b45c-5f742f45ad06 > /tmp/secret.xml

[root@compute2 nova]#virsh secret-define secret.xml
[root@compute2 nova]#virsh secret-set-value --secret 07b1919e-9080-4201-b45c-5f742f45ad06 --base64 AQBKOB1WRCLYGBAAKDTLtQSJg1NlFQ7E47snVA==

And now we have to configure first the cinder.conf on the controller nodes, and then the nova.conf on the compute nodes:

[root@controller1 ~]# cat /etc/cinder/cinder.conf | grep -v ^# | grep -v ^$
rbd_pool=datastore
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot=false
rbd_max_clone_depth=5
rbd_user=admin
glance_api_version=2
rbd_secret_uuid=07b1919e-9080-4201-b45c-5f742f45ad06
volume_driver=cinder.volume.drivers.rbd.RBDDriver

NOVA compute on compute nodes:

[root@controller1 ~]#cat /etc/nova/nova.conf |  grep -v ^# | grep -v ^$
[libvirt]
virt_type=qemu
inject_password=true
inject_key=true
libvirt_images_type=rbd
libvirt_images_rbd_pool=datastore
libvirt_images_rbd_ceph_conf=/etc/ceph/ceph.conf
libvirt_inject_password=false
libvirt_inject_key=false
libvirt_inject_partition=-2
rbd_user=admin
rbd_secret_uuid=07b1919e-9080-4201-b45c-5f742f45ad06
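
Note: in Kilo the options under [libvirt] are normally named without the libvirt_ prefix (images_type, images_rbd_pool, images_rbd_ceph_conf, inject_partition), so it is worth double-checking which form nova actually honours; a sketch of the equivalent section:

[libvirt]
virt_type=qemu
images_type=rbd
images_rbd_pool=datastore
images_rbd_ceph_conf=/etc/ceph/ceph.conf
inject_password=false
inject_key=false
inject_partition=-2
rbd_user=admin
rbd_secret_uuid=07b1919e-9080-4201-b45c-5f742f45ad06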

Once we have modified the files, we restart the cinder daemons on the controller nodes and nova-compute on the compute nodes.

Now at last we can attach our volume:

[root@controller1 ~]# nova volume-attach cirros1 6efab9b7-9884-4b0b-994b-e9948c6ce4f7
+----------+--------------------------------------+
| Property | Value                                |
+----------+--------------------------------------+
| device   | /dev/vdb                             |
| id       | 6efab9b7-9884-4b0b-994b-e9948c6ce4f7 |
| serverId | 77ed96b5-1bcf-4823-963b-b375a7121f73 |
| volumeId | 6efab9b7-9884-4b0b-994b-e9948c6ce4f7 |
+----------+--------------------------------------+
[root@controller1 ~]# nova volume-list
+--------------------------------------+--------+--------------+------+-------------+--------------------------------------+
| ID                                   | Status | Display Name | Size | Volume Type | Attached to                          |
+--------------------------------------+--------+--------------+------+-------------+--------------------------------------+
| 6efab9b7-9884-4b0b-994b-e9948c6ce4f7 | in-use |              | 3    | -           | 77ed96b5-1bcf-4823-963b-b375a7121f73 |
+--------------------------------------+--------+--------------+------+-------------+--------------------------------------+

If we check the libvirt XML dump of the instance on the compute node, we can see the ceph rbd info:

    6efab9b7-9884-4b0b-994b-e9948c6ce4f7
And if we connect to the instance we can see our volume: # fdisk /dev/vdb -l Disk /dev/vdb: 3221 MB, 3221225472 bytes 16 heads, 63 sectors/track, 6241 cylinders, total 6291456 sectors Units = sectors of 1 * 512 = 512 bytes Sector size (logical/physical): 512 bytes / 512 bytes I/O size (minimum/optimal): 512 bytes / 512 bytes Disk identifier: 0x00000000 Disk /dev/vdb doesn't contain a valid partition table # fdisk /dev/vdb -l Disk /dev/vdb: 3221 MB, 3221225472 bytes 16 heads, 63 sectors/track, 6241 cylinders, total 6291456 sectors Units = sectors of 1 * 512 = 512 bytes Sector size (logical/physical): 512 bytes / 512 bytes I/O size (minimum/optimal): 512 bytes / 512 bytes Disk identifier: 0x00000000 Disk /dev/vdb doesn't contain a valid partition table # mkfs.ext4 /dev/vdb # mount /dev/vdb /mnt # cd /mnt # df -h . Filesystem Size Used Available Use% Mounted on /dev/vdb 3.0G 68.5M 2.7G 2% /mnt # We now have the basics of cinder, we are going to configure glance to use ceph: [root@controller1 ~]# cat /etc/glance/glance-api.conf | grep -v ^# | grep -v ^$ [DEFAULT] verbose=True data_api = glance.db.sqlalchemy.api enable_v1_api=True enable_v2_api=True enable_v3_api=True registry_host=10.10.10.10 auth_strategy=keystone rabbit_hosts=controller1:5672,controller2:5672 rabbit_retry_interval=1 rabbit_retry_backoff=2 rabbit_max_retries=0 rabbit_durable_queues=true rabbit_ha_queues=true [oslo_policy] [database] connection = mysql://glance:Amena2006@10.10.10.10/glance [oslo_concurrency] [keystone_authtoken] auth_uri = http://10.10.10.10:5000 auth_url = http://10.10.10.10:35357 auth_plugin = password project_domain_id = default user_domain_id = default project_name = service username = glance password = Amena2006 [paste_deploy] flavor=keystone [store_type_location_strategy] [profiler] [task] [taskflow_executor] [glance_store] stores=glance.store.filesystem.Store, glance.store.rbd.Store default_store=rbd rbd_store_user=admin rbd_store_pool=images show_image_direct_url=True rbd_store_ceph_conf=/etc/ceph/ceph.conf rbd_store_chunk_size=8 Then we restart the glance api and test it: [root@controller1 images]# wget https://cloud-images.ubuntu.com/trusty/20151014.1/trusty-server-cloudimg-amd64-disk1.img --2015-10-18 21:38:18-- https://cloud-images.ubuntu.com/trusty/20151014.1/trusty-server-cloudimg-amd64-disk1.img Resolving cloud-images.ubuntu.com (cloud-images.ubuntu.com)... 91.189.88.141, 2001:67c:1360:8001:ffff:ffff:ffff:fffe Connecting to cloud-images.ubuntu.com (cloud-images.ubuntu.com)|91.189.88.141|:443... connected. HTTP request sent, awaiting response... 
200 OK Length: 258146816 (246M) [text/plain] Saving to: ‘trusty-server-cloudimg-amd64-disk1.img’ 100%[=============================================================================================================================================================>] 258,146,816 25.4MB/s in 10s 2015-10-18 21:38:28 (24.2 MB/s) - ‘trusty-server-cloudimg-amd64-disk1.img’ saved [258146816/258146816] [root@controller1 images]# [root@controller1 images]# [root@controller1 images]# [root@controller1 images]# glance image-create --name "cloud-trusty-ubuntu-amd64.img" --file trusty-server-cloudimg-amd64-disk1.img --disk-format qcow2 --container-format bare --visibility public --progress [=============================>] 100% +------------------+--------------------------------------+ | Property | Value | +------------------+--------------------------------------+ | checksum | db18c8da3bff2861fcd126c2f303fd8e | | container_format | bare | | created_at | 2015-10-18T19:39:25Z | | disk_format | qcow2 | | id | 5554c2ae-b607-40a0-bb98-7fc6a20065dd | | min_disk | 0 | | min_ram | 0 | | name | cloud-trusty-ubuntu-amd64.img | | owner | f42a6212d7584cb1bc6997b2049daff5 | | protected | False | | size | 258146816 | | status | active | | tags | [] | | updated_at | 2015-10-18T19:39:47Z | | virtual_size | None | | visibility | public | +------------------+--------------------------------------+ [root@controller1 ~]# glance image-create --name "cloud-Fedora-22.img" --file Fedora-Cloud-Base-22-20150521.x86_64.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress [=============================>] 100% +------------------+--------------------------------------+ | Property | Value | +------------------+--------------------------------------+ | checksum | 18abc933d17f69d55ecea0d19f8f5c71 | | container_format | bare | | created_at | 2015-10-18T19:44:53Z | | disk_format | qcow2 | | id | 650bce53-f8ee-4125-ad38-5f4ba3528048 | | min_disk | 0 | | min_ram | 0 | | name | cloud-Fedora-22.img | | owner | f42a6212d7584cb1bc6997b2049daff5 | | protected | False | | size | 228599296 | | status | active | | tags | [] | | updated_at | 2015-10-18T19:45:11Z | | virtual_size | None | | visibility | public | +------------------+--------------------------------------+ [root@controller1 ~]# glance image-list +--------------------------------------+-------------------------------+ | ID | Name | +--------------------------------------+-------------------------------+ | 6e9d91ab-339e-4cac-a667-f5344aa6e548 | cirros-0.3.4-x86_64 | | b7657f2f-2a1e-43b4-8f26-c02996b5c99a | cirros-0.3.5-x86_64 | | 650bce53-f8ee-4125-ad38-5f4ba3528048 | cloud-Fedora-22.img | | 5554c2ae-b607-40a0-bb98-7fc6a20065dd | cloud-trusty-ubuntu-amd64.img | +--------------------------------------+-------------------------------+ Now to horizon the dashboard: [root@controller2 ~]# yum -y install openstack-dashboard httpd mod_wsgi memcached python-memcached [root@controller2 ~]# chown -R apache:apache /usr/share/openstack-dashboard/static [root@controller2 ~]# setsebool -P httpd_can_network_connect on [root@controller1 ~]# cat /etc/openstack-dashboard/local_settings | grep -v ^# | grep -v ^$ import os from django.utils.translation import ugettext_lazy as _ from openstack_dashboard import exceptions DEBUG = False TEMPLATE_DEBUG = DEBUG WEBROOT = '/dashboard/' ALLOWED_HOSTS = '*' HORIZON_CONFIG = { 'user_home': 'openstack_dashboard.views.get_user_home', 'ajax_queue_limit': 10, 'auto_fade_alerts': { 'delay': 3000, 'fade_duration': 1500, 'types': 
['alert-success', 'alert-info'] }, 'help_url': "http://docs.openstack.org", 'exceptions': {'recoverable': exceptions.RECOVERABLE, 'not_found': exceptions.NOT_FOUND, 'unauthorized': exceptions.UNAUTHORIZED}, 'modal_backdrop': 'static', 'angular_modules': [], 'js_files': [], 'js_spec_files': [], } LOCAL_PATH = '/tmp' SECRET_KEY='42317dfe09a3556f2d18' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '10.10.10.10:11211', } } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' OPENSTACK_HOST = "10.10.10.10" OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" [root@controller1 ~]# vi /etc/sysconfig/memcached [root@controller1 ~]# scp /etc/openstack-dashboard/local_settings controller2:/etc/openstack-dashboard/local_settings local_settings 100% 22KB 21.5KB/s 00:00 [root@controller1 ~]# systemctl enable httpd.service memcached.service ln -s '/usr/lib/systemd/system/httpd.service' '/etc/systemd/system/multi-user.target.wants/httpd.service' ln -s '/usr/lib/systemd/system/memcached.service' '/etc/systemd/system/multi-user.target.wants/memcached.service' [root@controller1 ~]# cat /etc/sysconfig/memcached PORT="11211" USER="memcached" MAXCONN="1024" CACHESIZE="64" OPTIONS="-l 10.10.20.31" Now we add the config to the HA proxy: listen dashboard 10.10.10.10:80 balance source capture cookie vgnvisitor= len 32 cookie SERVERID insert indirect nocache mode http option forwardfor option httpchk option httpclose rspidel ^Set-cookie:\ IP= server controller1 10.10.10.31:80 cookie control01 check inter 2000 rise 2 fall 5 server controller2 10.10.10.32:80 cookie control02 check inter 2000 rise 2 fall 5 listen memcached 10.10.10.10:11211 balance source option tcpka option httpchk maxconn 10000 server controller1 10.10.20.31:11211 check inter 2000 rise 2 fall 5 server controller2 10.10.20.32:11211 check inter 2000 rise 2 fall 5 Restart ha proxy, and start httpd and memcached, and we have a working dashboard. If you get authorization problems, check if your endpoint region names are all the same, for example: [root@controller2 ~]# openstack endpoint list +----------------------------------+-----------+--------------+--------------+ | ID | Region | Service Name | Service Type | +----------------------------------+-----------+--------------+--------------+ | 3b6b02616b77444294f48d3e58c08733 | regionOne | neutron | network | | 8da5b44fbbdc4663ba89b67a5575fc54 | regionOne | swift | object-store | | f7ff1c78dfea4240ad23b6bf91d4f3cb | regionOne | glance | image | | bc4e7de605f64fb3b92fd69bff91a63c | regionOne | cinderv2 | volumev2 | | b693c46497214e60894075b6d9ec5739 | RegionOne | cinder | volume | : [root@openstackbox ~]# virsh dumpxml compute1 | grep -A 4 custom SandyBridge Ok, ready, now we have to start all the VM's again, we query our compute VM, and as you see it has vmx virtualization: [root@compute1 ~]# cat /proc/cpuinfo | grep vmx flags : fpu de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 syscall nx rdtscp lm constant_tsc rep_good nopl eagerfpu pni pclmulqdq vmx ssse3 cx16 sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx hypervisor lahf_lm xsaveopt vnmi ept So now we can modify the nova config file: root@compute2 ~]# grep virt_type /etc/nova/nova.conf virt_type=kvm # virt_type) (string value) # a server, which is dependent on virt_type. 
(valid options
[root@compute2 ~]# openstack-service restart

Now if we start an instance we can see it is using the kvm modules:

[root@controller1 ~]# nova start cirros1
Request to start server cirros1 has been accepted.

[root@compute1 ~]# virsh dumpxml instance-00000009 | grep -i doma

OK, that's another step done. Now we are going to install heat, so we can have our orchestration working:

yum -y install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine python-heatclient

We configure the file:

[root@controller1 ~]# cat /etc/heat/heat.conf | grep -v ^# | grep -v ^$
[DEFAULT]
heat_metadata_server_url = http://10.10.10.10:8000
heat_waitcondition_server_url = http://10.10.10.10:8000/v1/waitcondition
log_dir = /var/log/heat
use_stderr = False
auth_strategy = keystone
rpc_backend=rabbit
stack_user_domain_name = heat_user_domain
stack_user_domain_id=f76d8d6c53464b82a5bb7a85764db567
stack_domain_admin=heat_domain_admin
stack_domain_admin_password=Amena2006
verbose = True
[keystone_authtoken]
auth_uri = http://10.10.10.10:5000/v2.0
identity_uri = http://10.10.10.10:35357
admin_tenant_name = service
admin_user = heat
admin_password = Amena2006
[ssl]
[database]
connection = mysql://heat:Amena2006@10.10.10.10/heat
[paste_deploy]
[rpc_notifier2]
[ec2authtoken]
auth_uri = http://10.10.10.10:5000/v2.0
[heat_api_cloudwatch]
[heat_api]
[heat_api_cfn]
[auth_password]
[matchmaker_ring]
[matchmaker_redis]
[oslo_messaging_rabbit]
rabbit_hosts=controller1:5672,controller2:5672
rabbit_retry_interval=1
rabbit_retry_backoff=2
rabbit_max_retries=0
rabbit_durable_queues=true
rabbit_ha_queues=true

Create the openstack user and roles, the services and endpoints, and finally populate the DB:

openstack user create --password-prompt heat
openstack role add --project service --user heat admin
openstack role create heat_stack_owner
openstack role add --project liquid-project --user liquid heat_stack_owner
openstack role create heat_stack_user
openstack service create --name heat --description "Orchestration" orchestration
openstack service create --name heat-cfn --description "Orchestration" cloudformation
openstack endpoint create --publicurl http://10.10.10.10:8004/v1/%\(tenant_id\)s --internalurl http://10.10.10.10:8004/v1/%\(tenant_id\)s --adminurl http://10.10.10.10:8004/v1/%\(tenant_id\)s --region regionOne orchestration
openstack endpoint create --publicurl http://10.10.10.10:8004/v1 --internalurl http://10.10.10.10:8004/v1 --adminurl http://10.10.10.10:8004/v1 --region regionOne cloudformation
heat-keystone-setup-domain --stack-user-domain-name heat_user_domain --stack-domain-admin heat_domain_admin --stack-domain-admin-password Amena2006
su -s /bin/sh -c "heat-manage db_sync" heat
systemctl enable openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service
systemctl start openstack-heat-api.service openstack-heat-api-cfn.service openstack-heat-engine.service

Now we create a template in yaml:

[root@controller1 ~]# cat test-stat.yml
heat_template_version: 2014-10-16
description: A simple server.
parameters: ImageID: type: string description: Image use to boot a server NetID: type: string description: Network ID for the server keyID: type: string description: Name of keypair to assign to servers resources: server: type: OS::Nova::Server properties: image: { get_param: ImageID } flavor: m1.tiny networks: - network: { get_param: NetID } outputs: private_ip: description: IP address of the server in the private network value: { get_attr: [ server, first_address ] } [root@controller1 ~]# heat --debug stack-create liquid-stack -f /root/test-stat.yml -P "keyID=liquid-key;ImageID=cirros-0.3.4-x86_64;NetID=cc3baa04-78e9-4ba1-838d-b38607099002" +--------------------------------------+--------------+--------------------+----------------------+ | id | stack_name | stack_status | creation_time | +--------------------------------------+--------------+--------------------+----------------------+ | ff89d187-5016-4a44-baae-9e35f6f746c8 | liquid-stack | CREATE_IN_PROGRESS | 2015-10-26T11:26:17Z | +--------------------------------------+--------------+--------------------+----------------------+ [root@controller1 ~]# heat list WARNING (shell) DEPRECATED! Use stack-list instead. +--------------------------------------+--------------+-----------------+----------------------+ | id | stack_name | stack_status | creation_time | +--------------------------------------+--------------+-----------------+----------------------+ | ff89d187-5016-4a44-baae-9e35f6f746c8 | liquid-stack | CREATE_COMPLETE | 2015-10-26T11:26:17Z | +--------------------------------------+--------------+-----------------+----------------------+ [root@controller1 ~]# nova list +--------------------------------------+----------------------------------+---------+------------+-------------+-----------------------------------------+ | ID | Name | Status | Task State | Power State | Networks | +--------------------------------------+----------------------------------+---------+------------+-------------+-----------------------------------------+ | 77ed96b5-1bcf-4823-963b-b375a7121f73 | cirros1 | SHUTOFF | - | Shutdown | liquid-net=192.168.0.8, 192.168.122.202 | | bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 | fedora22 | ACTIVE | - | Running | liquid-net=192.168.0.9, 192.168.122.203 | | 97883ba1-ce8a-4d92-a152-476c7be1414e | liquid-stack-server-dddbnpukxkws | ACTIVE | - | Running | liquid-net=192.168.0.10 | +--------------------------------------+----------------------------------+---------+------------+-------------+-----------------------------------------+ [root@controller1 ~]# heat stack-show ff89d187-5016-4a44-baae-9e35f6f746c8 +-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ | capabilities | [] | | creation_time | 2015-10-26T11:26:17Z | | description | A simple server. 
| | disable_rollback | True | | id | ff89d187-5016-4a44-baae-9e35f6f746c8 | | links | http://10.10.10.10:8004/v1/f42a6212d7584cb1bc6997b2049daff5/stacks/liquid-stack/ff89d187-5016-4a44-baae-9e35f6f746c8 (self) | | notification_topics | [] | | outputs | [ | | | { | | | "output_value": "192.168.0.10", | | | "description": "IP address of the server in the private network", | | | "output_key": "private_ip" | | | } | | | ] | | parameters | { | | | "OS::project_id": "f42a6212d7584cb1bc6997b2049daff5", | | | "keyID": "liquid-key", | | | "OS::stack_id": "ff89d187-5016-4a44-baae-9e35f6f746c8", | | | "OS::stack_name": "liquid-stack", | | | "NetID": "cc3baa04-78e9-4ba1-838d-b38607099002", | | | "ImageID": "cirros-0.3.4-x86_64" | | | } | | parent | None | | stack_name | liquid-stack | | stack_owner | liquid | | stack_status | CREATE_COMPLETE | | stack_status_reason | Stack CREATE completed successfully | | stack_user_project_id | a67252c9a2ae4e12bb2e4d52d47483e7 | | template_description | A simple server. | | timeout_mins | None | | updated_time | None | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ A more advanced example for Heat, and also heat debug: We are going to create a web instance, using nested stacks. First we have our main stack yaml file: [root@controller1 heat]# cat new-stack.yml heat_template_version: 2015-04-30 description: Deploy web Front end servers parameters: image: type: string label: Image name or ID description: Image to be used for compute instance default: cloud-Fedora-22.img flavor: type: string label: Flavor description: Type of instance (flavor) to be used default: m1.web key: type: string label: Key name description: Name of key-pair to be used for compute instance default: liquid-key private_network: type: string label: Private network name or ID description: Network to attach instance to. default: liquid-net public_network: type: string label: Private network name or ID description: Network to attach instance to. default: ext-net resources: network: type: lib/private-network.yaml properties: public_network: { get_param: public_network } floating_ip: type: lib/floating.yaml properties: port: { get_attr: [fo1, port] } public_network: { get_param: public_network } fo1: type: lib/fo.yaml properties: image: { get_param: image } flavor: { get_param: flavor } key: { get_param: key } private_network: { get_attr: [network, name] } outputs: ip: description: The public IP address value: { get_attr: [floating_ip, ip] } And then we have our nested stacks: One for the Frontal web server resource: heat_template_version: 2015-04-30 description: Deploy web Front end servers parameters: image: type: string label: Image name or ID description: Image to be used for compute instance default: cloud-Fedora-22.img flavor: type: string label: Flavor description: Type of instance (flavor) to be used default: m1.web key: type: string label: Key name description: Name of key-pair to be used for compute instance default: liquid-key private_network: type: string label: Private network name or ID description: Network to attach instance to. default: liquid-net public_network: type: string label: Private network name or ID description: Network to attach instance to. 
default: ext-net resources: port: type: OS::Neutron::Port properties: network: { get_param: private_network } security_groups: - { get_resource: web_server_security_group } web_server_security_group: type: OS::Neutron::SecurityGroup properties: name: web_server_security_group rules: - protocol: icmp - protocol: tcp port_range_min: 22 port_range_max: 22 - protocol: tcp port_range_min: 443 port_range_max: 443 - protocol: tcp port_range_min: 80 port_range_max: 80 web_fo: type: OS::Nova::Server properties: image: { get_param: image } flavor: { get_param: flavor } key_name: { get_param: key } networks: - port: { get_resource: port } user_data: | #!/bin/sh -x # install dependencies dnf -y update dnf -y install nginx systemctl enable nginx systemctl start nginx systemctl status nginx user_data_format: RAW outputs: ip: description: The IP address of the MySQL instance. value: { get_attr: [web_fo, first_address] } port: description: The network port value: { get_resource: port } Onther one for create a private Network for the stack: [root@controller1 heat]# cat lib/private-network.yaml heat_template_version: 2013-05-23 description: Template that creates a private network. parameters: public_network: type: string label: Public network name or ID description: Public network with floating IP addresses. default: public cidr: type: string label: Network CIDR description: The CIDR of the private network. default: '10.20.40.0/24' dns: type: comma_delimited_list label: DNS nameservers description: Comma separated list of DNS nameservers for the private network. default: '8.8.8.8' resources: private_network: type: OS::Neutron::Net private_subnet: type: OS::Neutron::Subnet properties: network_id: { get_resource: private_network } cidr: { get_param: cidr } dns_nameservers: { get_param: dns } router: type: OS::Neutron::Router properties: external_gateway_info: network: { get_param: public_network } router-interface: type: OS::Neutron::RouterInterface properties: router_id: { get_resource: router } subnet: { get_resource: private_subnet } outputs: name: description: The private network. value: { get_attr: [private_network, name] } Another one for attaching a floating IP to the instance: [root@controller1 heat]# cat lib/floating.yaml heat_template_version: 2013-05-23 description: Template that assigns a floating IP address to a server. parameters: port: type: string label: Server port description: The server port that receives the floating IP address. public_network: type: string label: Public network name or ID description: Public network with floating IP addresses. default: ext-net resources: floating_ip: type: OS::Neutron::FloatingIP properties: floating_network: { get_param: public_network } floating_ip_assoc: type: OS::Neutron::FloatingIPAssociation properties: floatingip_id: { get_resource: floating_ip } port_id: { get_param: port } outputs: ip: description: The floating IP address assigned to the server. 
value: { get_attr: [floating_ip, floating_ip_address] } Now we can run the stack: [root@controller1 lib]# heat stack-list +--------------------------------------+------------+-----------------+----------------------+ | id | stack_name | stack_status | creation_time | +--------------------------------------+------------+-----------------+----------------------+ | 2673e175-6a2b-4762-80ca-0a579e89ce3e | web-stack | CREATE_COMPLETE | 2015-11-03T13:28:44Z | +--------------------------------------+------------+-----------------+----------------------+ [root@controller1 lib]# nova list +--------------------------------------+------------------------------------------------+--------+------------+-------------+-----------------------------------------------------------------------------------------+ | ID | Name | Status | Task State | Power State | Networks | +--------------------------------------+------------------------------------------------+--------+------------+-------------+-----------------------------------------------------------------------------------------+ | bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 | fedora22 | ACTIVE | - | Running | liquid-net=192.168.0.9, 192.168.122.203 | | 3db301f3-5f72-420f-9408-aa29b06bd184 | web-stack-fo1-b2evznnf2cpj-web_fo-wr53hebldic2 | ACTIVE | - | Running | web-stack-network-2h7vbad5p2mw-private_network-kxo6vcq5qmp7=10.20.40.4, 192.168.122.219 | +--------------------------------------+------------------------------------------------+--------+------------+-------------+-----------------------------------------------------------------------------------------+ [root@controller1 lib]# neutron subnet-list +--------------------------------------+------------------------------------------------------------+------------------+--------------------------------------------------------+ | id | name | cidr | allocation_pools | +--------------------------------------+------------------------------------------------------------+------------------+--------------------------------------------------------+ | 0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a | liquid-subnet | 192.168.0.0/24 | {"start": "192.168.0.2", "end": "192.168.0.254"} | | bd8829bd-a1c5-4eb2-a778-1431eb872846 | web-stack-network-2h7vbad5p2mw-private_subnet-a66u6n42qekm | 10.20.40.0/24 | {"start": "10.20.40.2", "end": "10.20.40.254"} | | b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6 | ext-subnet | 192.168.122.0/24 | {"start": "192.168.122.200", "end": "192.168.122.249"} | | cae6f97b-a2d4-4858-bf50-77053c5bee50 | HA subnet tenant f42a6212d7584cb1bc6997b2049daff5 | 169.254.192.0/18 | {"start": "169.254.192.1", "end": "169.254.255.254"} | +--------------------------------------+------------------------------------------------------------+------------------+--------------------------------------------------------+ [root@controller1 lib]# neutron net-list +--------------------------------------+-------------------------------------------------------------+-------------------------------------------------------+ | id | name | subnets | +--------------------------------------+-------------------------------------------------------------+-------------------------------------------------------+ | cc3baa04-78e9-4ba1-838d-b38607099002 | liquid-net | 0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a 192.168.0.0/24 | | 6d984518-4fe1-438d-b090-1abd10f5aaef | web-stack-network-2h7vbad5p2mw-private_network-kxo6vcq5qmp7 | bd8829bd-a1c5-4eb2-a778-1431eb872846 10.20.40.0/24 | | e905eb4c-0f11-454a-8042-6cbb9f6b6396 | ext-net | 
b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6 192.168.122.0/24 | | 964f0839-032d-4277-bca8-291a03c152d8 | HA network tenant f42a6212d7584cb1bc6997b2049daff5 | cae6f97b-a2d4-4858-bf50-77053c5bee50 169.254.192.0/18 | +--------------------------------------+-------------------------------------------------------------+-------------------------------------------------------+ We are going to take a look from heat: [root@controller1 lib]# heat stack-list --show-nested +--------------------------------------+------------------------------------+-----------------+----------------------+--------------------------------------+ | id | stack_name | stack_status | creation_time | parent | +--------------------------------------+------------------------------------+-----------------+----------------------+--------------------------------------+ | 2673e175-6a2b-4762-80ca-0a579e89ce3e | web-stack | CREATE_COMPLETE | 2015-11-03T13:28:44Z | None | | 457a6b54-c1b5-44ef-9818-03bc371d1ab5 | web-stack-network-2h7vbad5p2mw | CREATE_COMPLETE | 2015-11-03T13:28:45Z | 2673e175-6a2b-4762-80ca-0a579e89ce3e | | 52860845-3dae-478f-a068-76f7f8314295 | web-stack-fo1-b2evznnf2cpj | CREATE_COMPLETE | 2015-11-03T13:29:03Z | 2673e175-6a2b-4762-80ca-0a579e89ce3e | | def2c335-73f6-4fc3-ad5e-df9a535b7981 | web-stack-floating_ip-owxz32qm5ovt | CREATE_COMPLETE | 2015-11-03T13:29:37Z | 2673e175-6a2b-4762-80ca-0a579e89ce3e | +--------------------------------------+------------------------------------+-----------------+----------------------+--------------------------------------+ [root@controller1 lib]# heat resource-list --show-nested 5 web-stack +---------------------------+-------------------------------------------------------------------------------------+--------------------------------------------+-----------------+----------------------+-----------------+ | resource_name | physical_resource_id | resource_type | resource_status | updated_time | parent_resource | +---------------------------+-------------------------------------------------------------------------------------+--------------------------------------------+-----------------+----------------------+-----------------+ | floating_ip | def2c335-73f6-4fc3-ad5e-df9a535b7981 | file:///root/heat/lib/floating.yaml | CREATE_COMPLETE | 2015-11-03T13:28:45Z | | | fo1 | 52860845-3dae-478f-a068-76f7f8314295 | file:///root/heat/lib/fo.yaml | CREATE_COMPLETE | 2015-11-03T13:28:45Z | | | network | 457a6b54-c1b5-44ef-9818-03bc371d1ab5 | file:///root/heat/lib/private-network.yaml | CREATE_COMPLETE | 2015-11-03T13:28:45Z | | | private_network | 6d984518-4fe1-438d-b090-1abd10f5aaef | OS::Neutron::Net | CREATE_COMPLETE | 2015-11-03T13:28:46Z | network | | private_subnet | bd8829bd-a1c5-4eb2-a778-1431eb872846 | OS::Neutron::Subnet | CREATE_COMPLETE | 2015-11-03T13:28:46Z | network | | router | 17ebc0bd-39c9-473f-8902-7c21bf0513a4 | OS::Neutron::Router | CREATE_COMPLETE | 2015-11-03T13:28:46Z | network | | router-interface | 17ebc0bd-39c9-473f-8902-7c21bf0513a4:subnet_id=bd8829bd-a1c5-4eb2-a778-1431eb872846 | OS::Neutron::RouterInterface | CREATE_COMPLETE | 2015-11-03T13:28:46Z | network | | port | 4029a358-b8e6-4614-8c43-bd52e5f71af8 | OS::Neutron::Port | CREATE_COMPLETE | 2015-11-03T13:29:03Z | fo1 | | web_fo | 3db301f3-5f72-420f-9408-aa29b06bd184 | OS::Nova::Server | CREATE_COMPLETE | 2015-11-03T13:29:03Z | fo1 | | web_server_security_group | 1a18e941-bd1b-4f01-92ec-f084e4fc7edf | OS::Neutron::SecurityGroup | CREATE_COMPLETE | 2015-11-03T13:29:03Z | fo1 | | floating_ip | 
99fac315-6301-49cf-a505-59b7a7ab048a | OS::Neutron::FloatingIP | CREATE_COMPLETE | 2015-11-03T13:29:38Z | floating_ip | | floating_ip_assoc | 217 | OS::Neutron::FloatingIPAssociation | CREATE_COMPLETE | 2015-11-03T13:29:38Z | floating_ip | +---------------------------+-------------------------------------------------------------------------------------+--------------------------------------------+-----------------+----------------------+-----------------+ [root@controller1 lib]# heat resource-show web-stack network +------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------+ | attributes | { | | | "name": "web-stack-network-2h7vbad5p2mw-private_network-kxo6vcq5qmp7" | | | } | | description | | | links | http://10.10.10.10:8004/v1/f42a6212d7584cb1bc6997b2049daff5/stacks/web-stack/2673e175-6a2b-4762-80ca-0a579e89ce3e/resources/network (self) | | | http://10.10.10.10:8004/v1/f42a6212d7584cb1bc6997b2049daff5/stacks/web-stack/2673e175-6a2b-4762-80ca-0a579e89ce3e (stack) | | | http://10.10.10.10:8004/v1/f42a6212d7584cb1bc6997b2049daff5/stacks/web-stack-network-2h7vbad5p2mw/457a6b54-c1b5-44ef-9818-03bc371d1ab5 (nested) | | logical_resource_id | network | | physical_resource_id | 457a6b54-c1b5-44ef-9818-03bc371d1ab5 | | required_by | fo1 | | resource_name | network | | resource_status | CREATE_COMPLETE | | resource_status_reason | state changed | | resource_type | file:///root/heat/lib/private-network.yaml | | updated_time | 2015-11-03T13:28:45Z | +------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------+ [root@controller1 lib]# heat resource-show web-stack floating_ip +------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ | attributes | { | | | "ip": "192.168.122.219" | | | } | | description | | | links | http://10.10.10.10:8004/v1/f42a6212d7584cb1bc6997b2049daff5/stacks/web-stack/2673e175-6a2b-4762-80ca-0a579e89ce3e/resources/floating_ip (self) | | | http://10.10.10.10:8004/v1/f42a6212d7584cb1bc6997b2049daff5/stacks/web-stack/2673e175-6a2b-4762-80ca-0a579e89ce3e (stack) | | | http://10.10.10.10:8004/v1/f42a6212d7584cb1bc6997b2049daff5/stacks/web-stack-floating_ip-owxz32qm5ovt/def2c335-73f6-4fc3-ad5e-df9a535b7981 (nested) | | logical_resource_id | floating_ip | | physical_resource_id | def2c335-73f6-4fc3-ad5e-df9a535b7981 | | required_by | | | resource_name | floating_ip | | resource_status | CREATE_COMPLETE | | resource_status_reason | state changed | | resource_type | file:///root/heat/lib/floating.yaml | | updated_time | 2015-11-03T13:28:45Z | +------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------+ [root@controller1 lib]# heat event-list web-stack 
+---------------+--------------------------------------+------------------------+--------------------+----------------------+ | resource_name | id | resource_status_reason | resource_status | event_time | +---------------+--------------------------------------+------------------------+--------------------+----------------------+ | network | cc70dab7-8aec-4534-a174-36639cf124e3 | state changed | CREATE_IN_PROGRESS | 2015-11-03T13:28:45Z | | network | e806166a-d74d-4f8d-89bc-3fa38c4f90d7 | state changed | CREATE_COMPLETE | 2015-11-03T13:28:59Z | | fo1 | bd8a4390-449e-44a4-892c-eb246922415e | state changed | CREATE_IN_PROGRESS | 2015-11-03T13:29:00Z | | fo1 | 2a1e1958-1f98-443d-9118-811763ef0774 | state changed | CREATE_COMPLETE | 2015-11-03T13:29:36Z | | floating_ip | d8a34e79-f154-41d3-97d6-d5623deebb96 | state changed | CREATE_IN_PROGRESS | 2015-11-03T13:29:37Z | | floating_ip | 8cabdc11-5133-46a2-afe7-bd44621d3880 | state changed | CREATE_COMPLETE | 2015-11-03T13:29:45Z | +---------------+--------------------------------------+------------------------+--------------------+----------------------+ Now we are going to install and configure load balancer as a service: [root@controller1 neutron]# yum -y install openstack-neutron-fwaas.noarch openstack-neutron-lbaas.noarch openstack-neutron-vpnaas.noarch haproxy Then we add lbaas to the services: [root@controller1 neutron]# cat neutron.conf | grep -i service_plu service_plugins = router,lbaas # neutron.service_plugins namespace. See setup.cfg for the entrypoint names of # service_plugins = # Example: service_plugins = router,firewall,lbaas,vpnaas,metering Edit the agent config: [root@controller1 neutron]# cat lbaas_agent.ini | grep -v ^# | grep -v ^$ [DEFAULT] debug = True periodic_interval = 10 interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver [haproxy] send_gratuitous_arp = 3 and restart neutron: [root@controller1 system]# openstack-service restart neutron Enable and start the lb agent: [root@controller1 system]# systemctl enable neutron-lbaas-agent.service ln -s '/usr/lib/systemd/system/neutron-lbaas-agent.service' '/etc/systemd/system/multi-user.target.wants/neutron-lbaas-agent.service' [root@controller1 system]# systemctl start neutron-lbaas-agent.service Just to test it out we are going to create a load balancer for our web servers [root@controller1 neutron]# neutron lb-pool-create --description "Web lb for app" --lb-method ROUND_ROBIN --name lbliquid --protocol HTTP --subnet-id web-stack-network-tjbighixmyh6-private_subnet-duplwszf7jnf Created a new pool: +------------------------+--------------------------------------+ | Field | Value | +------------------------+--------------------------------------+ | admin_state_up | True | | description | Web lb for app | | health_monitors | | | health_monitors_status | | | id | 2d2e515d-b062-4ed8-b03e-551e54e90252 | | lb_method | ROUND_ROBIN | | members | | | name | lbliquid | | protocol | HTTP | | provider | haproxy | | status | PENDING_CREATE | | status_description | | | subnet_id | 8e997dce-314e-4442-be72-68cf9b1428e1 | | tenant_id | f42a6212d7584cb1bc6997b2049daff5 | | vip_id | | +------------------------+--------------------------------------+ [root@controller1 neutron]# neutron lb-pool-list +--------------------------------------+----------+----------+-------------+----------+----------------+--------+ | id | name | provider | lb_method | protocol | admin_state_up | status | 
+--------------------------------------+----------+----------+-------------+----------+----------------+--------+ | 2d2e515d-b062-4ed8-b03e-551e54e90252 | lbliquid | haproxy | ROUND_ROBIN | HTTP | True | ACTIVE | +--------------------------------------+----------+----------+-------------+----------+----------------+--------+ [root@controller1 neutron]# neutron lb-vip-create --name liquid-vip --protocol-port 80 --protocol HTTP --subnet-id 8e997dce-314e-4442-be72-68cf9b1428e1 lbliquid Created a new vip: +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | address | 10.20.40.6 | | admin_state_up | True | | connection_limit | -1 | | description | | | id | c7f961a8-fc4a-459a-a17b-1a36b6986b89 | | name | liquid-vip | | pool_id | 2d2e515d-b062-4ed8-b03e-551e54e90252 | | port_id | e686875f-7230-4fb7-8422-a79af9c81185 | | protocol | HTTP | | protocol_port | 80 | | session_persistence | | | status | PENDING_CREATE | | status_description | | | subnet_id | 8e997dce-314e-4442-be72-68cf9b1428e1 | | tenant_id | f42a6212d7584cb1bc6997b2049daff5 | +---------------------+--------------------------------------+ [root@controller1 neutron]# ping ^C [root@controller1 neutron]# neutron lb-vip-list +--------------------------------------+------------+------------+----------+----------------+--------+ | id | name | address | protocol | admin_state_up | status | +--------------------------------------+------------+------------+----------+----------------+--------+ | c7f961a8-fc4a-459a-a17b-1a36b6986b89 | liquid-vip | 10.20.40.6 | HTTP | True | ACTIVE | +--------------------------------------+------------+------------+----------+----------------+--------+ [root@controller1 neutron]# neutron lb-member-create --address 10.20.40.5 --protocol-port 80 lbliquid Created a new member: +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | address | 10.20.40.5 | | admin_state_up | True | | id | 8f761dfe-c9d2-4f9a-b726-6eecd0eda117 | | pool_id | 2d2e515d-b062-4ed8-b03e-551e54e90252 | | protocol_port | 80 | | status | PENDING_CREATE | | status_description | | | tenant_id | f42a6212d7584cb1bc6997b2049daff5 | | weight | 1 | +--------------------+--------------------------------------+ [root@controller1 neutron]# neutron lb-member-create --address 10.20.40.4 --protocol-port 80 lbliquid Created a new member: +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | address | 10.20.40.4 | | admin_state_up | True | | id | 69fa33e7-969c-4f58-9de2-a836004c5573 | | pool_id | 2d2e515d-b062-4ed8-b03e-551e54e90252 | | protocol_port | 80 | | status | PENDING_CREATE | | status_description | | | tenant_id | f42a6212d7584cb1bc6997b2049daff5 | | weight | 1 | +--------------------+--------------------------------------+ [root@controller1 neutron]# neutron lb-member-list +--------------------------------------+------------+---------------+--------+----------------+--------+ | id | address | protocol_port | weight | admin_state_up | status | +--------------------------------------+------------+---------------+--------+----------------+--------+ | 69fa33e7-969c-4f58-9de2-a836004c5573 | 10.20.40.4 | 80 | 1 | True | ACTIVE | | 8f761dfe-c9d2-4f9a-b726-6eecd0eda117 | 10.20.40.5 | 80 | 1 | True | ACTIVE | 
+--------------------------------------+------------+---------------+--------+----------------+--------+ [root@controller1 neutron]# neutron lb-pool-show lbliquid +------------------------+--------------------------------------+ | Field | Value | +------------------------+--------------------------------------+ | admin_state_up | True | | description | Web lb for app | | health_monitors | | | health_monitors_status | | | id | 2d2e515d-b062-4ed8-b03e-551e54e90252 | | lb_method | ROUND_ROBIN | | members | 69fa33e7-969c-4f58-9de2-a836004c5573 | | | 8f761dfe-c9d2-4f9a-b726-6eecd0eda117 | | name | lbliquid | | protocol | HTTP | | provider | haproxy | | status | ACTIVE | | status_description | | | subnet_id | 8e997dce-314e-4442-be72-68cf9b1428e1 | | tenant_id | f42a6212d7584cb1bc6997b2049daff5 | | vip_id | c7f961a8-fc4a-459a-a17b-1a36b6986b89 | +------------------------+--------------------------------------+ [root@controller1 neutron]# neutron port-list +--------------------------------------+-------------------------------------------------+-------------------+----------------------------------------------------------------------------------------+ | id | name | mac_address | fixed_ips | +--------------------------------------+-------------------------------------------------+-------------------+----------------------------------------------------------------------------------------+ | 12e59a81-fe04-4537-ab7c-a5109d651923 | HA port tenant f42a6212d7584cb1bc6997b2049daff5 | fa:16:3e:d3:22:d7 | {"subnet_id": "cae6f97b-a2d4-4858-bf50-77053c5bee50", "ip_address": "169.254.192.16"} | | 1e533eaa-db65-45a6-91ed-c1127f2bfc14 | | fa:16:3e:5c:7c:80 | {"subnet_id": "b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6", "ip_address": "192.168.122.220"} | | 37bd286c-ac74-40f1-b4b5-39c5cf054d88 | | fa:16:3e:d1:a0:96 | {"subnet_id": "b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6", "ip_address": "192.168.122.203"} | | 41a16d4f-c6de-4355-ad10-3ceed4cca8ec | | fa:16:3e:74:b3:4d | {"subnet_id": "b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6", "ip_address": "192.168.122.201"} | | 5914010d-1504-4d2d-9832-5068ac4bb1be | | fa:16:3e:81:f0:42 | {"subnet_id": "8e997dce-314e-4442-be72-68cf9b1428e1", "ip_address": "10.20.40.2"} | | 66a17289-b74e-47e1-a6b0-90a6f2e6974c | web-stack-fo1-jvbtmfk76nu5-port-2vw7uzrhnl6c | fa:16:3e:42:9e:4b | {"subnet_id": "8e997dce-314e-4442-be72-68cf9b1428e1", "ip_address": "10.20.40.5"} | | 67073c5c-a251-418e-b0b7-fdc59a48025f | HA port tenant f42a6212d7584cb1bc6997b2049daff5 | fa:16:3e:eb:33:1d | {"subnet_id": "cae6f97b-a2d4-4858-bf50-77053c5bee50", "ip_address": "169.254.192.15"} | | 6e30e2fb-0fcb-446a-a3da-5ed54e5d6352 | | fa:16:3e:a7:36:72 | {"subnet_id": "0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a", "ip_address": "192.168.0.3"} | | 8de5af98-9a25-4828-a8a1-2940400082b6 | | fa:16:3e:c2:ad:1c | {"subnet_id": "0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a", "ip_address": "192.168.0.9"} | | 9b4a846c-400c-4e79-999e-c364f3e3205f | | fa:16:3e:0f:cc:14 | {"subnet_id": "0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a", "ip_address": "192.168.0.2"} | | 9d12a8a8-787a-4dd5-9626-90d055d021f0 | | fa:16:3e:d6:ff:25 | {"subnet_id": "8e997dce-314e-4442-be72-68cf9b1428e1", "ip_address": "10.20.40.3"} | | b9452ca9-5196-4a32-9fde-1d642addfe52 | HA port tenant f42a6212d7584cb1bc6997b2049daff5 | fa:16:3e:6c:27:5d | {"subnet_id": "cae6f97b-a2d4-4858-bf50-77053c5bee50", "ip_address": "169.254.192.3"} | | c8bf7dbc-c8ba-44c6-b0e5-931fd7c87a0b | web-stack-fo2-5q5svixdztyr-port-j35urzaw4r7o | fa:16:3e:88:f2:36 | {"subnet_id": "8e997dce-314e-4442-be72-68cf9b1428e1", 
"ip_address": "10.20.40.4"} | | d70af1d2-7427-4b10-9f81-5f13fe9d5267 | HA port tenant f42a6212d7584cb1bc6997b2049daff5 | fa:16:3e:65:e4:68 | {"subnet_id": "cae6f97b-a2d4-4858-bf50-77053c5bee50", "ip_address": "169.254.192.4"} | | e686875f-7230-4fb7-8422-a79af9c81185 | vip-c7f961a8-fc4a-459a-a17b-1a36b6986b89 | fa:16:3e:62:25:d1 | {"subnet_id": "8e997dce-314e-4442-be72-68cf9b1428e1", "ip_address": "10.20.40.6"} | | ea1ab80a-c333-4c82-848d-f9a39f6b4a00 | | fa:16:3e:54:0e:17 | {"subnet_id": "8e997dce-314e-4442-be72-68cf9b1428e1", "ip_address": "10.20.40.1"} | | f38a2314-f732-4e82-a09d-0fc0ca89c721 | | fa:16:3e:31:06:2a | {"subnet_id": "0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a", "ip_address": "192.168.0.1"} | | fceed4c0-04c0-448b-9cd7-96f2ca999f69 | | fa:16:3e:1c:80:bf | {"subnet_id": "b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6", "ip_address": "192.168.122.221"} | +--------------------------------------+-------------------------------------------------+-------------------+----------------------------------------------------------------------------------------+ [root@controller1 neutron]# neutron net-list +--------------------------------------+-------------------------------------------------------------+-------------------------------------------------------+ | id | name | subnets | +--------------------------------------+-------------------------------------------------------------+-------------------------------------------------------+ | cc3baa04-78e9-4ba1-838d-b38607099002 | liquid-net | 0e6b7df7-c983-4b4f-9c11-f7ebf73d1d7a 192.168.0.0/24 | | e905eb4c-0f11-454a-8042-6cbb9f6b6396 | ext-net | b51badee-a2b7-4cb4-ab54-83bf4cb2e4c6 192.168.122.0/24 | | 1f4c9171-587b-43f2-bcbf-b5b175a734ca | web-stack-network-tjbighixmyh6-private_network-6amsxliwu3ea | 8e997dce-314e-4442-be72-68cf9b1428e1 10.20.40.0/24 | | 964f0839-032d-4277-bca8-291a03c152d8 | HA network tenant f42a6212d7584cb1bc6997b2049daff5 | cae6f97b-a2d4-4858-bf50-77053c5bee50 169.254.192.0/18 | +--------------------------------------+-------------------------------------------------------------+-------------------------------------------------------+ [root@controller1 neutron]# neutron floatingip-create ext-net Created a new floatingip: +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | fixed_ip_address | | | floating_ip_address | 192.168.122.222 | | floating_network_id | e905eb4c-0f11-454a-8042-6cbb9f6b6396 | | id | 77afec07-46ee-4a47-ae96-e660f1e05e35 | | port_id | | | router_id | | | status | DOWN | | tenant_id | f42a6212d7584cb1bc6997b2049daff5 | +---------------------+--------------------------------------+ [root@controller1 neutron]# neutron floatingip-associate 77afec07-46ee-4a47-ae96-e660f1e05e35 e686875f-7230-4fb7-8422-a79af9c81185 Associated floating IP 77afec07-46ee-4a47-ae96-e660f1e05e35 [root@controller1 ~]# neutron lb-healthmonitor-create --delay 32 --max-retries 4 --timeout 16 --type HTTP Created a new health_monitor: +----------------+--------------------------------------+ | Field | Value | +----------------+--------------------------------------+ | admin_state_up | True | | delay | 32 | | expected_codes | 200 | | http_method | GET | | id | 0bb1e01c-bb1c-4feb-a557-831a36c23694 | | max_retries | 4 | | pools | | | tenant_id | f42a6212d7584cb1bc6997b2049daff5 | | timeout | 16 | | type | HTTP | | url_path | / | +----------------+--------------------------------------+ [root@controller1 ~]# neutron lb-healthmonitor-list 
+--------------------------------------+------+----------------+ | id | type | admin_state_up | +--------------------------------------+------+----------------+ | 0bb1e01c-bb1c-4feb-a557-831a36c23694 | HTTP | True | +--------------------------------------+------+----------------+ [root@controller1 ~]# neutron lb-pool-list +--------------------------------------+----------+----------+-------------+----------+----------------+--------+ | id | name | provider | lb_method | protocol | admin_state_up | status | +--------------------------------------+----------+----------+-------------+----------+----------------+--------+ | 2d2e515d-b062-4ed8-b03e-551e54e90252 | lbliquid | haproxy | ROUND_ROBIN | HTTP | True | ACTIVE | +--------------------------------------+----------+----------+-------------+----------+----------------+--------+ [root@controller1 ~]# neutron lb-healthmonitor-associate 0bb1e01c-bb1c-4feb-a557-831a36c23694 2d2e515d-b062-4ed8-b03e-551e54e90252 Associated health monitor 0bb1e01c-bb1c-4feb-a557-831a36c23694 Ok, so now lets test it out: [root@openstackbox ~]# curl http://192.168.122.222 Fedora 2

Welcome to nginx on Fedora2

[root@openstackbox ~]# curl http://192.168.122.222 Fedora1

Welcome to nginx on Fedora1
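If the curl test ever misbehaves, the haproxy process that the LBaaS agent spawns for the pool can be inspected directly; a rough sketch of where to look, using the pool ID from lb-pool-list above and assuming the default neutron state_path of /var/lib/neutron:

ip netns | grep qlbaas
ip netns exec qlbaas-2d2e515d-b062-4ed8-b03e-551e54e90252 netstat -nlt
cat /var/lib/neutron/lbaas/2d2e515d-b062-4ed8-b03e-551e54e90252/conf

The namespace holds the VIP (10.20.40.6) and the generated haproxy config should list both members, which is handy when a member stays in PENDING_CREATE or the VIP does not answer.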

Working! I would like to add the load balancer to the heat template as well, but I'm leaving that activity for the near future.

Now I want to configure live migration of VMs between the hypervisors. On the compute nodes we configure libvirtd to listen on the network; you can configure TLS and certificates, but here we are going to use plain comms:

[root@compute2 nova]# grep -Ev '(^#|^$)' /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"
[root@compute2 nova]# grep -Ev '(^#|^$)' /etc/sysconfig/libvirtd
LIBVIRTD_CONFIG=/etc/libvirt/libvirtd.conf
LIBVIRTD_ARGS="--listen"
[root@compute2 nova]# systemctl restart libvirtd
[root@compute2 nova]# netstat -natlop | grep -i libvi
tcp        0      0 0.0.0.0:16509           0.0.0.0:*               LISTEN      9055/libvirtd        off (0.00/0/0)
tcp6       0      0 :::16509                :::*                    LISTEN      9055/libvirtd        off (0.00/0/0)

Now let's live migrate:

[root@controller2 ~]# nova hypervisor-servers compute2
+--------------------------------------+-------------------+---------------+---------------------+
| ID                                   | Name              | Hypervisor ID | Hypervisor Hostname |
+--------------------------------------+-------------------+---------------+---------------------+
| bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 | instance-0000000a | 4             | compute2            |
+--------------------------------------+-------------------+---------------+---------------------+
[root@controller2 ~]# nova live-migration fedora22 compute1
[root@controller2 ~]# nova hypervisor-servers compute2
+----+------+---------------+---------------------+
| ID | Name | Hypervisor ID | Hypervisor Hostname |
+----+------+---------------+---------------------+
+----+------+---------------+---------------------+
[root@controller2 ~]# nova hypervisor-servers compute1
+--------------------------------------+-------------------+---------------+---------------------+
| ID                                   | Name              | Hypervisor ID | Hypervisor Hostname |
+--------------------------------------+-------------------+---------------+---------------------+
| bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 | instance-0000000a | 2             | compute1            |
+--------------------------------------+-------------------+---------------+---------------------+

Great, easy in Kilo.
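Two quick ways to keep an eye on a migration, in case it is not as instant as here (a sketch; domjobinfo is run on the source hypervisor while the migration is in flight, and the hypervisor_hostname field in nova show is admin-only):

virsh domjobinfo instance-0000000a
nova show fedora22 | grep -i hypervisor_hostname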
Clone a cinder volume and use it to boot an instance:

[root@controller2 ~]# cinder create --source-volid d8b9cdee-5ee3-4764-a3ae-b607a6daba5a --name fedora-clone 16
[root@controller1 ~]# cinder list
+--------------------------------------+--------+--------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |     Name     | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+--------------+------+-------------+----------+--------------------------------------+
| 3fca5f7f-0296-4ff4-876d-d950508b6a02 | in-use | fedora-clone |  16  |     None    |   true   | 69a07d33-4e75-4d1e-b5cb-3ba4ab758cf3 |
| 6837d5c5-857f-44a4-9149-4f0d491c6548 | in-use |  media-vol   | 100  |     None    |  false   | bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 |
| d8b9cdee-5ee3-4764-a3ae-b607a6daba5a | in-use |     None     |  15  |     None    |   true   | bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 |
+--------------------------------------+--------+--------------+------+-------------+----------+--------------------------------------+
[root@controller1 ~]# nova boot --boot-volume 3fca5f7f-0296-4ff4-876d-d950508b6a02 --flavor m1.fedora --key-name liquid-key --nic net-id=cc3baa04-78e9-4ba1-838d-b38607099002 --security-groups default fedora2
[root@controller1 ~]# nova list
+--------------------------------------+----------+--------+------------+-------------+-----------------------------------------+
| ID                                   | Name     | Status | Task State | Power State | Networks                                |
+--------------------------------------+----------+--------+------------+-------------+-----------------------------------------+
| 69a07d33-4e75-4d1e-b5cb-3ba4ab758cf3 | fedora2  | ACTIVE | -          | Running     | liquid-net=192.168.0.25                 |
| bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 | fedora22 | ACTIVE | -          | Running     | liquid-net=192.168.0.9, 192.168.122.203 |
+--------------------------------------+----------+--------+------------+-------------+-----------------------------------------+

Cloning the new cinder volume only takes seconds; that's because rbd just creates a snapshot of the original and clones from it:

[root@controller1 ~]# cat /etc/cinder/cinder.conf | grep -i pool | grep -v ^#
backup_ceph_pool=backups
rbd_pool=datastore
[root@controller1 ~]# rbd ls datastore
volume-3fca5f7f-0296-4ff4-876d-d950508b6a02
volume-6837d5c5-857f-44a4-9149-4f0d491c6548
volume-d8b9cdee-5ee3-4764-a3ae-b607a6daba5a

Original volume from fedora22:

[root@controller1 ~]# rbd info -p datastore volume-d8b9cdee-5ee3-4764-a3ae-b607a6daba5a
rbd image 'volume-d8b9cdee-5ee3-4764-a3ae-b607a6daba5a':
        size 15360 MB in 3840 objects
        order 22 (4096 kB objects)
        block_name_prefix: rbd_data.85502ae8944a
        format: 2
        features: layering
        flags:

Our clone:

[root@controller1 ~]# rbd info -p datastore volume-3fca5f7f-0296-4ff4-876d-d950508b6a02
rbd image 'volume-3fca5f7f-0296-4ff4-876d-d950508b6a02':
        size 16384 MB in 4096 objects
        order 22 (4096 kB objects)
        block_name_prefix: rbd_data.1d7764d3e3389
        format: 2
        features: layering, striping
        flags:
        parent: datastore/volume-d8b9cdee-5ee3-4764-a3ae-b607a6daba5a@volume-3fca5f7f-0296-4ff4-876d-d950508b6a02.clone_snap
        overlap: 15360 MB
        stripe unit: 4096 kB
        stripe count: 1

If we would like to promote this clone and make it independent from the parent, we can flatten the image as follows:

[root@controller1 ~]# rbd flatten -p datastore volume-3fca5f7f-0296-4ff4-876d-d950508b6a02
Image flatten: 1% complete...
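While the flatten is running (or once it finishes) you can check that the clone's dependency on the parent snapshot disappears; a small sketch using the parent shown in the rbd info output above:

rbd children datastore/volume-d8b9cdee-5ee3-4764-a3ae-b607a6daba5a@volume-3fca5f7f-0296-4ff4-876d-d950508b6a02.clone_snap

Before the flatten this lists the clone; afterwards it should return nothing.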
[root@controller1 ~]# rbd info -p datastore volume-3fca5f7f-0296-4ff4-876d-d950508b6a02
rbd image 'volume-3fca5f7f-0296-4ff4-876d-d950508b6a02':
        size 16384 MB in 4096 objects
        order 22 (4096 kB objects)
        block_name_prefix: rbd_data.1d7764d3e3389
        format: 2
        features: layering, striping
        flags:
        stripe unit: 4096 kB
        stripe count: 1

Here is an example of what ceph/rbd does under the hood when you create a volume snapshot:

[root@controller1 cinder]# cinder list
+--------------------------------------+--------+--------------+------+-------------+----------+--------------------------------------+
|                  ID                  | Status |     Name     | Size | Volume Type | Bootable |             Attached to              |
+--------------------------------------+--------+--------------+------+-------------+----------+--------------------------------------+
| 3fca5f7f-0296-4ff4-876d-d950508b6a02 | in-use | fedora-clone |  16  |     None    |   true   | 69a07d33-4e75-4d1e-b5cb-3ba4ab758cf3 |
| 6837d5c5-857f-44a4-9149-4f0d491c6548 | in-use |  media-vol   | 100  |     None    |  false   | bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 |
| d8b9cdee-5ee3-4764-a3ae-b607a6daba5a | in-use |     None     |  15  |     None    |   true   | bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 |
+--------------------------------------+--------+--------------+------+-------------+----------+--------------------------------------+
[root@controller1 cinder]# rbd info -p datastore volume-6837d5c5-857f-44a4-9149-4f0d491c6548
rbd image 'volume-6837d5c5-857f-44a4-9149-4f0d491c6548':
        size 102400 MB in 25600 objects
        order 22 (4096 kB objects)
        block_name_prefix: rbd_data.12e563d72634
        format: 2
        features: layering, striping
        flags:
        stripe unit: 4096 kB
        stripe count: 1
[root@controller1 cinder]# cinder snapshot-list
+--------------------------------------+--------------------------------------+-----------+-----------+------+
|                  ID                  |              Volume ID               |   Status  |    Name   | Size |
+--------------------------------------+--------------------------------------+-----------+-----------+------+
| 70629670-be40-42bb-8b71-20a8ffc9ae08 | 6837d5c5-857f-44a4-9149-4f0d491c6548 | available | snap-data | 100  |
+--------------------------------------+--------------------------------------+-----------+-----------+------+
[root@controller1 cinder]# rbd -p datastore snap ls volume-6837d5c5-857f-44a4-9149-4f0d491c6548
SNAPID NAME                                              SIZE
     7 snapshot-70629670-be40-42bb-8b71-20a8ffc9ae08 102400 MB

Let's say we delete something on our original volume and we want to roll back to our snapshot. This feature is not yet implemented in openstack, but it's on the way: https://review.openstack.org/#/c/104127/

In the meantime we can do it with RBD directly if we have ceph as our backend. We create a couple of directories after creating our snapshot:

[root@fedora22 media]# ls
Docs  lost+found  Movies  Music  Series  Sports
[root@fedora22 media]# mkdir TEst1
[root@fedora22 media]# mkdir test2
[root@fedora22 media]# ls -ltr
total 44
drwx------. 2 uftp uftp 16384 Oct 29 14:01 lost+found
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:02 Series
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:02 Music
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:02 Docs
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:03 Sports
drwxr-xr-x. 5 uftp uftp  4096 Oct 30 12:26 Movies
drwxr-xr-x. 2 root root  4096 Nov 10 07:30 TEst1
drwxr-xr-x. 2 root root  4096 Nov 10 07:30 test2

We stop/detach the volume we are going to roll back, and then run:

[root@controller1 cinder]# rbd -p datastore --snap snapshot-70629670-be40-42bb-8b71-20a8ffc9ae08 snap rollback volume-6837d5c5-857f-44a4-9149-4f0d491c6548
Rolling back to snapshot: 100% complete...done.
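For reference, the stop/detach before the rollback and the re-attach afterwards can be driven from the nova CLI; a minimal sketch using the instance and volume IDs from the cinder list above:

nova stop fedora22
nova volume-detach bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 6837d5c5-857f-44a4-9149-4f0d491c6548
# rbd snap rollback as shown above
nova volume-attach bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 6837d5c5-857f-44a4-9149-4f0d491c6548
nova start fedora22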
After that we mount the volume again:

total 36
drwx------. 2 uftp uftp 16384 Oct 29 14:01 lost+found
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:02 Series
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:02 Music
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:02 Docs
drwxr-xr-x. 2 uftp uftp  4096 Oct 29 14:03 Sports
drwxr-xr-x. 5 uftp uftp  4096 Oct 30 12:26 Movies

We can see our snapshot has been restored.

Now we are going to install docker and the nova-docker driver; these are the steps from my shell history:

  653  wget http://195.220.108.108/linux/fedora/linux/development/rawhide/source/SRPMS/p/python-pip-7.1.0-1.fc23.src.rpm
  655  rpm -iv python-pip-7.1.0-1.fc23.src.rpm
  658  cd rpmbuild/
  660  cd SPECS/
  662  rpmbuild -bb ./python-pip.spec
  666  yum install rpm-build-4.11.1-25.el7.x86_64
  668  yum install python-devel
  669  rpmbuild -bb ./python-pip.spec
  670  cd ../RPMS/noarch
  672  sudo yum install python-pip-7.1.0-1.el7.centos.noarch.rpm
  675  yum install docker-io -y
  676  mkdir /var/lib/docker
  679  yum install git
  681  git clone http://github.com/stackforge/nova-docker.git
  682  cd nova-docker/
  684  git checkout -b kilo origin/stable/kilo
  685  git branch -v -a
  686  python setup.py install
  687  systemctl start docker
  692  chmod 666 /var/run/docker.sock
  693  mkdir /etc/nova/rootwrap.d
  700  docker run ubuntu /bin/bash
  701  docker ubuntu ps -a

This part gets docker working and the nova-docker driver installed; now we have to configure nova. We need a dedicated compute node for docker, so I cloned compute2 to compute3 and then configured compute3's nova.conf to use the nova-docker driver:

[root@compute3 ~]# cat /etc/nova/nova.conf | grep -i compute_driver
compute_driver = novadocker.virt.docker.DockerDriver
[root@compute3 ~]# systemctl restart openstack-nova-compute

Now in glance:

[root@controller1 ~]# cat /etc/glance/glance-api.conf | grep -i docker
container_formats=ami,ari,aki,bare,ovf,ova,docker
[root@controller1 ~]# openstack-service restart glance

We are now going to upload a docker image to glance:

[root@compute3 ~]# docker pull rastasheep/ubuntu-sshd:14.04
Trying to pull repository docker.io/rastasheep/ubuntu-sshd ...
14.04: Pulling from rastasheep/ubuntu-sshd 1bcadc55d550: Pull complete a5d9a9206c6e: Pull complete 7e40149a8ce3: Pull complete 4e0ce13b9139: Pull complete ad5d69b4f00e: Pull complete 20b0be271037: Pull complete b657fe661538: Pull complete dd81e7ca22a0: Pull complete a5c0d5f358f7: Pull complete a5c0d5f358f7: Pulling fs layer 2332d8973c93: Already exists ea358092da77: Already exists a467a7c6794f: Already exists Digest: sha256:27498a23450faad52548d9b3667ae8ab3482c934e95314eaf26aec5d224a7d5d Status: Downloaded newer image for docker.io/rastasheep/ubuntu-sshd:14.04 [root@compute3 ~]# docker save rastasheep/ubuntu-sshd:14.04 | glance image-create --container-format=docker --disk-format=raw --name rastasheep/ubuntu-sshd:14.04 +------------------+----------------------------------------------------------------------------------+ | Property | Value | +------------------+----------------------------------------------------------------------------------+ | checksum | 1fc4c770d69fd16d6eada1fe88293681 | | container_format | docker | | created_at | 2015-11-11T10:31:04Z | | direct_url | rbd://7e92d6e2-3c06-417b-9c23-ef86f2166393/images/8d5e936f- | | | 79a9-4c1e-b779-34860b85baf5/snap | | disk_format | raw | | id | 8d5e936f-79a9-4c1e-b779-34860b85baf5 | | min_disk | 0 | | min_ram | 0 | | name | rastasheep/ubuntu-sshd:14.04 | | owner | f42a6212d7584cb1bc6997b2049daff5 | | protected | False | | size | 264492544 | | status | active | | tags | [] | | updated_at | 2015-11-11T10:31:36Z | | virtual_size | None | | visibility | private | +------------------+----------------------------------------------------------------------------------+ [root@compute3 ~]# glance image-list +--------------------------------------+-------------------------------+ | ID | Name | +--------------------------------------+-------------------------------+ | 2ea14813-6775-4b32-b0b6-2abc5a349f94 | centos7 | | b7657f2f-2a1e-43b4-8f26-c02996b5c99a | cirros-0.3.5-x86_64 | | 650bce53-f8ee-4125-ad38-5f4ba3528048 | cloud-Fedora-22.img | | 5554c2ae-b607-40a0-bb98-7fc6a20065dd | cloud-trusty-ubuntu-amd64.img | | 8d5e936f-79a9-4c1e-b779-34860b85baf5 | rastasheep/ubuntu-sshd:14.04 | +--------------------------------------+-------------------------------+ No we need to create a docker availability zone: [root@controller1 ~]# nova availability-zone-list +-----------------------+----------------------------------------+ | Name | Status | +-----------------------+----------------------------------------+ | internal | available | | |- controller1 | | | | |- nova-conductor | enabled :-) 2015-11-11T13:40:40.000000 | | | |- nova-cert | enabled :-) 2015-11-11T13:40:40.000000 | | | |- nova-consoleauth | enabled :-) 2015-11-11T13:40:43.000000 | | | |- nova-scheduler | enabled :-) 2015-11-11T13:40:36.000000 | | |- controller2 | | | | |- nova-conductor | enabled :-) 2015-11-11T13:40:44.000000 | | | |- nova-scheduler | enabled :-) 2015-11-11T13:40:42.000000 | | | |- nova-consoleauth | enabled :-) 2015-11-11T13:40:45.000000 | | | |- nova-cert | enabled :-) 2015-11-11T13:40:43.000000 | | nova | available | | |- compute1 | | | | |- nova-compute | enabled :-) 2015-11-11T13:40:45.000000 | | |- compute2 | | | | |- nova-compute | enabled XXX 2015-11-11T13:12:44.000000 | | |- compute3 | | | | |- nova-compute | enabled :-) 2015-11-11T13:40:35.000000 | +-----------------------+----------------------------------------+ [root@controller1 ~]# nova aggregate-create docker_az docker +----+-----------+-------------------+-------+----------------------------+ | Id | Name | 
Availability Zone | Hosts | Metadata | +----+-----------+-------------------+-------+----------------------------+ | 1 | docker_az | docker | | 'availability_zone=docker' | +----+-----------+-------------------+-------+----------------------------+ [root@controller1 ~]# nova aggregate-add-host docker_az compute3 Host compute3 has been successfully added for aggregate 1 +----+-----------+-------------------+------------+----------------------------+ | Id | Name | Availability Zone | Hosts | Metadata | +----+-----------+-------------------+------------+----------------------------+ | 1 | docker_az | docker | 'compute3' | 'availability_zone=docker' | +----+-----------+-------------------+------------+----------------------------+ [root@controller1 ~]# nova service-list +----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+ | Id | Binary | Host | Zone | Status | State | Updated_at | Disabled Reason | +----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+ | 2 | nova-cert | controller1 | internal | enabled | up | 2015-11-11T13:44:30.000000 | - | | 4 | nova-conductor | controller1 | internal | enabled | up | 2015-11-11T13:44:30.000000 | - | | 6 | nova-consoleauth | controller1 | internal | enabled | up | 2015-11-11T13:44:33.000000 | - | | 8 | nova-scheduler | controller1 | internal | enabled | up | 2015-11-11T13:44:26.000000 | - | | 10 | nova-conductor | controller2 | internal | enabled | up | 2015-11-11T13:44:26.000000 | - | | 12 | nova-scheduler | controller2 | internal | enabled | up | 2015-11-11T13:44:32.000000 | - | | 14 | nova-consoleauth | controller2 | internal | enabled | up | 2015-11-11T13:44:25.000000 | - | | 16 | nova-cert | controller2 | internal | enabled | up | 2015-11-11T13:44:33.000000 | - | | 18 | nova-compute | compute1 | nova | enabled | up | 2015-11-11T13:44:25.000000 | - | | 20 | nova-compute | compute2 | nova | enabled | down | 2015-11-11T13:12:44.000000 | - | | 21 | nova-compute | compute3 | docker | enabled | up | 2015-11-11T13:44:25.000000 | - | +----+------------------+-------------+----------+---------+-------+----------------------------+-----------------+ [root@controller1 ~]# nova availability-zone-list +-----------------------+----------------------------------------+ | Name | Status | +-----------------------+----------------------------------------+ | internal | available | | |- controller1 | | | | |- nova-conductor | enabled :-) 2015-11-11T13:48:40.000000 | | | |- nova-cert | enabled :-) 2015-11-11T13:48:40.000000 | | | |- nova-consoleauth | enabled :-) 2015-11-11T13:48:43.000000 | | | |- nova-scheduler | enabled :-) 2015-11-11T13:48:46.000000 | | |- controller2 | | | | |- nova-conductor | enabled :-) 2015-11-11T13:48:46.000000 | | | |- nova-scheduler | enabled :-) 2015-11-11T13:48:42.000000 | | | |- nova-consoleauth | enabled :-) 2015-11-11T13:48:45.000000 | | | |- nova-cert | enabled :-) 2015-11-11T13:48:43.000000 | | nova | available | | |- compute1 | | | | |- nova-compute | enabled :-) 2015-11-11T13:48:45.000000 | | |- compute2 | | | | |- nova-compute | enabled XXX 2015-11-11T13:12:44.000000 | | docker | available | | |- compute3 | | | | |- nova-compute | enabled :-) 2015-11-11T13:48:45.000000 | +-----------------------+----------------------------------------+ [root@controller1 ~]# nova boot --availability-zone docker --image rastasheep/ubuntu-sshd:14.04 --flavor m1.tiny --key-name liquid-key --nic 
[root@controller1 ~]# nova boot --availability-zone docker --image rastasheep/ubuntu-sshd:14.04 --flavor m1.tiny --key-name liquid-key --nic net-id=cc3baa04-78e9-4ba1-838d-b38607099002 --security-groups default docker
+--------------------------------------+---------------------------------------------------------------------+
| Property                             | Value                                                               |
+--------------------------------------+---------------------------------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                                              |
| OS-EXT-AZ:availability_zone          | nova                                                                |
| OS-EXT-SRV-ATTR:host                 | -                                                                   |
| OS-EXT-SRV-ATTR:hypervisor_hostname  | -                                                                   |
| OS-EXT-SRV-ATTR:instance_name        | instance-0000005b                                                   |
| OS-EXT-STS:power_state               | 0                                                                   |
| OS-EXT-STS:task_state                | scheduling                                                          |
| OS-EXT-STS:vm_state                  | building                                                            |
| OS-SRV-USG:launched_at               | -                                                                   |
| OS-SRV-USG:terminated_at             | -                                                                   |
| accessIPv4                           |                                                                     |
| accessIPv6                           |                                                                     |
| adminPass                            | 9mqRH2RditFz                                                        |
| config_drive                         |                                                                     |
| created                              | 2015-11-11T13:48:07Z                                                |
| flavor                               | m1.tiny (1)                                                         |
| hostId                               |                                                                     |
| id                                   | 1b100cf9-6ec9-4f4a-bb23-8f6d47f6b727                                |
| image                                | rastasheep/ubuntu-sshd:14.04 (8d5e936f-79a9-4c1e-b779-34860b85baf5) |
| key_name                             | liquid-key                                                          |
| metadata                             | {}                                                                  |
| name                                 | docker                                                              |
| os-extended-volumes:volumes_attached | []                                                                  |
| progress                             | 0                                                                   |
| security_groups                      | default                                                             |
| status                               | BUILD                                                               |
| tenant_id                            | f42a6212d7584cb1bc6997b2049daff5                                    |
| updated                              | 2015-11-11T13:48:07Z                                                |
| user_id                              | 0760f4fa4ef14ef19c5c5194a4cf6b0b                                    |
+--------------------------------------+---------------------------------------------------------------------+

Now we have our docker instance running:

[root@controller1 ~]# nova list
+--------------------------------------+----------+---------+------------+-------------+-----------------------------------------+
| ID                                   | Name     | Status  | Task State | Power State | Networks                                |
+--------------------------------------+----------+---------+------------+-------------+-----------------------------------------+
| 8a139577-a1c3-4362-9903-f0f413d4c732 | cirros   | ACTIVE  | -          | Running     | liquid-net=192.168.0.34                 |
| 1b100cf9-6ec9-4f4a-bb23-8f6d47f6b727 | docker   | ACTIVE  | -          | Running     | liquid-net=192.168.0.43                 |
| bfdace39-3c19-4e1a-a68a-4dd57e6dbba9 | fedora22 | SHUTOFF | -          | Shutdown    | liquid-net=192.168.0.9, 192.168.122.203 |
+--------------------------------------+----------+---------+------------+-------------+-----------------------------------------+

On the compute host:

[root@compute3 nova]# docker ps
CONTAINER ID   IMAGE                          COMMAND               CREATED              STATUS              PORTS   NAMES
23eef950689d   rastasheep/ubuntu-sshd:14.04   "/usr/sbin/sshd -D"   About a minute ago   Up About a minute           nova-1b100cf9-6ec9-4f4a-bb23-8f6d47f6b727
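A few ways to poke at the container directly on compute3, as a sketch using the container id from the docker ps output above (the rastasheep/ubuntu-sshd image is documented to run sshd with root/root credentials):

docker logs 23eef950689d                                   # anything sshd printed on start-up
docker exec -it 23eef950689d bash                          # a shell inside the container, the docker way
docker inspect --format '{{ .State.Pid }}' 23eef950689d    # PID whose network namespace nova-docker wired into Neutron

From anywhere that can reach liquid-net (for example the qrouter/qdhcp namespaces, if the usual Neutron agents are in place), ssh root@192.168.0.43 should also get you in.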
#Configure destination NAT on libvirt, so we can expose VMs sitting on the NATed libvirt network to the public IP:

[root@openstackbox hooks]# cat /etc/libvirt/hooks/qemu
#!/bin/bash
# Adapted from the advanced example script to handle multiple ports:
# use an equal number of guest and host ports.
# Update the following variables to fit your setup
Guest_name=haproxy1
Guest_ipaddr=192.168.122.11
Host_ipaddr=94.23.214.41
Host_port=( '6666' '8888' )
Guest_port=( '22' '43' )
length=$(( ${#Host_port[@]} - 1 ))

if [ "${1}" = "${Guest_name}" ]; then
   if [ "${2}" = "stopped" ] || [ "${2}" = "reconnect" ]; then
       for i in `seq 0 $length`; do
           iptables -t nat -D PREROUTING -d ${Host_ipaddr} -p tcp --dport ${Host_port[$i]} -j DNAT --to ${Guest_ipaddr}:${Guest_port[$i]}
           iptables -D FORWARD -d ${Guest_ipaddr}/32 -p tcp -m state --state NEW -m tcp --dport ${Guest_port[$i]} -j ACCEPT
       done
   fi
   if [ "${2}" = "start" ] || [ "${2}" = "reconnect" ]; then
       for i in `seq 0 $length`; do
           iptables -t nat -A PREROUTING -d ${Host_ipaddr} -p tcp --dport ${Host_port[$i]} -j DNAT --to ${Guest_ipaddr}:${Guest_port[$i]}
           iptables -I FORWARD -d ${Guest_ipaddr}/32 -p tcp -m state --state NEW -m tcp --dport ${Guest_port[$i]} -j ACCEPT
       done
   fi
fi

[root@openstackbox hooks]# systemctl restart libvirtd
[root@openstackbox hooks]# iptables -t nat -vnL
Chain PREROUTING (policy ACCEPT 76 packets, 3691 bytes)
 pkts bytes target prot opt in  out source      destination
    0     0 DNAT   tcp  --  *   *   0.0.0.0/0   94.23.214.41   tcp dpt:6666 to:192.168.122.11:22
    0     0 DNAT   tcp  --  *   *   0.0.0.0/0   94.23.214.41   tcp dpt:8888 to:192.168.122.11:43

So when the VM starts, the rules are added to iptables, and when it stops they are deleted.
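To check the hook end to end, a quick sketch (the ports and addresses are the ones hard-coded in the script above, and IP forwarding must be enabled on the host for the FORWARD rules to matter):

sysctl net.ipv4.ip_forward                                # must be 1
ssh -p 6666 root@94.23.214.41                             # lands on 192.168.122.11:22 via the DNAT rule
iptables -t nat -vnL PREROUTING | grep 192.168.122.11     # packet counters should increase after the ssh attempt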