Browse code

Rename from ha*config to ipfailover. Fixes and cleanup as per @smarterclayton review comments.

ramr authored on 2015/04/22 04:06:04
Showing 51 changed files
... ...
@@ -57,7 +57,6 @@ ability to setup a high availability configuration on a selection of nodes.
57 57
             <options> = One or more of:
58 58
                 --type=keepalived  #  For now, always keepalived.
59 59
                 --create
60
-                --delete
61 60
                 --credentials=<credentials>
62 61
                 --no-headers=<headers>
63 62
                 -o|--output=<format>
... ...
@@ -122,9 +121,6 @@ Examples:
122 122
                                    --virtual-ips="54.192.0.42-43"       \
123 123
                                    --watch-port=80  --create
124 124
 
125
-       $ # Delete a previously created HA configuration.
126
-       $ openshift admin ha-config ha-amzn --delete
127
-
128 125
 
129 126
 ## Under-the-hood
130 127
 Under the hood, the HA configuration creates and starts up an HA sidecar
... ...
@@ -155,6 +151,10 @@ This allows us to the cover:
155 155
   1. When a cluster is modified - really node labels are modified.
156 156
   1. Failure cases when a node or watched service or network fails.
157 157
 
158
+Note: The PerNodeController in the future will remove the need to watch
159
+      the nodes when a cluster is resized or modified, as the keepalived
160
+      sidecar pod would be directly run on the given set of nodes.
161
+
158 162
 
159 163
 ## Usage
160 164
 The intended usage is a workflow that follows a pattern similar to the
... ...
@@ -35,7 +35,7 @@ tar xzf "${OS_IMAGE_RELEASE_TAR}" -C "${imagedir}"
35 35
 # Copy primary binaries to the appropriate locations.
36 36
 cp -f "${imagedir}/openshift" images/origin/bin
37 37
 cp -f "${imagedir}/openshift" images/router/haproxy/bin
38
-cp -f "${imagedir}/openshift" images/ha-config/keepalived/bin
38
+cp -f "${imagedir}/openshift" images/ipfailover/keepalived/bin
39 39
 
40 40
 # Copy image binaries to the appropriate locations.
41 41
 cp -f "${imagedir}/pod" images/pod/bin
... ...
@@ -57,7 +57,7 @@ image openshift/origin-haproxy-router        images/router/haproxy
57 57
 # For now, don't build the v2 registry image
58 58
 # To be reenabled when we actually switch to the v2 registry
59 59
 #image openshift/origin-docker-registry       images/dockerregistry
60
-image openshift/origin-keepalived-ha-config  images/ha-config/keepalived
60
+image openshift/origin-keepalived-ipfailover images/ipfailover/keepalived
61 61
 # images that depend on openshift/origin
62 62
 image openshift/origin-deployer              images/deployer
63 63
 image openshift/origin-docker-builder        images/builder/docker/docker-builder
64 64
deleted file mode 100644
... ...
@@ -1,2 +0,0 @@
1
-settings.minion*
2
-!.gitignore
3 1
deleted file mode 100644
... ...
@@ -1,17 +0,0 @@
1
-#
2
-# VIP failover monitoring container for OpenShift Origin.
3
-#
4
-# ImageName: openshift/origin-keepalived-ha-config
5
-#
6
-# FROM fedora
7
-FROM openshift/origin-base
8
-
9
-RUN yum -y install kmod keepalived iproute psmisc nc net-tools
10
-
11
-ADD conf/ /var/lib/openshift/ha-config/keepalived/conf/
12
-ADD lib/  /var/lib/openshift/ha-config/keepalived/lib/
13
-ADD bin/  /var/lib/openshift/ha-config/keepalived/bin/
14
-ADD monitor.sh /var/lib/openshift/ha-config/keepalived/
15
-
16
-EXPOSE 1985
17
-ENTRYPOINT ["/var/lib/openshift/ha-config/keepalived/monitor.sh"]
18 1
deleted file mode 100644
... ...
@@ -1,312 +0,0 @@
1
-HA Router and Failover
2
-======================
3
-This readme describes steps to add multiple HA OpenShift routers with
4
-failover capability to achieve several nines of availability.
5
-
6
-
7
-Build and Test
8
-1.  Verify docker image build and run tests.
9
-
10
-        $ make -f makefile.test  #  or make -f makefile.test image
11
-        $ make -f makefile.test test
12
-
13
-
14
-Pre-requisites/Prep Time
15
-
16
-1. Launch an OpenShift cluster via whatever mechanism you use. The steps
17
-   below assume you are doing this on a dev machine using vagrant.
18
-
19
-        $ export OPENSHIFT_DEV_CLUSTER=1
20
-        $ cd $this-repo-git-co-dir  # cloned from git@github.com:ramr/origin
21
-        $ vagrant up
22
-
23
-
24
-2. Wait for the cluster to come up and then start the OpenShift router
25
-   with two (_2_) replicas.
26
-
27
-        $ vagrant ssh minion-1  # (or master or minion-2).
28
-        #  Ensure KUBECONFIG is set or else set it.
29
-        [ -n "$KUBECONFIG" ] ||  \
30
-           export KUBECONFIG=/openshift.local.certificates/admin/.kubeconfig
31
-        #  openshift kube get dc,rc,pods,se,mi,routes
32
-        openshift ex router arparp --create --replicas=2  \
33
-                                   --credentials="${KUBECONFIG}"
34
-
35
-
36
-3. Wait for the Router pods to get into running state (I'm just sitting
37
-   here watching the wheels go round and round).
38
-
39
-        $ vagrant ssh minion-1 # (or master or minion-2).
40
-        pods="openshift/origin-haproxy-router|openshift/origin-deployer"
41
-        while openshift kube get pods | egrep -e "$pods" |   \
42
-                grep "Pending" > /dev/null; do
43
-            echo -n "."
44
-            #  "OkOk"
45
-            sleep 1
46
-        done
47
-        echo ""
48
-
49
-
50
-4. Check that the two OpenShift router replicas are up and serving.
51
-
52
-        $ #  This will be a bit slow, but it should return a 503 HTTP code
53
-        $ #  indicating that haproxy is serving on port 80.
54
-        $ vagrant ssh minion-1
55
-        sudo docker ps  | grep "openshift/origin-haproxy-router"
56
-        curl -s -o /dev/null -w "%{http_code}\n"  http://localhost/
57
-
58
-        $ #  Repeat on minion-2:
59
-        $ vagrant ssh minion-2
60
-        sudo docker ps  | grep "openshift/origin-haproxy-router"
61
-        curl -s -o /dev/null -w "%{http_code}\n"  http://localhost/
62
-
63
-
64
-5. Create an user, project and app.
65
-
66
-        $ vagrant ssh minion-1
67
-        #  Add user and project.
68
-        openshift ex policy add-user view anypassword:test-admin
69
-        openshift ex new-project test --display-name="Failover Sample" \
70
-           --description="Router Failover" --admin=anypassword:test-admin
71
-        #  Create a test app using the template.
72
-        cd /vagrant/hack/exp/router-failover
73
-        openshift cli create -n test -f conf/hello-openshift-template.json
74
-
75
-        echo "Wait for the app to startup and check app is reachable."
76
-        for ip in 10.245.2.3 10.245.2.4; do
77
-          curl -H "Host: hello.openshift.test" -o /dev/null -s -m 5  \
78
-               -w "%{http_code}\n" http://$ip/
79
-        done
80
-        echo "Ensure HTTP status code is 200 for both http://10.245.2.{3,4}"
81
-        #  && echo "YAY"
82
-
83
-
84
-6. Ensure you can get to the hello openshift app from inside/outside the vm.
85
-
86
-        $ #  minion-{1,2} use IPs 10.245.2.{3,4} in the dev environment.
87
-        for ip in 10.245.2.3 10.245.2.4; do
88
-          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
89
-                            -m 5 http://hello.openshift.test)"
90
-        done
91
-
92
-
93
-HA Routing Failover Setup
94
-=========================
95
-
96
-1. Copy the router HA settings example config and edit it as needed.
97
-
98
-        $ cd /vagrant/hack/exp/router-failover
99
-        $ cp conf/settings.example  settings.minion-1
100
-        $ cp conf/settings.example  settings.minion-2
101
-        $ #
102
-        $ #  And as per your environment, set/edit the values for
103
-        $ #    ADMIN_EMAILS, EMAIL_FROM, SMTP_SERVER,
104
-        $ #    PRIMARY_HA_VIPS, SLAVE_HA_VIPS and INTERFACE.
105
-
106
-2. For demo purposes, we are going to flip the PRIMARY and SLAVE groups
107
-   on minion-2 ... this allows both minions to serve in an Active-Active
108
-   fashion.
109
-
110
-        $ #  Flip PRIMARY+SLAVE groups on minion-2 ("Papoy?! Ah Papoy!!").
111
-        $ sed -i "s/^PRIMARY_GROUPS=\(.*\)/PRIMARY_GROUPS_OLD=\1/g;
112
-                  s/^SLAVE_GROUPS=\(.*\)/PRIMARY_GROUPS=\1/g;
113
-                  s/^PRIMARY_GROUPS_OLD=\(.*\)/SLAVE_GROUPS=\1/g;" \
114
-              settings.minion-2
115
-
116
-        $ #  Check what the differences are on the minions.
117
-        $ diff conf/settings.example  settings.minion-1
118
-        $ diff conf/settings.example  settings.minion-2
119
-
120
-
121
-3. Optionally clear the config - just so that we have a completely clean
122
-   slate. Step 4 below does this - but this is here just for my demo env
123
-   reuse purposes.
124
-
125
-        $ #  Run these commands on the minions via vagrant ssh minion-{1,2}
126
-        $ #    sudo service keepalived stop
127
-        $ #    sudo rm -f /etc/keepalived/keepalived.conf
128
-
129
-        $ #  OkOk
130
-        for m in minion-1 minion-2; do
131
-           vagrant ssh $m -c "sudo service keepalived stop;  \
132
-                              sudo rm -f /etc/keepalived/keepalived.conf"
133
-        done
134
-
135
-
136
-4. Setup router HA with failover using the 2 config files we created.
137
-
138
-        $ #  Run these commands on the minions via vagrant ssh minion-{1,2}
139
-        $ #    cd /vagrant/hack/exp/router-failover
140
-        $ #    sudo ./failover-setup.sh settings.minion-{1,2}
141
-
142
-        $ #  OkOk - minion-1
143
-        for m in minion-1 minion-2; do
144
-           vagrant ssh $m -c "cd /vagrant/hack/exp/router-failover;  \
145
-                              sudo ./failover-setup.sh settings.$m"
146
-        done
147
-
148
-
149
-5. On each minion, you can check what VIPs are being serviced by that
150
-   minion via `ip a ls dev enp0s8`. Substitute the appropriate interface
151
-   name for `enp0s8` in your environment.
152
-
153
-        $ #  "minions laughing" ...
154
-        for m in minion-1 minion-2; do
155
-           vagrant ssh $m -c "ip a ls dev enp0s8"
156
-        done
157
-
158
-
159
-6. Check that you can get to the hello openshift app using the VIPs from
160
-   inside/outside the vms.
161
-
162
-        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
163
-          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
164
-                            -m 5 http://hello.openshift.test)"
165
-        done
166
-        #  && echo "YAY"
167
-
168
-
169
-HA Routing Failover Demo
170
-========================
171
-Whilst following the steps below, you can also monitor one of the VIPs on a
172
-terminal on your host system. This just busy loops sending requests to a
173
-specific VIP.
174
-
175
-        tko="--connect-timeout 2"  #  Maybe use -m 2 instead.
176
-        resolver="--resolve hello.openshift.test:80:10.245.2.111"
177
-        while true; do
178
-          echo "$(date): $(curl -s $tko $resolver hello.openshift.test)"
179
-        done | tee /tmp/foo
180
-
181
-
182
-HA Simple Failover Test (keepalived)
183
-====================================
184
-The simplest test on VIP failover is to stop keepalived on one of the
185
-minions.
186
-
187
-        $ vagrant ssh minion-1
188
-
189
-        $ #  Check which VIPs are served by this minion.
190
-        ip a ls dev enp0s8
191
-
192
-        $ #  Make sure the VIP in the busy loop above 10.245.2.111 is
193
-        $ #  "owned"/serviced by this minion. Or then use a VIP that's
194
-        $ #  serviced by this minion in the above mentioned busy looper
195
-        $ #  monitoring script (while true; curl ... done).
196
-        sudo service keepalived stop
197
-
198
-        $ vagrant ssh minion-2
199
-        #  Check that the VIPs from minion-1 are taken over by this minion.
200
-        ip a ls dev enp0s8
201
-
202
-        $ vagrant ssh minion-1
203
-        $ #  Set things back to a "good" state by starting back keepalived.
204
-        sudo service keepalived start
205
-
206
-        $ #  Check the VIPs served by this minion.
207
-        ip a ls dev enp0s8
208
-
209
-
210
-HA Hard Failover Test (bring down the minion)
211
-=============================================
212
-The hard failover VIP test basically involves stopping the whole shebang
213
-(keepalived, openshift-router and haproxy) by bringing down one of
214
-the minions.
215
-
216
-1. Halt one of the minions ("Aww") ...
217
-
218
-        $ #  If you are monitoring a specific VIP ala 10.245.2.111 in the
219
-        $ #  example mentioned above, then bring down the minion that's
220
-        $ #  "owns" that VIP. For now, bringing a random one down.
221
-        $ vagrant halt minion-$((RANDOM%2 + 1))
222
-
223
-
224
-2. Check that you can still get to the hello openshift app using the VIPs
225
-   from inside/outside the vms.
226
-
227
-        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
228
-          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
229
-                            -m 5 http://hello.openshift.test)"
230
-        done
231
-        $ #  && echo "YAY"
232
-
233
-
234
-3. Bring back the minion ("YAY") ...
235
-
236
-        $ vagrant up minion-{1,2}
237
-
238
-
239
-4. Wait for the minion to come back online.
240
-
241
-5. Check how the VIPs are balanced between the 2 minions.
242
-
243
-        for m in minion-1 minion-2; do
244
-          vagrant ssh $m -c "ip a ls dev enp0s8"
245
-        done
246
-
247
-6. Check that you can still get to the hello openshift app using the VIPs
248
-   from inside/outside the vms.
249
-
250
-        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
251
-          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
252
-                            -m 5 http://hello.openshift.test)"
253
-        done
254
-        $ #  && echo "YAY"
255
-
256
-
257
-
258
-HA Soft Failover Test
259
-=====================
260
-
261
-1. Eventually this would test the keepalived process - but for now this
262
-   just shows how long the Kubernetes Replication Controller takes to
263
-   restart the services.
264
-
265
-        $ #  Stop the router on one of the minions ("Aaw").
266
-        $ vagrant ssh minion-$((RANDOM%2 + 1))
267
-        sudo kill -9 $(ps -e -opid,args | grep openshift-router |  \
268
-                          grep -v grep | awk '{print $1}')
269
-        $ # OR:
270
-        sudo docker rm -f $(sudo docker ps |  \
271
-                               grep openshift/origin-haproxy-router |  \
272
-                               awk '{print $1}')
273
-
274
-2. Check that you can still get to the hello openshift app using the VIPs
275
-   from inside/outside the vms.
276
-
277
-        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
278
-          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
279
-                            -m 5 http://hello.openshift.test)"
280
-        done
281
-        $ #  && echo "YAY"
282
-        $ #  Wait for the router to come back up and run above check again.
283
-
284
-
285
-
286
-TODOs/Edge CASES:
287
-
288
-##  *Beware of the dog - it bites! You have been warned*
289
-There's a 2 second delay (process existence check) as of now, we can
290
-tune this up/down appropriately.
291
-And it is pertinent to mention here that this solution is not true
292
-fault-tolerance (100% availability) - it's just failover capability to
293
-provide high availability (99.[9]{n}% availability - cheap but by no
294
-means perfect).
295
-So be aware of this and use it appropriately within your environment.
296
-
297
-One alternative to achieve several more 9s of availability is to
298
-  * stop keepalived immediately if the router or the docker container
299
-    running the router goes down.
300
-  * And start keepalived when the router comes back up because
301
-    the replication controller notices things ain't kosher.
302
-But the bang for buck here is low.
303
-
304
-
305
-##  *Sound Effects*
306
-Link quoted sound effects (ala "OkOk") to
307
-
308
-        http://www.soundboard.com/sb/minions
309
-
310 1
deleted file mode 100644
... ...
@@ -1,2 +0,0 @@
1
-*
2
-!.gitignore
3 1
deleted file mode 100644
... ...
@@ -1,65 +0,0 @@
1
-{
2
-    "metadata":{
3
-        "name":"hello-service-pod-meta"
4
-    },
5
-    "kind":"Config",
6
-    "apiVersion":"v1beta1",
7
-    "creationTimestamp":"2014-09-18T18:28:38-04:00",
8
-    "items":[
9
-        {
10
-            "id": "hello-openshift-service",
11
-            "kind": "Service",
12
-            "apiVersion": "v1beta1",
13
-            "port": 8080,
14
-            "selector": {
15
-                "name": "hello-openshift-label"
16
-            }
17
-        },
18
-        {
19
-            "id": "hello-openshift",
20
-            "kind": "ReplicationController",
21
-            "apiVersion": "v1beta1",
22
-            "desiredState": {
23
-                "podTemplate": {
24
-                    "desiredState": {
25
-                        "manifest": {
26
-                            "version": "v1beta1",
27
-                            "id": "",
28
-                            "containers": [{
29
-                                "name": "hello-openshift-container",
30
-                                "image": "openshift/hello-openshift",
31
-                                "ports": [{
32
-                                    "hostPort": 6061,
33
-                                    "containerPort": 8080,
34
-                                    "protocol": "TCP"
35
-                                }]
36
-                            }],
37
-                            "dnsPolicy": "ClusterFirst",
38
-                            "restartPolicy": {
39
-                                "always": {}
40
-                            },
41
-                            "volumes": null
42
-                        }
43
-                    },
44
-                    "labels": {
45
-                        "name": "hello-openshift-label"
46
-                    }
47
-                },
48
-                "replicaSelector": {
49
-                    "name": "hello-openshift-label"
50
-                },
51
-                "replicas": 1
52
-            }
53
-        },
54
-        {
55
-            "kind": "Route",
56
-            "apiVersion": "v1beta1",
57
-            "metadata": {
58
-                "name": "hello-openshift-route"
59
-            },
60
-            "id": "hello-openshift-route",
61
-            "host": "hello.openshift.test",
62
-            "serviceName": "hello-openshift-service"
63
-        }
64
-    ]
65
-}
66 1
deleted file mode 100644
... ...
@@ -1,81 +0,0 @@
1
-#!/bin/bash
2
-
3
-
4
-#  ========================================================================
5
-#  Settings passed by the failover coordinator on OpenShift Origin.
6
-#  ========================================================================
7
-
8
-#  Name of this HA config instance.
9
-HA_CONFIG_NAME=${OPENSHIFT_HA_CONFIG_NAME:-"OpenShift-HA-Config"}
10
-
11
-#  HA config selector.
12
-HA_SELECTOR=${OPENSHIFT_HA_SELECTOR:-""}
13
-
14
-
15
-#  List of virtual IP addresses.
16
-#
17
-#  The value entries are comma-separated entries of the form:
18
-#     <ipaddress-range|ipaddress>
19
-#
20
-#     where:  ipaddress-range = <start-ipaddress>-<endip>
21
-#
22
-#  Example:
23
-#     OPENSHIFT_HA_VIRTUAL_IPS="10.42.42.42,10.100.1.20-24"
24
-#
25
-HA_VIPS=${OPENSHIFT_HA_VIRTUAL_IPS:-""}
26
-
27
-
28
-#  Interface (ethernet) to use - bound by vrrp.
29
-NETWORK_INTERFACE=${OPENSHIFT_HA_NETWORK_INTERFACE:-""}  # "enp0s8"
30
-
31
-
32
-#  Service port to monitor for failover.
33
-HA_MONITOR_PORT=${OPENSHIFT_HA_MONITOR_PORT:-"80"}
34
-
35
-#  Number of initial replicas.
36
-HA_REPLICA_COUNT=${OPENSHIFT_HA_REPLICA_COUNT:-"1"}
37
-
38
-
39
-
40
-#  ========================================================================
41
-#  Default settings - not currently exposed or overridden on OpenShift.
42
-#  ========================================================================
43
-
44
-#  If your environment doesn't support multicast, you can send VRRP adverts
45
-#  to a list of IPv{4,6} addresses using unicast.
46
-#  Example:
47
-#     UNICAST_PEERS="5.6.7.8,9.10.11.12,13.14.15.16"
48
-UNICAST_PEERS=${OPENSHIFT_HA_UNICAST_PEERS:-""}
49
-
50
-
51
-#  List of emails to send admin messages to. If the list of email ids is
52
-#  too long, you can use a DL (distribution list) ala:
53
-#   ADMIN_EMAILS=("ramr@redhat.com" "cops@acme.org")
54
-ADMIN_EMAILS=(${OPENSHIFT_HA_ADMIN_EMAILS:-"root@localhost"})
55
-
56
-#  Email sender - the from address in the email headers.
57
-EMAIL_FROM="ha-config@openshift.local"
58
-
59
-#  IP address of the SMTP server.
60
-SMTP_SERVER=${OPENSHIFT_HA_SMTP_SERVER:-"127.0.0.1"}
61
-
62
-#  SMTP connect timeout (in seconds).
63
-SMTP_CONNECT_TIMEOUT=30
64
-
65
-
66
-#  VRRP will preempt a lower priority machine when a higher priority one
67
-#  comes back online. You can change the preemption strategy to either:
68
-#     "nopreempt"  - which allows the lower priority machine to maintain its
69
-#                    'MASTER' status.
70
-#     OR
71
-#     "preempt_delay 300"  - waits 5 mins (in seconds) after startup to
72
-#                            preempt lower priority MASTERs.
73
-PREEMPTION="preempt_delay 300"
74
-
75
-
76
-#  By default, the IP for binding vrrpd is the primary IP on the above
77
-#  specified interface. If you want to hide the location of vrrpd, you can
78
-#  specify a src_addr for multicast/unicast vrrp packets.
79
-#     MULTICAST_SOURCE_IPADDRESS="1.2.3.4"
80
-#     UNICAST_SOURCE_IPADDRESS="1.2.3.4"
81
-
82 1
deleted file mode 100755
... ...
@@ -1,268 +0,0 @@
1
-#!/bin/bash
2
-
3
-
4
-#  Includes.
5
-source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
6
-
7
-
8
-# Constants.
9
-readonly CHECK_SCRIPT_NAME="chk_${HA_CONFIG_NAME//-/_}"
10
-readonly CHECK_INTERVAL_SECS=2
11
-readonly VRRP_SLAVE_PRIORITY=42
12
-
13
-readonly DEFAULT_PREEMPTION_STRATEGY="preempt_delay 300"
14
-
15
-
16
-#
17
-#  Generate global config section.
18
-#
19
-#  Example:
20
-#     generate_global_config  arparp
21
-#
22
-function generate_global_config() {
23
-  local routername=$(scrub "$1")
24
-
25
-  echo "global_defs {"
26
-  echo "   notification_email {"
27
-
28
-  for email in ${ADMIN_EMAILS[@]}; do
29
-    echo "     $email"
30
-  done
31
-
32
-  echo "   }"
33
-  echo ""
34
-  echo "   notification_email_from ${EMAIL_FROM:-"ha-config@openshift.local"}"
35
-  echo "   smtp_server ${SMTP_SERVER:-"127.0.0.1"}"
36
-  echo "   smtp_connect_timeout ${SMTP_CONNECT_TIMEOUT:-"30"}"
37
-  echo "   router_id $routername"
38
-  echo "}"
39
-}
40
-
41
-
42
-#
43
-#  Generate VRRP checker script configuration section.
44
-#
45
-#  Example:
46
-#      generate_script_config
47
-#      generate_script_config "10.1.2.3" 8080
48
-#
49
-function generate_script_config() {
50
-  local serviceip=${1:-"127.0.0.1"}
51
-  local port=${2:-80}
52
-
53
-  echo ""
54
-  echo "vrrp_script $CHECK_SCRIPT_NAME {"
55
-  echo "   script \"</dev/tcp/${serviceip}/${port}\""
56
-  echo "   interval $CHECK_INTERVAL_SECS"
57
-  echo "}"
58
-}
59
-
60
-
61
-#
62
-#  Generate authentication information section.
63
-#
64
-#  Example:
65
-#      generate_authentication_info
66
-#
67
-function generate_authentication_info() {
68
-  local creds=${1:-"R0ut3r"}
69
-  echo ""
70
-  echo "   authentication {"
71
-  echo "      auth_type PASS"
72
-  echo "      auth_pass $creds"
73
-  echo "   }"
74
-}
75
-
76
-
77
-#
78
-#  Generate track script section.
79
-#
80
-#  Example:
81
-#      generate_track_script
82
-#
83
-function generate_track_script() {
84
-  echo ""
85
-  echo "   track_script {"
86
-  echo "      $CHECK_SCRIPT_NAME"
87
-  echo "   }"
88
-}
89
-
90
-
91
-#
92
-#  Generate multicast + unicast options section based on the values of the
93
-#  MULTICAST_SOURCE_IPADDRESS, UNICAST_SOURCE_IPADDRESS and UNICAST_PEERS
94
-#  environment variables.
95
-#
96
-#  Examples:
97
-#      generate_mucast_options
98
-#
99
-#      UNICAST_SOURCE_IPADDRESS=10.1.1.1 UNICAST_PEERS="10.1.1.2,10.1.1.3" \
100
-#          generate_mucast_options
101
-#
102
-function generate_mucast_options() {
103
-  echo ""
104
-
105
-  if [ -n "$MULTICAST_SOURCE_IPADDRESS" ]; then
106
-    echo "    mcast_src_ip $MULTICAST_SOURCE_IPADDRESS"
107
-  fi
108
-
109
-  if [ -n "$UNICAST_SOURCE_IPADDRESS" ]; then
110
-    echo "    unicast_src_ip $UNICAST_SOURCE_IPADDRESS"
111
-  fi
112
-
113
-  if [ -n "$UNICAST_PEERS" ]; then
114
-    echo ""
115
-    echo "    unicast_peer {"
116
-
117
-    for ip in $(echo "$UNICAST_PEERS" | tr "," " "); do
118
-      echo "        $ip"
119
-    done
120
-
121
-    echo "    }"
122
-  fi
123
-}
124
-
125
-
126
-#
127
-#  Generate VRRP sync groups section.
128
-#
129
-#  Examples:
130
-#      generate_vrrp_sync_groups "ha-1" "10.1.1.1 10.1.2.2"
131
-#
132
-#      generate_vrrp_sync_groups "arparp" "10.42.42.42-45, 10.9.1.1"
133
-#
134
-function generate_vrrp_sync_groups() {
135
-  local servicename=$(scrub "$1")
136
-
137
-  echo ""
138
-  echo "vrrp_sync_group group_${servicename} {"
139
-  echo "   group {"
140
-
141
-  local prefix="$(vrrp_instance_basename "$1")"
142
-  local counter=1
143
-
144
-  for ip in $(expand_ip_ranges "$2"); do
145
-    echo "      ${prefix}_${counter}   # VIP $ip"
146
-    counter=$((counter + 1))
147
-  done
148
-
149
-  echo "   }"
150
-  echo "}"
151
-}
152
-
153
-
154
-#
155
-#  Generate virtual ip address section.
156
-#
157
-#  Examples:
158
-#      generate_vip_section "10.245.2.3" "enp0s8"
159
-#
160
-#      generate_vip_section "10.1.1.1 10.1.2.2" "enp0s8"
161
-#
162
-#      generate_vip_section "10.42.42.42-45, 10.9.1.1"
163
-#
164
-function generate_vip_section() {
165
-  local interface=${2:-"$(get_network_device)"}
166
-
167
-  echo ""
168
-  echo "   virtual_ipaddress {"
169
-
170
-  for ip in $(expand_ip_ranges "$1"); do
171
-    echo "      ${ip} dev $interface"
172
-  done
173
-
174
-  echo "   }"
175
-}
176
-
177
-
178
-#
179
-#  Generate vrrpd instance configuration section.
180
-#
181
-#  Examples:
182
-#      generate_vrrpd_instance_config arp 1 "10.1.2.3" enp0s8 "252" "master"
183
-#
184
-#      generate_vrrpd_instance_config arp 1 "10.1.2.3" enp0s8 "3" "slave"
185
-#
186
-#      generate_vrrpd_instance_config ha-1 4 "10.1.2.3-4" enp0s8 "7"
187
-#
188
-function generate_vrrpd_instance_config() {
189
-  local servicename=$1
190
-  local iid=${2:-"0"}
191
-  local vips=$3
192
-  local interface=$4
193
-  local priority=${5:-"10"}
194
-  local instancetype=${6:-"slave"}
195
-
196
-  local vipname=$(scrub "$1")
197
-  local initialstate=""
198
-  local preempt=${PREEMPTION:-"$DEFAULT_PREEMPTION_STRATEGY"}
199
-
200
-  [ "$instancetype" = "master" ] && initialstate="state MASTER"
201
-
202
-  local instance_name=$(generate_vrrp_instance_name "$servicename" "$iid")
203
-
204
-  local auth_section=$(generate_authentication_info "$servicename")
205
-  local vip_section=$(generate_vip_section "$vips" "$interface")
206
-  echo "
207
-vrrp_instance ${instance_name} {
208
-   interface ${interface}
209
-   ${initialstate}
210
-   virtual_router_id ${iid}
211
-   priority ${priority}
212
-   ${preempt}
213
-   ${auth_section}
214
-   $(generate_track_script)
215
-   $(generate_mucast_options)
216
-   ${vip_section}
217
-}
218
-"
219
-
220
-}
221
-
222
-
223
-#
224
-#  Generate failover configuration.
225
-#
226
-#  Examples:
227
-#      generate_failover_configuration
228
-#
229
-function generate_failover_config() {
230
-  local vips=$(expand_ip_ranges "$HA_VIPS")
231
-  local interface=$(get_network_device "$NETWORK_INTERFACE")
232
-  local ipaddr=$(get_device_ip_address "$interface")
233
-  local port=$(echo "$HA_MONITOR_PORT" | sed 's/[^0-9]//g')
234
-
235
-  echo "! Configuration File for keepalived
236
-
237
-$(generate_global_config "$HA_CONFIG_NAME")
238
-$(generate_script_config "$ipaddr" "$port")
239
-$(generate_vrrp_sync_groups "$HA_CONFIG_NAME" "$vips")
240
-"
241
-
242
-  local ipkey=$(echo "$ipaddr" | cut -f 4 -d '.')
243
-  local ipslot=$((ipkey % 128))
244
-
245
-  local nodecount=$(($HA_REPLICA_COUNT > 0 ? $HA_REPLICA_COUNT : 1))
246
-  local idx=$((ipslot % $nodecount))
247
-  idx=$((idx + 1))
248
-
249
-  local counter=1
250
-
251
-  for vip in ${vips}; do
252
-    local offset=$((RANDOM % 32))
253
-    local priority=$(($((ipslot % 64)) + $offset))
254
-    local instancetype="slave"
255
-    local n=$((counter % $idx))
256
-
257
-    if [ $n -eq 0 ]; then
258
-      instancetype="master"
259
-      priority=$((255 - $ipslot))
260
-    fi
261
-
262
-    generate_vrrpd_instance_config "$HA_CONFIG_NAME" "$counter" "$vip"  \
263
-        "$interface" "$priority" "$instancetype"
264
-
265
-    counter=$((counter + 1))
266
-  done
267
-}
268
-
269 1
deleted file mode 100755
... ...
@@ -1,39 +0,0 @@
1
-#!/bin/bash
2
-
3
-
4
-#  Includes.
5
-mydir=$(dirname "${BASH_SOURCE[0]}")
6
-source "$mydir/../conf/settings.sh"
7
-source "$mydir/utils.sh"
8
-source "$mydir/config-generators.sh"
9
-
10
-#  Constants.
11
-readonly KEEPALIVED_CONFIG="/etc/keepalived/keepalived.conf"
12
-readonly KEEPALIVED_DEFAULTS="/etc/sysconfig/keepalived"
13
-
14
-
15
-function setup_failover() {
16
-  echo "  - Loading ip_vs module ..."
17
-  modprobe ip_vs
18
-
19
-  echo "  - Checking if ip_vs module is available ..."
20
-  if lsmod | grep '^ip_vs'; then
21
-    echo "  - Module ip_vs is loaded."
22
-  else
23
-    echo "ERROR: Module ip_vs is NOT available."
24
-  fi
25
-
26
-  echo "  - Generating and writing config to $KEEPALIVED_CONFIG"
27
-  generate_failover_config > "$KEEPALIVED_CONFIG"
28
-}
29
-
30
-
31
-function start_failover_services() {
32
-  echo "  - Starting failover services ..."
33
-
34
-  [ -f "$KEEPALIVED_DEFAULTS" ] && source "$KEEPALIVED_DEFAULTS"
35
-
36
-  killall -9 /usr/sbin/keepalived &> /dev/null || :
37
-  /usr/sbin/keepalived $KEEPALIVED_OPTIONS -n --log-console
38
-}
39
-
40 1
deleted file mode 100755
... ...
@@ -1,117 +0,0 @@
1
-#!/bin/bash
2
-
3
-
4
-#  Constants.
5
-LIB_DIR=$(dirname "${BASH_SOURCE[0]}")
6
-VBOX_INTERFACES="enp0s3 enp0s8"
7
-
8
-
9
-#
10
-#  Returns "scrubbed" name - removes characters that are not alphanumeric or
11
-#  underscore and replacing dashes with underscores.
12
-#
13
-#  Examples:
14
-#      scrub "config\!@#@$%$^&*()-+=1_{}|[]\\:;'<>?,./ha-failover"
15
-#         # -> config_1_ha_failover
16
-#
17
-#      scrub "ha-1"  # -> ha_1
18
-#
19
-function scrub() {
20
-  local val=$(echo "$1" | tr -dc '[:alnum:]\-_')
21
-  echo "${val//-/_}"
22
-}
23
-
24
-
25
-#
26
-#  Expands list of virtual IP addresses. List elements can be an IP address
27
-#  range or an IP address and elements can be space or comma separated.
28
-#
29
-#  Examples:
30
-#     expand_ip_ranges "1.1.1.1, 2.2.2.2,3.3.3.3-4  4.4.4.4"
31
-#         # -> 1.1.1.1 2.2.2.2 3.3.3.3 3.3.3.4 4.4.4.4
32
-#
33
-#     expand_ip_ranges "10.1.1.100-102 10.1.1.200-200 10.42.42.42"
34
-#         # -> 10.1.1.100 10.1.1.101 10.1.1.102 10.1.1.200 10.42.42.42
35
-#
36
-function expand_ip_ranges() {
37
-  local vips=${1:-""}
38
-  local expandedset=()
39
-
40
-  for iprange in $(echo "$vips" | sed 's/[^0-9\.\,-]//g' | tr "," " "); do
41
-    local ip1=$(echo "$iprange" | awk '{print $1}' FS='-')
42
-    local ip2=$(echo "$iprange" | awk '{print $2}' FS='-')
43
-    if [ -z "$ip2" ]; then
44
-      expandedset=(${expandedset[@]} "$ip1")
45
-    else
46
-      local base=$(echo "$ip1" | cut -f 1-3 -d '.')
47
-      local start=$(echo "$ip1" | awk '{print $NF}' FS='.')
48
-      local end=$(echo "$ip2" | awk '{print $NF}' FS='.')
49
-      for n in `seq $start $end`; do
50
-        expandedset=(${expandedset[@]} "${base}.$n")
51
-      done
52
-    fi
53
-  done
54
-
55
-  echo "${expandedset[@]}"
56
-}
57
-
58
-
59
-#
60
-#  Generate base name for the VRRP instance.
61
-#
62
-#  Examples:
63
-#     vrrp_instance_basename "arp"   # -> arp_VIP
64
-#
65
-#     vrrp_instance_basename "ha-1"  # -> ha_1_VIP
66
-#
67
-function vrrp_instance_basename() {
68
-  echo "$(scrub "$1")_VIP"
69
-}
70
-
71
-
72
-#
73
-#  Generate VRRP instance name.
74
-#
75
-#  Examples:
76
-#     generate_vrrp_instance_name arp 42  # -> arp_VIP_42
77
-#
78
-#     generate_vrrp_instance_name ha-1    # -> ha_1_VIP_0
79
-#
80
-function generate_vrrp_instance_name() {
81
-  local iid=${2:-0}
82
-  echo "$(vrrp_instance_basename "$1")_${iid}"
83
-}
84
-
85
-
86
-#
87
-#  Returns the network device name to use for VRRP.
88
-#
89
-#  Examples:
90
-#     get_network_device
91
-#
92
-#     get_network_device  "eth0"
93
-#
94
-function get_network_device() {
95
-  for dev in $1 ${VBOX_INTERFACES}; do
96
-    if ip addr show dev "$dev" &> /dev/null; then
97
-      echo "$dev"
98
-      return
99
-    fi
100
-  done
101
-
102
-  ip route get 8.8.8.8 | awk '/dev/ { f=NR }; f && (NR-1 == f)' RS=" "
103
-}
104
-
105
-
106
-#
107
-#  Returns the IP address associated with a network device.
108
-#
109
-#  Examples:
110
-#     get_device_ip_address
111
-#
112
-#     get_device_ip_address  "docker0"
113
-#
114
-function get_device_ip_address() {
115
-  local dev=${1:-"$(get_network_device)"}
116
-  ifconfig "$dev" | awk '/inet / { print $2 }'
117
-}
118 1
deleted file mode 100644
... ...
@@ -1,11 +0,0 @@
1
-
2
-IMAGE_NAME="openshift/origin-keepalived-ha-config"
3
-
4
-
5
-all:	image
6
-
7
-test:	image
8
-	(cd tests && ./verify_failover_image.sh)
9
-
10
-image:
11
-	docker build -t $(IMAGE_NAME) .
12 1
deleted file mode 100755
... ...
@@ -1,15 +0,0 @@
1
-#!/bin/bash
2
-
3
-#  Includes.
4
-source "$(dirname "${BASH_SOURCE[0]}")/lib/failover-functions.sh"
5
-
6
-
7
-#
8
-#  main():
9
-#
10
-setup_failover
11
-
12
-start_failover_services
13
-
14
-echo "`basename $0`: OpenShift HA-Config failover service terminated."
15
-
16 1
deleted file mode 100644
... ...
@@ -1,36 +0,0 @@
1
-#!/usr/bin/env python
2
-
3
-""" Echo server - reply back with the received message. """
4
-
5
-import os
6
-import signal
7
-import socket
8
-import sys
9
-
10
-
11
-def sigusr1_handler(signum, frame):
12
-    print 'signal %s received, exiting ...' % signum
13
-    sys.exit(0)
14
-
15
-
16
-def setup():
17
-    signal.signal(signal.SIGUSR1, sigusr1_handler)
18
-
19
-
20
-def runserver():
21
-    sock = socket.socket()
22
-    sock.bind(('0.0.0.0', int(os.environ.get('PORT', '12345'))))
23
-    sock.listen(10)
24
-
25
-    while True:
26
-        c, raddr = sock.accept()
27
-        try:
28
-            d = c.recv(4096)
29
-            c.send(d if d else '')
30
-        finally:
31
-            c.close()
32
-
33
-
34
-if "__main__" == __name__:
35
-    setup()
36
-    runserver()
37 1
deleted file mode 100755
... ...
@@ -1,90 +0,0 @@
1
-#!/bin/bash -e
2
-
3
-#  Constants.
4
-readonly TEST_DIR=$(dirname "${BASH_SOURCE[0]}")
5
-readonly FAILOVER_IMAGE="openshift/origin-keepalived-ha-config"
6
-readonly TEST_VIPS="10.0.2.100-102"
7
-readonly MONITOR_PORT="12345"
8
-
9
-
10
-function stop_echo_server() {
11
-  local pid=$1
12
-  if [ -z "$pid" ]; then
13
-    pid=$(ps -e -opid,args | grep echoserver.py | grep -v grep | awk '{print $1}')
14
-  fi
15
-
16
-  #  Send SIGUSR1 to the echo server to terminate it.
17
-  [ -n "$pid" ] && kill -s USR1 $pid
18
-}
19
-
20
-
21
-function start_echo_server() {
22
-  stop_echo_server
23
-
24
-  export PORT=${MONITOR_PORT}
25
-  nohup python ${TEST_DIR}/echoserver.py &> /dev/null &
26
-  echo $!
27
-}
28
-
29
-
30
-function start_failover_container() {
31
-  local cfg="-e OPENSHIFT_HA_CONFIG_NAME="roto-r00ter""
32
-  local vips="-e OPENSHIFT_HA_VIRTUAL_IPS="${TEST_VIPS}""
33
-  local netif="-e OPENSHIFT_HA_NETWORK_INTERFACE="enp0s3""
34
-  local port="-e OPENSHIFT_HA_MONITOR_PORT="${MONITOR_PORT}""
35
-  # local unicast="-e export OPENSHIFT_HA_USE_UNICAST="true""
36
-  # local unicastpeers="-e OPENSHIFT_HA_UNICAST_PEERS="127.0.0.1""
37
-  local selector="-e OPENSHIFT_HA_SELECTOR="""
38
-  local envopts="$cfg $vips $netif $port $unicast $unicastpeers $selector"
39
-
40
-  docker run -dit --net=host --privileged=true   \
41
-         -v /lib/modules:/lib/modules $envopts $FAILOVER_IMAGE &
42
-
43
-}
44
-
45
-
46
-function run_image_verification_test() {
47
-  echo "  - starting echo server ..."
48
-  local pid=$(start_echo_server)
49
-  echo "  - started echo server pid=$pid ..."
50
-
51
-  #  On interrupt, cleanup - stop echo server.
52
-  trap "stop_echo_server $pid" INT
53
-
54
-  local cname=$(start_failover_container)
55
-  echo "  - started docker container $cname ..."
56
-
57
-  #  Wait a bit for all the services to startup.
58
-  sleep 10
59
-
60
-  #  Check container is up and has keepalived processes.
61
-  local cmd="ps -ef  | grep '/usr/sbin/keepalived' | grep -v grep | wc -l"
62
-  local numprocs=$(echo "$cmd" | docker exec -i $cname /bin/bash)
63
-
64
-  #  Stop echo server.
65
-  stop_echo_server $pid
66
-
67
-  if [[ -n "$numprocs" && $numprocs -gt 0 ]]; then
68
-    #  Success - print info and kill the container.
69
-    echo "  - There are $numprocs keepalived processes running"
70
-    echo "  - Cleaning up docker containers ..."
71
-    docker rm -f $cname
72
-    echo "  - All tests PASSED."
73
-    return 0
74
-  fi
75
-
76
-  #  Failure - print info and dump logs (keep the docker container around
77
-  #  for debugging).
78
-  echo "  - There are $numprocs keepalived processes running"
79
-  echo "  - logs from container $cname:"
80
-  docker logs $cname || :
81
-  echo "  - Test FAILED."
82
-  exit 1
83
-}
84
-
85
-
86
-#
87
-#  main():
88
-#
89
-run_image_verification_test
90
-
91 1
new file mode 100644
... ...
@@ -0,0 +1,2 @@
0
+settings.minion*
1
+!.gitignore
0 2
new file mode 100644
... ...
@@ -0,0 +1,16 @@
0
+#
1
+# VIP failover monitoring container for OpenShift Origin.
2
+#
3
+# ImageName: openshift/origin-keepalived-ipfailover
4
+#
5
+FROM openshift/origin-base
6
+
7
+RUN yum -y install kmod keepalived iproute psmisc nc net-tools
8
+
9
+ADD conf/ /var/lib/openshift/ipfailover/keepalived/conf/
10
+ADD lib/  /var/lib/openshift/ipfailover/keepalived/lib/
11
+ADD bin/  /var/lib/openshift/ipfailover/keepalived/bin/
12
+ADD monitor.sh /var/lib/openshift/ipfailover/keepalived/
13
+
14
+EXPOSE 1985
15
+ENTRYPOINT ["/var/lib/openshift/ipfailover/keepalived/monitor.sh"]
0 16
new file mode 100644
... ...
@@ -0,0 +1,312 @@
0
+HA Router and Failover
1
+======================
2
+This readme describes steps to add multiple HA OpenShift routers with
3
+failover capability to achieve several nines of availability.
4
+
5
+
6
+Build and Test
7
+--------------
8
+1.  Verify docker image build and run tests.
9
+
10
+        $ make -f makefile.test  #  or make -f makefile.test image
11
+        $ make -f makefile.test test
12
+
13
+
14
+Pre-requisites/Prep Time
15
+------------------------
16
+
17
+1. Launch an OpenShift cluster via whatever mechanism you use. The steps
18
+   below assume you are doing this on a dev machine using vagrant.
19
+
20
+        $ export OPENSHIFT_DEV_CLUSTER=1
21
+        $ cd $this-repo-git-co-dir  # cloned from git@github.com:ramr/origin
22
+        $ vagrant up
23
+
24
+
25
+2. Wait for the cluster to come up and then start the OpenShift router
26
+   with two (_2_) replicas.
27
+
28
+        $ vagrant ssh minion-1  # (or master or minion-2).
29
+        #  Ensure KUBECONFIG is set or else set it.
30
+        [ -n "$KUBECONFIG" ] ||  \
31
+           export KUBECONFIG=/openshift.local.certificates/admin/.kubeconfig
32
+        #  openshift kube get dc,rc,pods,se,mi,routes
33
+        openshift ex router arparp --create --replicas=2  \
34
+                                   --credentials="${KUBECONFIG}"
35
+
36
+
37
+3. Wait for the Router pods to get into running state (I'm just sitting
38
+   here watching the wheels go round and round).
39
+
40
+        $ vagrant ssh minion-1 # (or master or minion-2).
41
+        pods="openshift/origin-haproxy-router|openshift/origin-deployer"
42
+        while openshift kube get pods | egrep -e "$pods" |   \
43
+                grep "Pending" > /dev/null; do
44
+            echo -n "."
45
+            #  "OkOk"
46
+            sleep 1
47
+        done
48
+        echo ""
49
+
50
+
51
+4. Check that the two OpenShift router replicas are up and serving.
52
+
53
+        $ #  This will be a bit slow, but it should return a 503 HTTP code
54
+        $ #  indicating that haproxy is serving on port 80.
55
+        $ vagrant ssh minion-1
56
+        sudo docker ps  | grep "openshift/origin-haproxy-router"
57
+        curl -s -o /dev/null -w "%{http_code}\n"  http://localhost/
58
+
59
+        $ #  Repeat on minion-2:
60
+        $ vagrant ssh minion-2
61
+        sudo docker ps  | grep "openshift/origin-haproxy-router"
62
+        curl -s -o /dev/null -w "%{http_code}\n"  http://localhost/
63
+
64
+
65
+5. Create an user, project and app.
66
+
67
+        $ vagrant ssh minion-1
68
+        #  Add user and project.
69
+        openshift ex policy add-user view anypassword:test-admin
70
+        openshift ex new-project test --display-name="Failover Sample" \
71
+           --description="Router Failover" --admin=anypassword:test-admin
72
+        #  Create a test app using the template.
73
+        cd /vagrant/hack/exp/router-failover
74
+        openshift cli create -n test -f conf/hello-openshift-template.json
75
+
76
+        echo "Wait for the app to startup and check app is reachable."
77
+        for ip in 10.245.2.3 10.245.2.4; do
78
+          curl -H "Host: hello.openshift.test" -o /dev/null -s -m 5  \
79
+               -w "%{http_code}\n" http://$ip/
80
+        done
81
+        echo "Ensure HTTP status code is 200 for both http://10.245.2.{3,4}"
82
+        #  && echo "YAY"
83
+
84
+
85
+6. Ensure you can get to the hello openshift app from inside/outside the vm.
86
+
87
+        $ #  minion-{1,2} use IPs 10.245.2.{3,4} in the dev environment.
88
+        for ip in 10.245.2.3 10.245.2.4; do
89
+          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
90
+                            -m 5 http://hello.openshift.test)"
91
+        done
92
+
93
+
94
+HA Routing Failover Setup
95
+=========================
96
+
97
+1. Copy the router HA settings example config and edit it as needed.
98
+
99
+        $ cd /vagrant/hack/exp/router-failover
100
+        $ cp conf/settings.example  settings.minion-1
101
+        $ cp conf/settings.example  settings.minion-2
102
+        $ #
103
+        $ #  And as per your environment, set/edit the values for
104
+        $ #    ADMIN_EMAILS, EMAIL_FROM, SMTP_SERVER,
105
+        $ #    PRIMARY_HA_VIPS, SLAVE_HA_VIPS and INTERFACE.
106
+
107
+2. For demo purposes, we are going to flip the PRIMARY and SLAVE groups
108
+   on minion-2 ... this allows both minions to serve in an Active-Active
109
+   fashion.
110
+
111
+        $ #  Flip PRIMARY+SLAVE groups on minion-2 ("Papoy?! Ah Papoy!!").
112
+        $ sed -i "s/^PRIMARY_GROUPS=\(.*\)/PRIMARY_GROUPS_OLD=\1/g;
113
+                  s/^SLAVE_GROUPS=\(.*\)/PRIMARY_GROUPS=\1/g;
114
+                  s/^PRIMARY_GROUPS_OLD=\(.*\)/SLAVE_GROUPS=\1/g;" \
115
+              settings.minion-2
116
+
117
+        $ #  Check what the differences are on the minions.
118
+        $ diff conf/settings.example  settings.minion-1
119
+        $ diff conf/settings.example  settings.minion-2
120
+
121
+
122
+3. Optionally clear the config - just so that we have a completely clean
123
+   slate. Step 4 below does this - but this is here just for my demo env
124
+   reuse purposes.
125
+
126
+        $ #  Run these commands on the minions via vagrant ssh minion-{1,2}
127
+        $ #    sudo service keepalived stop
128
+        $ #    sudo rm -f /etc/keepalived/keepalived.conf
129
+
130
+        $ #  OkOk
131
+        for m in minion-1 minion-2; do
132
+           vagrant ssh $m -c "sudo service keepalived stop;  \
133
+                              sudo rm -f /etc/keepalived/keepalived.conf"
134
+        done
135
+
136
+
137
+4. Setup router HA with failover using the 2 config files we created.
138
+
139
+        $ #  Run these commands on the minions via vagrant ssh minion-{1,2}
140
+        $ #    cd /vagrant/hack/exp/router-failover
141
+        $ #    sudo ./failover-setup.sh settings.minion-{1,2}
142
+
143
+        $ #  OkOk - minion-1
144
+        for m in minion-1 minion-2; do
145
+           vagrant ssh $m -c "cd /vagrant/hack/exp/router-failover;  \
146
+                              sudo ./failover-setup.sh settings.$m"
147
+        done
148
+
149
+
150
+5. On each minion, you can check what VIPs are being serviced by that
151
+   minion via `ip a ls dev enp0s8`. Substitute the appropriate interface
152
+   name for `enp0s8` in your environment.
153
+
154
+        $ #  "minions laughing" ...
155
+        for m in minion-1 minion-2; do
156
+           vagrant ssh $m -c "ip a ls dev enp0s8"
157
+        done
158
+
159
+
160
+6. Check that you can get to the hello openshift app using the VIPs from
161
+   inside/outside the vms.
162
+
163
+        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
164
+          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
165
+                            -m 5 http://hello.openshift.test)"
166
+        done
167
+        #  && echo "YAY"
168
+
169
+
170
+HA Routing Failover Demo
171
+========================
172
+Whilst following the steps below, you can also monitor one of the VIPs on a
173
+terminal on your host system. This just busy loops sending requests to a
174
+specific VIP.
175
+
176
+        tko="--connect-timeout 2"  #  Maybe use -m 2 instead.
177
+        resolver="--resolve hello.openshift.test:80:10.245.2.111"
178
+        while true; do
179
+          echo "$(date): $(curl -s $tko $resolver hello.openshift.test)"
180
+        done | tee /tmp/foo
181
+
182
+
183
+HA Simple Failover Test (keepalived)
184
+====================================
185
+The simplest test on VIP failover is to stop keepalived on one of the
186
+minions.
187
+
188
+        $ vagrant ssh minion-1
189
+
190
+        $ #  Check which VIPs are served by this minion.
191
+        ip a ls dev enp0s8
192
+
193
+        $ #  Make sure the VIP in the busy loop above 10.245.2.111 is
194
+        $ #  "owned"/serviced by this minion. Or then use a VIP that's
195
+        $ #  serviced by this minion in the above mentioned busy looper
196
+        $ #  monitoring script (while true; curl ... done).
197
+        sudo service keepalived stop
198
+
199
+        $ vagrant ssh minion-2
200
+        #  Check that the VIPs from minion-1 are taken over by this minion.
201
+        ip a ls dev enp0s8
202
+
203
+        $ vagrant ssh minion-1
204
+        $ #  Set things back to a "good" state by starting back keepalived.
205
+        sudo service keepalived start
206
+
207
+        $ #  Check the VIPs served by this minion.
208
+        ip a ls dev enp0s8
209
+
210
+
211
+HA Hard Failover Test (bring down the minion)
212
+=============================================
213
+The hard failover VIP test basically involves stopping the whole shebang
214
+(keepalived, openshift-router and haproxy) by bringing down one of
215
+the minions.
216
+
217
+1. Halt one of the minions ("Aww") ...
218
+
219
+        $ #  If you are monitoring a specific VIP ala 10.245.2.111 in the
220
+        $ #  example mentioned above, then bring down the minion that's
221
+        $ #  "owns" that VIP. For now, bringing a random one down.
222
+        $ vagrant halt minion-$((RANDOM%2 + 1))
223
+
224
+
225
+2. Check that you can still get to the hello openshift app using the VIPs
226
+   from inside/outside the vms.
227
+
228
+        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
229
+          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
230
+                            -m 5 http://hello.openshift.test)"
231
+        done
232
+        $ #  && echo "YAY"
233
+
234
+
235
+3. Bring back the minion ("YAY") ...
236
+
237
+        $ vagrant up minion-{1,2}
238
+
239
+
240
+4. Wait for the minion to come back online.
241
+
242
+5. Check how the VIPs are balanced between the 2 minions.
243
+
244
+        for m in minion-1 minion-2; do
245
+          vagrant ssh $m -c "ip a ls dev enp0s8"
246
+        done
247
+
248
+6. Check that you can still get to the hello openshift app using the VIPs
249
+   from inside/outside the vms.
250
+
251
+        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
252
+          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
253
+                            -m 5 http://hello.openshift.test)"
254
+        done
255
+        $ #  && echo "YAY"
256
+
257
+
258
+
259
+HA Soft Failover Test
260
+=====================
261
+
262
+1. Eventually this would test the keepalived process - but for now this
263
+   just shows how long the Kubernetes Replication Controller takes to
264
+   restart the services.
265
+
266
+        $ #  Stop the router on one of the minions ("Aaw").
267
+        $ vagrant ssh minion-$((RANDOM%2 + 1))
268
+        sudo kill -9 $(ps -e -opid,args | grep openshift-router |  \
269
+                          grep -v grep | awk '{print $1}')
270
+        $ # OR:
271
+        sudo docker rm -f $(sudo docker ps |  \
272
+                               grep openshift/origin-haproxy-router |  \
273
+                               awk '{print $1}')
274
+
275
+2. Check that you can still get to the hello openshift app using the VIPs
276
+   from inside/outside the vms.
277
+
278
+        for ip in 10.245.2.90 10.245.2.111 10.245.2.222 10.245.2.223; do
279
+          echo "$ip: $(curl -s --resolve hello.openshift.test:80:$ip  \
280
+                            -m 5 http://hello.openshift.test)"
281
+        done
282
+        $ #  && echo "YAY"
283
+        $ #  Wait for the router to come back up and run above check again.
284
+
285
+
286
+
287
+TODOs/Edge CASES:
288
+-----------------
289
+
290
+##  *Beware of the dog - it bites! You have been warned*
291
+There's a 2 second delay (process existence check) as of now, we can
292
+tune this up/down appropriately.
293
+And it is pertinent to mention here that this solution is not true
294
+fault-tolerance (100% availability) - it's just failover capability to
295
+provide high availability (99.[9]{n}% availability - cheap but by no
296
+means perfect).
297
+So be aware of this and use it appropriately within your environment.
298
+
299
+One alternative to achieve several more 9s of availability is to
300
+  * stop keepalived immediately if the router or the docker container
301
+    running the router goes down.
302
+  * And start keepalived start it when the router comes back up because
303
+    the replication controller notices things ain't kosher.
304
+But the bang for buck here is low.
305
+
306
+
307
+##  *Sound Effects*
308
+Link quoted sound effects (ala "OkOk") to
309
+
310
+        http://www.soundboard.com/sb/minions
311
+
0 312
new file mode 100644
... ...
@@ -0,0 +1,2 @@
0
+*
1
+!.gitignore
0 2
new file mode 100644
... ...
@@ -0,0 +1,65 @@
0
+{
1
+    "metadata":{
2
+        "name":"hello-service-pod-meta"
3
+    },
4
+    "kind":"Config",
5
+    "apiVersion":"v1beta1",
6
+    "creationTimestamp":"2014-09-18T18:28:38-04:00",
7
+    "items":[
8
+        {
9
+            "id": "hello-openshift-service",
10
+            "kind": "Service",
11
+            "apiVersion": "v1beta1",
12
+            "port": 8080,
13
+            "selector": {
14
+                "name": "hello-openshift-label"
15
+            }
16
+        },
17
+        {
18
+            "id": "hello-openshift",
19
+            "kind": "ReplicationController",
20
+            "apiVersion": "v1beta1",
21
+            "desiredState": {
22
+                "podTemplate": {
23
+                    "desiredState": {
24
+                        "manifest": {
25
+                            "version": "v1beta1",
26
+                            "id": "",
27
+                            "containers": [{
28
+                                "name": "hello-openshift-container",
29
+                                "image": "openshift/hello-openshift",
30
+                                "ports": [{
31
+                                    "hostPort": 6061,
32
+                                    "containerPort": 8080,
33
+                                    "protocol": "TCP"
34
+                                }]
35
+                            }],
36
+                            "dnsPolicy": "ClusterFirst",
37
+                            "restartPolicy": {
38
+                                "always": {}
39
+                            },
40
+                            "volumes": null
41
+                        }
42
+                    },
43
+                    "labels": {
44
+                        "name": "hello-openshift-label"
45
+                    }
46
+                },
47
+                "replicaSelector": {
48
+                    "name": "hello-openshift-label"
49
+                },
50
+                "replicas": 1
51
+            }
52
+        },
53
+        {
54
+            "kind": "Route",
55
+            "apiVersion": "v1beta1",
56
+            "metadata": {
57
+                "name": "hello-openshift-route"
58
+            },
59
+            "id": "hello-openshift-route",
60
+            "host": "hello.openshift.test",
61
+            "serviceName": "hello-openshift-service"
62
+        }
63
+    ]
64
+}
0 65
new file mode 100644
... ...
@@ -0,0 +1,81 @@
0
+#!/bin/bash
1
+
2
+
3
+#  ========================================================================
4
+#  Settings passed by the failover coordinator on OpenShift Origin.
5
+#  ========================================================================
6
+
7
+#  Name of this IP Failover config instance.
8
+HA_CONFIG_NAME=${OPENSHIFT_HA_CONFIG_NAME:-"OpenShift-IPFailover"}
9
+
10
+#  IP Failover config selector.
11
+HA_SELECTOR=${OPENSHIFT_HA_SELECTOR:-""}
12
+
13
+
14
+#  List of virtual IP addresses.
15
+#
16
+#  The value entries are comma-separated entries of the form:
17
+#     <ipaddress-range|ipaddress>
18
+#
19
+#     where:  ipaddress-range = <start-ipaddress>-<endip>
20
+#
21
+#  Example:
22
+#     OPENSHIFT_HA_VIRTUAL_IPS="10.42.42.42,10.100.1.20-24"
23
+#
24
+HA_VIPS=${OPENSHIFT_HA_VIRTUAL_IPS:-""}
25
+
26
+
27
+#  Interface (ethernet) to use - bound by vrrp.
28
+NETWORK_INTERFACE=${OPENSHIFT_HA_NETWORK_INTERFACE:-""}  # "enp0s8"
29
+
30
+
31
+#  Service port to monitor for failover.
32
+HA_MONITOR_PORT=${OPENSHIFT_HA_MONITOR_PORT:-"80"}
33
+
34
+#  Number of initial replicas.
35
+HA_REPLICA_COUNT=${OPENSHIFT_HA_REPLICA_COUNT:-"1"}
36
+
37
+
38
+
39
+#  ========================================================================
40
+#  Default settings - not currently exposed or overridden on OpenShift.
41
+#  ========================================================================
42
+
43
+#  If your environment doesn't support multicast, you can send VRRP adverts
44
+#  to a list of IPv{4,6} addresses using unicast.
45
+#  Example:
46
+#     UNICAST_PEERS="5.6.7.8,9.10.11.12,13.14.15.16"
47
+UNICAST_PEERS=${OPENSHIFT_HA_UNICAST_PEERS:-""}
48
+
49
+
50
+#  List of emails to send admin messages to. If the list of email ids is
51
+#  too long, you can use a DL (distribution list) ala:
52
+#   ADMIN_EMAILS=("ramr@redhat.com" "cops@acme.org")
53
+ADMIN_EMAILS=(${OPENSHIFT_HA_ADMIN_EMAILS:-"root@localhost"})
54
+
55
+#  Email sender - the from address in the email headers.
56
+EMAIL_FROM="ipfailover@openshift.local"
57
+
58
+#  IP address of the SMTP server.
59
+SMTP_SERVER=${OPENSHIFT_HA_SMTP_SERVER:-"127.0.0.1"}
60
+
61
+#  SMTP connect timeout (in seconds).
62
+SMTP_CONNECT_TIMEOUT=30
63
+
64
+
65
+#  VRRP will preempt a lower priority machine when a higher priority one
66
+#  comes back online. You can change the preemption strategy to either:
67
+#     "nopreempt"  - which allows the lower priority machine to maintain its
68
+#                    'MASTER' status.
69
+#     OR
70
+#     "preempt_delay 300"  - waits 5 mins (in seconds) after startup to
71
+#                            preempt lower priority MASTERs.
72
+PREEMPTION="preempt_delay 300"
73
+
74
+
75
+#  By default, the IP for binding vrrpd is the primary IP on the above
76
+#  specified interface. If you want to hide the location of vrrpd, you can
77
+#  specify a src_addr for multicast/unicast vrrp packets.
78
+#     MULTICAST_SOURCE_IPADDRESS="1.2.3.4"
79
+#     UNICAST_SOURCE_IPADDRESS="1.2.3.4"
80
+
0 81
new file mode 100755
... ...
@@ -0,0 +1,271 @@
0
+#!/bin/bash
1
+
2
+#  TODO: This follows the initial demo pieces and uses a bash script to
3
+#        generate the keepalived config - rework this into a template
4
+#        similar to how it is done for the haproxy configuration.
5
+
6
+#  Includes.
7
+source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
8
+
9
+
10
+# Constants.
11
+readonly CHECK_SCRIPT_NAME="chk_${HA_CONFIG_NAME//-/_}"
12
+readonly CHECK_INTERVAL_SECS=2
13
+readonly VRRP_SLAVE_PRIORITY=42
14
+
15
+readonly DEFAULT_PREEMPTION_STRATEGY="preempt_delay 300"
16
+
17
+
18
+#
19
+#  Generate global config section.
20
+#
21
+#  Example:
22
+#     generate_global_config  arparp
23
+#
24
+function generate_global_config() {
25
+  local routername=$(scrub "$1")
26
+
27
+  echo "global_defs {"
28
+  echo "   notification_email {"
29
+
30
+  for email in ${ADMIN_EMAILS[@]}; do
31
+    echo "     $email"
32
+  done
33
+
34
+  echo "   }"
35
+  echo ""
36
+  echo "   notification_email_from ${EMAIL_FROM:-"ipfailover@openshift.local"}"
37
+  echo "   smtp_server ${SMTP_SERVER:-"127.0.0.1"}"
38
+  echo "   smtp_connect_timeout ${SMTP_CONNECT_TIMEOUT:-"30"}"
39
+  echo "   router_id $routername"
40
+  echo "}"
41
+}
42
+
43
+
44
+#
45
+#  Generate VRRP checker script configuration section.
46
+#
47
+#  Example:
48
+#      generate_script_config
49
+#      generate_script_config "10.1.2.3" 8080
50
+#
51
+function generate_script_config() {
52
+  local serviceip=${1:-"127.0.0.1"}
53
+  local port=${2:-80}
54
+
55
+  echo ""
56
+  echo "vrrp_script $CHECK_SCRIPT_NAME {"
57
+  echo "   script \"</dev/tcp/${serviceip}/${port}\""
58
+  echo "   interval $CHECK_INTERVAL_SECS"
59
+  echo "}"
60
+}
61
+
62
+
63
+#
64
+#  Generate authentication information section.
65
+#
66
+#  Example:
67
+#      generate_authentication_info
68
+#
69
+function generate_authentication_info() {
70
+  local creds=${1:-"R0ut3r"}
71
+  echo ""
72
+  echo "   authentication {"
73
+  echo "      auth_type PASS"
74
+  echo "      auth_pass $creds"
75
+  echo "   }"
76
+}
77
+
78
+
79
+#
80
+#  Generate track script section.
81
+#
82
+#  Example:
83
+#      generate_track_script
84
+#
85
+function generate_track_script() {
86
+  echo ""
87
+  echo "   track_script {"
88
+  echo "      $CHECK_SCRIPT_NAME"
89
+  echo "   }"
90
+}
91
+
92
+
93
+#
94
+#  Generate multicast + unicast options section based on the values of the
95
+#  MULTICAST_SOURCE_IPADDRESS, UNICAST_SOURCE_IPADDRESS and UNICAST_PEERS
96
+#  environment variables.
97
+#
98
+#  Examples:
99
+#      generate_mucast_options
100
+#
101
+#      UNICAST_SOURCE_IPADDRESS=10.1.1.1 UNICAST_PEERS="10.1.1.2,10.1.1.3" \
102
+#          generate_mucast_options
103
+#
104
+function generate_mucast_options() {
105
+  echo ""
106
+
107
+  if [ -n "$MULTICAST_SOURCE_IPADDRESS" ]; then
108
+    echo "    mcast_src_ip $MULTICAST_SOURCE_IPADDRESS"
109
+  fi
110
+
111
+  if [ -n "$UNICAST_SOURCE_IPADDRESS" ]; then
112
+    echo "    unicast_src_ip $UNICAST_SOURCE_IPADDRESS"
113
+  fi
114
+
115
+  if [ -n "$UNICAST_PEERS" ]; then
116
+    echo ""
117
+    echo "    unicast_peer {"
118
+
119
+    for ip in $(echo "$UNICAST_PEERS" | tr "," " "); do
120
+      echo "        $ip"
121
+    done
122
+
123
+    echo "    }"
124
+  fi
125
+}
126
+
127
+
128
+#
129
+#  Generate VRRP sync groups section.
130
+#
131
+#  Examples:
132
+#      generate_vrrp_sync_groups "ipf-1" "10.1.1.1 10.1.2.2"
133
+#
134
+#      generate_vrrp_sync_groups "arparp" "10.42.42.42-45, 10.9.1.1"
135
+#
136
+function generate_vrrp_sync_groups() {
137
+  local servicename=$(scrub "$1")
138
+
139
+  echo ""
140
+  echo "vrrp_sync_group group_${servicename} {"
141
+  echo "   group {"
142
+
143
+  local prefix="$(vrrp_instance_basename "$1")"
144
+  local counter=1
145
+
146
+  for ip in $(expand_ip_ranges "$2"); do
147
+    echo "      ${prefix}_${counter}   # VIP $ip"
148
+    counter=$((counter + 1))
149
+  done
150
+
151
+  echo "   }"
152
+  echo "}"
153
+}
154
+
155
+
156
+#
157
+#  Generate virtual ip address section.
158
+#
159
+#  Examples:
160
+#      generate_vip_section "10.245.2.3" "enp0s8"
161
+#
162
+#      generate_vip_section "10.1.1.1 10.1.2.2" "enp0s8"
163
+#
164
+#      generate_vip_section "10.42.42.42-45, 10.9.1.1"
165
+#
166
+function generate_vip_section() {
167
+  local interface=${2:-"$(get_network_device)"}
168
+
169
+  echo ""
170
+  echo "   virtual_ipaddress {"
171
+
172
+  for ip in $(expand_ip_ranges "$1"); do
173
+    echo "      ${ip} dev $interface"
174
+  done
175
+
176
+  echo "   }"
177
+}
178
+
179
+
180
+#
181
+#  Generate vrrpd instance configuration section.
182
+#
183
+#  Examples:
184
+#      generate_vrrpd_instance_config arp 1 "10.1.2.3" enp0s8 "252" "master"
185
+#
186
+#      generate_vrrpd_instance_config arp 1 "10.1.2.3" enp0s8 "3" "slave"
187
+#
188
+#      generate_vrrpd_instance_config ipf-1 4 "10.1.2.3-4" enp0s8 "7"
189
+#
190
+function generate_vrrpd_instance_config() {
191
+  local servicename=$1
192
+  local iid=${2:-"0"}
193
+  local vips=$3
194
+  local interface=$4
195
+  local priority=${5:-"10"}
196
+  local instancetype=${6:-"slave"}
197
+
198
+  local vipname=$(scrub "$1")
199
+  local initialstate=""
200
+  local preempt=${PREEMPTION:-"$DEFAULT_PREEMPTION_STRATEGY"}
201
+
202
+  [ "$instancetype" = "master" ] && initialstate="state MASTER"
203
+
204
+  local instance_name=$(generate_vrrp_instance_name "$servicename" "$iid")
205
+
206
+  local auth_section=$(generate_authentication_info "$servicename")
207
+  local vip_section=$(generate_vip_section "$vips" "$interface")
208
+  echo "
209
+vrrp_instance ${instance_name} {
210
+   interface ${interface}
211
+   ${initialstate}
212
+   virtual_router_id ${iid}
213
+   priority ${priority}
214
+   ${preempt}
215
+   ${auth_section}
216
+   $(generate_track_script)
217
+   $(generate_mucast_options)
218
+   ${vip_section}
219
+}
220
+"
221
+
222
+}
223
+
224
+
225
+#
226
+#  Generate failover configuration.
227
+#
228
+#  Examples:
229
+#      generate_failover_config
230
+#
231
+function generate_failover_config() {
232
+  local vips=$(expand_ip_ranges "$HA_VIPS")
233
+  local interface=$(get_network_device "$NETWORK_INTERFACE")
234
+  local ipaddr=$(get_device_ip_address "$interface")
235
+  local port=$(echo "$HA_MONITOR_PORT" | sed 's/[^0-9]//g')
236
+
237
+  echo "! Configuration File for keepalived
238
+
239
+$(generate_global_config "$HA_CONFIG_NAME")
240
+$(generate_script_config "$ipaddr" "$port")
241
+$(generate_vrrp_sync_groups "$HA_CONFIG_NAME" "$vips")
242
+"
243
+
244
+  local ipkey=$(echo "$ipaddr" | cut -f 4 -d '.')
245
+  local ipslot=$((ipkey % 128))
246
+
247
+  local nodecount=$(($HA_REPLICA_COUNT > 0 ? $HA_REPLICA_COUNT : 1))
248
+  local idx=$((ipslot % $nodecount))
249
+  idx=$((idx + 1))
250
+
251
+  local counter=1
252
+
253
+  for vip in ${vips}; do
254
+    local offset=$((RANDOM % 32))
255
+    local priority=$(($((ipslot % 64)) + $offset))
256
+    local instancetype="slave"
257
+    local n=$((counter % $idx))
258
+
259
+    if [ $n -eq 0 ]; then
260
+      instancetype="master"
261
+      priority=$((255 - $ipslot))
262
+    fi
263
+
264
+    generate_vrrpd_instance_config "$HA_CONFIG_NAME" "$counter" "$vip"  \
265
+        "$interface" "$priority" "$instancetype"
266
+
267
+    counter=$((counter + 1))
268
+  done
269
+}
270
+
0 271
new file mode 100755
... ...
@@ -0,0 +1,39 @@
0
+#!/bin/bash
1
+
2
+
3
+#  Includes.
4
+mydir=$(dirname "${BASH_SOURCE[0]}")
5
+source "$mydir/../conf/settings.sh"
6
+source "$mydir/utils.sh"
7
+source "$mydir/config-generators.sh"
8
+
9
+#  Constants.
10
+readonly KEEPALIVED_CONFIG="/etc/keepalived/keepalived.conf"
11
+readonly KEEPALIVED_DEFAULTS="/etc/sysconfig/keepalived"
12
+
13
+
14
+function setup_failover() {
15
+  echo "  - Loading ip_vs module ..."
16
+  modprobe ip_vs
17
+
18
+  echo "  - Checking if ip_vs module is available ..."
19
+  if lsmod | grep '^ip_vs'; then
20
+    echo "  - Module ip_vs is loaded."
21
+  else
22
+    echo "ERROR: Module ip_vs is NOT available."
23
+  fi
24
+
25
+  echo "  - Generating and writing config to $KEEPALIVED_CONFIG"
26
+  generate_failover_config > "$KEEPALIVED_CONFIG"
27
+}
28
+
29
+
30
+function start_failover_services() {
31
+  echo "  - Starting failover services ..."
32
+
33
+  [ -f "$KEEPALIVED_DEFAULTS" ] && source "$KEEPALIVED_DEFAULTS"
34
+
35
+  killall -9 /usr/sbin/keepalived &> /dev/null || :
36
+  /usr/sbin/keepalived $KEEPALIVED_OPTIONS -n --log-console
37
+}
38
+
0 39
new file mode 100755
... ...
@@ -0,0 +1,117 @@
0
+#!/bin/bash
1
+
2
+
3
+#  Constants.
4
+LIB_DIR=$(dirname "${BASH_SOURCE[0]}")
5
+VBOX_INTERFACES="enp0s3 enp0s8"
6
+
7
+
8
+#
9
+#  Returns "scrubbed" name - removes characters that are not alphanumeric or
10
+#  underscore and replacing dashes with underscores.
11
+#
12
+#  Examples:
13
+#      scrub "config\!@#@$%$^&*()-+=1_{}|[]\\:;'<>?,./ipfailover"
14
+#         # -> config_1_ipfailover
15
+#
16
+#      scrub "ha-1"  # -> ha_1
17
+#
18
+function scrub() {
19
+  local val=$(echo "$1" | tr -dc '[:alnum:]\-_')
20
+  echo "${val//-/_}"
21
+}
22
+
23
+
24
+#
25
+#  Expands list of virtual IP addresses. List elements can be an IP address
26
+#  range or an IP address and elements can be space or comma separated.
27
+#
28
+#  Examples:
29
+#     expand_ip_ranges "1.1.1.1, 2.2.2.2,3.3.3.3-4  4.4.4.4"
30
+#         # -> 1.1.1.1 2.2.2.2 3.3.3.3 3.3.3.4 4.4.4.4
31
+#
32
+#     expand_ip_ranges "10.1.1.100-102 10.1.1.200-200 10.42.42.42"
33
+#         # -> 10.1.1.100 10.1.1.101 10.1.1.102 10.1.1.200 10.42.42.42
34
+#
35
+function expand_ip_ranges() {
36
+  local vips=${1:-""}
37
+  local expandedset=()
38
+
39
+  for iprange in $(echo "$vips" | sed 's/[^0-9\.\,-]//g' | tr "," " "); do
40
+    local ip1=$(echo "$iprange" | awk '{print $1}' FS='-')
41
+    local ip2=$(echo "$iprange" | awk '{print $2}' FS='-')
42
+    if [ -z "$ip2" ]; then
43
+      expandedset=(${expandedset[@]} "$ip1")
44
+    else
45
+      local base=$(echo "$ip1" | cut -f 1-3 -d '.')
46
+      local start=$(echo "$ip1" | awk '{print $NF}' FS='.')
47
+      local end=$(echo "$ip2" | awk '{print $NF}' FS='.')
48
+      for n in `seq $start $end`; do
49
+        expandedset=(${expandedset[@]} "${base}.$n")
50
+      done
51
+    fi
52
+  done
53
+
54
+  echo "${expandedset[@]}"
55
+}
56
+
57
+
58
+#
59
+#  Generate base name for the VRRP instance.
60
+#
61
+#  Examples:
62
+#     vrrp_instance_basename "arp"   # -> arp_VIP
63
+#
64
+#     vrrp_instance_basename "ha-1"  # -> ha_1_VIP
65
+#
66
+function vrrp_instance_basename() {
67
+  echo "$(scrub "$1")_VIP"
68
+}
69
+
70
+
71
+#
72
+#  Generate VRRP instance name.
73
+#
74
+#  Examples:
75
+#     generate_vrrp_instance_name arp 42  # -> arp_VIP_42
76
+#
77
+#     generate_vrrp_instance_name ha-1    # -> ha_1_VIP_0
78
+#
79
+function generate_vrrp_instance_name() {
80
+  local iid=${2:-0}
81
+  echo "$(vrrp_instance_basename "$1")_${iid}"
82
+}
83
+
84
+
85
+#
86
+#  Returns the network device name to use for VRRP.
87
+#
88
+#  Examples:
89
+#     get_network_device
90
+#
91
+#     get_network_device  "eth0"
92
+#
93
+function get_network_device() {
94
+  for dev in $1 ${VBOX_INTERFACES}; do
95
+    if ip addr show dev "$dev" &> /dev/null; then
96
+      echo "$dev"
97
+      return
98
+    fi
99
+  done
100
+
101
+  ip route get 8.8.8.8 | awk '/dev/ { f=NR }; f && (NR-1 == f)' RS=" "
102
+}
103
+
104
+
105
+#
106
+#  Returns the IP address associated with a network device.
107
+#
108
+#  Examples:
109
+#     get_device_ip_address
110
+#
111
+#     get_device_ip_address  "docker0"
112
+#
113
+function get_device_ip_address() {
114
+  local dev=${1:-"$(get_network_device)"}
115
+  ifconfig "$dev" | awk '/inet / { print $2 }'
116
+}
0 117
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+
1
+IMAGE_NAME="openshift/origin-keepalived-ipfailover"
2
+
3
+
4
+all:	image
5
+
6
+test:	image
7
+	(cd tests && ./verify_failover_image.sh)
8
+
9
+image:
10
+	docker build -t $(IMAGE_NAME) .
0 11
new file mode 100755
... ...
@@ -0,0 +1,15 @@
0
+#!/bin/bash
1
+
2
+#  Includes.
3
+source "$(dirname "${BASH_SOURCE[0]}")/lib/failover-functions.sh"
4
+
5
+
6
+#
7
+#  main():
8
+#
9
+setup_failover
10
+
11
+start_failover_services
12
+
13
+echo "`basename $0`: OpenShift IP Failover service terminated."
14
+
0 15
new file mode 100644
... ...
@@ -0,0 +1,36 @@
0
+#!/usr/bin/env python
1
+
2
+""" Echo server - reply back with the received message. """
3
+
4
+import os
5
+import signal
6
+import socket
7
+import sys
8
+
9
+
10
+def sigusr1_handler(signum, frame):
11
+    print 'signal %s received, exiting ...' % signum
12
+    sys.exit(0)
13
+
14
+
15
+def setup():
16
+    signal.signal(signal.SIGUSR1, sigusr1_handler)
17
+
18
+
19
+def runserver():
20
+    sock = socket.socket()
21
+    sock.bind(('0.0.0.0', int(os.environ.get('PORT', '12345'))))
22
+    sock.listen(10)
23
+
24
+    while True:
25
+        c, raddr = sock.accept()
26
+        try:
27
+            d = c.recv(4096)
28
+            c.send(d if d else '')
29
+        finally:
30
+            c.close()
31
+
32
+
33
+if "__main__" == __name__:
34
+    setup()
35
+    runserver()
0 36
new file mode 100755
... ...
@@ -0,0 +1,90 @@
0
+#!/bin/bash -e
1
+
2
+#  Constants.
3
+readonly TEST_DIR=$(dirname "${BASH_SOURCE[0]}")
4
+readonly FAILOVER_IMAGE="openshift/origin-keepalived-ipfailover"
5
+readonly TEST_VIPS="10.0.2.100-102"
6
+readonly MONITOR_PORT="12345"
7
+
8
+
9
+function stop_echo_server() {
10
+  local pid=$1
11
+  if [ -z "$pid" ]; then
12
+    pid=$(ps -e -opid,args | grep echoserver.py | grep -v grep | awk '{print $1}')
13
+  fi
14
+
15
+  #  Send SIGUSR1 to the echo server to terminate it.
16
+  [ -n "$pid" ] && kill -s USR1 $pid
17
+}
18
+
19
+
20
+function start_echo_server() {
21
+  stop_echo_server
22
+
23
+  export PORT=${MONITOR_PORT}
24
+  nohup python ${TEST_DIR}/echoserver.py &> /dev/null &
25
+  echo $!
26
+}
27
+
28
+
29
+function start_failover_container() {
30
+  local cfg="-e OPENSHIFT_HA_CONFIG_NAME="roto-r00ter""
31
+  local vips="-e OPENSHIFT_HA_VIRTUAL_IPS="${TEST_VIPS}""
32
+  local netif="-e OPENSHIFT_HA_NETWORK_INTERFACE="enp0s3""
33
+  local port="-e OPENSHIFT_HA_MONITOR_PORT="${MONITOR_PORT}""
34
+  # local unicast="-e export OPENSHIFT_HA_USE_UNICAST="true""
35
+  # local unicastpeers="-e OPENSHIFT_HA_UNICAST_PEERS="127.0.0.1""
36
+  local selector="-e OPENSHIFT_HA_SELECTOR="""
37
+  local envopts="$cfg $vips $netif $port $unicast $unicastpeers $selector"
38
+
39
+  docker run -dit --net=host --privileged=true   \
40
+         -v /lib/modules:/lib/modules $envopts $FAILOVER_IMAGE &
41
+
42
+}
43
+
44
+
45
+function run_image_verification_test() {
46
+  echo "  - starting echo server ..."
47
+  local pid=$(start_echo_server)
48
+  echo "  - started echo server pid=$pid ..."
49
+
50
+  #  On interrupt, cleanup - stop echo server.
51
+  trap "stop_echo_server $pid" INT
52
+
53
+  local cname=$(start_failover_container)
54
+  echo "  - started docker container $cname ..."
55
+
56
+  #  Wait a bit for all the services to startup.
57
+  sleep 10
58
+
59
+  #  Check container is up and has keepalived processes.
60
+  local cmd="ps -ef  | grep '/usr/sbin/keepalived' | grep -v grep | wc -l"
61
+  local numprocs=$(echo "$cmd" | docker exec -i $cname /bin/bash)
62
+
63
+  #  Stop echo server.
64
+  stop_echo_server $pid
65
+
66
+  if [[ -n "$numprocs" && $numprocs -gt 0 ]]; then
67
+    #  Success - print info and kill the container.
68
+    echo "  - There are $numprocs keepalived processes running"
69
+    echo "  - Cleaning up docker containers ..."
70
+    docker rm -f $cname
71
+    echo "  - All tests PASSED."
72
+    return 0
73
+  fi
74
+
75
+  #  Failure - print info and dump logs (keep the docker container around
76
+  #  for debugging).
77
+  echo "  - There are $numprocs keepalived processes running"
78
+  echo "  - logs from container $cname:"
79
+  docker logs $cname || :
80
+  echo "  - Test FAILED."
81
+  exit 1
82
+}
83
+
84
+
85
+#
86
+#  main():
87
+#
88
+run_image_verification_test
89
+
... ...
@@ -8,7 +8,7 @@ import (
8 8
 
9 9
 	"github.com/openshift/origin/pkg/cmd/cli/cmd"
10 10
 	"github.com/openshift/origin/pkg/cmd/experimental/buildchain"
11
-	exhaconfig "github.com/openshift/origin/pkg/cmd/experimental/haconfig"
11
+	exipfailover "github.com/openshift/origin/pkg/cmd/experimental/ipfailover"
12 12
 	"github.com/openshift/origin/pkg/cmd/experimental/policy"
13 13
 	"github.com/openshift/origin/pkg/cmd/experimental/project"
14 14
 	exregistry "github.com/openshift/origin/pkg/cmd/experimental/registry"
... ...
@@ -44,7 +44,7 @@ func NewCommandAdmin(name, fullName string, out io.Writer) *cobra.Command {
44 44
 
45 45
 	cmds.AddCommand(project.NewCmdNewProject(f, fullName, "new-project"))
46 46
 	cmds.AddCommand(policy.NewCommandPolicy(f, fullName, "policy"))
47
-	cmds.AddCommand(exhaconfig.NewCmdHAConfig(f, fullName, "ha-config", out))
47
+	cmds.AddCommand(exipfailover.NewCmdIPFailoverConfig(f, fullName, "ipfailover", out))
48 48
 	cmds.AddCommand(exrouter.NewCmdRouter(f, fullName, "router", out))
49 49
 	cmds.AddCommand(exregistry.NewCmdRegistry(f, fullName, "registry", out))
50 50
 	cmds.AddCommand(buildchain.NewCmdBuildChain(f, fullName, "build-chain"))
51 51
deleted file mode 100644
... ...
@@ -1,188 +0,0 @@
1
-package haconfig
2
-
3
-import (
4
-	"fmt"
5
-	"io"
6
-
7
-	cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util"
8
-	"github.com/golang/glog"
9
-	"github.com/spf13/cobra"
10
-
11
-	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
12
-	"github.com/openshift/origin/pkg/cmd/util/variable"
13
-	hac "github.com/openshift/origin/pkg/haconfig"
14
-	"github.com/openshift/origin/plugins/haconfig/keepalived"
15
-)
16
-
17
-const shortDesc = "Configure or view High Availability configuration"
18
-const description = `
19
-Configure or view High Availability configuration
20
-
21
-This command helps to setup High Availability (HA) configuration for an
22
-OpenShift environment. An administrator can configure HA on an entire
23
-cluster or as would normally be the case on a subset of nodes (as defined
24
-via a labelled selector).
25
-If no arguments are passed, this command will display the HA configuration
26
-for a resource name 'ha-config'.
27
-
28
-If a HA configuration does not exist with the given name, the --create flag
29
-can be passed to create a deployment configuration and service that will
30
-provide HA and failover capability. If you are running in production, it is
31
-recommended that the labelled selector for the nodes matches atleast 2
32
-nodes to ensure you have failover protection and that you provide a
33
-labelled selector.
34
-
35
-
36
-Examples:
37
-  Check the default HA configuration ("ha-config"):
38
-
39
-  $ %[1]s %[2]s
40
-
41
-  See what the HA configuration would look like if it is created:
42
-
43
-  $ %[1]s %[2]s -o json
44
-
45
-  Create a HA configuration if it does not already exist:
46
-
47
-  $ %[1]s %[2]s hac --virtual-ips="10.1.1.1-4" --create
48
-
49
-  Create a HA configuration on a selection of nodes labelled
50
-  "router=us-west-ha" (on 4 nodes with 7 virtual IPs monitoring a service
51
-  listening on port 80 (aka the OpenShift router process).
52
-
53
-  $ %[1]s %[2]s ha-config --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
54
-
55
-  Delete a previously created HA configuration:
56
-
57
-  $ %[1]s %[2]s hac --delete
58
-
59
-  Use a different HA config image and see the configuration:
60
-
61
-  $ %[1]s %[2]s ha-alt --selector="jack=the-vipper" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myhaconfig:mytag
62
-
63
-ALPHA: This command is currently being actively developed. It is intended
64
-       to simplify the administrative tasks of setting up a highly
65
-       available failover configuration.
66
-`
67
-
68
-func NewCmdHAConfig(f *clientcmd.Factory, parentName, name string, out io.Writer) *cobra.Command {
69
-	options := &hac.HAConfigCmdOptions{
70
-		ImageTemplate:    variable.NewDefaultImageTemplate(),
71
-		Selector:         hac.DefaultSelector,
72
-		ServicePort:      hac.DefaultServicePort,
73
-		WatchPort:        hac.DefaultWatchPort,
74
-		NetworkInterface: hac.DefaultInterface,
75
-		Replicas:         1,
76
-	}
77
-
78
-	cmd := &cobra.Command{
79
-		Use:   fmt.Sprintf("%s [<name>]", name),
80
-		Short: shortDesc,
81
-		Long:  fmt.Sprintf(description, parentName, name),
82
-		Run: func(cmd *cobra.Command, args []string) {
83
-			processCommand(f, options, cmd, args, out)
84
-		},
85
-	}
86
-
87
-	cmd.Flags().StringVar(&options.Type, "type", hac.DefaultType, "The type of HA configurator to use.")
88
-	cmd.Flags().StringVar(&options.ImageTemplate.Format, "images", options.ImageTemplate.Format, "The image to base this HA configurator on - ${component} will be replaced based on --type.")
89
-	cmd.Flags().BoolVar(&options.ImageTemplate.Latest, "latest-images", options.ImageTemplate.Latest, "If true, attempt to use the latest images instead of the current release")
90
-	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter nodes on.")
91
-	cmd.Flags().StringVar(&options.Credentials, "credentials", "", "Path to a .kubeconfig file that will contain the credentials the router should use to contact the master.")
92
-
93
-	cmd.Flags().BoolVar(&options.Create, "create", options.Create, "Create the configuration if it does not exist.")
94
-	cmd.Flags().BoolVar(&options.Delete, "delete", options.Delete, "Delete the configuration if it exists.")
95
-
96
-	cmd.Flags().StringVar(&options.VirtualIPs, "virtual-ips", "", "A set of virtual IP ranges and/or addresses that the routers bind and serve on and provide IP failover capability for.")
97
-	cmd.Flags().StringVarP(&options.NetworkInterface, "interface", "i", "", "Network interface bound by VRRP to use for the set of virtual IP ranges/addresses specified.")
98
-
99
-	// unicastHelp := `Send VRRP adverts using unicast instead of over the VRRP multicast group. This is useful in environments where multicast is not supported. Use with caution as this can get slow if the list of peers is large - it is recommended running this with the label option to select a set of nodes.`
100
-	// cmd.Flags().StringVarP(&options.UseUnicast, "unicast", "u", options.UseUnicast, unicastHelp)
101
-
102
-	cmd.Flags().IntVarP(&options.WatchPort, "watch-port", "w", hac.DefaultWatchPort, "Port to monitor or watch for resource availability.")
103
-	cmd.Flags().IntVarP(&options.Replicas, "replicas", "r", options.Replicas, "The replication factor of the HA configuration; commonly 2 when high availability is desired.")
104
-
105
-	cmdutil.AddPrinterFlags(cmd)
106
-	return cmd
107
-}
108
-
109
-func getConfigurationName(args []string) string {
110
-	name := hac.DefaultName
111
-
112
-	switch len(args) {
113
-	case 0:
114
-		// Do nothing - use default name.
115
-	case 1:
116
-		name = args[0]
117
-	default:
118
-		glog.Fatalf("Please pass zero or one arguments to provide a name for this configuration.")
119
-	}
120
-
121
-	return name
122
-}
123
-
124
-func getConfigurator(name string, f *clientcmd.Factory, options *hac.HAConfigCmdOptions, out io.Writer) *hac.Configurator {
125
-	//  Currently, the only supported plugin is keepalived (default).
126
-	plugin, err := keepalived.NewHAConfiguratorPlugin(name, f, options)
127
-
128
-	switch options.Type {
129
-	case hac.DefaultType:
130
-		//  Default.
131
-	// case <new-type>:  plugin, err = makeNewTypePlugin()
132
-	default:
133
-		glog.Fatalf("No plugins available to handle type %q", options.Type)
134
-	}
135
-
136
-	if err != nil {
137
-		glog.Fatalf("HAConfigurator %q plugin error: %v", options.Type, err)
138
-	}
139
-
140
-	return hac.NewConfigurator(name, plugin, out)
141
-}
142
-
143
-func previewConfiguration(c *hac.Configurator, cmd *cobra.Command, out io.Writer) bool {
144
-	p, output, err := cmdutil.PrinterForCommand(cmd)
145
-	if err != nil {
146
-		glog.Fatalf("Error configuring printer: %v", err)
147
-	}
148
-
149
-	// Check if we are outputting info.
150
-	if !output {
151
-		return false
152
-	}
153
-
154
-	if err := p.PrintObj(c.Generate(), out); err != nil {
155
-		glog.Fatalf("Unable to print object: %v", err)
156
-	}
157
-
158
-	return true
159
-}
160
-
161
-func processCommand(f *clientcmd.Factory, options *hac.HAConfigCmdOptions, cmd *cobra.Command, args []string, out io.Writer) {
162
-	name := getConfigurationName(args)
163
-	c := getConfigurator(name, f, options, out)
164
-
165
-	//  First up, validate all the command line options.
166
-	if err := hac.ValidateCmdOptions(options, c); err != nil {
167
-		glog.Fatal(err)
168
-	}
169
-
170
-	//  Check if we are just previewing the config.
171
-	if previewConfiguration(c, cmd, out) {
172
-		return
173
-	}
174
-
175
-	if options.Create {
176
-		c.Create()
177
-		if options.Delete {
178
-			glog.Warning("Superfluous --delete option was ignored.")
179
-		}
180
-		return
181
-	}
182
-
183
-	if options.Delete {
184
-		c.Delete()
185
-		return
186
-	}
187
-}
188 1
new file mode 100644
... ...
@@ -0,0 +1,173 @@
0
+package ipfailover
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+
6
+	cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util"
7
+	"github.com/golang/glog"
8
+	"github.com/spf13/cobra"
9
+
10
+	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
11
+	"github.com/openshift/origin/pkg/cmd/util/variable"
12
+	ipf "github.com/openshift/origin/pkg/ipfailover"
13
+	"github.com/openshift/origin/pkg/ipfailover/keepalived"
14
+)
15
+
16
+const shortDesc = "Configure or view IP Failover configuration"
17
+const description = `
18
+Configure or view IP Failover configuration
19
+
20
+This command helps to setup IP Failover configuration for an OpenShift
21
+environment. An administrator can configure IP failover on an entire
22
+cluster or, as would normally be the case, on a subset of nodes (as defined
23
+via a labelled selector).
24
+
25
+If an IP failover configuration does not exist with the given name,
26
+the --create flag can be passed to create a deployment configuration and
27
+service that will provide IP failover capability. If you are running in
28
+production, it is recommended that the labelled selector for the nodes
29
+matches at least 2 nodes to ensure you have failover protection and that
30
+you provide a --replicas=<n> value that matches the number of nodes for
31
+the given labelled selector.
32
+
33
+
34
+Examples:
35
+  Check the default IP failover configuration ("ipfailover"):
36
+
37
+  $ %[1]s %[2]s
38
+
39
+  See what the IP failover configuration would look like if it is created:
40
+
41
+  $ %[1]s %[2]s -o json
42
+
43
+  Create an IP failover configuration if it does not already exist:
44
+
45
+  $ %[1]s %[2]s ipf --virtual-ips="10.1.1.1-4" --create
46
+
47
+  Create an IP failover configuration on a selection of nodes labelled
48
+  "router=us-west-ha" (on 4 nodes with 7 virtual IPs monitoring a service
49
+  listening on port 80 (aka the OpenShift router process).
50
+
51
+  $ %[1]s %[2]s ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create
52
+
53
+  Use a different IP failover config image and see the configuration:
54
+
55
+  $ %[1]s %[2]s ipf-alt --selector="jack=the-vipper" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag
56
+
57
+ALPHA: This command is currently being actively developed. It is intended
58
+       to simplify the administrative tasks of setting up a highly
59
+       available failover configuration.
60
+`
61
+
62
+func NewCmdIPFailoverConfig(f *clientcmd.Factory, parentName, name string, out io.Writer) *cobra.Command {
63
+	options := &ipf.IPFailoverConfigCmdOptions{
64
+		ImageTemplate:    variable.NewDefaultImageTemplate(),
65
+		Selector:         ipf.DefaultSelector,
66
+		ServicePort:      ipf.DefaultServicePort,
67
+		WatchPort:        ipf.DefaultWatchPort,
68
+		NetworkInterface: ipf.DefaultInterface,
69
+		Replicas:         1,
70
+	}
71
+
72
+	cmd := &cobra.Command{
73
+		Use:   fmt.Sprintf("%s [<name>]", name),
74
+		Short: shortDesc,
75
+		Long:  fmt.Sprintf(description, parentName, name),
76
+		Run: func(cmd *cobra.Command, args []string) {
77
+			processCommand(f, options, cmd, args, out)
78
+		},
79
+	}
80
+
81
+	cmd.Flags().StringVar(&options.Type, "type", ipf.DefaultType, "The type of IP failover configurator to use.")
82
+	cmd.Flags().StringVar(&options.ImageTemplate.Format, "images", options.ImageTemplate.Format, "The image to base this IP failover configurator on - ${component} will be replaced based on --type.")
83
+	cmd.Flags().BoolVar(&options.ImageTemplate.Latest, "latest-images", options.ImageTemplate.Latest, "If true, attempt to use the latest images instead of the current release")
84
+	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter nodes on.")
85
+	cmd.Flags().StringVar(&options.Credentials, "credentials", "", "Path to a .kubeconfig file that will contain the credentials the router should use to contact the master.")
86
+
87
+	cmd.Flags().BoolVar(&options.Create, "create", options.Create, "Create the configuration if it does not exist.")
88
+
89
+	cmd.Flags().StringVar(&options.VirtualIPs, "virtual-ips", "", "A set of virtual IP ranges and/or addresses that the routers bind and serve on and provide IP failover capability for.")
90
+	cmd.Flags().StringVarP(&options.NetworkInterface, "interface", "i", "", "Network interface bound by VRRP to use for the set of virtual IP ranges/addresses specified.")
91
+
92
+	// unicastHelp := `Send VRRP adverts using unicast instead of over the VRRP multicast group. This is useful in environments where multicast is not supported. Use with caution as this can get slow if the list of peers is large - it is recommended running this with the label option to select a set of nodes.`
93
+	// cmd.Flags().StringVarP(&options.UseUnicast, "unicast", "u", options.UseUnicast, unicastHelp)
94
+
95
+	cmd.Flags().IntVarP(&options.WatchPort, "watch-port", "w", ipf.DefaultWatchPort, "Port to monitor or watch for resource availability.")
96
+	cmd.Flags().IntVarP(&options.Replicas, "replicas", "r", options.Replicas, "The replication factor of this IP failover configuration; commonly 2 when high availability is desired.")
97
+
98
+	cmdutil.AddPrinterFlags(cmd)
99
+	return cmd
100
+}
101
+
102
+func getConfigurationName(args []string) string {
103
+	name := ipf.DefaultName
104
+
105
+	switch len(args) {
106
+	case 0:
107
+		// Do nothing - use default name.
108
+	case 1:
109
+		name = args[0]
110
+	default:
111
+		glog.Fatalf("Please pass zero or one arguments to provide a name for this configuration.")
112
+	}
113
+
114
+	return name
115
+}
116
+
117
+func getConfigurator(name string, f *clientcmd.Factory, options *ipf.IPFailoverConfigCmdOptions, out io.Writer) *ipf.Configurator {
118
+	//  Currently, the only supported plugin is keepalived (default).
119
+	plugin, err := keepalived.NewIPFailoverConfiguratorPlugin(name, f, options)
120
+
121
+	switch options.Type {
122
+	case ipf.DefaultType:
123
+		//  Default.
124
+	// case <new-type>:  plugin, err = makeNewTypePlugin()
125
+	default:
126
+		glog.Fatalf("No plugins available to handle type %q", options.Type)
127
+	}
128
+
129
+	if err != nil {
130
+		glog.Fatalf("IPFailoverConfigurator %q plugin error: %v", options.Type, err)
131
+	}
132
+
133
+	return ipf.NewConfigurator(name, plugin, out)
134
+}
135
+
136
+func previewConfiguration(c *ipf.Configurator, cmd *cobra.Command, out io.Writer) bool {
137
+	p, output, err := cmdutil.PrinterForCommand(cmd)
138
+	if err != nil {
139
+		glog.Fatalf("Error configuring printer: %v", err)
140
+	}
141
+
142
+	// Check if we are outputting info.
143
+	if !output {
144
+		return false
145
+	}
146
+
147
+	if err := p.PrintObj(c.Generate(), out); err != nil {
148
+		glog.Fatalf("Unable to print object: %v", err)
149
+	}
150
+
151
+	return true
152
+}
153
+
154
+func processCommand(f *clientcmd.Factory, options *ipf.IPFailoverConfigCmdOptions, cmd *cobra.Command, args []string, out io.Writer) {
155
+	name := getConfigurationName(args)
156
+	c := getConfigurator(name, f, options, out)
157
+
158
+	//  First up, validate all the command line options.
159
+	if err := ipf.ValidateCmdOptions(options, c); err != nil {
160
+		glog.Fatal(err)
161
+	}
162
+
163
+	//  Check if we are just previewing the config.
164
+	if previewConfiguration(c, cmd, out) {
165
+		return
166
+	}
167
+
168
+	if options.Create {
169
+		c.Create()
170
+		return
171
+	}
172
+}
... ...
@@ -14,7 +14,7 @@ import (
14 14
 	"github.com/openshift/origin/pkg/cmd/experimental/buildchain"
15 15
 	"github.com/openshift/origin/pkg/cmd/experimental/bundlesecret"
16 16
 	"github.com/openshift/origin/pkg/cmd/experimental/generate"
17
-	exhaconfig "github.com/openshift/origin/pkg/cmd/experimental/haconfig"
17
+	exipfailover "github.com/openshift/origin/pkg/cmd/experimental/ipfailover"
18 18
 	"github.com/openshift/origin/pkg/cmd/experimental/policy"
19 19
 	"github.com/openshift/origin/pkg/cmd/experimental/project"
20 20
 	exregistry "github.com/openshift/origin/pkg/cmd/experimental/registry"
... ...
@@ -133,7 +133,7 @@ func newExperimentalCommand(parentName, name string) *cobra.Command {
133 133
 	experimental.AddCommand(project.NewCmdNewProject(f, subName, "new-project"))
134 134
 	experimental.AddCommand(tokens.NewCmdTokens(f, subName, "tokens"))
135 135
 	experimental.AddCommand(policy.NewCommandPolicy(f, subName, "policy"))
136
-	experimental.AddCommand(exhaconfig.NewCmdHAConfig(f, subName, "ha-config", os.Stdout))
136
+	experimental.AddCommand(exipfailover.NewCmdIPFailoverConfig(f, subName, "ipfailover", os.Stdout))
137 137
 	experimental.AddCommand(generate.NewCmdGenerate(f, subName, "generate", os.Stdout))
138 138
 	experimental.AddCommand(exrouter.NewCmdRouter(f, subName, "router", os.Stdout))
139 139
 	experimental.AddCommand(exregistry.NewCmdRegistry(f, subName, "registry", os.Stdout))
140 140
deleted file mode 100644
... ...
@@ -1,34 +0,0 @@
1
-package haconfig
2
-
3
-import (
4
-	"io"
5
-
6
-	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
7
-	"github.com/golang/glog"
8
-)
9
-
10
-type Configurator struct {
11
-	Name   string
12
-	Plugin HAConfiguratorPlugin
13
-	Writer io.Writer
14
-}
15
-
16
-func NewConfigurator(name string, plugin HAConfiguratorPlugin, out io.Writer) *Configurator {
17
-	glog.V(4).Infof("Creating haconfig configurator: %s", name)
18
-	return &Configurator{Name: name, Plugin: plugin, Writer: out}
19
-}
20
-
21
-func (c *Configurator) Generate() *kapi.List {
22
-	glog.V(4).Infof("Generating haconfig configuration: %s", c.Name)
23
-	return c.Plugin.Generate()
24
-}
25
-
26
-func (c *Configurator) Create() {
27
-	glog.V(4).Infof("Creating haconfig configuration: %s", c.Name)
28
-	c.Plugin.Create(c.Writer)
29
-}
30
-
31
-func (c *Configurator) Delete() {
32
-	glog.V(4).Infof("Deleting haconfig configuration: %s", c.Name)
33
-	c.Plugin.Delete(c.Writer)
34
-}
35 1
deleted file mode 100644
... ...
@@ -1,107 +0,0 @@
1
-package haconfig
2
-
3
-import (
4
-	"io"
5
-	"testing"
6
-
7
-	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
8
-
9
-	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
10
-)
11
-
12
-type MockPlugin struct {
13
-	Name      string
14
-	Factory   *clientcmd.Factory
15
-	Options   *HAConfigCmdOptions
16
-	Service   *kapi.Service
17
-	CallCount map[string]int
18
-}
19
-
20
-func (p *MockPlugin) IncrementCallCount(name string) {
21
-	value, ok := p.CallCount[name]
22
-	if !ok {
23
-		value = 0
24
-	}
25
-	value += 1
26
-	p.CallCount[name] = value
27
-}
28
-
29
-func (p *MockPlugin) GetWatchPort() int {
30
-	p.IncrementCallCount("GetWatchPort")
31
-	return p.Options.WatchPort
32
-}
33
-
34
-func (p *MockPlugin) GetSelector() map[string]string {
35
-	p.IncrementCallCount("GetSelector")
36
-	return map[string]string{DefaultName: p.Name}
37
-}
38
-
39
-func (p *MockPlugin) GetNamespace() string {
40
-	p.IncrementCallCount("GetNamespace")
41
-	return "mock"
42
-}
43
-
44
-func (p *MockPlugin) GetService() *kapi.Service {
45
-	p.IncrementCallCount("GetService")
46
-	return p.Service
47
-}
48
-
49
-func (p *MockPlugin) Generate() *kapi.List {
50
-	p.IncrementCallCount("Generate")
51
-	return &kapi.List{}
52
-}
53
-
54
-func (p *MockPlugin) Create(out io.Writer) {
55
-	p.IncrementCallCount("Create")
56
-}
57
-
58
-func (p *MockPlugin) Delete(out io.Writer) {
59
-	p.IncrementCallCount("Delete")
60
-}
61
-
62
-func TestNewConfigurator(t *testing.T) {
63
-	plugin := &MockPlugin{}
64
-	c := NewConfigurator("test-configurator", plugin, nil)
65
-	if nil == c {
66
-		t.Errorf("Test for NewConfigurator failed - got nil, expected a new configurator instance")
67
-	}
68
-}
69
-
70
-func makeMockPlugin(name string) *MockPlugin {
71
-	return &MockPlugin{
72
-		Name:      name,
73
-		Options:   &HAConfigCmdOptions{},
74
-		Service:   &kapi.Service{},
75
-		CallCount: make(map[string]int, 0),
76
-	}
77
-}
78
-
79
-type callback func(name string, c *Configurator)
80
-
81
-func runCallCountTest(t *testing.T, name string, expectation int, cb callback) {
82
-	plugin := makeMockPlugin(name)
83
-	c := NewConfigurator(name, plugin, nil)
84
-	cb(name, c)
85
-	callCount := plugin.CallCount[name]
86
-	if callCount != expectation {
87
-		t.Errorf("Test for Generate failed - got call count %d, expected %d", callCount, expectation)
88
-	}
89
-}
90
-
91
-func TestConfiguratorGenerate(t *testing.T) {
92
-	runCallCountTest(t, "Generate", 1, func(n string, c *Configurator) {
93
-		c.Generate()
94
-	})
95
-}
96
-
97
-func TestConfiguratorCreate(t *testing.T) {
98
-	runCallCountTest(t, "Create", 1, func(n string, c *Configurator) {
99
-		c.Create()
100
-	})
101
-}
102
-
103
-func TestConfiguratorDelete(t *testing.T) {
104
-	runCallCountTest(t, "Delete", 1, func(n string, c *Configurator) {
105
-		c.Delete()
106
-	})
107
-}
108 1
deleted file mode 100644
... ...
@@ -1,17 +0,0 @@
1
-package haconfig
2
-
3
-import (
4
-	"io"
5
-
6
-	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
7
-)
8
-
9
-type HAConfiguratorPlugin interface {
10
-	GetWatchPort() int
11
-	GetSelector() map[string]string
12
-	GetNamespace() string
13
-	GetService() *kapi.Service
14
-	Generate() *kapi.List
15
-	Create(out io.Writer)
16
-	Delete(out io.Writer)
17
-}
18 1
deleted file mode 100644
... ...
@@ -1,46 +0,0 @@
1
-package haconfig
2
-
3
-import (
4
-	"github.com/openshift/origin/pkg/cmd/util/variable"
5
-)
6
-
7
-const (
8
-	// Default ha-config resource name.
9
-	DefaultName = "ha-config"
10
-
11
-	// Default ha-config type.
12
-	DefaultType = "keepalived"
13
-
14
-	// Default service port.
15
-	DefaultServicePort = 1985
16
-
17
-	// Default ha-config watched port number.
18
-	DefaultWatchPort = 80
19
-
20
-	// Default resource selector.
21
-	DefaultSelector = "ha-config=<name>"
22
-
23
-	// Default network interface.
24
-	DefaultInterface = "eth0"
25
-)
26
-
27
-// Options supported by the ha-config admin command.
28
-type HAConfigCmdOptions struct {
29
-	Type          string
30
-	ImageTemplate variable.ImageTemplate
31
-	Credentials   string
32
-	ServicePort   int
33
-	Selector      string
34
-
35
-	//  Create/delete configuration.
36
-	Create bool
37
-	Delete bool
38
-
39
-	VirtualIPs       string
40
-	NetworkInterface string
41
-	WatchPort        int
42
-	Replicas         int
43
-
44
-	//  For the future - currently unused.
45
-	UseUnicast bool
46
-}
47 1
deleted file mode 100644
... ...
@@ -1,93 +0,0 @@
1
-package haconfig
2
-
3
-import (
4
-	"fmt"
5
-	"net"
6
-	"strconv"
7
-	"strings"
8
-)
9
-
10
-//  Validate IP address.
11
-func ValidateIPAddress(ip string) error {
12
-	ipaddr := strings.TrimSpace(ip)
13
-	if net.ParseIP(ipaddr) == nil {
14
-		return fmt.Errorf("Invalid IP address: %s", ip)
15
-	}
16
-
17
-	return nil
18
-}
19
-
20
-// Validate an IP address range or single IP address.
21
-func ValidateIPAddressRange(iprange string) error {
22
-	iprange = strings.TrimSpace(iprange)
23
-	if strings.Count(iprange, "-") < 1 {
24
-		return ValidateIPAddress(iprange)
25
-	}
26
-
27
-	// Its an IP range of the form: n.n.n.n-n
28
-	rangeLimits := strings.Split(iprange, "-")
29
-	startIP := rangeLimits[0]
30
-	parts := strings.Split(startIP, ".")
31
-	rangeStart := parts[3]
32
-	rangeEnd := rangeLimits[1]
33
-	if err := ValidateIPAddress(startIP); err != nil {
34
-		return err
35
-	}
36
-
37
-	//  Manufacture ending IP address for the range.
38
-	parts[3] = rangeEnd
39
-	endIP := strings.Join(parts, ".")
40
-	if ValidateIPAddress(endIP) != nil {
41
-		return fmt.Errorf("Invalid IP range end: %s [%s]", rangeEnd, endIP)
42
-	}
43
-
44
-	// Lastly, ensure start <= end
45
-	start, err := strconv.Atoi(rangeStart)
46
-	if err != nil {
47
-		return fmt.Errorf("Invalid IP range start: %s [%s]", rangeStart, startIP)
48
-	}
49
-
50
-	end, err := strconv.Atoi(rangeEnd)
51
-	if err != nil {
52
-		return fmt.Errorf("Invalid IP range end: %s [%s]", rangeEnd, endIP)
53
-	}
54
-
55
-	if start > end {
56
-		return fmt.Errorf("Invalid IP range %s-%s: start=%v > end=%v", startIP, endIP, start, end)
57
-	}
58
-
59
-	return nil
60
-}
61
-
62
-//  Validate virtual IP range/addresses.
63
-func ValidateVirtualIPs(vips string) error {
64
-	virtualIPs := strings.TrimSpace(vips)
65
-	if len(virtualIPs) < 1 {
66
-		return nil
67
-	}
68
-
69
-	for _, ip := range strings.Split(virtualIPs, ",") {
70
-		if err := ValidateIPAddressRange(ip); err != nil {
71
-			return err
72
-		}
73
-	}
74
-
75
-	return nil
76
-}
77
-
78
-// Validate command line operations.
79
-func ValidateCmdOptions(options *HAConfigCmdOptions, c *Configurator) error {
80
-	service := c.Plugin.GetService()
81
-
82
-	//  If deleting service, check service exists.
83
-	if options.Delete && nil == service {
84
-		return fmt.Errorf("No HA Config %q exists\n", c.Name)
85
-	}
86
-
87
-	//  If creating service, check service doesn't exist.
88
-	if options.Create && service != nil {
89
-		return fmt.Errorf("HA Config %q exists\n", c.Name)
90
-	}
91
-
92
-	return ValidateVirtualIPs(options.VirtualIPs)
93
-}
94 1
deleted file mode 100644
... ...
@@ -1,229 +0,0 @@
1
-package haconfig
2
-
3
-import (
4
-	"testing"
5
-
6
-	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
7
-)
8
-
9
-func TestValidateIPAddress(t *testing.T) {
10
-	validIPs := []string{"1.1.1.1", "1.1.1.255", "255.255.255.255",
11
-		"8.8.8.8", "0.1.2.3", "255.254.253.252",
12
-	}
13
-
14
-	for _, ip := range validIPs {
15
-		if err := ValidateIPAddress(ip); err != nil {
16
-			t.Errorf("Test valid ip=%q got error %s expected: no error.", ip, err)
17
-		}
18
-	}
19
-
20
-	invalidIPs := []string{"1.1.1.256", "256.256.256.256",
21
-		"1024.512.256.128", "a.b.c.d", "1.2.3.4.abc", "5.6.7.8def",
22
-		"a.12.13.14", "9999.888.77.6",
23
-	}
24
-
25
-	for _, ip := range invalidIPs {
26
-		if err := ValidateIPAddress(ip); err == nil {
27
-			t.Errorf("Test invalid ip=%q got no error expected: error.", ip)
28
-		}
29
-	}
30
-}
31
-
32
-func TestValidateIPAddressRange(t *testing.T) {
33
-	validRanges := []string{"1.1.1.1-1", "1.1.1.1-7", "1.1.1.250-255",
34
-		"255.255.255.255-255", "8.8.8.4-8", "0.1.2.3-255",
35
-		"255.254.253.252-255",
36
-	}
37
-
38
-	for _, iprange := range validRanges {
39
-		if err := ValidateIPAddressRange(iprange); err != nil {
40
-			t.Errorf("Test valid iprange=%q got error %s expected: no error.", iprange, err)
41
-		}
42
-	}
43
-
44
-	invalidRanges := []string{"1.1.1.256-250", "1.1.1.1-0",
45
-		"1.1.1.5-1", "255.255.255.255-259", "1024.512.256.128-255",
46
-		"a.b.c.d-e", "1.2.3.4.abc-def", "5.6.7.8def-1.2.3.4abc",
47
-		"a.12.13.14-55", "9999.888.77.6-66",
48
-	}
49
-
50
-	for _, iprange := range invalidRanges {
51
-		if err := ValidateIPAddressRange(iprange); err == nil {
52
-			t.Errorf("Test invalid iprange=%q got no error expected: error.", iprange)
53
-		}
54
-	}
55
-}
56
-
57
-func TestValidateVirtualIPs(t *testing.T) {
58
-	validVIPs := []string{"", "1.1.1.1-1,2.2.2.2", "4.4.4.4-8",
59
-		"1.1.1.1-7,2.2.2.2,3.3.3.3-5",
60
-		"1.1.1.250-255,255.255.255.255-255", "4.4.4.4-8,8.8.8.4-8",
61
-		"0.1.2.3-255,4.5.6.7,8.9.10.11,12.13.14.15-20",
62
-		"255.254.253.252-255,1.1.1.1",
63
-	}
64
-
65
-	for _, vips := range validVIPs {
66
-		if err := ValidateVirtualIPs(vips); err != nil {
67
-			t.Errorf("Test valid vips=%q got error %s expected: no error.",
68
-				vips, err)
69
-		}
70
-	}
71
-
72
-	invalidVIPs := []string{"1.1.1.256-250,2.2.2.2", "1.1.1.1,2.2.2.2-0",
73
-		"1.1.1.1-5,2.2.2.2,3.3.3.3-1", "255.255.255.255-259",
74
-		"1.2.3.4-5,1024.512.256.128-255", "1.1.1.1,a.b.c.d-e",
75
-		"a.b.c.d-e,5.4.3.2", "1.2.3.4.abc-def",
76
-		"5.6.7.8def-1.2.3.4abc", "4.1.1.1,a.12.13.14-55",
77
-		"8.8.8.8,9999.888.77.6-66,4.4.4.4-8",
78
-	}
79
-
80
-	for _, vips := range invalidVIPs {
81
-		if err := ValidateVirtualIPs(vips); err == nil {
82
-			t.Errorf("Test invalid vips=%q got no error expected: error.", vips)
83
-		}
84
-	}
85
-}
86
-
87
-func getMockConfigurator(options *HAConfigCmdOptions, service *kapi.Service) *Configurator {
88
-	p := &MockPlugin{
89
-		Name:      "mock",
90
-		Options:   options,
91
-		Service:   service,
92
-		CallCount: make(map[string]int, 0),
93
-	}
94
-	return NewConfigurator("mock-plugin", p, nil)
95
-}
96
-
97
-func TestValidateCmdOptionsForCreate(t *testing.T) {
98
-	tests := []struct {
99
-		Name             string
100
-		Create           bool
101
-		Service          *kapi.Service
102
-		ErrorExpectation bool
103
-	}{
104
-		{
105
-			Name:             "create-with-no-service",
106
-			Create:           true,
107
-			ErrorExpectation: false,
108
-		},
109
-		{
110
-			Name:             "create-with-service",
111
-			Create:           true,
112
-			Service:          &kapi.Service{},
113
-			ErrorExpectation: true,
114
-		},
115
-		{
116
-			Name:             "no-create-option-and-service",
117
-			ErrorExpectation: false,
118
-		},
119
-		{
120
-			Name:             "no-create-option-with-service",
121
-			Service:          &kapi.Service{},
122
-			ErrorExpectation: false,
123
-		},
124
-	}
125
-
126
-	for _, tc := range tests {
127
-		options := &HAConfigCmdOptions{Create: tc.Create}
128
-		plugin := &MockPlugin{
129
-			Name:      "mock",
130
-			Options:   options,
131
-			Service:   tc.Service,
132
-			CallCount: make(map[string]int, 0),
133
-		}
134
-		c := NewConfigurator(tc.Name, plugin, nil)
135
-
136
-		err := ValidateCmdOptions(options, c)
137
-		if err != nil && !tc.ErrorExpectation {
138
-			t.Errorf("Test case %q got an error: %v where none was expected.",
139
-				tc.Name, err)
140
-		}
141
-		if nil == err && tc.ErrorExpectation {
142
-			t.Errorf("Test case %q got no error - expected an error.", tc.Name)
143
-		}
144
-	}
145
-}
146
-
147
-func TestValidateCmdOptionsForDelete(t *testing.T) {
148
-	tests := []struct {
149
-		Name             string
150
-		Delete           bool
151
-		Service          *kapi.Service
152
-		ErrorExpectation bool
153
-	}{
154
-		{
155
-			Name:             "delete-with-service",
156
-			Delete:           true,
157
-			Service:          &kapi.Service{},
158
-			ErrorExpectation: false,
159
-		},
160
-		{
161
-			Name:             "delete-with-no-service",
162
-			Delete:           true,
163
-			ErrorExpectation: true,
164
-		},
165
-		{
166
-			Name:             "no-delete-option-and-service",
167
-			ErrorExpectation: false,
168
-		},
169
-		{
170
-			Name:             "no-delete-option-with-service",
171
-			Service:          &kapi.Service{},
172
-			ErrorExpectation: false,
173
-		},
174
-	}
175
-
176
-	for _, tc := range tests {
177
-		options := &HAConfigCmdOptions{Delete: tc.Delete}
178
-		plugin := &MockPlugin{
179
-			Name:      "mock",
180
-			Options:   options,
181
-			Service:   tc.Service,
182
-			CallCount: make(map[string]int, 0),
183
-		}
184
-		c := NewConfigurator(tc.Name, plugin, nil)
185
-
186
-		err := ValidateCmdOptions(options, c)
187
-		if err != nil && !tc.ErrorExpectation {
188
-			t.Errorf("Test case %q got an error: %v where none was expected.",
189
-				tc.Name, err)
190
-		}
191
-		if nil == err && tc.ErrorExpectation {
192
-			t.Errorf("Test case %q got no error - expected an error.", tc.Name)
193
-		}
194
-	}
195
-}
196
-
197
-func TestValidateCmdOptionsVIPs(t *testing.T) {
198
-	validVIPs := []string{"", "1.1.1.1-1,2.2.2.2", "4.4.4.4-8",
199
-		"1.1.1.1-7,2.2.2.2,3.3.3.3-5",
200
-		"1.1.1.250-255,255.255.255.255-255", "4.4.4.4-8,8.8.8.4-8",
201
-		"0.1.2.3-255,4.5.6.7,8.9.10.11,12.13.14.15-20",
202
-		"255.254.253.252-255,1.1.1.1",
203
-	}
204
-
205
-	for _, vips := range validVIPs {
206
-		options := &HAConfigCmdOptions{VirtualIPs: vips}
207
-		c := getMockConfigurator(options, nil)
208
-		if err := ValidateCmdOptions(options, c); err != nil {
209
-			t.Errorf("Test command options valid vips=%q got error %s expected: no error.",
210
-				vips, err)
211
-		}
212
-	}
213
-
214
-	invalidVIPs := []string{"1.1.1.256-250,2.2.2.2", "1.1.1.1,2.2.2.2-0",
215
-		"1.1.1.1-5,2.2.2.2,3.3.3.3-1", "255.255.255.255-259",
216
-		"1.2.3.4-5,1024.512.256.128-255", "1.1.1.1,a.b.c.d-e",
217
-		"a.b.c.d-e,5.4.3.2", "1.2.3.4.abc-def",
218
-		"5.6.7.8def-1.2.3.4abc", "4.1.1.1,a.12.13.14-55",
219
-		"8.8.8.8,9999.888.77.6-66,4.4.4.4-8",
220
-	}
221
-
222
-	for _, vips := range invalidVIPs {
223
-		options := &HAConfigCmdOptions{VirtualIPs: vips}
224
-		c := getMockConfigurator(options, nil)
225
-		if err := ValidateCmdOptions(options, c); err == nil {
226
-			t.Errorf("Test command options invalid vips=%q got no error expected: error.", vips)
227
-		}
228
-	}
229
-}
230 1
new file mode 100644
... ...
@@ -0,0 +1,29 @@
0
+package ipfailover
1
+
2
+import (
3
+	"io"
4
+
5
+	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
6
+	"github.com/golang/glog"
7
+)
8
+
9
+type Configurator struct {
10
+	Name   string
11
+	Plugin IPFailoverConfiguratorPlugin
12
+	Writer io.Writer
13
+}
14
+
15
+func NewConfigurator(name string, plugin IPFailoverConfiguratorPlugin, out io.Writer) *Configurator {
16
+	glog.V(4).Infof("Creating IP failover configurator: %s", name)
17
+	return &Configurator{Name: name, Plugin: plugin, Writer: out}
18
+}
19
+
20
+func (c *Configurator) Generate() *kapi.List {
21
+	glog.V(4).Infof("Generating IP failover configuration: %s", c.Name)
22
+	return c.Plugin.Generate()
23
+}
24
+
25
+func (c *Configurator) Create() {
26
+	glog.V(4).Infof("Creating IP failover configuration: %s", c.Name)
27
+	c.Plugin.Create(c.Writer)
28
+}
0 29
new file mode 100644
... ...
@@ -0,0 +1,97 @@
0
+package ipfailover
1
+
2
+import (
3
+	"io"
4
+	"testing"
5
+
6
+	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
7
+
8
+	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
9
+)
10
+
11
+type MockPlugin struct {
12
+	Name      string
13
+	Factory   *clientcmd.Factory
14
+	Options   *IPFailoverConfigCmdOptions
15
+	Service   *kapi.Service
16
+	CallCount map[string]int
17
+}
18
+
19
+func (p *MockPlugin) IncrementCallCount(name string) {
20
+	value, ok := p.CallCount[name]
21
+	if !ok {
22
+		value = 0
23
+	}
24
+	value += 1
25
+	p.CallCount[name] = value
26
+}
27
+
28
+func (p *MockPlugin) GetWatchPort() int {
29
+	p.IncrementCallCount("GetWatchPort")
30
+	return p.Options.WatchPort
31
+}
32
+
33
+func (p *MockPlugin) GetSelector() map[string]string {
34
+	p.IncrementCallCount("GetSelector")
35
+	return map[string]string{DefaultName: p.Name}
36
+}
37
+
38
+func (p *MockPlugin) GetNamespace() string {
39
+	p.IncrementCallCount("GetNamespace")
40
+	return "mock"
41
+}
42
+
43
+func (p *MockPlugin) GetService() *kapi.Service {
44
+	p.IncrementCallCount("GetService")
45
+	return p.Service
46
+}
47
+
48
+func (p *MockPlugin) Generate() *kapi.List {
49
+	p.IncrementCallCount("Generate")
50
+	return &kapi.List{}
51
+}
52
+
53
+func (p *MockPlugin) Create(out io.Writer) {
54
+	p.IncrementCallCount("Create")
55
+}
56
+
57
+func TestNewConfigurator(t *testing.T) {
58
+	plugin := &MockPlugin{}
59
+	c := NewConfigurator("test-configurator", plugin, nil)
60
+	if nil == c {
61
+		t.Errorf("Test for NewConfigurator failed - got nil, expected a new configurator instance")
62
+	}
63
+}
64
+
65
+func makeMockPlugin(name string) *MockPlugin {
66
+	return &MockPlugin{
67
+		Name:      name,
68
+		Options:   &IPFailoverConfigCmdOptions{},
69
+		Service:   &kapi.Service{},
70
+		CallCount: make(map[string]int, 0),
71
+	}
72
+}
73
+
74
+type callback func(name string, c *Configurator)
75
+
76
+func runCallCountTest(t *testing.T, name string, expectation int, cb callback) {
77
+	plugin := makeMockPlugin(name)
78
+	c := NewConfigurator(name, plugin, nil)
79
+	cb(name, c)
80
+	callCount := plugin.CallCount[name]
81
+	if callCount != expectation {
82
+		t.Errorf("Test for Generate failed - got call count %d, expected %d", callCount, expectation)
83
+	}
84
+}
85
+
86
+func TestConfiguratorGenerate(t *testing.T) {
87
+	runCallCountTest(t, "Generate", 1, func(n string, c *Configurator) {
88
+		c.Generate()
89
+	})
90
+}
91
+
92
+func TestConfiguratorCreate(t *testing.T) {
93
+	runCallCountTest(t, "Create", 1, func(n string, c *Configurator) {
94
+		c.Create()
95
+	})
96
+}
0 97
new file mode 100644
... ...
@@ -0,0 +1,16 @@
0
+package ipfailover
1
+
2
+import (
3
+	"io"
4
+
5
+	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
6
+)
7
+
8
+type IPFailoverConfiguratorPlugin interface {
9
+	GetWatchPort() int
10
+	GetSelector() map[string]string
11
+	GetNamespace() string
12
+	GetService() *kapi.Service
13
+	Generate() *kapi.List
14
+	Create(out io.Writer)
15
+}
0 16
new file mode 100644
... ...
@@ -0,0 +1,168 @@
0
+package keepalived
1
+
2
+import (
3
+	"fmt"
4
+	"strconv"
5
+
6
+	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
7
+	kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
8
+	kclientcmd "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
9
+	"github.com/golang/glog"
10
+
11
+	dapi "github.com/openshift/origin/pkg/deploy/api"
12
+	"github.com/openshift/origin/pkg/generate/app"
13
+	"github.com/openshift/origin/pkg/ipfailover"
14
+)
15
+
16
const (
	// defaultInterface is the fallback network interface name.
	defaultInterface = "eth0"
	// libModulesVolumeName names the volume exposing host kernel modules.
	libModulesVolumeName = "lib-modules"
	// libModulesPath is the kernel modules directory mounted into the container.
	libModulesPath = "/lib/modules"
)
19
+
20
+//  Get kube client configuration from a file containing credentials for
21
+//  connecting to the master.
22
+func getClientConfig(path string) *kclient.Config {
23
+	if 0 == len(path) {
24
+		glog.Fatalf("You must specify a .kubeconfig file path containing credentials for connecting to the master with --credentials")
25
+	}
26
+
27
+	rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path, Precedence: []string{}}
28
+	credentials, err := rules.Load()
29
+	if err != nil {
30
+		glog.Fatalf("Could not load credentials from %q: %v", path, err)
31
+	}
32
+
33
+	config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
34
+	if err != nil {
35
+		glog.Fatalf("Credentials %q error: %v", path, err)
36
+	}
37
+
38
+	if err := kclient.LoadTLSFiles(config); err != nil {
39
+		glog.Fatalf("Unable to load certificate info using credentials from %q: %v", path, err)
40
+	}
41
+
42
+	return config
43
+}
44
+
45
+//  Generate the IP failover monitor (keepalived) container environment entries.
46
+func generateEnvEntries(name string, options *ipfailover.IPFailoverConfigCmdOptions, kconfig *kclient.Config) app.Environment {
47
+	watchPort := strconv.Itoa(options.WatchPort)
48
+	replicas := strconv.Itoa(options.Replicas)
49
+	insecureStr := strconv.FormatBool(kconfig.Insecure)
50
+	unicastStr := strconv.FormatBool(options.UseUnicast)
51
+
52
+	return app.Environment{
53
+		"OPENSHIFT_MASTER":    kconfig.Host,
54
+		"OPENSHIFT_CA_DATA":   string(kconfig.CAData),
55
+		"OPENSHIFT_KEY_DATA":  string(kconfig.KeyData),
56
+		"OPENSHIFT_CERT_DATA": string(kconfig.CertData),
57
+		"OPENSHIFT_INSECURE":  insecureStr,
58
+
59
+		"OPENSHIFT_HA_CONFIG_NAME":       name,
60
+		"OPENSHIFT_HA_VIRTUAL_IPS":       options.VirtualIPs,
61
+		"OPENSHIFT_HA_NETWORK_INTERFACE": options.NetworkInterface,
62
+		"OPENSHIFT_HA_MONITOR_PORT":      watchPort,
63
+		"OPENSHIFT_HA_REPLICA_COUNT":     replicas,
64
+		"OPENSHIFT_HA_USE_UNICAST":       unicastStr,
65
+		// "OPENSHIFT_HA_UNICAST_PEERS":     "127.0.0.1",
66
+	}
67
+}
68
+
69
+//  Generate the IP failover monitor (keepalived) container configuration.
70
+func generateFailoverMonitorContainerConfig(name string, options *ipfailover.IPFailoverConfigCmdOptions, env app.Environment) *kapi.Container {
71
+	containerName := fmt.Sprintf("%s-%s", name, options.Type)
72
+
73
+	imageName := fmt.Sprintf("%s-%s", options.Type, ipfailover.DefaultName)
74
+	image := options.ImageTemplate.ExpandOrDie(imageName)
75
+
76
+	//  Container port to expose the service interconnects between keepaliveds.
77
+	ports := make([]kapi.ContainerPort, 1)
78
+	ports[0] = kapi.ContainerPort{
79
+		ContainerPort: options.ServicePort,
80
+		HostPort:      options.ServicePort,
81
+	}
82
+
83
+	mounts := make([]kapi.VolumeMount, 1)
84
+	mounts[0] = kapi.VolumeMount{
85
+		Name:      libModulesVolumeName,
86
+		ReadOnly:  true,
87
+		MountPath: libModulesPath,
88
+	}
89
+
90
+	return &kapi.Container{
91
+		Name:            containerName,
92
+		Image:           image,
93
+		Ports:           ports,
94
+		Privileged:      true,
95
+		ImagePullPolicy: kapi.PullIfNotPresent,
96
+		VolumeMounts:    mounts,
97
+		Env:             env.List(),
98
+	}
99
+}
100
+
101
+//  Generate the IP failover monitor (keepalived) container configuration.
102
+func generateContainerConfig(name string, options *ipfailover.IPFailoverConfigCmdOptions) []kapi.Container {
103
+	containers := make([]kapi.Container, 0)
104
+
105
+	if len(options.VirtualIPs) < 1 {
106
+		return containers
107
+	}
108
+
109
+	config := getClientConfig(options.Credentials)
110
+	env := generateEnvEntries(name, options, config)
111
+
112
+	c := generateFailoverMonitorContainerConfig(name, options, env)
113
+	if c != nil {
114
+		containers = append(containers, *c)
115
+	}
116
+
117
+	return containers
118
+}
119
+
120
+//  Generate the IP failover monitor (keepalived) container volume config.
121
+func generateVolumeConfig() []kapi.Volume {
122
+	//  The keepalived container needs access to the kernel modules
123
+	//  directory in order to load the module.
124
+	hostPath := &kapi.HostPathVolumeSource{Path: libModulesPath}
125
+	src := kapi.VolumeSource{HostPath: hostPath}
126
+
127
+	vol := kapi.Volume{Name: libModulesVolumeName, VolumeSource: src}
128
+	return []kapi.Volume{vol}
129
+}
130
+
131
+//  Generate the IP Failover deployment configuration.
132
+func GenerateDeploymentConfig(name string, options *ipfailover.IPFailoverConfigCmdOptions, selector map[string]string) *dapi.DeploymentConfig {
133
+	podTemplate := &kapi.PodTemplateSpec{
134
+		ObjectMeta: kapi.ObjectMeta{Labels: selector},
135
+		Spec: kapi.PodSpec{
136
+			HostNetwork: true,
137
+			Containers:  generateContainerConfig(name, options),
138
+			Volumes:     generateVolumeConfig(),
139
+		},
140
+	}
141
+
142
+	return &dapi.DeploymentConfig{
143
+		ObjectMeta: kapi.ObjectMeta{
144
+			Name:   name,
145
+			Labels: selector,
146
+		},
147
+		Triggers: []dapi.DeploymentTriggerPolicy{
148
+			{Type: dapi.DeploymentTriggerOnConfigChange},
149
+		},
150
+		Template: dapi.DeploymentTemplate{
151
+			Strategy: dapi.DeploymentStrategy{
152
+				Type: dapi.DeploymentStrategyTypeRecreate,
153
+			},
154
+
155
+			// TODO: v0.1 requires a manual resize to the
156
+			//       replicas to match current cluster state.
157
+			//       In the future, the PerNodeController in
158
+			//       kubernetes would remove the need for this
159
+			//       manual intervention.
160
+			ControllerTemplate: kapi.ReplicationControllerSpec{
161
+				Replicas: options.Replicas,
162
+				Selector: selector,
163
+				Template: podTemplate,
164
+			},
165
+		},
166
+	}
167
+}
0 168
new file mode 100644
... ...
@@ -0,0 +1,130 @@
0
+package keepalived
1
+
2
+import (
3
+	"io"
4
+	"os"
5
+	"strings"
6
+
7
+	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
8
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
9
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
10
+	"github.com/golang/glog"
11
+
12
+	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
13
+	configcmd "github.com/openshift/origin/pkg/config/cmd"
14
+	"github.com/openshift/origin/pkg/generate/app"
15
+	"github.com/openshift/origin/pkg/ipfailover"
16
+)
17
+
18
+//  IP Failover configurator plugin for keepalived sidecar.
19
+type KeepalivedPlugin struct {
20
+	Name    string
21
+	Factory *clientcmd.Factory
22
+	Options *ipfailover.IPFailoverConfigCmdOptions
23
+}
24
+
25
+//  Create a new IPFailoverConfigurator (keepalived) plugin instance.
26
+func NewIPFailoverConfiguratorPlugin(name string, f *clientcmd.Factory, options *ipfailover.IPFailoverConfigCmdOptions) (*KeepalivedPlugin, error) {
27
+	glog.V(4).Infof("Creating new KeepAlived plugin: %q", name)
28
+
29
+	p := &KeepalivedPlugin{
30
+		Name:    name,
31
+		Factory: f,
32
+		Options: options,
33
+	}
34
+
35
+	return p, nil
36
+}
37
+
38
+//  Get the port to monitor for the IP Failover configuration.
39
+func (p *KeepalivedPlugin) GetWatchPort() int {
40
+	port := p.Options.WatchPort
41
+	if port < 1 {
42
+		port = ipfailover.DefaultWatchPort
43
+	}
44
+
45
+	glog.V(4).Infof("KeepAlived IP Failover config: %q - WatchPort: %+v", p.Name, port)
46
+
47
+	return port
48
+}
49
+
50
+//  Get the selector associated with this IP Failover configurator plugin.
51
+func (p *KeepalivedPlugin) GetSelector() map[string]string {
52
+	if p.Options.Selector == ipfailover.DefaultSelector {
53
+		return map[string]string{ipfailover.DefaultName: p.Name}
54
+	}
55
+
56
+	labels, remove, err := app.LabelsFromSpec(strings.Split(p.Options.Selector, ","))
57
+	if err != nil {
58
+		glog.Fatal(err)
59
+	}
60
+
61
+	if len(remove) > 0 {
62
+		glog.Fatalf("You may not pass negative labels in %q", p.Options.Selector)
63
+	}
64
+
65
+	glog.V(4).Infof("KeepAlived IP Failover config: %q - selector: %+v", p.Name, labels)
66
+
67
+	return labels
68
+}
69
+
70
+//  Get the namespace associated with this IP Failover configurator plugin.
71
+func (p *KeepalivedPlugin) GetNamespace() string {
72
+	namespace, err := p.Factory.OpenShiftClientConfig.Namespace()
73
+	if err != nil {
74
+		glog.Fatalf("Error get OS client config: %v", err)
75
+	}
76
+
77
+	glog.V(4).Infof("KeepAlived IP Failover config: %q - namespace: %q", p.Name, namespace)
78
+
79
+	return namespace
80
+}
81
+
82
+//  Get the service associated with this IP Failover configurator plugin.
83
+func (p *KeepalivedPlugin) GetService() *kapi.Service {
84
+	_, kClient, err := p.Factory.Clients()
85
+	if err != nil {
86
+		glog.Fatalf("Error getting client: %v", err)
87
+	}
88
+
89
+	namespace := p.GetNamespace()
90
+	service, err := kClient.Services(namespace).Get(p.Name)
91
+	if err != nil {
92
+		if errors.IsNotFound(err) {
93
+			glog.V(4).Infof("KeepAlived IP Failover config: %s - no service found", p.Name)
94
+			return nil
95
+		}
96
+		glog.Fatalf("Error getting KeepAlived IP Failover config service %q: %v", p.Name, err)
97
+	}
98
+
99
+	glog.V(4).Infof("KeepAlived IP Failover config: %q service: %+v", p.Name, service)
100
+
101
+	return service
102
+}
103
+
104
+//  Generate the config and services for this IP Failover configuration plugin.
105
+func (p *KeepalivedPlugin) Generate() *kapi.List {
106
+	dc := GenerateDeploymentConfig(p.Name, p.Options, p.GetSelector())
107
+	objects := []runtime.Object{dc}
108
+
109
+	services := &kapi.List{Items: app.AddServices(objects)}
110
+	glog.V(4).Infof("KeepAlived IP Failover config: %q - generated services: %+v", p.Name, services)
111
+
112
+	return services
113
+}
114
+
115
+//  Create the config and services associated with this IP Failover configuration.
116
+func (p *KeepalivedPlugin) Create(out io.Writer) {
117
+	namespace := p.GetNamespace()
118
+
119
+	bulk := configcmd.Bulk{
120
+		Factory: p.Factory.Factory,
121
+		After:   configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
122
+	}
123
+
124
+	if errs := bulk.Create(p.Generate(), namespace); len(errs) != 0 {
125
+		glog.Fatalf("Error creating config: %+v", errs)
126
+	}
127
+
128
+	glog.V(4).Infof("Created KeepAlived IP Failover config: %q", p.Name)
129
+}
0 130
new file mode 100644
... ...
@@ -0,0 +1,181 @@
0
+package keepalived
1
+
2
+import (
3
+	"testing"
4
+
5
+	"github.com/openshift/origin/pkg/ipfailover"
6
+)
7
+
8
+func TestNewIPFailoverConfiguratorPlugin(t *testing.T) {
9
+	tests := []struct {
10
+		Name             string
11
+		Options          *ipfailover.IPFailoverConfigCmdOptions
12
+		ErrorExpectation bool
13
+	}{
14
+		{
15
+			Name:             "selector",
16
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{Selector: "ipfailover=test-nodes"},
17
+			ErrorExpectation: false,
18
+		},
19
+		{
20
+			Name:             "empty-selector",
21
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{Selector: ""},
22
+			ErrorExpectation: false,
23
+		},
24
+		{
25
+			Name: "vips",
26
+			Options: &ipfailover.IPFailoverConfigCmdOptions{
27
+				VirtualIPs: "1.2.3.4,5.6.7.8-10,11.0.0.12",
28
+			},
29
+			ErrorExpectation: false,
30
+		},
31
+		{
32
+			Name:             "empty-vips",
33
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{VirtualIPs: ""},
34
+			ErrorExpectation: false,
35
+		},
36
+		{
37
+			Name:             "interface",
38
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{NetworkInterface: "eth0"},
39
+			ErrorExpectation: false,
40
+		},
41
+		{
42
+			Name:             "empty-interface",
43
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{NetworkInterface: ""},
44
+			ErrorExpectation: false,
45
+		},
46
+		{
47
+			Name:             "watch-port",
48
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{WatchPort: 999},
49
+			ErrorExpectation: false,
50
+		},
51
+		{
52
+			Name:             "replicas",
53
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{Replicas: 2},
54
+			ErrorExpectation: false,
55
+		},
56
+		{
57
+			Name: "all-options",
58
+			Options: &ipfailover.IPFailoverConfigCmdOptions{
59
+				Selector:         "ipf=v1",
60
+				VirtualIPs:       "9.8.7.6,5.4.3.2-5",
61
+				NetworkInterface: "ipf0",
62
+				WatchPort:        12345,
63
+				Replicas:         1,
64
+			},
65
+			ErrorExpectation: false,
66
+		},
67
+		{
68
+			Name:             "no-options",
69
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{},
70
+			ErrorExpectation: false,
71
+		},
72
+		{
73
+			Name:             "", // empty
74
+			Options:          &ipfailover.IPFailoverConfigCmdOptions{},
75
+			ErrorExpectation: false,
76
+		},
77
+	}
78
+
79
+	for _, tc := range tests {
80
+		p, err := NewIPFailoverConfiguratorPlugin(tc.Name, nil, tc.Options)
81
+		if err != nil && !tc.ErrorExpectation {
82
+			t.Errorf("Test case for %s got an error where none was expected", tc.Name)
83
+		}
84
+
85
+		if nil == err && nil == p {
86
+			t.Errorf("Test case for %s got no error but plugin was not found", tc.Name)
87
+		}
88
+	}
89
+}
90
+
91
+func TestPluginGetWatchPort(t *testing.T) {
92
+	tests := []struct {
93
+		Name      string
94
+		WatchPort int
95
+		Expected  int
96
+	}{
97
+		{
98
+			Name:      "router",
99
+			WatchPort: 80,
100
+			Expected:  80,
101
+		},
102
+		{
103
+			Name:      "service1",
104
+			WatchPort: 9999,
105
+			Expected:  9999,
106
+		},
107
+		{
108
+			Name:      "invalid-port",
109
+			WatchPort: -12345,
110
+			Expected:  80,
111
+		},
112
+		{
113
+			Name:      "invalid-port-2",
114
+			WatchPort: -1,
115
+			Expected:  80,
116
+		},
117
+		{
118
+			Name:      "zero-port",
119
+			WatchPort: 0,
120
+			Expected:  80,
121
+		},
122
+	}
123
+
124
+	for _, tc := range tests {
125
+		options := &ipfailover.IPFailoverConfigCmdOptions{WatchPort: tc.WatchPort}
126
+		p, err := NewIPFailoverConfiguratorPlugin(tc.Name, nil, options)
127
+		if err != nil {
128
+			t.Errorf("Error creating IPFailoverConfigurator plugin - test=%q, error: %v", tc.Name, err)
129
+		}
130
+
131
+		port := p.GetWatchPort()
132
+		if tc.Expected != port {
133
+			t.Errorf("Test case %q expected watch port = %d, got %d",
134
+				tc.Name, tc.Expected, port)
135
+		}
136
+
137
+	}
138
+}
139
+
140
+func TestPluginGetSelector(t *testing.T) {
141
+	tests := []struct {
142
+		Name        string
143
+		Selector    string
144
+		ExpectedKey string
145
+	}{
146
+		{
147
+			Name:        "router",
148
+			Selector:    "ipf=router",
149
+			ExpectedKey: "ipf",
150
+		},
151
+		{
152
+			Name:        "service1",
153
+			Selector:    "service1=us-west",
154
+			ExpectedKey: "service1",
155
+		},
156
+		{
157
+			Name:        "default-selector",
158
+			Selector:    ipfailover.DefaultSelector,
159
+			ExpectedKey: ipfailover.DefaultName,
160
+		},
161
+	}
162
+
163
+	for _, tc := range tests {
164
+		options := &ipfailover.IPFailoverConfigCmdOptions{Selector: tc.Selector}
165
+		p, err := NewIPFailoverConfiguratorPlugin(tc.Name, nil, options)
166
+		if err != nil {
167
+			t.Errorf("Error creating IPFailoverConfigurator plugin - test=%q, error: %v", tc.Name, err)
168
+		}
169
+
170
+		selector := p.GetSelector()
171
+		if len(tc.ExpectedKey) > 0 {
172
+			if _, ok := selector[tc.ExpectedKey]; !ok {
173
+				t.Errorf("Test case %q expected key %q was not found",
174
+					tc.Name, tc.ExpectedKey)
175
+			}
176
+		}
177
+	}
178
+}
179
+
180
+// TODO: tests for Create, Generate, GetService, GetNamespace.
0 181
new file mode 100644
... ...
@@ -0,0 +1,44 @@
0
+package ipfailover
1
+
2
+import (
3
+	"github.com/openshift/origin/pkg/cmd/util/variable"
4
+)
5
+
6
const (
	// DefaultName is the default IP Failover resource name.
	DefaultName = "ipfailover"

	// DefaultType is the default IP Failover type.
	DefaultType = "keepalived"

	// DefaultServicePort is the default service port.
	DefaultServicePort = 1985

	// DefaultWatchPort is the default IP Failover watched port number.
	DefaultWatchPort = 80

	// DefaultSelector is the default resource selector.
	DefaultSelector = "ipfailover=<name>"

	// DefaultInterface is the default network interface.
	DefaultInterface = "eth0"
)
25
+
26
+// Options supported by the IP Failover admin command.
27
+type IPFailoverConfigCmdOptions struct {
28
+	Type          string
29
+	ImageTemplate variable.ImageTemplate
30
+	Credentials   string
31
+	ServicePort   int
32
+	Selector      string
33
+	Create        bool
34
+
35
+	//  Failover options.
36
+	VirtualIPs       string
37
+	NetworkInterface string
38
+	WatchPort        int
39
+	Replicas         int
40
+
41
+	//  For the future - currently unused.
42
+	UseUnicast bool
43
+}
0 44
new file mode 100644
... ...
@@ -0,0 +1,88 @@
0
+package ipfailover
1
+
2
+import (
3
+	"fmt"
4
+	"net"
5
+	"strconv"
6
+	"strings"
7
+)
8
+
9
+//  Validate IP address.
10
+func ValidateIPAddress(ip string) error {
11
+	ipaddr := strings.TrimSpace(ip)
12
+	if net.ParseIP(ipaddr) == nil {
13
+		return fmt.Errorf("Invalid IP address: %s", ip)
14
+	}
15
+
16
+	return nil
17
+}
18
+
19
+// Validate an IP address range or single IP address.
20
+func ValidateIPAddressRange(iprange string) error {
21
+	iprange = strings.TrimSpace(iprange)
22
+	if strings.Count(iprange, "-") < 1 {
23
+		return ValidateIPAddress(iprange)
24
+	}
25
+
26
+	// Its an IP range of the form: n.n.n.n-n
27
+	rangeLimits := strings.Split(iprange, "-")
28
+	startIP := rangeLimits[0]
29
+	parts := strings.Split(startIP, ".")
30
+	rangeStart := parts[3]
31
+	rangeEnd := rangeLimits[1]
32
+	if err := ValidateIPAddress(startIP); err != nil {
33
+		return err
34
+	}
35
+
36
+	//  Manufacture ending IP address for the range.
37
+	parts[3] = rangeEnd
38
+	endIP := strings.Join(parts, ".")
39
+	if ValidateIPAddress(endIP) != nil {
40
+		return fmt.Errorf("Invalid IP range end: %s [%s]", rangeEnd, endIP)
41
+	}
42
+
43
+	// Lastly, ensure start <= end
44
+	start, err := strconv.Atoi(rangeStart)
45
+	if err != nil {
46
+		return fmt.Errorf("Invalid IP range start: %s [%s]", rangeStart, startIP)
47
+	}
48
+
49
+	end, err := strconv.Atoi(rangeEnd)
50
+	if err != nil {
51
+		return fmt.Errorf("Invalid IP range end: %s [%s]", rangeEnd, endIP)
52
+	}
53
+
54
+	if start > end {
55
+		return fmt.Errorf("Invalid IP range %s-%s: start=%v > end=%v", startIP, endIP, start, end)
56
+	}
57
+
58
+	return nil
59
+}
60
+
61
+//  Validate virtual IP range/addresses.
62
+func ValidateVirtualIPs(vips string) error {
63
+	virtualIPs := strings.TrimSpace(vips)
64
+	if len(virtualIPs) < 1 {
65
+		return nil
66
+	}
67
+
68
+	for _, ip := range strings.Split(virtualIPs, ",") {
69
+		if err := ValidateIPAddressRange(ip); err != nil {
70
+			return err
71
+		}
72
+	}
73
+
74
+	return nil
75
+}
76
+
77
+// Validate command line operations.
78
+func ValidateCmdOptions(options *IPFailoverConfigCmdOptions, c *Configurator) error {
79
+	service := c.Plugin.GetService()
80
+
81
+	//  If creating service, check service doesn't exist.
82
+	if options.Create && service != nil {
83
+		return fmt.Errorf("IP Failover config %q exists\n", c.Name)
84
+	}
85
+
86
+	return ValidateVirtualIPs(options.VirtualIPs)
87
+}
0 88
new file mode 100644
... ...
@@ -0,0 +1,179 @@
0
+package ipfailover
1
+
2
+import (
3
+	"testing"
4
+
5
+	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
6
+)
7
+
8
+func TestValidateIPAddress(t *testing.T) {
9
+	validIPs := []string{"1.1.1.1", "1.1.1.255", "255.255.255.255",
10
+		"8.8.8.8", "0.1.2.3", "255.254.253.252",
11
+	}
12
+
13
+	for _, ip := range validIPs {
14
+		if err := ValidateIPAddress(ip); err != nil {
15
+			t.Errorf("Test valid ip=%q got error %s expected: no error.", ip, err)
16
+		}
17
+	}
18
+
19
+	invalidIPs := []string{"1.1.1.256", "256.256.256.256",
20
+		"1024.512.256.128", "a.b.c.d", "1.2.3.4.abc", "5.6.7.8def",
21
+		"a.12.13.14", "9999.888.77.6",
22
+	}
23
+
24
+	for _, ip := range invalidIPs {
25
+		if err := ValidateIPAddress(ip); err == nil {
26
+			t.Errorf("Test invalid ip=%q got no error expected: error.", ip)
27
+		}
28
+	}
29
+}
30
+
31
+func TestValidateIPAddressRange(t *testing.T) {
32
+	validRanges := []string{"1.1.1.1-1", "1.1.1.1-7", "1.1.1.250-255",
33
+		"255.255.255.255-255", "8.8.8.4-8", "0.1.2.3-255",
34
+		"255.254.253.252-255",
35
+	}
36
+
37
+	for _, iprange := range validRanges {
38
+		if err := ValidateIPAddressRange(iprange); err != nil {
39
+			t.Errorf("Test valid iprange=%q got error %s expected: no error.", iprange, err)
40
+		}
41
+	}
42
+
43
+	invalidRanges := []string{"1.1.1.256-250", "1.1.1.1-0",
44
+		"1.1.1.5-1", "255.255.255.255-259", "1024.512.256.128-255",
45
+		"a.b.c.d-e", "1.2.3.4.abc-def", "5.6.7.8def-1.2.3.4abc",
46
+		"a.12.13.14-55", "9999.888.77.6-66",
47
+	}
48
+
49
+	for _, iprange := range invalidRanges {
50
+		if err := ValidateIPAddressRange(iprange); err == nil {
51
+			t.Errorf("Test invalid iprange=%q got no error expected: error.", iprange)
52
+		}
53
+	}
54
+}
55
+
56
+func TestValidateVirtualIPs(t *testing.T) {
57
+	validVIPs := []string{"", "1.1.1.1-1,2.2.2.2", "4.4.4.4-8",
58
+		"1.1.1.1-7,2.2.2.2,3.3.3.3-5",
59
+		"1.1.1.250-255,255.255.255.255-255", "4.4.4.4-8,8.8.8.4-8",
60
+		"0.1.2.3-255,4.5.6.7,8.9.10.11,12.13.14.15-20",
61
+		"255.254.253.252-255,1.1.1.1",
62
+	}
63
+
64
+	for _, vips := range validVIPs {
65
+		if err := ValidateVirtualIPs(vips); err != nil {
66
+			t.Errorf("Test valid vips=%q got error %s expected: no error.",
67
+				vips, err)
68
+		}
69
+	}
70
+
71
+	invalidVIPs := []string{"1.1.1.256-250,2.2.2.2", "1.1.1.1,2.2.2.2-0",
72
+		"1.1.1.1-5,2.2.2.2,3.3.3.3-1", "255.255.255.255-259",
73
+		"1.2.3.4-5,1024.512.256.128-255", "1.1.1.1,a.b.c.d-e",
74
+		"a.b.c.d-e,5.4.3.2", "1.2.3.4.abc-def",
75
+		"5.6.7.8def-1.2.3.4abc", "4.1.1.1,a.12.13.14-55",
76
+		"8.8.8.8,9999.888.77.6-66,4.4.4.4-8",
77
+	}
78
+
79
+	for _, vips := range invalidVIPs {
80
+		if err := ValidateVirtualIPs(vips); err == nil {
81
+			t.Errorf("Test invalid vips=%q got no error expected: error.", vips)
82
+		}
83
+	}
84
+}
85
+
86
+func getMockConfigurator(options *IPFailoverConfigCmdOptions, service *kapi.Service) *Configurator {
87
+	p := &MockPlugin{
88
+		Name:      "mock",
89
+		Options:   options,
90
+		Service:   service,
91
+		CallCount: make(map[string]int, 0),
92
+	}
93
+	return NewConfigurator("mock-plugin", p, nil)
94
+}
95
+
96
+func TestValidateCmdOptionsForCreate(t *testing.T) {
97
+	tests := []struct {
98
+		Name             string
99
+		Create           bool
100
+		Service          *kapi.Service
101
+		ErrorExpectation bool
102
+	}{
103
+		{
104
+			Name:             "create-with-no-service",
105
+			Create:           true,
106
+			ErrorExpectation: false,
107
+		},
108
+		{
109
+			Name:             "create-with-service",
110
+			Create:           true,
111
+			Service:          &kapi.Service{},
112
+			ErrorExpectation: true,
113
+		},
114
+		{
115
+			Name:             "no-create-option-and-service",
116
+			ErrorExpectation: false,
117
+		},
118
+		{
119
+			Name:             "no-create-option-with-service",
120
+			Service:          &kapi.Service{},
121
+			ErrorExpectation: false,
122
+		},
123
+	}
124
+
125
+	for _, tc := range tests {
126
+		options := &IPFailoverConfigCmdOptions{Create: tc.Create}
127
+		plugin := &MockPlugin{
128
+			Name:      "mock",
129
+			Options:   options,
130
+			Service:   tc.Service,
131
+			CallCount: make(map[string]int, 0),
132
+		}
133
+		c := NewConfigurator(tc.Name, plugin, nil)
134
+
135
+		err := ValidateCmdOptions(options, c)
136
+		if err != nil && !tc.ErrorExpectation {
137
+			t.Errorf("Test case %q got an error: %v where none was expected.",
138
+				tc.Name, err)
139
+		}
140
+		if nil == err && tc.ErrorExpectation {
141
+			t.Errorf("Test case %q got no error - expected an error.", tc.Name)
142
+		}
143
+	}
144
+}
145
+
146
+func TestValidateCmdOptionsVIPs(t *testing.T) {
147
+	validVIPs := []string{"", "1.1.1.1-1,2.2.2.2", "4.4.4.4-8",
148
+		"1.1.1.1-7,2.2.2.2,3.3.3.3-5",
149
+		"1.1.1.250-255,255.255.255.255-255", "4.4.4.4-8,8.8.8.4-8",
150
+		"0.1.2.3-255,4.5.6.7,8.9.10.11,12.13.14.15-20",
151
+		"255.254.253.252-255,1.1.1.1",
152
+	}
153
+
154
+	for _, vips := range validVIPs {
155
+		options := &IPFailoverConfigCmdOptions{VirtualIPs: vips}
156
+		c := getMockConfigurator(options, nil)
157
+		if err := ValidateCmdOptions(options, c); err != nil {
158
+			t.Errorf("Test command options valid vips=%q got error %s expected: no error.",
159
+				vips, err)
160
+		}
161
+	}
162
+
163
+	invalidVIPs := []string{"1.1.1.256-250,2.2.2.2", "1.1.1.1,2.2.2.2-0",
164
+		"1.1.1.1-5,2.2.2.2,3.3.3.3-1", "255.255.255.255-259",
165
+		"1.2.3.4-5,1024.512.256.128-255", "1.1.1.1,a.b.c.d-e",
166
+		"a.b.c.d-e,5.4.3.2", "1.2.3.4.abc-def",
167
+		"5.6.7.8def-1.2.3.4abc", "4.1.1.1,a.12.13.14-55",
168
+		"8.8.8.8,9999.888.77.6-66,4.4.4.4-8",
169
+	}
170
+
171
+	for _, vips := range invalidVIPs {
172
+		options := &IPFailoverConfigCmdOptions{VirtualIPs: vips}
173
+		c := getMockConfigurator(options, nil)
174
+		if err := ValidateCmdOptions(options, c); err == nil {
175
+			t.Errorf("Test command options invalid vips=%q got no error expected: error.", vips)
176
+		}
177
+	}
178
+}
0 179
deleted file mode 100644
... ...
@@ -1,172 +0,0 @@
1
-package keepalived
2
-
3
-import (
4
-	"fmt"
5
-	"strconv"
6
-
7
-	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
8
-	kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
9
-	kclientcmd "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
10
-	"github.com/golang/glog"
11
-
12
-	dapi "github.com/openshift/origin/pkg/deploy/api"
13
-	"github.com/openshift/origin/pkg/generate/app"
14
-	"github.com/openshift/origin/pkg/haconfig"
15
-)
16
-
17
-const defaultInterface = "eth0"
18
-const libModulesVolumeName = "lib-modules"
19
-const libModulesPath = "/lib/modules"
20
-
21
-//  Get kube client configuration from a file containing credentials for
22
-//  connecting to the master.
23
-func getClientConfig(path string) *kclient.Config {
24
-	if 0 == len(path) {
25
-		glog.Fatalf("You must specify a .kubeconfig file path containing credentials for connecting to the master with --credentials")
26
-	}
27
-
28
-	rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path, Precedence: []string{}}
29
-	credentials, err := rules.Load()
30
-	if err != nil {
31
-		glog.Fatalf("Could not load credentials from %q: %v", path, err)
32
-	}
33
-
34
-	config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
35
-	if err != nil {
36
-		glog.Fatalf("Credentials %q error: %v", path, err)
37
-	}
38
-
39
-	if err := kclient.LoadTLSFiles(config); err != nil {
40
-		glog.Fatalf("Unable to load certificate info using credentials from %q: %v", path, err)
41
-	}
42
-
43
-	return config
44
-}
45
-
46
-//  Generate the HA failover monitor (keepalived) container environment entries.
47
-func generateEnvEntries(name string, options *haconfig.HAConfigCmdOptions, kconfig *kclient.Config) app.Environment {
48
-	watchPort := strconv.Itoa(options.WatchPort)
49
-	replicas := strconv.Itoa(options.Replicas)
50
-	insecureStr := strconv.FormatBool(kconfig.Insecure)
51
-	unicastStr := strconv.FormatBool(options.UseUnicast)
52
-
53
-	return app.Environment{
54
-		"OPENSHIFT_MASTER":    kconfig.Host,
55
-		"OPENSHIFT_CA_DATA":   string(kconfig.CAData),
56
-		"OPENSHIFT_KEY_DATA":  string(kconfig.KeyData),
57
-		"OPENSHIFT_CERT_DATA": string(kconfig.CertData),
58
-		"OPENSHIFT_INSECURE":  insecureStr,
59
-
60
-		"OPENSHIFT_HA_CONFIG_NAME":       name,
61
-		"OPENSHIFT_HA_VIRTUAL_IPS":       options.VirtualIPs,
62
-		"OPENSHIFT_HA_NETWORK_INTERFACE": options.NetworkInterface,
63
-		"OPENSHIFT_HA_MONITOR_PORT":      watchPort,
64
-		"OPENSHIFT_HA_REPLICA_COUNT":     replicas,
65
-		"OPENSHIFT_HA_USE_UNICAST":       unicastStr,
66
-		// "OPENSHIFT_HA_UNICAST_PEERS":     "127.0.0.1",
67
-	}
68
-}
69
-
70
-//  Generate the HA failover monitor (keepalived) container configuration.
71
-func generateFailoverMonitorContainerConfig(name string, options *haconfig.HAConfigCmdOptions, env app.Environment) *kapi.Container {
72
-	containerName := fmt.Sprintf("%s-%s", name, options.Type)
73
-
74
-	imageName := fmt.Sprintf("%s-%s", options.Type, haconfig.DefaultName)
75
-	image := options.ImageTemplate.ExpandOrDie(imageName)
76
-
77
-	ports := make([]kapi.ContainerPort, 1)
78
-	ports[0] = kapi.ContainerPort{
79
-		ContainerPort: options.ServicePort,
80
-		HostPort:      options.ServicePort,
81
-	}
82
-
83
-	mounts := make([]kapi.VolumeMount, 1)
84
-	mounts[0] = kapi.VolumeMount{
85
-		Name:      libModulesVolumeName,
86
-		ReadOnly:  true,
87
-		MountPath: libModulesPath,
88
-	}
89
-
90
-	return &kapi.Container{
91
-		Name:            containerName,
92
-		Image:           image,
93
-		Ports:           ports,
94
-		Privileged:      true,
95
-		ImagePullPolicy: kapi.PullIfNotPresent,
96
-		VolumeMounts:    mounts,
97
-		Env:             env.List(),
98
-	}
99
-}
100
-
101
-//  Generate the HA failover monitor (keepalived) container configuration.
102
-func generateContainerConfig(name string, options *haconfig.HAConfigCmdOptions) []kapi.Container {
103
-	containers := make([]kapi.Container, 0)
104
-
105
-	if len(options.VirtualIPs) < 1 {
106
-		return containers
107
-	}
108
-
109
-	config := getClientConfig(options.Credentials)
110
-	env := generateEnvEntries(name, options, config)
111
-
112
-	c := generateFailoverMonitorContainerConfig(name, options, env)
113
-	if c != nil {
114
-		containers = append(containers, *c)
115
-	}
116
-
117
-	return containers
118
-}
119
-
120
-//  Generate the HA failover monitor (keepalived) container volume config.
121
-func generateVolumeConfig() []kapi.Volume {
122
-	hostPath := &kapi.HostPathVolumeSource{Path: libModulesPath}
123
-	src := kapi.VolumeSource{HostPath: hostPath}
124
-
125
-	vol := kapi.Volume{Name: libModulesVolumeName, VolumeSource: src}
126
-	return []kapi.Volume{vol}
127
-}
128
-
129
-//  Generate the HA deployment template.
130
-func generateDeploymentTemplate(name string, options *haconfig.HAConfigCmdOptions, selector map[string]string) dapi.DeploymentTemplate {
131
-	podTemplate := &kapi.PodTemplateSpec{
132
-		ObjectMeta: kapi.ObjectMeta{Labels: selector},
133
-		Spec: kapi.PodSpec{
134
-			HostNetwork: true,
135
-			Containers:  generateContainerConfig(name, options),
136
-			Volumes:     generateVolumeConfig(),
137
-		},
138
-	}
139
-
140
-	return dapi.DeploymentTemplate{
141
-		Strategy: dapi.DeploymentStrategy{
142
-			Type: dapi.DeploymentStrategyTypeRecreate,
143
-		},
144
-		ControllerTemplate: kapi.ReplicationControllerSpec{
145
-			// TODO: v0.1 requires a manual resize to the
146
-			//       replicas to match current cluster state.
147
-			//       v0.1+ could do this with either a watcher
148
-			//       that updates the replica count or better
149
-			//       yet, some way to kubernetes to say run
150
-			//       pods on each and every node that matches
151
-			//       the selector.
152
-			Replicas: options.Replicas,
153
-			Selector: selector,
154
-			Template: podTemplate,
155
-		},
156
-	}
157
-}
158
-
159
-//  Generate the HA deployment configuration.
160
-func GenerateDeploymentConfig(name string, options *haconfig.HAConfigCmdOptions, selector map[string]string) *dapi.DeploymentConfig {
161
-
162
-	return &dapi.DeploymentConfig{
163
-		ObjectMeta: kapi.ObjectMeta{
164
-			Name:   name,
165
-			Labels: selector,
166
-		},
167
-		Triggers: []dapi.DeploymentTriggerPolicy{
168
-			{Type: dapi.DeploymentTriggerOnConfigChange},
169
-		},
170
-		Template: generateDeploymentTemplate(name, options, selector),
171
-	}
172
-}
173 1
deleted file mode 100644
... ...
@@ -1,78 +0,0 @@
1
-package keepalived
2
-
3
-import (
4
-	"fmt"
5
-	"strings"
6
-
7
-	kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
8
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
9
-	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
10
-)
11
-
12
-type ReporterFunction func(string, error) 
13
-
14
-
15
-//  Delete matching replication controllers.
16
-func deleteReplicationControllers(name string, namespace string, selector map[string]string, kClient kclient.Interface, after ReporterFunction) {
17
-	labels := labels.SelectorFromSet(selector)
18
-	rcInterface := kClient.ReplicationControllers(namespace)
19
-
20
-	rcList, err := rcInterface.List(labels)
21
-	if err != nil {
22
-		after(name, err)
23
-		return
24
-	}
25
-
26
-	for _, rc := range rcList.Items {
27
-		if false == strings.HasPrefix(rc.Name, name) {
28
-			continue
29
-		}
30
-
31
-		rcName := fmt.Sprintf("replicationController/%v", rc.Name)
32
-		err := rcInterface.Delete(rc.Name)
33
-		after(rcName, err)
34
-	}
35
-}
36
-
37
-//  Delete matching Pods for the named deployment.
38
-func deletePods(name string, namespace string, selector map[string]string, kClient kclient.Interface, after ReporterFunction) {
39
-	labels := labels.SelectorFromSet(labels.Set{"deploymentconfig": name})
40
-	podInterface := kClient.Pods(namespace)
41
-	podList, err := podInterface.List(labels)
42
-	if err != nil {
43
-		after(fmt.Sprintf("pods/%v", name), err)
44
-		return
45
-	}
46
-
47
-	for _, pod := range podList.Items {
48
-		podName := fmt.Sprintf("pod/%v", pod.Name)
49
-		err := podInterface.Delete(pod.Name)
50
-		after(podName, err)
51
-	}
52
-}
53
-
54
-//  Cleanup all the deployment artificats.
55
-func CleanupDeployment(name string, ns string, selector map[string]string, f *clientcmd.Factory, after ReporterFunction) {
56
-	//  First up get the OS and kube clients.
57
-	osClient, kClient, err := f.Clients()
58
-	if err != nil {
59
-		after(name, fmt.Errorf("Error getting client: %v", err))
60
-		return
61
-	}
62
-
63
-	//  Delete the matching replication controllers.
64
-	deleteReplicationControllers(name, ns, selector, kClient, after) 
65
-	//  Delete the matching pods.
66
-	deletePods(name, ns, selector, kClient, after)
67
-
68
-	//  Delete the matching service.
69
-	serviceName := fmt.Sprintf("service/%v", name)
70
-	serviceInterface := kClient.Services(ns)
71
-        err = serviceInterface.Delete(name)
72
-        after(serviceName, err)
73
-
74
-	//  And finally delete the deployment config.
75
-	deploymentConfigName := fmt.Sprintf("deploymentConfig/%v", name)
76
-	err = osClient.DeploymentConfigs(ns).Delete(name)
77
-	after(deploymentConfigName, err)
78
-}
79 1
deleted file mode 100644
... ...
@@ -1,147 +0,0 @@
1
-package keepalived
2
-
3
-import (
4
-	"fmt"
5
-	"io"
6
-	"os"
7
-	"strings"
8
-
9
-	kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
10
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
11
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
12
-	"github.com/golang/glog"
13
-
14
-	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
15
-	configcmd "github.com/openshift/origin/pkg/config/cmd"
16
-	"github.com/openshift/origin/pkg/generate/app"
17
-	"github.com/openshift/origin/pkg/haconfig"
18
-)
19
-
20
-//  HA configurator plugin for keepalived sidecar.
21
-type KeepAlivedPlugin struct {
22
-	Name    string
23
-	Factory *clientcmd.Factory
24
-	Options *haconfig.HAConfigCmdOptions
25
-}
26
-
27
-//  Create a new HAConfigurator (keepalived) plugin instance.
28
-func NewHAConfiguratorPlugin(name string, f *clientcmd.Factory, options *haconfig.HAConfigCmdOptions) (*KeepAlivedPlugin, error) {
29
-	glog.V(4).Infof("Creating new KeepAlived plugin: %q", name)
30
-
31
-	p := &KeepAlivedPlugin{
32
-		Name:    name,
33
-		Factory: f,
34
-		Options: options,
35
-	}
36
-
37
-	return p, nil
38
-}
39
-
40
-//  Get the port to monitor for the HA configuration.
41
-func (p *KeepAlivedPlugin) GetWatchPort() int {
42
-	port := p.Options.WatchPort
43
-	if port < 1 {
44
-		port = haconfig.DefaultWatchPort
45
-	}
46
-
47
-	glog.V(4).Infof("KeepAlived HA config: %q - WatchPort: %+v", p.Name, port)
48
-
49
-	return port
50
-}
51
-
52
-//  Get the selector associated with this HA configurator plugin.
53
-func (p *KeepAlivedPlugin) GetSelector() map[string]string {
54
-	if p.Options.Selector == haconfig.DefaultSelector {
55
-		return map[string]string{haconfig.DefaultName: p.Name}
56
-	}
57
-
58
-	labels, remove, err := app.LabelsFromSpec(strings.Split(p.Options.Selector, ","))
59
-	if err != nil {
60
-		glog.Fatal(err)
61
-	}
62
-
63
-	if len(remove) > 0 {
64
-		glog.Fatalf("You may not pass negative labels in %q", p.Options.Selector)
65
-	}
66
-
67
-	glog.V(4).Infof("KeepAlived HA config: %q - selector: %+v", p.Name, labels)
68
-
69
-	return labels
70
-}
71
-
72
-//  Get the namespace associated with this HA configurator plugin.
73
-func (p *KeepAlivedPlugin) GetNamespace() string {
74
-	namespace, err := p.Factory.OpenShiftClientConfig.Namespace()
75
-	if err != nil {
76
-		glog.Fatalf("Error get OS client config: %v", err)
77
-	}
78
-
79
-	glog.V(4).Infof("KeepAlived HA config: %q - namespace: %q", p.Name, namespace)
80
-
81
-	return namespace
82
-}
83
-
84
-//  Get the service associated with this HA configurator plugin.
85
-func (p *KeepAlivedPlugin) GetService() *kapi.Service {
86
-	_, kClient, err := p.Factory.Clients()
87
-	if err != nil {
88
-		glog.Fatalf("Error getting client: %v", err)
89
-	}
90
-
91
-	namespace := p.GetNamespace()
92
-	service, err := kClient.Services(namespace).Get(p.Name)
93
-	if err != nil {
94
-		if errors.IsNotFound(err) {
95
-			glog.V(4).Infof("KeepAlived HA config: %s - no service found", p.Name)
96
-			return nil
97
-		}
98
-		glog.Fatalf("Error getting KeepAlived HA config service %q: %v", p.Name, err)
99
-	}
100
-
101
-	glog.V(4).Infof("KeepAlived HA config: %q service: %+v", p.Name, service)
102
-
103
-	return service
104
-}
105
-
106
-//  Generate the config and services for this HA configuration plugin.
107
-func (p *KeepAlivedPlugin) Generate() *kapi.List {
108
-	dc := GenerateDeploymentConfig(p.Name, p.Options, p.GetSelector())
109
-	objects := []runtime.Object{dc}
110
-
111
-	services := &kapi.List{Items: app.AddServices(objects)}
112
-	glog.V(4).Infof("KeepAlived HA config: %q - generated services: %+v", p.Name, services)
113
-
114
-	return services
115
-}
116
-
117
-//  Create the config and services associated with this HA configuration.
118
-func (p *KeepAlivedPlugin) Create(out io.Writer) {
119
-	namespace := p.GetNamespace()
120
-
121
-	bulk := configcmd.Bulk{
122
-		Factory: p.Factory.Factory,
123
-		After:   configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
124
-	}
125
-
126
-	if errs := bulk.Create(p.Generate(), namespace); len(errs) != 0 {
127
-		glog.Fatalf("Error creating config: %+v", errs)
128
-	}
129
-
130
-	glog.V(4).Infof("Created KeepAlived HA config: %q", p.Name)
131
-}
132
-
133
-//  Delete the config and services associated with this HA configuration.
134
-func (p *KeepAlivedPlugin) Delete(out io.Writer) {
135
-	namespace := p.GetNamespace()
136
-	selector := p.GetSelector()
137
-
138
-	CleanupDeployment(p.Name, namespace, selector, p.Factory, func(name string, err error) {
139
-		if nil == err {
140
-			fmt.Fprintf(out, "%s\n", name)
141
-		} else {
142
-			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
143
-		}
144
-	})
145
-
146
-	glog.V(4).Infof("Deleted KeepAlived HA config: %q", p.Name)
147
-}
148 1
deleted file mode 100644
... ...
@@ -1,181 +0,0 @@
1
-package keepalived
2
-
3
-import (
4
-	"testing"
5
-
6
-	haconfig "github.com/openshift/origin/pkg/haconfig"
7
-)
8
-
9
-func TestNewHAConfiguratorPlugin(t *testing.T) {
10
-	tests := []struct {
11
-		Name             string
12
-		Options          *haconfig.HAConfigCmdOptions
13
-		ErrorExpectation bool
14
-	}{
15
-		{
16
-			Name:             "selector",
17
-			Options:          &haconfig.HAConfigCmdOptions{Selector: "haconfig=test-nodes"},
18
-			ErrorExpectation: false,
19
-		},
20
-		{
21
-			Name:             "empty-selector",
22
-			Options:          &haconfig.HAConfigCmdOptions{Selector: ""},
23
-			ErrorExpectation: false,
24
-		},
25
-		{
26
-			Name: "vips",
27
-			Options: &haconfig.HAConfigCmdOptions{
28
-				VirtualIPs: "1.2.3.4,5.6.7.8-10,11.0.0.12",
29
-			},
30
-			ErrorExpectation: false,
31
-		},
32
-		{
33
-			Name:             "empty-vips",
34
-			Options:          &haconfig.HAConfigCmdOptions{VirtualIPs: ""},
35
-			ErrorExpectation: false,
36
-		},
37
-		{
38
-			Name:             "interface",
39
-			Options:          &haconfig.HAConfigCmdOptions{NetworkInterface: "eth0"},
40
-			ErrorExpectation: false,
41
-		},
42
-		{
43
-			Name:             "empty-interface",
44
-			Options:          &haconfig.HAConfigCmdOptions{NetworkInterface: ""},
45
-			ErrorExpectation: false,
46
-		},
47
-		{
48
-			Name:             "watch-port",
49
-			Options:          &haconfig.HAConfigCmdOptions{WatchPort: 999},
50
-			ErrorExpectation: false,
51
-		},
52
-		{
53
-			Name:             "replicas",
54
-			Options:          &haconfig.HAConfigCmdOptions{Replicas: 2},
55
-			ErrorExpectation: false,
56
-		},
57
-		{
58
-			Name: "all-options",
59
-			Options: &haconfig.HAConfigCmdOptions{
60
-				Selector:         "hac=v1",
61
-				VirtualIPs:       "9.8.7.6,5.4.3.2-5",
62
-				NetworkInterface: "ha0",
63
-				WatchPort:        12345,
64
-				Replicas:         1,
65
-			},
66
-			ErrorExpectation: false,
67
-		},
68
-		{
69
-			Name:             "no-options",
70
-			Options:          &haconfig.HAConfigCmdOptions{},
71
-			ErrorExpectation: false,
72
-		},
73
-		{
74
-			Name:             "", // empty
75
-			Options:          &haconfig.HAConfigCmdOptions{},
76
-			ErrorExpectation: false,
77
-		},
78
-	}
79
-
80
-	for _, tc := range tests {
81
-		p, err := NewHAConfiguratorPlugin(tc.Name, nil, tc.Options)
82
-		if err != nil && !tc.ErrorExpectation {
83
-			t.Errorf("Test case for %s got an error where none was expected", tc.Name)
84
-		}
85
-
86
-		if nil == err && nil == p {
87
-			t.Errorf("Test case for %s got no error but plugin was not found", tc.Name)
88
-		}
89
-	}
90
-}
91
-
92
-func TestPluginGetWatchPort(t *testing.T) {
93
-	tests := []struct {
94
-		Name      string
95
-		WatchPort int
96
-		Expected  int
97
-	}{
98
-		{
99
-			Name:      "router",
100
-			WatchPort: 80,
101
-			Expected:  80,
102
-		},
103
-		{
104
-			Name:      "service1",
105
-			WatchPort: 9999,
106
-			Expected:  9999,
107
-		},
108
-		{
109
-			Name:      "invalid-port",
110
-			WatchPort: -12345,
111
-			Expected:  80,
112
-		},
113
-		{
114
-			Name:      "invalid-port-2",
115
-			WatchPort: -1,
116
-			Expected:  80,
117
-		},
118
-		{
119
-			Name:      "zero-port",
120
-			WatchPort: 0,
121
-			Expected:  80,
122
-		},
123
-	}
124
-
125
-	for _, tc := range tests {
126
-		options := &haconfig.HAConfigCmdOptions{WatchPort: tc.WatchPort}
127
-		p, err := NewHAConfiguratorPlugin(tc.Name, nil, options)
128
-		if err != nil {
129
-			t.Errorf("Error creating HAConfigurator plugin - test=%q, error: %v", tc.Name, err)
130
-		}
131
-
132
-		port := p.GetWatchPort()
133
-		if tc.Expected != port {
134
-			t.Errorf("Test case %q expected watch port = %d, got %d",
135
-				tc.Name, tc.Expected, port)
136
-		}
137
-
138
-	}
139
-}
140
-
141
-func TestPluginGetSelector(t *testing.T) {
142
-	tests := []struct {
143
-		Name        string
144
-		Selector    string
145
-		ExpectedKey string
146
-	}{
147
-		{
148
-			Name:        "router",
149
-			Selector:    "hac=router",
150
-			ExpectedKey: "hac",
151
-		},
152
-		{
153
-			Name:        "service1",
154
-			Selector:    "service1=us-west",
155
-			ExpectedKey: "service1",
156
-		},
157
-		{
158
-			Name:        "default-selector",
159
-			Selector:    haconfig.DefaultSelector,
160
-			ExpectedKey: haconfig.DefaultName,
161
-		},
162
-	}
163
-
164
-	for _, tc := range tests {
165
-		options := &haconfig.HAConfigCmdOptions{Selector: tc.Selector}
166
-		p, err := NewHAConfiguratorPlugin(tc.Name, nil, options)
167
-		if err != nil {
168
-			t.Errorf("Error creating HAConfigurator plugin - test=%q, error: %v", tc.Name, err)
169
-		}
170
-
171
-		selector := p.GetSelector()
172
-		if len(tc.ExpectedKey) > 0 {
173
-			if _, ok := selector[tc.ExpectedKey]; !ok {
174
-				t.Errorf("Test case %q expected key %q was not found",
175
-					tc.Name, tc.ExpectedKey)
176
-			}
177
-		}
178
-	}
179
-}
180
-
181
-// TODO: tests for Delete, Create, Generate, GetService, GetNamespace.