-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathinit.sh
385 lines (316 loc) · 11.2 KB
/
init.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
#!/bin/bash
set -e
#
# Bootstrapping a new WebPlatform salt master (step 1)
#
# *Cloning Salt configurations*
#
# This script is meant to be run only once per salt master
# so that every code dependencies are cloned and installed
# in a constant fashion.
#
# A salt master should have NO hardcoded files and configuration
# but simply be booted bootstrapped by the three following components.
#
# 1. Cloning Salt configurations (so we can salt the salt master)
# 2. The packages we share across the infrastructure
# 3. Cloning every webplatform.org software dependencies.
#
# =========================================================================
#
# Note that you can run this bootstrapper on ANY vanilla Ubuntu 14.04 VM and
# it should work just fine. This script takes into account that you might
# not have a master to start from. If that’s the case, make sure the
# new VM is called "salt.staging.wpdn" (or "salt.production.wpdn") in the
# `/etc/hosts` file and jump to step #5
#
# =========================================================================
#
# STEPS:
#
# From an existing salt-master, do the following;
#
# 1. Get to know the next IP address nova will give:
#
# This is useful so we can tell the new salt master to use the upcoming new
# DNS. Normally, nova rotates +1 private IP addresses. Note that you can
# get to know the IP address, and boot an empty VM from OpenStack Dashboard
# but if you are like the author, better having only shell stuff!
#
# nova list
#
#
# 2. Edit manually `/srv/salt/salt/master.sls`
#
# vi /srv/salt/salt/master.sls
#
# Get the `salt_master_ip:` line. Edit with the IP address you expect nova will
# give (e.g. 10.10.10.129). Run highstate on master
#
# salt salt state.highstate
#
# It should have updated `/srv/userdata.txt` so that the next VM will know at
# boot time that it should listen to itself as a new salt master. Note that
# userdata is part of OpenStack and that this script is run at every reboot,
# stating that on subsequent boots to listen to itself instead of another
# IP, that most likely won’t exist anymore, prevents potential
# confusion and misdirected network traffic.
#
#
# 3. Using python-novaclient, launch new future salt-master:
#
# We will have two VMs with name `salt` the new one will not have public IP
# address yet. The last step is to ask OpenStack to change the public IP
# address to the new salt master.
#
# Start the new VM:
#
# nova boot --image Ubuntu-14.04-Trusty \
# --user-data /srv/ops/salt-master/salt-userdata.yml \
# --key_name salt-renoirb \
# --flavor lightspeed \
# --security-groups default,all,dns,log-dest,mw-eventlog,salt-master \
# salt
#
# NOTE: Adjust `key_name` with your secret key that you given in OpenStack dashboard
# you should have on the salt master. That one is only useful among VMs you
# control FROM the salt master. It should be available in
# `/srv/private/pillar/sshkeys/init.sls` as its kept in source control so it
# can replicate the same setup everywhere.
#
#
# 4. Send this bootstrapper file, get new private IP first
#
# Hopefully the new VM will have the IP address we expected at step 1
#
# If it works, after the following commands, we will resume the work on the new VM.
#
# Double check and continue like this, assuming the new VM private IP *is* ending by `129`:
#
# From current salt master:
#
# nova list
# scp /srv/ops/salt-master/init.sh dhc-user@10.10.10.129:~
#
# Remember ...129 (in this example) is *also* called salt. Current salt master has public
# key to be accepted on it. Once the file is moved, we can SSH to the new VM. Note that
# we have to SSH from the current salt master as its the one that already has your current
# private/public key for dhc-user already.
#
#
# 5. Launch this bootstrapper
#
# You must be on the new VM at this step. Copy to the new VM this file and you will be
# just fine.
#
# This bootstrapper will initialize everything we need:
# - Install that node as a salt master
# - Have all states ready to be called `state.highstate` and effectively make it a salt master
# - Have all states so it can *also* pull all /srv/code repositories so it can sync code around
# - Have all scripts so it can also boot VMs
#
# Run this script
#
# ssh dhc-user@10.10.10.129
# sudo -s
# RUNAS=dhc-user GROUP=dhc-user bash init.sh
#
# And go on with the show...
#
# Greet the operator with a banner. The quoted "FOO" delimiter makes the
# here-doc literal: no variable or backtick expansion inside the ASCII art.
clear
cat << "FOO"
 _ _ _ ______ _ _ __
| | | | | | | ___ \ | | | / _|
| | | | ___| |__ | |_/ / | __ _| |_| |_ ___ _ __ _ __ ___
| |/\| |/ _ \ '_ \| __/| |/ _` | __| _/ _ \| '__| '_ ` _ \
\ /\ / __/ |_) | | | | (_| | |_| || (_) | | | | | | | |
\/ \/ \___|_.__/\_| |_|\__,_|\__|_| \___/|_| |_| |_| |_|
WebPlatform Infrastructure
We will make this VM as a new Salt master
FOO
# First run only: an existing /srv/ops/salt-master checkout marks an
# already-provisioned box, so skip the package installation when present.
if ! test -d /srv/ops/salt-master; then
  apt-get update
  apt-get -y install python-git salt-master salt-minion python-dulwich
  apt-get -y upgrade
  apt-get -y autoremove
fi
# Detect a Vagrant workbench: when this script is run via `sudo` inside a
# Vagrant VM, SUDO_USER is "vagrant".
# Fix: the original `[ $SUDO_USER == "vagrant" ]` left the variable unquoted,
# which makes `[` fail with "unary operator expected" whenever SUDO_USER is
# unset (e.g. a direct root login). Quote it and default to empty.
if [ "${SUDO_USER:-}" = "vagrant" ]; then
  declare -r IS_WORKBENCH=1
else
  declare -r IS_WORKBENCH=0
fi
echo " "
echo " "
echo "Bootstrapping a new Salt master"
# `command -v` is the portable replacement for the deprecated `which`;
# `$( )` replaces backticks. `|| true` keeps the lookup from mattering to
# `set -e` (declare masks the substitution status anyway, as it originally did).
declare -r SALT_BIN="$(command -v salt-call || true)"
declare -r DATE="$(date)"
# The caller must say which unprivileged user/group owns the cloned repos,
# e.g.:  RUNAS=dhc-user GROUP=dhc-user bash init.sh
if [ -z "${RUNAS}" ]; then
  echo "You must declare which user your VM initially has e.g. RUNAS=vagrant GROUP=vagrant bash init.sh"
  exit 1
fi
if [ -z "${GROUP}" ]; then
  echo "You must declare which group your VM initially has. e.g. RUNAS=vagrant GROUP=vagrant bash init.sh"
  exit 1
fi
# Without salt-call we cannot clone repos nor sync states; bail out early.
if [ -z "${SALT_BIN}" ]; then
  echo "Saltstack doesn’t seem to be installed on that machine"
  exit 1
fi
# Explain the temporary-keypair requirement, then block until the operator
# confirms the public key has been registered in Gitolite. The quoted
# delimiter keeps the text literal (it contains no expansions anyway).
cat << '_EOF_'
This script is about cloning git repos. Some of those repos have sensitive
data.
In order to import that sensitive data we need a temporary private-public
keypair. With it, we’ll be able to fetch private data along with public.
You can create your own keypair and add them to this script yourself.
You got to make sure you remove that key from Gitolite afterwards
unless you never publish them publicly.
To generate a temporary key, run the following (no need for passphrase):
ssh-keygen -f foo
cat foo.pub
cat foo
If we already have a keypair, you should see below what you need to copy
in Gitolite on source.webplatform.org
OTHERWISE THE SCRIPT WILL BREAK :(
_EOF_
until false; do
  read -p "Did you add the public key in your SSH keys? (y/n): " yn
  case "$yn" in
    [Yy]*) break ;;
    [Nn]*) exit ;;
    *) echo "Please answer yes (y) or no (n)." ;;
  esac
done
# NOTE(review): $id_rsa_pub is never assigned anywhere in this script — it
# appears to be expected from the operator's environment (the temporary
# public key described above). TODO confirm; as written, an unset value
# produces an empty id_rsa.pub. Quoting preserves the key text verbatim
# (the original unquoted `echo $id_rsa_pub` word-split and glob-expanded it),
# and `mkdir -p` guards against a missing ~/.ssh directory.
mkdir -p "/home/${RUNAS}/.ssh"
printf '%s\n' "${id_rsa_pub:-}" > "/home/${RUNAS}/.ssh/id_rsa.pub"
# First boot only: record the deployment level ("staging", "production" or
# "workbench") as a salt grain so states can target this cluster, and make
# the hosts file resolve "salt" to the level-qualified name.
if [ ! -f "/etc/salt/grains" ]; then
clear
echo ""
echo "What is the deployment level of this cluster? Will it be used as the"
echo "new production, staging, or a development workspace?"
echo ""
# Loop until the operator types one of the three accepted (lowercase) levels.
while true; do
read -p "What is this salt-master targeted level? [staging,production,workbench]: " level
case $level in
staging ) break;;
production ) break;;
workbench ) break;;
* ) echo "Only lowercase is accepted; one of [staging,production,workbench].";;
esac
done
## We are hardcoding the name "salt" here because we EXPLICITLY want that VM to be
## called that name.
echo " * Making sure the hosts file has 127.0.1.1 to declare our environment level"
# Rewrite the 127.0.1.1 entry for the current hostname to the level-qualified
# name (e.g. salt.staging.wpdn); if no "salt" entry exists at all, append one.
sed -i "s/^127.0.1.1 $(hostname)/127.0.1.1 salt.${level}.wpdn salt/g" /etc/hosts
grep -q -e "salt" /etc/hosts || printf "127.0.1.1 salt.${level}.wpdn salt" >> /etc/hosts
# The "<<-" here-doc strips leading tabs; ${DATE} and ${level} expand before
# the result is written to /etc/salt/grains.
(cat <<- _EOF_
# This salt master has been created on ${DATE}
# via webplatform/ops salt-master/init.sh script
level: ${level}
_EOF_
) > /etc/salt/grains
echo " * Added new grain; deployment level: ${level}"
else
echo " * Grains already exist on this (future) salt master, did not overwrite"
fi
echo " * Making sure the name of the master is salt"
# Force the local minion id to "salt" and raise log verbosity while
# bootstrapping; dropped into minion.d so it overrides the stock config.
(cat <<- _EOF_
id: salt
log_level: debug
log_level_logfile: debug
_EOF_
) > /etc/salt/minion.d/overrides.conf
cd /srv
# Map of destination directory (under /srv) -> git remote, plus per-repo
# clone options. "runner" goes over SSH and needs the temporary key above.
declare -A repos
declare -A options
repos["salt"]="https://github.com/webplatform/salt-states.git"
repos["private"]="https://gitlab.w3.org/webplatform/salt-pillar-private.git"
repos["pillar"]="https://github.com/webplatform/salt-pillar.git"
repos["runner"]="git@source.webplatform.org:runners.git"
repos["ops"]="https://github.com/webplatform/ops.git"
options["salt"]="--branch 201506-refactor --quiet"
options["private"]="--branch master --quiet"
options["pillar"]="--branch master --quiet"
options["runner"]="--quiet"
options["ops"]="--quiet"
echo "We will be cloning our new Salt master config repos:"
# Fixes over the original: every expansion is quoted (the unquoted
# ${!repos[@]}, /srv/${key} and $RUNAS:$GROUP were subject to word-splitting
# and globbing — SC2086/SC2068), and the pointless subshell wrapping the
# salt-call invocation is gone.
for key in "${!repos[@]}"; do
  if [ ! -d "/srv/${key}/.git" ]; then
    echo " * Cloning into /srv/${key}"
    mkdir -p "/srv/${key}"
    chown "${RUNAS}:${GROUP}" "/srv/${key}"
    salt-call --local --log-level=quiet git.clone "/srv/${key}" "${repos[${key}]}" opts="${options[${key}]}" user="${RUNAS}" identity="/home/${RUNAS}/.ssh/id_rsa"
  else
    echo " * Repo in /srv/${key} already cloned. Did nothing."
  fi
done
echo ''
echo "Done cloning config repos"
echo ""
echo "Configuring salt master for initial highstate"
# First boot only: write the master's file/pillar roots and the gitfs remotes
# (formula repos fetched through the dulwich provider). state.highstate is
# expected to overwrite this hand-written file later.
if [ ! -f "/etc/salt/master.d/roots.conf" ]; then
(cat <<- _EOF_
# Set in place by webplatform/ops salt-master/init.sh script, this should be overwritten once you
# make state.highstate.
file_roots:
base:
- /srv/salt
pillar_roots:
base:
- /srv/pillar
- /srv/private/pillar
log_level: debug
log_level_logfile: debug
gitfs_provider: dulwich
fileserver_backend:
- roots
- git
gitfs_remotes:
- https://github.com/webplatform/salt-basesystem.git
- https://github.com/webplatform/redis-formula.git
- https://github.com/webplatform/saltstack-sysctl-formula.git
- https://github.com/webplatform/postgres-formula.git
- https://github.com/webplatform/logrotate-formula.git
- https://github.com/saltstack-formulas/nfs-formula.git
- https://github.com/saltstack-formulas/logstash-formula.git
- https://github.com/webplatform/docker-formula.git
_EOF_
) > /etc/salt/master.d/roots.conf
echo " * Added roots definitions"
echo " * Syncing grains, pillars, states, returners, etc."
# Pull custom modules/grains/states down from the freshly configured roots.
salt-call --local --log-level=quiet saltutil.sync_all
else
echo " * Salt-master roots was already present"
fi
echo "Restarting salt (almost there!)"
# Restart both daemons so the minion picks up its new id ("salt") and the
# master its new roots configuration.
/usr/sbin/service salt-minion restart
/usr/sbin/service salt-master restart
echo "Going to sleep 10 seconds so that salt-master will see its own minion"
sleep 10
echo "Autoaccepting salt"
# -y: skip the confirmation prompt; -a salt: accept the key of minion "salt".
salt-key -y -a salt
echo "Removing temporary SSH key"
# Fix: the original bare `rm /home/$RUNAS/.ssh/id_rsa{,.pub}` aborts the whole
# script under `set -e` if either file is missing; -f makes cleanup
# best-effort, and quoting protects the path expansion.
rm -f "/home/${RUNAS}/.ssh/id_rsa" "/home/${RUNAS}/.ssh/id_rsa.pub"
clear
# Closing instructions; a Vagrant workbench build has a slightly different
# follow-up sequence than a real staging/production master.
# (Quoted test — the original unquoted `[ $IS_WORKBENCH == 0 ]` is fragile.)
if [ "$IS_WORKBENCH" = "0" ]; then
  echo ""
  echo "Step 1 of 3 completed!"
  echo ""
  echo "Next steps, run:"
  echo " salt-key"
  echo " apt-get update && time apt-get -y dist-upgrade"
  echo " salt-call state.highstate"
  echo " bash /srv/ops/salt-master/packages.sh"
  echo ""
else
  echo ""
  echo "Building a Vagrant Workbench:"
  echo ""
  echo "Step 1 of 3 completed!"
  echo ""
  # Fix: inside the original double-quoted string the backticks were COMMAND
  # SUBSTITUTION — the script actually executed `vagrant reload` here instead
  # of printing it. Single quotes keep the backticks literal.
  echo 'If its the first time you build, you have to reboot; `vagrant reload`'
  echo ""
  echo "Next steps, run:"
  echo " salt-call state.highstate"
  echo " RUNAS=$RUNAS bash /srv/ops/salt-master/packages.sh"
  echo ""
fi
exit 0