---
# You may want to change the default to the host (group) you run this against most often.
- hosts: "{{ runtime_hosts | default('CHANGE_ME') }}"
order: inventory
gather_facts: false
  # serial defaults to "all hosts in the first batch"; since it expects an integer (or percentage),
  # we set an explicitly oversized batch here.
  serial: 666
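  # If you prefer a canary-style rollout instead of one big batch, serial also accepts a percentage
  # or a ramp-up list (sketch only, adjust batch sizes to your environment):
  # serial:
  #   - 1
  #   - "10%"
  #   - "100%"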
tasks:
- name: Gather necessary facts
setup:
filter: "ansible_distribution*"
- name: Set up Red Hat and derivatives
debug:
msg: "System is {{ansible_distribution}} {{ansible_distribution_version}}, checking in."
when: ansible_distribution_file_variety == "RedHat"
changed_when: true
notify: "redhat upd"
- name: Set up Debian and derivatives
debug:
msg: "System is {{ansible_distribution}} {{ansible_distribution_version}}, checking in."
when: ansible_distribution_file_variety == "Debian"
changed_when: true
notify: "debian upd"
- name: Set up SUSE and derivatives
debug:
msg: "System is {{ansible_distribution}} {{ansible_distribution_version}}, checking in."
      # SuSE was "renamed" to SUSE sometime around SLES 11 (now SLE :-} ), so we check for both spellings,
      # even though generation-11 repositories should be pretty much dead by now.
when: ansible_distribution_file_variety == "SUSE" or ansible_distribution_file_variety == "SuSE"
changed_when: true
notify: "suse upd"
handlers:
- name: Update yum/dnf cache (RHEL)
      # We want a dedicated failure at this point already if the repositories cannot be fetched.
      # Cheating here: the yum module wants a "state" before it takes action, and then - contrary to what the
      # docs state - we can trigger update_cache without "name" being mandatory. So no package is touched,
      # but the cache gets refreshed :-)
yum:
state: present
update_cache: "yes"
validate_certs: "yes"
become: true
listen: "redhat upd"
- name: Update repository cache (Debian)
apt:
update_cache: yes
become: true
listen: "debian upd"
- name: Check for upgrades (RHEL)
      # yum check-update would normally exit with RC 100 if updates are available,
      # but since we pipe the output through wc -l, the shell hands back wc's RC 0.
      shell: /usr/bin/yum -q -C check-update 2>/dev/null | wc -l
args:
warn: false
register: yue
      # yum always tosses in an arbitrary extra line that a simple tr -s does not eradicate,
      # so both 0 and 1 output lines mean "no updates". As noted above, the RC is useless here.
      changed_when: yue.stdout|int > 1
become: true
listen: "redhat upd"
notify:
- "redhat updates available"
- "rkhunter"
- name: Check for upgrades (Debian)
shell:
cmd: apt list --upgradable 2>/dev/null | grep -v ^Listing | wc -l
      # TWO CROSSED HAMMERS AND A BIG W
register: aue
      # apt complains that it does not have a stable CLI interface when used in scripts.
      # For our purposes the output is good enough, so we simply never fail here.
failed_when: false
changed_when: aue.stdout|int > 0
notify:
- "debian updates available"
- "rkhunter"
listen: "debian upd"
- name: Check for existence of rkhunter
stat:
path: /usr/bin/rkhunter
register: rkhex
ignore_errors: true
no_log: true
      listen: "rkhunter"
changed_when:
- rkhex.stat is defined
- rkhex.stat.executable is defined
- rkhex.stat.executable == true
notify: "rkhunter execution"
- name: rkhunter pre-check
shell: rkhunter -c --sk --rwo --ns
become: true
no_log: true
listen: "rkhunter execution"
- name: Upgrade all installed packages (RHEL)
yum:
name: '*'
state: latest
validate_certs: "yes"
skip_broken: "yes"
become: true
listen: "redhat updates available"
# Auto-removal is broken and will nuke packages we previously selected through e.g. ansible.
# See ansible issue #60349. Leaving commented out. -- pff
# - name: Auto-removal of orphaned dependencies (RHEL)
# yum:
# autoremove: "yes"
# when: (ansible_distribution_file_variety == "RedHat") or (ansible_distribution == "Red Hat Enterprise Linux") or (ansible_distribution == "CentOS")
- name: Register requirement for reboot (RHEL)
command: needs-restarting -r
ignore_errors: "yes"
register: nr
changed_when: "nr.rc > 0"
failed_when: false
notify: "Reboot if required"
become: true
# we listen to "redhat upd" here in case a previous reboot was not executed. If undesired, change to "redhat updates available".
listen: "redhat upd"
- name: Clean packages cache (Debian)
command: apt clean
become: true
listen: "debian upd"
- name: Upgrade packages (Debian)
apt:
upgrade: dist
become: true
listen: "debian updates available"
- name: Remove dependencies that are no longer required (Debian)
apt:
autoremove: "yes"
purge: "yes"
become: true
# we listen to "debian upd" here in case a previous cleanup was skipped. Change to "debian updates available" if undesired.
listen: "debian upd"
- name: Check for existence of needrestart (Debian)
stat:
path: /usr/sbin/needrestart
register: nrex
ignore_errors: "yes"
no_log: true
failed_when: false
changed_when:
- nrex.stat.exists == true
- nrex.stat.executable == true
# we listen to "debian upd" here in case a previous reboot was not executed. If undesired, change to "debian updates available".
notify: "debian needrestart"
listen: "debian upd"
- name: Check for outdated kernel (Debian)
shell: /usr/sbin/needrestart -pk
register: kernout
when:
- nrex.stat.exists == true
- nrex.stat.executable == true
become: true
changed_when: "kernout.rc|int == 1"
listen: "debian needrestart"
notify: "Reboot if required"
      # failed_when is needed so that RC 1 counts as a change instead of a failure
failed_when: kernout.rc > 1
- name: Update zypper cache (SUSE)
      # we cannot cheat here like we did with yum: the zypper module only refreshes the cache as part of
      # updating a package. Hence falling back to shell.
shell: |
zypper refs && zypper ref
become: true
listen: "suse upd"
- name: Verify Zypper repository availability
      # Now, here's the thing with zypper. If you have a dead repository, you are facing the following facts:
      # 1. All output goes to stdout. For "zypper lu", at least on SLE12/openSUSE 42 and earlier, this is:
      #    - the packages available for update
      #    - debug output like "loading repository data..." and "reading installed packages..."
      #      (could be silenced with -q, but without RC feedback we need those debug strings again)
      #    - WARNING(!!) messages
      #    ... there is no STDERR.
      # 2. There is no return code other than 0 for warnings.
      # Great. Interacting with automation as if that stuff came straight from Redmond.
      # So we have to parse the output string in ansible. Let's start with the "repository not available" warnings.
      # This expects a registered "zypper lu" result in "zypperlu" - see the sketch below this handler.
      debug:
        msg: "Dead repositories exist and no updates are present; we consider this a failure."
      when:
        - zypperlu is defined
        - zypperlu.stdout is search("Repository.*appears to be outdated")
        - zypperlu.stdout is search("No updates found")
listen: "zypperlu"
failed_when: true
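    # Nothing in this playbook registers "zypperlu" or notifies the handler above yet.
    # A minimal sketch of the missing piece (untested, names assumed) could look like this:
    # - name: List available updates (SUSE)
    #   shell: zypper --non-interactive lu
    #   register: zypperlu
    #   changed_when: false
    #   become: true
    #   listen: "suse upd"
    #   notify: "zypperlu"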
- name: Update all packages (SUSE)
# we could narrow this down via type:patch, but that's about all. So fire away.
zypper:
name: '*'
state: latest
become: true
      # TODO: SUSE is not in production yet, so we choose an arbitrary listener here. Change to something meaningful when going to production.
listen: "suse upd"
- name: Register requirement for reboot (SUSE)
      # change of approach: we now use "zypper needs-rebooting" (SUSE implemented that somewhere between 12 and 15)
      # instead of "zypper ps -sss".
      # TODO: what to do if services need a restart? See the sketch after this handler.
      # shell: zypper ps -sss
shell: zypper needs-rebooting
args:
warn: false
register: zyppout
changed_when: zyppout.rc == 102
failed_when: zyppout.rc != 102 and zyppout.rc != 0
notify: "Reboot if required"
# we listen to "suse upd" here in case a previous reboot was skipped. Change to "suse updates available" if undesired.
listen: "suse upd"
- name: Clean packages cache (RHEL)
      # ansible's yum module does not have a dedicated action for this. So shell it is.
      # CAUTION: this only works as long as modern RHEL derivatives (RHEL/CentOS >=8, Fedora >=30) keep yum around as a pseudo-alias to dnf.
      # Also, although the yum module does not offer this feature, ansible will still warn that a yum module exists and that we should consider using it. Turning warnings off.
args:
warn: false
shell: yum clean packages
become: true
# we listen to "redhat upd" here in case a previous cleanup was skipped. Change to "redhat updates available" if undesired.
listen: "redhat upd"
- name: Clean apt cache (Debian)
# ansible's apt module does not have a dedicated action for this yet. So shell it is:
shell: apt clean
become: true
      # here we listen to "debian updates available" since a more generic cleanup already ran above (unless that one gets narrowed down as well)
listen: "debian updates available"
- name: Clean packages cache (SUSE)
# ansible's zypper module does not have a dedicated action for this yet. So shell it is:
shell: zypper clean
become: true
# we listen to "suse upd" here in case a previous cleanup was skipped. Change to "suse updates available" if undesired.
listen: "suse upd"
- name: rkhunter properties update
command: rkhunter --propupd --rwo --ns
become: true
listen: "rkhunter execution"
- name: Reboot if required
# ignore_errors: yes
reboot:
reboot_timeout: 300
pre_reboot_delay: 5
test_command: uptime
become: true
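# Example invocation (the filename is whatever you saved this playbook as; "webservers" is just an example group):
#   ansible-playbook patch_all.yml -e runtime_hosts=webservers
# Without -e, the default from the hosts line at the top ("CHANGE_ME") is used.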