1 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,43 +0,0 @@ |
1 |
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> |
|
2 |
-# |
|
3 |
-# This file is part of Ansible |
|
4 |
-# |
|
5 |
-# Ansible is free software: you can redistribute it and/or modify |
|
6 |
-# it under the terms of the GNU General Public License as published by |
|
7 |
-# the Free Software Foundation, either version 3 of the License, or |
|
8 |
-# (at your option) any later version. |
|
9 |
-# |
|
10 |
-# Ansible is distributed in the hope that it will be useful, |
|
11 |
-# but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
13 |
-# GNU General Public License for more details. |
|
14 |
-# |
|
15 |
-# You should have received a copy of the GNU General Public License |
|
16 |
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>. |
|
17 |
- |
|
18 |
-# Make coding more python3-ish |
|
19 |
-from __future__ import (absolute_import, division, print_function) |
|
20 |
-__metaclass__ = type |
|
21 |
- |
|
22 |
-class HostLog: |
|
23 |
- |
|
24 |
- def __init__(self, host): |
|
25 |
- self.host = host |
|
26 |
- |
|
27 |
- def add_task_result(self, task_result): |
|
28 |
- pass |
|
29 |
- |
|
30 |
- def has_failures(self): |
|
31 |
- assert False |
|
32 |
- |
|
33 |
- def has_changes(self): |
|
34 |
- assert False |
|
35 |
- |
|
36 |
- def get_tasks(self, are_executed=None, are_changed=None, are_successful=None): |
|
37 |
- assert False |
|
38 |
- |
|
39 |
- def get_current_running_task(self) |
|
40 |
- # atomic decorator likely required? |
|
41 |
- assert False |
|
42 |
- |
|
43 |
- |
44 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,29 +0,0 @@ |
1 |
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> |
|
2 |
-# |
|
3 |
-# This file is part of Ansible |
|
4 |
-# |
|
5 |
-# Ansible is free software: you can redistribute it and/or modify |
|
6 |
-# it under the terms of the GNU General Public License as published by |
|
7 |
-# the Free Software Foundation, either version 3 of the License, or |
|
8 |
-# (at your option) any later version. |
|
9 |
-# |
|
10 |
-# Ansible is distributed in the hope that it will be useful, |
|
11 |
-# but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
13 |
-# GNU General Public License for more details. |
|
14 |
-# |
|
15 |
-# You should have received a copy of the GNU General Public License |
|
16 |
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>. |
|
17 |
- |
|
18 |
-# Make coding more python3-ish |
|
19 |
-from __future__ import (absolute_import, division, print_function) |
|
20 |
-__metaclass__ = type |
|
21 |
- |
|
22 |
-class HostLogManager: |
|
23 |
- |
|
24 |
- def __init__(self): |
|
25 |
- pass |
|
26 |
- |
|
27 |
- def get_log_for_host(self, host): |
|
28 |
- assert False |
|
29 |
- |
... | ... |
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function) |
20 | 20 |
__metaclass__ = type |
21 | 21 |
|
22 | 22 |
from ansible.errors import * |
23 |
+from ansible.playbook.block import Block |
|
23 | 24 |
from ansible.playbook.task import Task |
24 | 25 |
|
25 | 26 |
from ansible.utils.boolean import boolean |
... | ... |
@@ -38,9 +39,10 @@ class HostState: |
38 | 38 |
self.run_state = PlayIterator.ITERATING_SETUP |
39 | 39 |
self.fail_state = PlayIterator.FAILED_NONE |
40 | 40 |
self.pending_setup = False |
41 |
+ self.child_state = None |
|
41 | 42 |
|
42 | 43 |
def __repr__(self): |
43 |
- return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s" % ( |
|
44 |
+ return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, child state? %s" % ( |
|
44 | 45 |
self.cur_block, |
45 | 46 |
self.cur_regular_task, |
46 | 47 |
self.cur_rescue_task, |
... | ... |
@@ -49,6 +51,7 @@ class HostState: |
49 | 49 |
self.run_state, |
50 | 50 |
self.fail_state, |
51 | 51 |
self.pending_setup, |
52 |
+ self.child_state, |
|
52 | 53 |
) |
53 | 54 |
|
54 | 55 |
def get_current_block(self): |
... | ... |
@@ -64,6 +67,7 @@ class HostState: |
64 | 64 |
new_state.run_state = self.run_state |
65 | 65 |
new_state.fail_state = self.fail_state |
66 | 66 |
new_state.pending_setup = self.pending_setup |
67 |
+ new_state.child_state = self.child_state |
|
67 | 68 |
return new_state |
68 | 69 |
|
69 | 70 |
class PlayIterator: |
... | ... |
@@ -104,75 +108,35 @@ class PlayIterator: |
104 | 104 |
except KeyError: |
105 | 105 |
raise AnsibleError("invalid host (%s) specified for playbook iteration" % host) |
106 | 106 |
|
107 |
- def get_next_task_for_host(self, host, peek=False, lock_step=True): |
|
107 |
+ def get_next_task_for_host(self, host, peek=False): |
|
108 |
+ |
|
108 | 109 |
s = self.get_host_state(host) |
109 | 110 |
|
110 | 111 |
task = None |
111 | 112 |
if s.run_state == self.ITERATING_COMPLETE: |
112 | 113 |
return None |
113 |
- else: |
|
114 |
- while True: |
|
115 |
- try: |
|
116 |
- cur_block = s._blocks[s.cur_block] |
|
117 |
- except IndexError: |
|
118 |
- s.run_state = self.ITERATING_COMPLETE |
|
119 |
- break |
|
120 |
- |
|
121 |
- if s.run_state == self.ITERATING_SETUP: |
|
122 |
- s.run_state = self.ITERATING_TASKS |
|
123 |
- if self._play._gather_facts == 'smart' and not host.gathered_facts or boolean(self._play._gather_facts): |
|
124 |
- # mark the host as having gathered facts |
|
125 |
- host.set_gathered_facts(True) |
|
126 |
- |
|
127 |
- task = Task() |
|
128 |
- task.action = 'setup' |
|
129 |
- task.set_loader(self._play._loader) |
|
130 |
- |
|
131 |
- elif s.run_state == self.ITERATING_TASKS: |
|
132 |
- # clear the pending setup flag, since we're past that and it didn't fail |
|
133 |
- if s.pending_setup: |
|
134 |
- s.pending_setup = False |
|
135 |
- |
|
136 |
- if s.fail_state & self.FAILED_TASKS == self.FAILED_TASKS: |
|
137 |
- s.run_state = self.ITERATING_RESCUE |
|
138 |
- elif s.cur_regular_task >= len(cur_block.block): |
|
139 |
- s.run_state = self.ITERATING_ALWAYS |
|
140 |
- else: |
|
141 |
- task = cur_block.block[s.cur_regular_task] |
|
142 |
- s.cur_regular_task += 1 |
|
143 |
- break |
|
144 |
- elif s.run_state == self.ITERATING_RESCUE: |
|
145 |
- if s.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE: |
|
146 |
- s.run_state = self.ITERATING_ALWAYS |
|
147 |
- elif s.cur_rescue_task >= len(cur_block.rescue): |
|
148 |
- if len(cur_block.rescue) > 0: |
|
149 |
- s.fail_state = self.FAILED_NONE |
|
150 |
- s.run_state = self.ITERATING_ALWAYS |
|
151 |
- else: |
|
152 |
- task = cur_block.rescue[s.cur_rescue_task] |
|
153 |
- s.cur_rescue_task += 1 |
|
154 |
- break |
|
155 |
- elif s.run_state == self.ITERATING_ALWAYS: |
|
156 |
- if s.cur_always_task >= len(cur_block.always): |
|
157 |
- if s.fail_state != self.FAILED_NONE: |
|
158 |
- s.run_state = self.ITERATING_COMPLETE |
|
159 |
- break |
|
160 |
- else: |
|
161 |
- s.cur_block += 1 |
|
162 |
- s.cur_regular_task = 0 |
|
163 |
- s.cur_rescue_task = 0 |
|
164 |
- s.cur_always_task = 0 |
|
165 |
- s.run_state = self.ITERATING_TASKS |
|
166 |
- else: |
|
167 |
- task= cur_block.always[s.cur_always_task] |
|
168 |
- s.cur_always_task += 1 |
|
169 |
- break |
|
114 |
+ elif s.run_state == self.ITERATING_SETUP: |
|
115 |
+ s.run_state = self.ITERATING_TASKS |
|
116 |
+ s.pending_setup = True |
|
117 |
+ if self._play.gather_facts == 'smart' and not host._gathered_facts or boolean(self._play.gather_facts): |
|
118 |
+ if not peek: |
|
119 |
+ # mark the host as having gathered facts |
|
120 |
+ host.set_gathered_facts(True) |
|
121 |
+ |
|
122 |
+ task = Task() |
|
123 |
+ task.action = 'setup' |
|
124 |
+ task.args = {} |
|
125 |
+ task.set_loader(self._play._loader) |
|
126 |
+ else: |
|
127 |
+ s.pending_setup = False |
|
128 |
+ |
|
129 |
+ if not task: |
|
130 |
+ (s, task) = self._get_next_task_from_state(s, peek=peek) |
|
170 | 131 |
|
171 | 132 |
if task and task._role: |
172 | 133 |
# if we had a current role, mark that role as completed |
173 | 134 |
if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek: |
174 | 135 |
s.cur_role._completed = True |
175 |
- |
|
176 | 136 |
s.cur_role = task._role |
177 | 137 |
|
178 | 138 |
if not peek: |
... | ... |
@@ -180,6 +144,86 @@ class PlayIterator: |
180 | 180 |
|
181 | 181 |
return (s, task) |
182 | 182 |
|
183 |
+ |
|
184 |
+ def _get_next_task_from_state(self, state, peek): |
|
185 |
+ |
|
186 |
+ task = None |
|
187 |
+ |
|
188 |
+ # if we previously encountered a child block and we have a |
|
189 |
+ # saved child state, try and get the next task from there |
|
190 |
+ if state.child_state: |
|
191 |
+ (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek) |
|
192 |
+ if task: |
|
193 |
+ return (state.child_state, task) |
|
194 |
+ else: |
|
195 |
+ state.child_state = None |
|
196 |
+ |
|
197 |
+ # try and find the next task, given the current state. |
|
198 |
+ while True: |
|
199 |
+ # try to get the current block from the list of blocks, and |
|
200 |
+ # if we run past the end of the list we know we're done with |
|
201 |
+ # this block |
|
202 |
+ try: |
|
203 |
+ block = state._blocks[state.cur_block] |
|
204 |
+ except IndexError: |
|
205 |
+ state.run_state = self.ITERATING_COMPLETE |
|
206 |
+ return (state, None) |
|
207 |
+ |
|
208 |
+ if state.run_state == self.ITERATING_TASKS: |
|
209 |
+ # clear the pending setup flag, since we're past that and it didn't fail |
|
210 |
+ if state.pending_setup: |
|
211 |
+ state.pending_setup = False |
|
212 |
+ |
|
213 |
+ if state.fail_state & self.FAILED_TASKS == self.FAILED_TASKS: |
|
214 |
+ state.run_state = self.ITERATING_RESCUE |
|
215 |
+ elif state.cur_regular_task >= len(block.block): |
|
216 |
+ state.run_state = self.ITERATING_ALWAYS |
|
217 |
+ else: |
|
218 |
+ task = block.block[state.cur_regular_task] |
|
219 |
+ state.cur_regular_task += 1 |
|
220 |
+ |
|
221 |
+ elif state.run_state == self.ITERATING_RESCUE: |
|
222 |
+ if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE: |
|
223 |
+ state.run_state = self.ITERATING_ALWAYS |
|
224 |
+ elif state.cur_rescue_task >= len(block.rescue): |
|
225 |
+ if len(block.rescue) > 0: |
|
226 |
+ state.fail_state = self.FAILED_NONE |
|
227 |
+ state.run_state = self.ITERATING_ALWAYS |
|
228 |
+ else: |
|
229 |
+ task = block.rescue[state.cur_rescue_task] |
|
230 |
+ state.cur_rescue_task += 1 |
|
231 |
+ |
|
232 |
+ elif state.run_state == self.ITERATING_ALWAYS: |
|
233 |
+ if state.cur_always_task >= len(block.always): |
|
234 |
+ if state.fail_state != self.FAILED_NONE: |
|
235 |
+ state.run_state = self.ITERATING_COMPLETE |
|
236 |
+ else: |
|
237 |
+ state.cur_block += 1 |
|
238 |
+ state.cur_regular_task = 0 |
|
239 |
+ state.cur_rescue_task = 0 |
|
240 |
+ state.cur_always_task = 0 |
|
241 |
+ state.run_state = self.ITERATING_TASKS |
|
242 |
+ state.child_state = None |
|
243 |
+ else: |
|
244 |
+ task = block.always[state.cur_always_task] |
|
245 |
+ state.cur_always_task += 1 |
|
246 |
+ |
|
247 |
+ elif state.run_state == self.ITERATING_COMPLETE: |
|
248 |
+ return (state, None) |
|
249 |
+ |
|
250 |
+ # if the current task is actually a child block, we dive into it |
|
251 |
+ if isinstance(task, Block): |
|
252 |
+ state.child_state = HostState(blocks=[task]) |
|
253 |
+ state.child_state.run_state = self.ITERATING_TASKS |
|
254 |
+ state.child_state.cur_role = state.cur_role |
|
255 |
+ (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek) |
|
256 |
+ |
|
257 |
+ # if something above set the task, break out of the loop now |
|
258 |
+ if task: |
|
259 |
+ break |
|
260 |
+ |
|
261 |
+ return (state, task) |
|
262 |
+ |
|
183 | 263 |
def mark_host_failed(self, host): |
184 | 264 |
s = self.get_host_state(host) |
185 | 265 |
if s.pending_setup: |
... | ... |
@@ -206,25 +250,41 @@ class PlayIterator: |
206 | 206 |
the different processes, and not all data structures are preserved. This method |
207 | 207 |
allows us to find the original task passed into the executor engine. |
208 | 208 |
''' |
209 |
+ def _search_block(block, task): |
|
210 |
+ for t in block.block: |
|
211 |
+ if isinstance(t, Block): |
|
212 |
+ res = _search_block(t, task) |
|
213 |
+ if res: |
|
214 |
+ return res |
|
215 |
+ elif t._uuid == task._uuid: |
|
216 |
+ return t |
|
217 |
+ for t in block.rescue: |
|
218 |
+ if isinstance(t, Block): |
|
219 |
+ res = _search_block(t, task) |
|
220 |
+ if res: |
|
221 |
+ return res |
|
222 |
+ elif t._uuid == task._uuid: |
|
223 |
+ return t |
|
224 |
+ for t in block.always: |
|
225 |
+ if isinstance(t, Block): |
|
226 |
+ res = _search_block(t, task) |
|
227 |
+ if res: |
|
228 |
+ return res |
|
229 |
+ elif t._uuid == task._uuid: |
|
230 |
+ return t |
|
231 |
+ return None |
|
232 |
+ |
|
209 | 233 |
s = self.get_host_state(host) |
210 | 234 |
for block in s._blocks: |
211 |
- if block.block: |
|
212 |
- for t in block.block: |
|
213 |
- if t._uuid == task._uuid: |
|
214 |
- return t |
|
215 |
- if block.rescue: |
|
216 |
- for t in block.rescue: |
|
217 |
- if t._uuid == task._uuid: |
|
218 |
- return t |
|
219 |
- if block.always: |
|
220 |
- for t in block.always: |
|
221 |
- if t._uuid == task._uuid: |
|
222 |
- return t |
|
235 |
+ res = _search_block(block, task) |
|
236 |
+ if res: |
|
237 |
+ return res |
|
238 |
+ |
|
223 | 239 |
return None |
224 | 240 |
|
225 | 241 |
def add_tasks(self, host, task_list): |
226 | 242 |
s = self.get_host_state(host) |
227 |
- target_block = s._blocks[s.cur_block].copy() |
|
243 |
+ target_block = s._blocks[s.cur_block].copy(exclude_parent=True) |
|
228 | 244 |
|
229 | 245 |
if s.run_state == self.ITERATING_TASKS: |
230 | 246 |
before = target_block.block[:s.cur_regular_task] |
... | ... |
@@ -26,6 +26,7 @@ from ansible.errors import * |
26 | 26 |
from ansible.executor.task_queue_manager import TaskQueueManager |
27 | 27 |
from ansible.playbook import Playbook |
28 | 28 |
|
29 |
+from ansible.utils.color import colorize, hostcolor |
|
29 | 30 |
from ansible.utils.debug import debug |
30 | 31 |
|
31 | 32 |
class PlaybookExecutor: |
... | ... |
@@ -70,8 +71,8 @@ class PlaybookExecutor: |
70 | 70 |
|
71 | 71 |
for batch in self._get_serialized_batches(new_play): |
72 | 72 |
if len(batch) == 0: |
73 |
- self._tqm._callback.playbook_on_play_start(new_play.name) |
|
74 |
- self._tqm._callback.playbook_on_no_hosts_matched() |
|
73 |
+ self._tqm.send_callback('v2_playbook_on_play_start', new_play) |
|
74 |
+ self._tqm.send_callback('v2_playbook_on_no_hosts_matched') |
|
75 | 75 |
result = 0 |
76 | 76 |
break |
77 | 77 |
# restrict the inventory to the hosts in the serialized batch |
... | ... |
@@ -90,6 +91,36 @@ class PlaybookExecutor: |
90 | 90 |
raise |
91 | 91 |
|
92 | 92 |
self._cleanup() |
93 |
+ |
|
94 |
+ # FIXME: this stat summary stuff should be cleaned up and moved |
|
95 |
+ # to a new method, if it even belongs here... |
|
96 |
+ self._tqm._display.banner("PLAY RECAP") |
|
97 |
+ |
|
98 |
+ hosts = sorted(self._tqm._stats.processed.keys()) |
|
99 |
+ for h in hosts: |
|
100 |
+ t = self._tqm._stats.summarize(h) |
|
101 |
+ |
|
102 |
+ self._tqm._display.display("%s : %s %s %s %s" % ( |
|
103 |
+ hostcolor(h, t), |
|
104 |
+ colorize('ok', t['ok'], 'green'), |
|
105 |
+ colorize('changed', t['changed'], 'yellow'), |
|
106 |
+ colorize('unreachable', t['unreachable'], 'red'), |
|
107 |
+ colorize('failed', t['failures'], 'red')), |
|
108 |
+ screen_only=True |
|
109 |
+ ) |
|
110 |
+ |
|
111 |
+ self._tqm._display.display("%s : %s %s %s %s" % ( |
|
112 |
+ hostcolor(h, t, False), |
|
113 |
+ colorize('ok', t['ok'], None), |
|
114 |
+ colorize('changed', t['changed'], None), |
|
115 |
+ colorize('unreachable', t['unreachable'], None), |
|
116 |
+ colorize('failed', t['failures'], None)), |
|
117 |
+ log_only=True |
|
118 |
+ ) |
|
119 |
+ |
|
120 |
+ self._tqm._display.display("", screen_only=True) |
|
121 |
+ # END STATS STUFF |
|
122 |
+ |
|
93 | 123 |
return result |
94 | 124 |
|
95 | 125 |
def _cleanup(self, signum=None, framenum=None): |
96 | 126 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,51 @@ |
0 |
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> |
|
1 |
+# |
|
2 |
+# This file is part of Ansible |
|
3 |
+# |
|
4 |
+# Ansible is free software: you can redistribute it and/or modify |
|
5 |
+# it under the terms of the GNU General Public License as published by |
|
6 |
+# the Free Software Foundation, either version 3 of the License, or |
|
7 |
+# (at your option) any later version. |
|
8 |
+# |
|
9 |
+# Ansible is distributed in the hope that it will be useful, |
|
10 |
+# but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 |
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 |
+# GNU General Public License for more details. |
|
13 |
+# |
|
14 |
+# You should have received a copy of the GNU General Public License |
|
15 |
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>. |
|
16 |
+ |
|
17 |
+# Make coding more python3-ish |
|
18 |
+from __future__ import (absolute_import, division, print_function) |
|
19 |
+__metaclass__ = type |
|
20 |
+ |
|
21 |
+class AggregateStats: |
|
22 |
+ ''' holds stats about per-host activity during playbook runs ''' |
|
23 |
+ |
|
24 |
+ def __init__(self): |
|
25 |
+ |
|
26 |
+ self.processed = {} |
|
27 |
+ self.failures = {} |
|
28 |
+ self.ok = {} |
|
29 |
+ self.dark = {} |
|
30 |
+ self.changed = {} |
|
31 |
+ self.skipped = {} |
|
32 |
+ |
|
33 |
+ def increment(self, what, host): |
|
34 |
+ ''' helper function to bump a statistic ''' |
|
35 |
+ |
|
36 |
+ self.processed[host] = 1 |
|
37 |
+ prev = (getattr(self, what)).get(host, 0) |
|
38 |
+ getattr(self, what)[host] = prev+1 |
|
39 |
+ |
|
40 |
+ def summarize(self, host): |
|
41 |
+ ''' return information about a particular host ''' |
|
42 |
+ |
|
43 |
+ return dict( |
|
44 |
+ ok = self.ok.get(host, 0), |
|
45 |
+ failures = self.failures.get(host, 0), |
|
46 |
+ unreachable = self.dark.get(host,0), |
|
47 |
+ changed = self.changed.get(host, 0), |
|
48 |
+ skipped = self.skipped.get(host, 0) |
|
49 |
+ ) |
|
50 |
+ |
... | ... |
@@ -237,10 +237,14 @@ class TaskExecutor: |
237 | 237 |
if self._task.poll > 0: |
238 | 238 |
result = self._poll_async_result(result=result) |
239 | 239 |
|
240 |
- # update the local copy of vars with the registered value, if specified |
|
240 |
+ # update the local copy of vars with the registered value, if specified, |
|
241 |
+ # or any facts which may have been generated by the module execution |
|
241 | 242 |
if self._task.register: |
242 | 243 |
vars_copy[self._task.register] = result |
243 | 244 |
|
245 |
+ if 'ansible_facts' in result: |
|
246 |
+ vars_copy.update(result['ansible_facts']) |
|
247 |
+ |
|
244 | 248 |
# create a conditional object to evaluate task conditions |
245 | 249 |
cond = Conditional(loader=self._loader) |
246 | 250 |
|
... | ... |
@@ -266,6 +270,15 @@ class TaskExecutor: |
266 | 266 |
if attempt < retries - 1: |
267 | 267 |
time.sleep(delay) |
268 | 268 |
|
269 |
+ # do the final update of the local variables here, for both registered |
|
270 |
+ # values and any facts which may have been created |
|
271 |
+ if self._task.register: |
|
272 |
+ variables[self._task.register] = result |
|
273 |
+ |
|
274 |
+ if 'ansible_facts' in result: |
|
275 |
+ variables.update(result['ansible_facts']) |
|
276 |
+ |
|
277 |
+ # and return |
|
269 | 278 |
debug("attempt loop complete, returning result") |
270 | 279 |
return result |
271 | 280 |
|
... | ... |
@@ -29,9 +29,11 @@ from ansible.executor.connection_info import ConnectionInformation |
29 | 29 |
from ansible.executor.play_iterator import PlayIterator |
30 | 30 |
from ansible.executor.process.worker import WorkerProcess |
31 | 31 |
from ansible.executor.process.result import ResultProcess |
32 |
+from ansible.executor.stats import AggregateStats |
|
32 | 33 |
from ansible.plugins import callback_loader, strategy_loader |
33 | 34 |
|
34 | 35 |
from ansible.utils.debug import debug |
36 |
+from ansible.utils.display import Display |
|
35 | 37 |
|
36 | 38 |
__all__ = ['TaskQueueManager'] |
37 | 39 |
|
... | ... |
@@ -53,6 +55,9 @@ class TaskQueueManager: |
53 | 53 |
self._variable_manager = variable_manager |
54 | 54 |
self._loader = loader |
55 | 55 |
self._options = options |
56 |
+ self._stats = AggregateStats() |
|
57 |
+ |
|
58 |
+ self._display = Display() |
|
56 | 59 |
|
57 | 60 |
# a special flag to help us exit cleanly |
58 | 61 |
self._terminated = False |
... | ... |
@@ -66,9 +71,14 @@ class TaskQueueManager: |
66 | 66 |
|
67 | 67 |
self._final_q = multiprocessing.Queue() |
68 | 68 |
|
69 |
- # FIXME: hard-coded the default callback plugin here, which |
|
70 |
- # should be configurable. |
|
71 |
- self._callback = callback_loader.get(callback) |
|
69 |
+ # load all available callback plugins |
|
70 |
+ # FIXME: we need an option to white-list callback plugins |
|
71 |
+ self._callback_plugins = [] |
|
72 |
+ for callback_plugin in callback_loader.all(class_only=True): |
|
73 |
+ if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: |
|
74 |
+ self._callback_plugins.append(callback_plugin(self._display)) |
|
75 |
+ else: |
|
76 |
+ self._callback_plugins.append(callback_plugin()) |
|
72 | 77 |
|
73 | 78 |
# create the pool of worker threads, based on the number of forks specified |
74 | 79 |
try: |
... | ... |
@@ -131,16 +141,11 @@ class TaskQueueManager: |
131 | 131 |
''' |
132 | 132 |
|
133 | 133 |
connection_info = ConnectionInformation(play, self._options) |
134 |
- self._callback.set_connection_info(connection_info) |
|
135 |
- |
|
136 |
- # run final validation on the play now, to make sure fields are templated |
|
137 |
- # FIXME: is this even required? Everything is validated and merged at the |
|
138 |
- # task level, so else in the play needs to be templated |
|
139 |
- #all_vars = self._vmw.get_vars(loader=self._dlw, play=play) |
|
140 |
- #all_vars = self._vmw.get_vars(loader=self._loader, play=play) |
|
141 |
- #play.post_validate(all_vars=all_vars) |
|
134 |
+ for callback_plugin in self._callback_plugins: |
|
135 |
+ if hasattr(callback_plugin, 'set_connection_info'): |
|
136 |
+ callback_plugin.set_connection_info(connection_info) |
|
142 | 137 |
|
143 |
- self._callback.playbook_on_play_start(play.name) |
|
138 |
+ self.send_callback('v2_playbook_on_play_start', play) |
|
144 | 139 |
|
145 | 140 |
# initialize the shared dictionary containing the notified handlers |
146 | 141 |
self._initialize_notified_handlers(play.handlers) |
... | ... |
@@ -172,9 +177,6 @@ class TaskQueueManager: |
172 | 172 |
def get_inventory(self): |
173 | 173 |
return self._inventory |
174 | 174 |
|
175 |
- def get_callback(self): |
|
176 |
- return self._callback |
|
177 |
- |
|
178 | 175 |
def get_variable_manager(self): |
179 | 176 |
return self._variable_manager |
180 | 177 |
|
... | ... |
@@ -201,3 +203,18 @@ class TaskQueueManager: |
201 | 201 |
|
202 | 202 |
def terminate(self): |
203 | 203 |
self._terminated = True |
204 |
+ |
|
205 |
+ def send_callback(self, method_name, *args, **kwargs): |
|
206 |
+ for callback_plugin in self._callback_plugins: |
|
207 |
+ # a plugin that set self.disabled to True will not be called |
|
208 |
+ # see osx_say.py example for such a plugin |
|
209 |
+ if getattr(callback_plugin, 'disabled', False): |
|
210 |
+ continue |
|
211 |
+ methods = [ |
|
212 |
+ getattr(callback_plugin, method_name, None), |
|
213 |
+ getattr(callback_plugin, 'on_any', None) |
|
214 |
+ ] |
|
215 |
+ for method in methods: |
|
216 |
+ if method is not None: |
|
217 |
+ method(*args, **kwargs) |
|
218 |
+ |
... | ... |
@@ -99,11 +99,14 @@ class DataLoader(): |
99 | 99 |
def path_exists(self, path): |
100 | 100 |
return os.path.exists(path) |
101 | 101 |
|
102 |
+ def is_file(self, path): |
|
103 |
+ return os.path.isfile(path) |
|
104 |
+ |
|
102 | 105 |
def is_directory(self, path): |
103 | 106 |
return os.path.isdir(path) |
104 | 107 |
|
105 |
- def is_file(self, path): |
|
106 |
- return os.path.isfile(path) |
|
108 |
+ def list_directory(self, path): |
|
109 |
+ return os.listdir(path)
|
107 | 110 |
|
108 | 111 |
def _safe_load(self, stream, file_name=None): |
109 | 112 |
''' Implements yaml.safe_load(), except using our custom loader class. ''' |
... | ... |
@@ -43,6 +43,7 @@ class Block(Base, Become, Conditional, Taggable): |
43 | 43 |
self._task_include = task_include |
44 | 44 |
self._use_handlers = use_handlers |
45 | 45 |
self._dep_chain = [] |
46 |
+ self._vars = dict() |
|
46 | 47 |
|
47 | 48 |
super(Block, self).__init__() |
48 | 49 |
|
... | ... |
@@ -56,9 +57,12 @@ class Block(Base, Become, Conditional, Taggable): |
56 | 56 |
|
57 | 57 |
if self._role: |
58 | 58 |
all_vars.update(self._role.get_vars()) |
59 |
+ if self._parent_block: |
|
60 |
+ all_vars.update(self._parent_block.get_vars()) |
|
59 | 61 |
if self._task_include: |
60 | 62 |
all_vars.update(self._task_include.get_vars()) |
61 | 63 |
|
64 |
+ all_vars.update(self._vars) |
|
62 | 65 |
return all_vars |
63 | 66 |
|
64 | 67 |
@staticmethod |
... | ... |
@@ -131,25 +135,29 @@ class Block(Base, Become, Conditional, Taggable): |
131 | 131 |
# use_handlers=self._use_handlers, |
132 | 132 |
# ) |
133 | 133 |
|
134 |
- def copy(self): |
|
134 |
+ def copy(self, exclude_parent=False): |
|
135 | 135 |
def _dupe_task_list(task_list, new_block): |
136 | 136 |
new_task_list = [] |
137 | 137 |
for task in task_list: |
138 |
- new_task = task.copy(exclude_block=True) |
|
139 |
- new_task._block = new_block |
|
138 |
+ if isinstance(task, Block): |
|
139 |
+ new_task = task.copy(exclude_parent=True) |
|
140 |
+ new_task._parent_block = new_block |
|
141 |
+ else: |
|
142 |
+ new_task = task.copy(exclude_block=True) |
|
143 |
+ new_task._block = new_block |
|
140 | 144 |
new_task_list.append(new_task) |
141 | 145 |
return new_task_list |
142 | 146 |
|
143 | 147 |
new_me = super(Block, self).copy() |
144 | 148 |
new_me._use_handlers = self._use_handlers |
145 |
- new_me._dep_chain = self._dep_chain[:] |
|
149 |
+ new_me._dep_chain = self._dep_chain[:] |
|
146 | 150 |
|
147 | 151 |
new_me.block = _dupe_task_list(self.block or [], new_me) |
148 | 152 |
new_me.rescue = _dupe_task_list(self.rescue or [], new_me) |
149 | 153 |
new_me.always = _dupe_task_list(self.always or [], new_me) |
150 | 154 |
|
151 | 155 |
new_me._parent_block = None |
152 |
- if self._parent_block: |
|
156 |
+ if self._parent_block and not exclude_parent: |
|
153 | 157 |
new_me._parent_block = self._parent_block.copy() |
154 | 158 |
|
155 | 159 |
new_me._role = None |
... | ... |
@@ -260,7 +268,7 @@ class Block(Base, Become, Conditional, Taggable): |
260 | 260 |
value = self._attributes[attr] |
261 | 261 |
if not value: |
262 | 262 |
if self._parent_block: |
263 |
- value = getattr(self._block, attr) |
|
263 |
+ value = getattr(self._parent_block, attr) |
|
264 | 264 |
elif self._role: |
265 | 265 |
value = getattr(self._role, attr) |
266 | 266 |
if not value and len(self._dep_chain): |
... | ... |
@@ -60,9 +60,9 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler |
60 | 60 |
''' |
61 | 61 |
|
62 | 62 |
# we import here to prevent a circular dependency with imports |
63 |
+ from ansible.playbook.block import Block |
|
63 | 64 |
from ansible.playbook.handler import Handler |
64 | 65 |
from ansible.playbook.task import Task |
65 |
- #from ansible.playbook.task_include import TaskInclude |
|
66 | 66 |
|
67 | 67 |
assert type(ds) == list |
68 | 68 |
|
... | ... |
@@ -71,27 +71,17 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler |
71 | 71 |
if not isinstance(task, dict): |
72 | 72 |
raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds) |
73 | 73 |
|
74 |
- #if 'include' in task: |
|
75 |
- # cur_basedir = None |
|
76 |
- # if isinstance(task, AnsibleBaseYAMLObject) and loader: |
|
77 |
- # pos_info = task.get_position_info() |
|
78 |
- # new_basedir = os.path.dirname(pos_info[0]) |
|
79 |
- # cur_basedir = loader.get_basedir() |
|
80 |
- # loader.set_basedir(new_basedir) |
|
81 |
- |
|
82 |
- # t = TaskInclude.load( |
|
83 |
- # task, |
|
84 |
- # block=block, |
|
85 |
- # role=role, |
|
86 |
- # task_include=task_include, |
|
87 |
- # use_handlers=use_handlers, |
|
88 |
- # loader=loader |
|
89 |
- # ) |
|
90 |
- |
|
91 |
- # if cur_basedir and loader: |
|
92 |
- # loader.set_basedir(cur_basedir) |
|
93 |
- #else: |
|
94 |
- if True: |
|
74 |
+ if 'block' in task: |
|
75 |
+ t = Block.load( |
|
76 |
+ task, |
|
77 |
+ parent_block=block, |
|
78 |
+ role=role, |
|
79 |
+ task_include=task_include, |
|
80 |
+ use_handlers=use_handlers, |
|
81 |
+ variable_manager=variable_manager, |
|
82 |
+ loader=loader, |
|
83 |
+ ) |
|
84 |
+ else: |
|
95 | 85 |
if use_handlers: |
96 | 86 |
t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) |
97 | 87 |
else: |
... | ... |
@@ -120,15 +110,3 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader |
120 | 120 |
|
121 | 121 |
return roles |
122 | 122 |
|
123 |
-def compile_block_list(block_list): |
|
124 |
- ''' |
|
125 |
- Given a list of blocks, compile them into a flat list of tasks |
|
126 |
- ''' |
|
127 |
- |
|
128 |
- task_list = [] |
|
129 |
- |
|
130 |
- for block in block_list: |
|
131 |
- task_list.extend(block.compile()) |
|
132 |
- |
|
133 |
- return task_list |
|
134 |
- |
... | ... |
@@ -24,7 +24,7 @@ from ansible.errors import AnsibleError, AnsibleParserError |
24 | 24 |
from ansible.playbook.attribute import Attribute, FieldAttribute |
25 | 25 |
from ansible.playbook.base import Base |
26 | 26 |
from ansible.playbook.become import Become |
27 |
-from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list |
|
27 |
+from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles |
|
28 | 28 |
from ansible.playbook.role import Role |
29 | 29 |
from ansible.playbook.taggable import Taggable |
30 | 30 |
|
... | ... |
@@ -32,7 +32,7 @@ from ansible.playbook.attribute import FieldAttribute |
32 | 32 |
from ansible.playbook.base import Base |
33 | 33 |
from ansible.playbook.become import Become |
34 | 34 |
from ansible.playbook.conditional import Conditional |
35 |
-from ansible.playbook.helpers import load_list_of_blocks, compile_block_list |
|
35 |
+from ansible.playbook.helpers import load_list_of_blocks |
|
36 | 36 |
from ansible.playbook.role.include import RoleInclude |
37 | 37 |
from ansible.playbook.role.metadata import RoleMetadata |
38 | 38 |
from ansible.playbook.taggable import Taggable |
... | ... |
@@ -78,7 +78,7 @@ class Task(Base, Conditional, Taggable, Become): |
78 | 78 |
# FIXME: this should not be a Task |
79 | 79 |
_meta = FieldAttribute(isa='string') |
80 | 80 |
|
81 |
- _name = FieldAttribute(isa='string') |
|
81 |
+ _name = FieldAttribute(isa='string', default='') |
|
82 | 82 |
|
83 | 83 |
_no_log = FieldAttribute(isa='bool') |
84 | 84 |
_notify = FieldAttribute(isa='list') |
... | ... |
@@ -167,7 +167,6 @@ class Task(Base, Conditional, Taggable, Become): |
167 | 167 |
args_parser = ModuleArgsParser(task_ds=ds) |
168 | 168 |
(action, args, delegate_to) = args_parser.parse() |
169 | 169 |
|
170 |
- |
|
171 | 170 |
new_ds['action'] = action |
172 | 171 |
new_ds['args'] = args |
173 | 172 |
new_ds['delegate_to'] = delegate_to |
... | ... |
@@ -199,6 +198,8 @@ class Task(Base, Conditional, Taggable, Become): |
199 | 199 |
|
200 | 200 |
def get_vars(self): |
201 | 201 |
all_vars = self.vars.copy() |
202 |
+ if self._block: |
|
203 |
+ all_vars.update(self._block.get_vars()) |
|
202 | 204 |
if self._task_include: |
203 | 205 |
all_vars.update(self._task_include.get_vars()) |
204 | 206 |
|
... | ... |
@@ -240,7 +240,10 @@ class PluginLoader: |
240 | 240 |
continue |
241 | 241 |
if path not in self._module_cache: |
242 | 242 |
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) |
243 |
- yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs) |
|
243 |
+ if kwargs.get('class_only', False): |
|
244 |
+ yield getattr(self._module_cache[path], self.class_name) |
|
245 |
+ else: |
|
246 |
+ yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs) |
|
244 | 247 |
|
245 | 248 |
action_loader = PluginLoader( |
246 | 249 |
'ActionModule', |
... | ... |
@@ -231,7 +231,7 @@ class ActionModule(ActionBase): |
231 | 231 |
self._remove_tempfile_if_content_defined(content, content_tempfile) |
232 | 232 |
|
233 | 233 |
# fix file permissions when the copy is done as a different user |
234 |
- if (self._connection_info.become and self._connection_info.become_user != 'root': |
|
234 |
+ if self._connection_info.become and self._connection_info.become_user != 'root': |
|
235 | 235 |
self._remote_chmod('a+r', tmp_src, tmp) |
236 | 236 |
|
237 | 237 |
if raw: |
... | ... |
@@ -19,7 +19,7 @@ |
19 | 19 |
from __future__ import (absolute_import, division, print_function) |
20 | 20 |
__metaclass__ = type |
21 | 21 |
|
22 |
-from ansible.utils.display import Display |
|
22 |
+#from ansible.utils.display import Display |
|
23 | 23 |
|
24 | 24 |
__all__ = ["CallbackBase"] |
25 | 25 |
|
... | ... |
@@ -34,8 +34,8 @@ class CallbackBase: |
34 | 34 |
# FIXME: the list of functions here needs to be updated once we have |
35 | 35 |
# finalized the list of callback methods used in the default callback |
36 | 36 |
|
37 |
- def __init__(self): |
|
38 |
- self._display = Display() |
|
37 |
+ def __init__(self, display): |
|
38 |
+ self._display = display |
|
39 | 39 |
|
40 | 40 |
def set_connection_info(self, conn_info): |
41 | 41 |
# FIXME: this is a temporary hack, as the connection info object |
... | ... |
@@ -30,25 +30,15 @@ class CallbackModule(CallbackBase): |
30 | 30 |
to stdout when new callback events are received. |
31 | 31 |
''' |
32 | 32 |
|
33 |
- def _print_banner(self, msg, color=None): |
|
34 |
- ''' |
|
35 |
- Prints a header-looking line with stars taking up to 80 columns |
|
36 |
- of width (3 columns, minimum) |
|
37 |
- ''' |
|
38 |
- msg = msg.strip() |
|
39 |
- star_len = (80 - len(msg)) |
|
40 |
- if star_len < 0: |
|
41 |
- star_len = 3 |
|
42 |
- stars = "*" * star_len |
|
43 |
- self._display.display("\n%s %s" % (msg, stars), color=color) |
|
44 |
- |
|
45 |
- def on_any(self, *args, **kwargs): |
|
33 |
+ CALLBACK_VERSION = 2.0 |
|
34 |
+ |
|
35 |
+ def v2_on_any(self, *args, **kwargs): |
|
46 | 36 |
pass |
47 | 37 |
|
48 |
- def runner_on_failed(self, task, result, ignore_errors=False): |
|
38 |
+ def v2_runner_on_failed(self, result, ignore_errors=False): |
|
49 | 39 |
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red') |
50 | 40 |
|
51 |
- def runner_on_ok(self, task, result): |
|
41 |
+ def v2_runner_on_ok(self, result): |
|
52 | 42 |
|
53 | 43 |
if result._task.action == 'include': |
54 | 44 |
msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name) |
... | ... |
@@ -68,7 +58,7 @@ class CallbackModule(CallbackBase): |
68 | 68 |
msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False) |
69 | 69 |
self._display.display(msg, color=color) |
70 | 70 |
|
71 |
- def runner_on_skipped(self, task, result): |
|
71 |
+ def v2_runner_on_skipped(self, result): |
|
72 | 72 |
msg = "skipping: [%s]" % result._host.get_name() |
73 | 73 |
if self._display._verbosity > 0 or 'verbose_always' in result._result: |
74 | 74 |
indent = None |
... | ... |
@@ -78,57 +68,66 @@ class CallbackModule(CallbackBase): |
78 | 78 |
msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False) |
79 | 79 |
self._display.display(msg, color='cyan') |
80 | 80 |
|
81 |
- def runner_on_unreachable(self, task, result): |
|
81 |
+ def v2_runner_on_unreachable(self, result): |
|
82 | 82 |
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red') |
83 | 83 |
|
84 |
- def runner_on_no_hosts(self, task): |
|
84 |
+ def v2_runner_on_no_hosts(self, task): |
|
85 |
+ pass |
|
86 |
+ |
|
87 |
+ def v2_runner_on_async_poll(self, result): |
|
85 | 88 |
pass |
86 | 89 |
|
87 |
- def runner_on_async_poll(self, host, res, jid, clock): |
|
90 |
+ def v2_runner_on_async_ok(self, result): |
|
88 | 91 |
pass |
89 | 92 |
|
90 |
- def runner_on_async_ok(self, host, res, jid): |
|
93 |
+ def v2_runner_on_async_failed(self, result): |
|
91 | 94 |
pass |
92 | 95 |
|
93 |
- def runner_on_async_failed(self, host, res, jid): |
|
96 |
+ def v2_runner_on_file_diff(self, result, diff): |
|
94 | 97 |
pass |
95 | 98 |
|
96 |
- def playbook_on_start(self): |
|
99 |
+ def v2_playbook_on_start(self): |
|
97 | 100 |
pass |
98 | 101 |
|
99 |
- def playbook_on_notify(self, host, handler): |
|
102 |
+ def v2_playbook_on_notify(self, result, handler): |
|
100 | 103 |
pass |
101 | 104 |
|
102 |
- def playbook_on_no_hosts_matched(self): |
|
105 |
+ def v2_playbook_on_no_hosts_matched(self): |
|
103 | 106 |
self._display.display("skipping: no hosts matched", color='cyan') |
104 | 107 |
|
105 |
- def playbook_on_no_hosts_remaining(self): |
|
106 |
- self._print_banner("NO MORE HOSTS LEFT") |
|
108 |
+ def v2_playbook_on_no_hosts_remaining(self): |
|
109 |
+ self._display.banner("NO MORE HOSTS LEFT") |
|
107 | 110 |
|
108 |
- def playbook_on_task_start(self, name, is_conditional): |
|
109 |
- self._print_banner("TASK [%s]" % name.strip()) |
|
111 |
+ def v2_playbook_on_task_start(self, task, is_conditional): |
|
112 |
+ self._display.banner("TASK [%s]" % task.get_name().strip()) |
|
110 | 113 |
|
111 |
- def playbook_on_cleanup_task_start(self, name): |
|
112 |
- self._print_banner("CLEANUP TASK [%s]" % name.strip()) |
|
114 |
+ def v2_playbook_on_cleanup_task_start(self, task): |
|
115 |
+ self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) |
|
113 | 116 |
|
114 |
- def playbook_on_handler_task_start(self, name): |
|
115 |
- self._print_banner("RUNNING HANDLER [%s]" % name.strip()) |
|
117 |
+ def v2_playbook_on_handler_task_start(self, task): |
|
118 |
+ self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) |
|
116 | 119 |
|
117 |
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): |
|
120 |
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): |
|
118 | 121 |
pass |
119 | 122 |
|
120 |
- def playbook_on_setup(self): |
|
123 |
+ def v2_playbook_on_setup(self): |
|
121 | 124 |
pass |
122 | 125 |
|
123 |
- def playbook_on_import_for_host(self, host, imported_file): |
|
126 |
+ def v2_playbook_on_import_for_host(self, result, imported_file): |
|
124 | 127 |
pass |
125 | 128 |
|
126 |
- def playbook_on_not_import_for_host(self, host, missing_file): |
|
129 |
+ def v2_playbook_on_not_import_for_host(self, result, missing_file): |
|
127 | 130 |
pass |
128 | 131 |
|
129 |
- def playbook_on_play_start(self, name): |
|
130 |
- self._print_banner("PLAY [%s]" % name.strip()) |
|
132 |
+ def v2_playbook_on_play_start(self, play): |
|
133 |
+ name = play.get_name().strip() |
|
134 |
+ if not name: |
|
135 |
+ msg = "PLAY" |
|
136 |
+ else: |
|
137 |
+ msg = "PLAY [%s]" % name |
|
138 |
+ |
|
139 |
+ self._display.banner(name) |
|
131 | 140 |
|
132 |
- def playbook_on_stats(self, stats): |
|
141 |
+ def v2_playbook_on_stats(self, stats): |
|
133 | 142 |
pass |
134 | 143 |
|
... | ... |
@@ -28,7 +28,7 @@ from ansible.inventory.host import Host |
28 | 28 |
from ansible.inventory.group import Group |
29 | 29 |
|
30 | 30 |
from ansible.playbook.handler import Handler |
31 |
-from ansible.playbook.helpers import load_list_of_blocks, compile_block_list |
|
31 |
+from ansible.playbook.helpers import load_list_of_blocks |
|
32 | 32 |
from ansible.playbook.role import ROLE_CACHE, hash_params |
33 | 33 |
from ansible.plugins import module_loader |
34 | 34 |
from ansible.utils.debug import debug |
... | ... |
@@ -49,7 +49,7 @@ class StrategyBase: |
49 | 49 |
self._inventory = tqm.get_inventory() |
50 | 50 |
self._workers = tqm.get_workers() |
51 | 51 |
self._notified_handlers = tqm.get_notified_handlers() |
52 |
- self._callback = tqm.get_callback() |
|
52 |
+ #self._callback = tqm.get_callback() |
|
53 | 53 |
self._variable_manager = tqm.get_variable_manager() |
54 | 54 |
self._loader = tqm.get_loader() |
55 | 55 |
self._final_q = tqm._final_q |
... | ... |
@@ -73,6 +73,9 @@ class StrategyBase: |
73 | 73 |
debug("running handlers") |
74 | 74 |
result &= self.run_handlers(iterator, connection_info) |
75 | 75 |
|
76 |
+ # send the stats callback |
|
77 |
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) |
|
78 |
+ |
|
76 | 79 |
if not result: |
77 | 80 |
if num_unreachable > 0: |
78 | 81 |
return 3 |
... | ... |
@@ -84,7 +87,7 @@ class StrategyBase: |
84 | 84 |
return 0 |
85 | 85 |
|
86 | 86 |
def get_hosts_remaining(self, play): |
87 |
- return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.get_name() not in self._tqm._unreachable_hosts] |
|
87 |
+ return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts] |
|
88 | 88 |
|
89 | 89 |
def get_failed_hosts(self, play): |
90 | 90 |
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts] |
... | ... |
@@ -132,17 +135,23 @@ class StrategyBase: |
132 | 132 |
task = task_result._task |
133 | 133 |
if result[0] == 'host_task_failed': |
134 | 134 |
if not task.ignore_errors: |
135 |
- debug("marking %s as failed" % host.get_name()) |
|
135 |
+ debug("marking %s as failed" % host.name) |
|
136 | 136 |
iterator.mark_host_failed(host) |
137 |
- self._tqm._failed_hosts[host.get_name()] = True |
|
138 |
- self._callback.runner_on_failed(task, task_result) |
|
137 |
+ self._tqm._failed_hosts[host.name] = True |
|
138 |
+ self._tqm._stats.increment('failures', host.name) |
|
139 |
+ self._tqm.send_callback('v2_runner_on_failed', task_result) |
|
139 | 140 |
elif result[0] == 'host_unreachable': |
140 |
- self._tqm._unreachable_hosts[host.get_name()] = True |
|
141 |
- self._callback.runner_on_unreachable(task, task_result) |
|
141 |
+ self._tqm._unreachable_hosts[host.name] = True |
|
142 |
+ self._tqm._stats.increment('dark', host.name) |
|
143 |
+ self._tqm.send_callback('v2_runner_on_unreachable', task_result) |
|
142 | 144 |
elif result[0] == 'host_task_skipped': |
143 |
- self._callback.runner_on_skipped(task, task_result) |
|
145 |
+ self._tqm._stats.increment('skipped', host.name) |
|
146 |
+ self._tqm.send_callback('v2_runner_on_skipped', task_result) |
|
144 | 147 |
elif result[0] == 'host_task_ok': |
145 |
- self._callback.runner_on_ok(task, task_result) |
|
148 |
+ self._tqm._stats.increment('ok', host.name) |
|
149 |
+ if 'changed' in task_result._result and task_result._result['changed']: |
|
150 |
+ self._tqm._stats.increment('changed', host.name) |
|
151 |
+ self._tqm.send_callback('v2_runner_on_ok', task_result) |
|
146 | 152 |
|
147 | 153 |
self._pending_results -= 1 |
148 | 154 |
if host.name in self._blocked_hosts: |
... | ... |
@@ -160,22 +169,6 @@ class StrategyBase: |
160 | 160 |
|
161 | 161 |
ret_results.append(task_result) |
162 | 162 |
|
163 |
- #elif result[0] == 'include': |
|
164 |
- # host = result[1] |
|
165 |
- # task = result[2] |
|
166 |
- # include_file = result[3] |
|
167 |
- # include_vars = result[4] |
|
168 |
- # |
|
169 |
- # if isinstance(task, Handler): |
|
170 |
- # # FIXME: figure out how to make includes work for handlers |
|
171 |
- # pass |
|
172 |
- # else: |
|
173 |
- # original_task = iterator.get_original_task(host, task) |
|
174 |
- # if original_task and original_task._role: |
|
175 |
- # include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file) |
|
176 |
- # new_tasks = self._load_included_file(original_task, include_file, include_vars) |
|
177 |
- # iterator.add_tasks(host, new_tasks) |
|
178 |
- |
|
179 | 163 |
elif result[0] == 'add_host': |
180 | 164 |
task_result = result[1] |
181 | 165 |
new_host_info = task_result.get('add_host', dict()) |
... | ... |
@@ -322,14 +315,11 @@ class StrategyBase: |
322 | 322 |
loader=self._loader |
323 | 323 |
) |
324 | 324 |
|
325 |
- |
|
326 |
- task_list = compile_block_list(block_list) |
|
327 |
- |
|
328 | 325 |
# set the vars for this task from those specified as params to the include |
329 |
- for t in task_list: |
|
330 |
- t.vars = included_file._args.copy() |
|
326 |
+ for b in block_list: |
|
327 |
+ b._vars = included_file._args.copy() |
|
331 | 328 |
|
332 |
- return task_list |
|
329 |
+ return block_list |
|
333 | 330 |
|
334 | 331 |
def cleanup(self, iterator, connection_info): |
335 | 332 |
''' |
... | ... |
@@ -361,7 +351,7 @@ class StrategyBase: |
361 | 361 |
while work_to_do: |
362 | 362 |
work_to_do = False |
363 | 363 |
for host in failed_hosts: |
364 |
- host_name = host.get_name() |
|
364 |
+ host_name = host.name |
|
365 | 365 |
|
366 | 366 |
if host_name in self._tqm._failed_hosts: |
367 | 367 |
iterator.mark_host_failed(host) |
... | ... |
@@ -377,7 +367,7 @@ class StrategyBase: |
377 | 377 |
self._blocked_hosts[host_name] = True |
378 | 378 |
task = iterator.get_next_task_for_host(host) |
379 | 379 |
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) |
380 |
- self._callback.playbook_on_cleanup_task_start(task.get_name()) |
|
380 |
+ self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task) |
|
381 | 381 |
self._queue_task(host, task, task_vars, connection_info) |
382 | 382 |
|
383 | 383 |
self._process_pending_results(iterator) |
... | ... |
@@ -398,31 +388,28 @@ class StrategyBase: |
398 | 398 |
# FIXME: getting the handlers from the iterators play should be |
399 | 399 |
# a method on the iterator, which may also filter the list |
400 | 400 |
# of handlers based on the notified list |
401 |
- handlers = compile_block_list(iterator._play.handlers) |
|
402 |
- |
|
403 |
- debug("handlers are: %s" % handlers) |
|
404 |
- for handler in handlers: |
|
405 |
- handler_name = handler.get_name() |
|
406 |
- |
|
407 |
- if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]): |
|
408 |
- if not len(self.get_hosts_remaining(iterator._play)): |
|
409 |
- self._callback.playbook_on_no_hosts_remaining() |
|
410 |
- result = False |
|
411 |
- break |
|
412 |
- |
|
413 |
- self._callback.playbook_on_handler_task_start(handler_name) |
|
414 |
- for host in self._notified_handlers[handler_name]: |
|
415 |
- if not handler.has_triggered(host): |
|
416 |
- task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) |
|
417 |
- self._queue_task(host, handler, task_vars, connection_info) |
|
418 |
- handler.flag_for_host(host) |
|
419 |
- |
|
420 |
- self._process_pending_results(iterator) |
|
421 |
- |
|
422 |
- self._wait_on_pending_results(iterator) |
|
423 |
- |
|
424 |
- # wipe the notification list |
|
425 |
- self._notified_handlers[handler_name] = [] |
|
426 | 401 |
|
427 |
- debug("done running handlers, result is: %s" % result) |
|
402 |
+ for handler_block in iterator._play.handlers: |
|
403 |
+ debug("handlers are: %s" % handlers) |
|
404 |
+ # FIXME: handlers need to support the rescue/always portions of blocks too, |
|
405 |
+ # but this may take some work in the iterator and gets tricky when |
|
406 |
+ # we consider the ability of meta tasks to flush handlers |
|
407 |
+ for handler in handler_block.block: |
|
408 |
+ handler_name = handler.get_name() |
|
409 |
+ if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]): |
|
410 |
+ if not len(self.get_hosts_remaining(iterator._play)): |
|
411 |
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') |
|
412 |
+ result = False |
|
413 |
+ break |
|
414 |
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', handler) |
|
415 |
+ for host in self._notified_handlers[handler_name]: |
|
416 |
+ if not handler.has_triggered(host): |
|
417 |
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) |
|
418 |
+ self._queue_task(host, handler, task_vars, connection_info) |
|
419 |
+ handler.flag_for_host(host) |
|
420 |
+ self._process_pending_results(iterator) |
|
421 |
+ self._wait_on_pending_results(iterator) |
|
422 |
+ # wipe the notification list |
|
423 |
+ self._notified_handlers[handler_name] = [] |
|
424 |
+ debug("done running handlers, result is: %s" % result) |
|
428 | 425 |
return result |
... | ... |
@@ -21,6 +21,7 @@ __metaclass__ = type |
21 | 21 |
|
22 | 22 |
from ansible.errors import AnsibleError |
23 | 23 |
from ansible.executor.play_iterator import PlayIterator |
24 |
+from ansible.playbook.block import Block |
|
24 | 25 |
from ansible.playbook.task import Task |
25 | 26 |
from ansible.plugins import action_loader |
26 | 27 |
from ansible.plugins.strategies import StrategyBase |
... | ... |
@@ -52,6 +53,9 @@ class StrategyModule(StrategyBase): |
52 | 52 |
lowest_cur_block = len(iterator._blocks) |
53 | 53 |
|
54 | 54 |
for (k, v) in host_tasks.iteritems(): |
55 |
+ if v is None: |
|
56 |
+ continue |
|
57 |
+ |
|
55 | 58 |
(s, t) = v |
56 | 59 |
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE: |
57 | 60 |
lowest_cur_block = s.cur_block |
... | ... |
@@ -131,7 +135,7 @@ class StrategyModule(StrategyBase): |
131 | 131 |
debug("done getting the remaining hosts for this loop") |
132 | 132 |
if len(hosts_left) == 0: |
133 | 133 |
debug("out of hosts to run on") |
134 |
- self._callback.playbook_on_no_hosts_remaining() |
|
134 |
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') |
|
135 | 135 |
result = False |
136 | 136 |
break |
137 | 137 |
|
... | ... |
@@ -184,7 +188,6 @@ class StrategyModule(StrategyBase): |
184 | 184 |
meta_action = task.args.get('_raw_params') |
185 | 185 |
if meta_action == 'noop': |
186 | 186 |
# FIXME: issue a callback for the noop here? |
187 |
- print("%s => NOOP" % host) |
|
188 | 187 |
continue |
189 | 188 |
elif meta_action == 'flush_handlers': |
190 | 189 |
self.run_handlers(iterator, connection_info) |
... | ... |
@@ -192,7 +195,7 @@ class StrategyModule(StrategyBase): |
192 | 192 |
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) |
193 | 193 |
else: |
194 | 194 |
if not callback_sent: |
195 |
- self._callback.playbook_on_task_start(task.get_name(), False) |
|
195 |
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False) |
|
196 | 196 |
callback_sent = True |
197 | 197 |
|
198 | 198 |
self._blocked_hosts[host.get_name()] = True |
... | ... |
@@ -234,6 +237,10 @@ class StrategyModule(StrategyBase): |
234 | 234 |
include_results = [ res._result ] |
235 | 235 |
|
236 | 236 |
for include_result in include_results: |
237 |
+ # if the task result was skipped or failed, continue |
|
238 |
+ if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: |
|
239 |
+ continue |
|
240 |
+ |
|
237 | 241 |
original_task = iterator.get_original_task(res._host, res._task) |
238 | 242 |
if original_task and original_task._role: |
239 | 243 |
include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include']) |
... | ... |
@@ -263,27 +270,31 @@ class StrategyModule(StrategyBase): |
263 | 263 |
noop_task.args['_raw_params'] = 'noop' |
264 | 264 |
noop_task.set_loader(iterator._play._loader) |
265 | 265 |
|
266 |
- all_tasks = dict((host, []) for host in hosts_left) |
|
266 |
+ all_blocks = dict((host, []) for host in hosts_left) |
|
267 | 267 |
for included_file in included_files: |
268 | 268 |
# included hosts get the task list while those excluded get an equal-length |
269 | 269 |
# list of noop tasks, to make sure that they continue running in lock-step |
270 | 270 |
try: |
271 |
- new_tasks = self._load_included_file(included_file) |
|
271 |
+ new_blocks = self._load_included_file(included_file) |
|
272 | 272 |
except AnsibleError, e: |
273 | 273 |
for host in included_file._hosts: |
274 | 274 |
iterator.mark_host_failed(host) |
275 | 275 |
# FIXME: callback here? |
276 | 276 |
print(e) |
277 | 277 |
|
278 |
- noop_tasks = [noop_task for t in new_tasks] |
|
279 |
- for host in hosts_left: |
|
280 |
- if host in included_file._hosts: |
|
281 |
- all_tasks[host].extend(new_tasks) |
|
282 |
- else: |
|
283 |
- all_tasks[host].extend(noop_tasks) |
|
278 |
+ for new_block in new_blocks: |
|
279 |
+ noop_block = Block(parent_block=task._block) |
|
280 |
+ noop_block.block = [noop_task for t in new_block.block] |
|
281 |
+ noop_block.always = [noop_task for t in new_block.always] |
|
282 |
+ noop_block.rescue = [noop_task for t in new_block.rescue] |
|
283 |
+ for host in hosts_left: |
|
284 |
+ if host in included_file._hosts: |
|
285 |
+ all_blocks[host].append(new_block) |
|
286 |
+ else: |
|
287 |
+ all_blocks[host].append(noop_block) |
|
284 | 288 |
|
285 | 289 |
for host in hosts_left: |
286 |
- iterator.add_tasks(host, all_tasks[host]) |
|
290 |
+ iterator.add_tasks(host, all_blocks[host]) |
|
287 | 291 |
|
288 | 292 |
debug("results queue empty") |
289 | 293 |
except (IOError, EOFError), e: |
... | ... |
@@ -68,6 +68,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, |
68 | 68 |
default=None) |
69 | 69 |
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", |
70 | 70 |
help="set additional variables as key=value or YAML/JSON", default=[]) |
71 |
+ parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', |
|
72 |
+ help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) |
|
71 | 73 |
|
72 | 74 |
if subset_opts: |
73 | 75 |
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', |
... | ... |
@@ -73,3 +73,20 @@ def stringc(text, color): |
73 | 73 |
|
74 | 74 |
# --- end "pretty" |
75 | 75 |
|
76 |
+def colorize(lead, num, color): |
|
77 |
+ """ Print 'lead' = 'num' in 'color' """ |
|
78 |
+ if num != 0 and ANSIBLE_COLOR and color is not None: |
|
79 |
+ return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color)) |
|
80 |
+ else: |
|
81 |
+ return "%s=%-4s" % (lead, str(num)) |
|
82 |
+ |
|
83 |
+def hostcolor(host, stats, color=True): |
|
84 |
+ if ANSIBLE_COLOR and color: |
|
85 |
+ if stats['failures'] != 0 or stats['unreachable'] != 0: |
|
86 |
+ return "%-37s" % stringc(host, 'red') |
|
87 |
+ elif stats['changed'] != 0: |
|
88 |
+ return "%-37s" % stringc(host, 'yellow') |
|
89 |
+ else: |
|
90 |
+ return "%-37s" % stringc(host, 'green') |
|
91 |
+ return "%-26s" % host |
|
92 |
+ |
... | ... |
@@ -112,3 +112,15 @@ class Display: |
112 | 112 |
if C.SYSTEM_WARNINGS: |
113 | 113 |
self._warning(msg) |
114 | 114 |
|
115 |
+ def banner(self, msg, color=None): |
|
116 |
+ ''' |
|
117 |
+ Prints a header-looking line with stars taking up to 80 columns |
|
118 |
+ of width (3 columns, minimum) |
|
119 |
+ ''' |
|
120 |
+ msg = msg.strip() |
|
121 |
+ star_len = (80 - len(msg)) |
|
122 |
+ if star_len < 0: |
|
123 |
+ star_len = 3 |
|
124 |
+ stars = "*" * star_len |
|
125 |
+ self.display("\n%s %s" % (msg, stars), color=color) |
|
126 |
+ |
... | ... |
@@ -162,10 +162,9 @@ class VariableManager: |
162 | 162 |
all_vars = self._combine_vars(all_vars, self._group_vars_files['all']) |
163 | 163 |
|
164 | 164 |
for group in host.get_groups(): |
165 |
- group_name = group.get_name() |
|
166 | 165 |
all_vars = self._combine_vars(all_vars, group.get_vars()) |
167 |
- if group_name in self._group_vars_files and group_name != 'all': |
|
168 |
- all_vars = self._combine_vars(all_vars, self._group_vars_files[group_name]) |
|
166 |
+ if group.name in self._group_vars_files and group.name != 'all': |
|
167 |
+ all_vars = self._combine_vars(all_vars, self._group_vars_files[group.name]) |
|
169 | 168 |
|
170 | 169 |
host_name = host.get_name() |
171 | 170 |
if host_name in self._host_vars_files: |
... | ... |
@@ -228,7 +227,7 @@ class VariableManager: |
228 | 228 |
''' |
229 | 229 |
|
230 | 230 |
(name, ext) = os.path.splitext(os.path.basename(path)) |
231 |
- if ext not in ('yml', 'yaml'): |
|
231 |
+ if ext not in ('.yml', '.yaml'): |
|
232 | 232 |
return os.path.basename(path) |
233 | 233 |
else: |
234 | 234 |
return name |
... | ... |
@@ -239,11 +238,11 @@ class VariableManager: |
239 | 239 |
basename of the file without the extension |
240 | 240 |
''' |
241 | 241 |
|
242 |
- if os.path.isdir(path): |
|
242 |
+ if loader.is_directory(path): |
|
243 | 243 |
data = dict() |
244 | 244 |
|
245 | 245 |
try: |
246 |
- names = os.listdir(path) |
|
246 |
+ names = loader.list_directory(path) |
|
247 | 247 |
except os.error, err: |
248 | 248 |
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror)) |
249 | 249 |
|
... | ... |
@@ -270,7 +269,7 @@ class VariableManager: |
270 | 270 |
the extension, for matching against a given inventory host name |
271 | 271 |
''' |
272 | 272 |
|
273 |
- if os.path.exists(path): |
|
273 |
+ if loader.path_exists(path): |
|
274 | 274 |
(name, data) = self._load_inventory_file(path, loader) |
275 | 275 |
self._host_vars_files[name] = data |
276 | 276 |
|
... | ... |
@@ -281,7 +280,7 @@ class VariableManager: |
281 | 281 |
the extension, for matching against a given inventory host name |
282 | 282 |
''' |
283 | 283 |
|
284 |
- if os.path.exists(path): |
|
284 |
+ if loader.path_exists(path): |
|
285 | 285 |
(name, data) = self._load_inventory_file(path, loader) |
286 | 286 |
self._group_vars_files[name] = data |
287 | 287 |
|
... | ... |
@@ -1,4 +1,4 @@ |
1 | 1 |
- debug: msg="this is the include, a=={{a}}" |
2 |
-- debug: msg="this is the second debug in the include" |
|
3 |
-- debug: msg="this is the third debug in the include, and a is still {{a}}" |
|
2 |
+#- debug: msg="this is the second debug in the include" |
|
3 |
+#- debug: msg="this is the third debug in the include, and a is still {{a}}" |
|
4 | 4 |
|
... | ... |
@@ -47,6 +47,9 @@ class DictDataLoader(DataLoader): |
47 | 47 |
def is_directory(self, path): |
48 | 48 |
return path in self._known_directories |
49 | 49 |
|
50 |
+ def list_directory(self, path): |
|
51 |
+ return [x for x in self._known_directories] |
|
52 |
+ |
|
50 | 53 |
def _add_known_directory(self, directory): |
51 | 54 |
if directory not in self._known_directories: |
52 | 55 |
self._known_directories.append(directory) |
... | ... |
@@ -75,9 +75,3 @@ class TestBlock(unittest.TestCase): |
75 | 75 |
self.assertEqual(len(b.block), 1) |
76 | 76 |
assert isinstance(b.block[0], Task) |
77 | 77 |
|
78 |
- def test_block_compile(self): |
|
79 |
- ds = [dict(action='foo')] |
|
80 |
- b = Block.load(ds) |
|
81 |
- tasks = b.compile() |
|
82 |
- self.assertEqual(len(tasks), 1) |
|
83 |
- self.assertIsInstance(tasks[0], Task) |
... | ... |
@@ -24,6 +24,7 @@ from ansible.compat.tests.mock import patch, MagicMock |
24 | 24 |
|
25 | 25 |
from ansible.errors import AnsibleError, AnsibleParserError |
26 | 26 |
from ansible.playbook import Playbook |
27 |
+from ansible.vars import VariableManager |
|
27 | 28 |
|
28 | 29 |
from test.mock.loader import DictDataLoader |
29 | 30 |
|
... | ... |
@@ -36,7 +37,8 @@ class TestPlaybook(unittest.TestCase): |
36 | 36 |
pass |
37 | 37 |
|
38 | 38 |
def test_empty_playbook(self): |
39 |
- p = Playbook() |
|
39 |
+ fake_loader = DictDataLoader({}) |
|
40 |
+ p = Playbook(loader=fake_loader) |
|
40 | 41 |
|
41 | 42 |
def test_basic_playbook(self): |
42 | 43 |
fake_loader = DictDataLoader({ |
... | ... |
@@ -61,6 +63,7 @@ class TestPlaybook(unittest.TestCase): |
61 | 61 |
|
62 | 62 |
""", |
63 | 63 |
}) |
64 |
- self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", fake_loader) |
|
65 |
- self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", fake_loader) |
|
64 |
+ vm = VariableManager() |
|
65 |
+ self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader) |
|
66 |
+ self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader) |
|
66 | 67 |
|
67 | 68 |
deleted file mode 100644 |
... | ... |
@@ -1,64 +0,0 @@ |
1 |
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> |
|
2 |
-# |
|
3 |
-# This file is part of Ansible |
|
4 |
-# |
|
5 |
-# Ansible is free software: you can redistribute it and/or modify |
|
6 |
-# it under the terms of the GNU General Public License as published by |
|
7 |
-# the Free Software Foundation, either version 3 of the License, or |
|
8 |
-# (at your option) any later version. |
|
9 |
-# |
|
10 |
-# Ansible is distributed in the hope that it will be useful, |
|
11 |
-# but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
13 |
-# GNU General Public License for more details. |
|
14 |
-# |
|
15 |
-# You should have received a copy of the GNU General Public License |
|
16 |
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>. |
|
17 |
- |
|
18 |
-# Make coding more python3-ish |
|
19 |
-from __future__ import (absolute_import, division, print_function) |
|
20 |
-__metaclass__ = type |
|
21 |
- |
|
22 |
-from ansible.compat.tests import unittest |
|
23 |
-from ansible.errors import AnsibleParserError |
|
24 |
-from ansible.parsing.yaml.objects import AnsibleMapping |
|
25 |
-from ansible.playbook.task_include import TaskInclude |
|
26 |
- |
|
27 |
-from test.mock.loader import DictDataLoader |
|
28 |
- |
|
29 |
-class TestTaskInclude(unittest.TestCase): |
|
30 |
- |
|
31 |
- def setUp(self): |
|
32 |
- self._fake_loader = DictDataLoader({ |
|
33 |
- "foo.yml": """ |
|
34 |
- - shell: echo "hello world" |
|
35 |
- """ |
|
36 |
- }) |
|
37 |
- |
|
38 |
- pass |
|
39 |
- |
|
40 |
- def tearDown(self): |
|
41 |
- pass |
|
42 |
- |
|
43 |
- def test_empty_task_include(self): |
|
44 |
- ti = TaskInclude() |
|
45 |
- |
|
46 |
- def test_basic_task_include(self): |
|
47 |
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=self._fake_loader) |
|
48 |
- tasks = ti.compile() |
|
49 |
- |
|
50 |
- def test_task_include_with_loop(self): |
|
51 |
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', with_items=['a', 'b', 'c']), loader=self._fake_loader) |
|
52 |
- |
|
53 |
- def test_task_include_with_conditional(self): |
|
54 |
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', when="1 == 1"), loader=self._fake_loader) |
|
55 |
- |
|
56 |
- def test_task_include_with_tags(self): |
|
57 |
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags="foo"), loader=self._fake_loader) |
|
58 |
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags=["foo", "bar"]), loader=self._fake_loader) |
|
59 |
- |
|
60 |
- def test_task_include_errors(self): |
|
61 |
- self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include=''), loader=self._fake_loader) |
|
62 |
- self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml', vars="1"), loader=self._fake_loader) |
|
63 |
- self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml a=1', vars=dict(b=2)), loader=self._fake_loader) |
|
64 |
- |
... | ... |
@@ -35,8 +35,10 @@ class TestVariableManager(unittest.TestCase): |
35 | 35 |
pass |
36 | 36 |
|
37 | 37 |
def test_basic_manager(self): |
38 |
+ fake_loader = DictDataLoader({}) |
|
39 |
+ |
|
38 | 40 |
v = VariableManager() |
39 |
- self.assertEqual(v.get_vars(), dict()) |
|
41 |
+ self.assertEqual(v.get_vars(loader=fake_loader), dict()) |
|
40 | 42 |
|
41 | 43 |
self.assertEqual( |
42 | 44 |
v._merge_dicts( |
... | ... |
@@ -52,23 +54,26 @@ class TestVariableManager(unittest.TestCase): |
52 | 52 |
) |
53 | 53 |
|
54 | 54 |
|
55 |
- def test_manager_extra_vars(self): |
|
55 |
+ def test_variable_manager_extra_vars(self): |
|
56 |
+ fake_loader = DictDataLoader({}) |
|
57 |
+ |
|
56 | 58 |
extra_vars = dict(a=1, b=2, c=3) |
57 | 59 |
v = VariableManager() |
58 | 60 |
v.set_extra_vars(extra_vars) |
59 | 61 |
|
60 |
- self.assertEqual(v.get_vars(), extra_vars) |
|
61 |
- self.assertIsNot(v.extra_vars, extra_vars) |
|
62 |
+ for (key, val) in extra_vars.iteritems(): |
|
63 |
+ self.assertEqual(v.get_vars(loader=fake_loader).get(key), val) |
|
64 |
+ self.assertIsNot(v.extra_vars.get(key), val) |
|
62 | 65 |
|
63 |
- def test_manager_host_vars_file(self): |
|
66 |
+ def test_variable_manager_host_vars_file(self): |
|
64 | 67 |
fake_loader = DictDataLoader({ |
65 | 68 |
"host_vars/hostname1.yml": """ |
66 | 69 |
foo: bar |
67 | 70 |
""" |
68 | 71 |
}) |
69 | 72 |
|
70 |
- v = VariableManager(loader=fake_loader) |
|
71 |
- v.add_host_vars_file("host_vars/hostname1.yml") |
|
73 |
+ v = VariableManager() |
|
74 |
+ v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader) |
|
72 | 75 |
self.assertIn("hostname1", v._host_vars_files) |
73 | 76 |
self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar")) |
74 | 77 |
|
... | ... |
@@ -77,37 +82,43 @@ class TestVariableManager(unittest.TestCase): |
77 | 77 |
mock_host.get_vars.return_value = dict() |
78 | 78 |
mock_host.get_groups.return_value = () |
79 | 79 |
|
80 |
- self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar")) |
|
80 |
+ self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar") |
|
81 | 81 |
|
82 |
- def test_manager_group_vars_file(self): |
|
82 |
+ def test_variable_manager_group_vars_file(self): |
|
83 | 83 |
fake_loader = DictDataLoader({ |
84 | 84 |
"group_vars/somegroup.yml": """ |
85 | 85 |
foo: bar |
86 | 86 |
""" |
87 | 87 |
}) |
88 | 88 |
|
89 |
- v = VariableManager(loader=fake_loader) |
|
90 |
- v.add_group_vars_file("group_vars/somegroup.yml") |
|
89 |
+ v = VariableManager() |
|
90 |
+ v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader) |
|
91 | 91 |
self.assertIn("somegroup", v._group_vars_files) |
92 | 92 |
self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar")) |
93 | 93 |
|
94 |
+ mock_group = MagicMock() |
|
95 |
+ mock_group.name.return_value = "somegroup" |
|
96 |
+ mock_group.get_ancestors.return_value = () |
|
97 |
+ |
|
94 | 98 |
mock_host = MagicMock() |
95 | 99 |
mock_host.get_name.return_value = "hostname1" |
96 | 100 |
mock_host.get_vars.return_value = dict() |
97 |
- mock_host.get_groups.return_value = ["somegroup"] |
|
101 |
+ mock_host.get_groups.return_value = (mock_group) |
|
102 |
+ |
|
103 |
+ self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar") |
|
98 | 104 |
|
99 |
- self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar")) |
|
105 |
+ def test_variable_manager_play_vars(self): |
|
106 |
+ fake_loader = DictDataLoader({}) |
|
100 | 107 |
|
101 |
- def test_manager_play_vars(self): |
|
102 | 108 |
mock_play = MagicMock() |
103 | 109 |
mock_play.get_vars.return_value = dict(foo="bar") |
104 | 110 |
mock_play.get_roles.return_value = [] |
105 | 111 |
mock_play.get_vars_files.return_value = [] |
106 | 112 |
|
107 | 113 |
v = VariableManager() |
108 |
- self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar")) |
|
114 |
+ self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar") |
|
109 | 115 |
|
110 |
- def test_manager_play_vars_files(self): |
|
116 |
+ def test_variable_manager_play_vars_files(self): |
|
111 | 117 |
fake_loader = DictDataLoader({ |
112 | 118 |
"/path/to/somefile.yml": """ |
113 | 119 |
foo: bar |
... | ... |
@@ -119,13 +130,15 @@ class TestVariableManager(unittest.TestCase): |
119 | 119 |
mock_play.get_roles.return_value = [] |
120 | 120 |
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml'] |
121 | 121 |
|
122 |
- v = VariableManager(loader=fake_loader) |
|
123 |
- self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar")) |
|
122 |
+ v = VariableManager() |
|
123 |
+ self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar") |
|
124 |
+ |
|
125 |
+ def test_variable_manager_task_vars(self): |
|
126 |
+ fake_loader = DictDataLoader({}) |
|
124 | 127 |
|
125 |
- def test_manager_task_vars(self): |
|
126 | 128 |
mock_task = MagicMock() |
127 | 129 |
mock_task.get_vars.return_value = dict(foo="bar") |
128 | 130 |
|
129 | 131 |
v = VariableManager() |
130 |
- self.assertEqual(v.get_vars(task=mock_task), dict(foo="bar")) |
|
132 |
+ self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task).get("foo"), "bar") |
|
131 | 133 |
|