
Rename s3 module to aws_s3. Update CHANGELOG.md. (#28398)

Use aws_s3 in examples.

Sloane Hertel authored on 2017/08/23 00:17:20
Showing 4 changed files
@@ -55,6 +55,7 @@ Ansible Changes By Release
 * panos_service (use M(panos_object) instead)
 * panos_security_policy: In 2.4 use M(panos_security_rule) instead.
 * panos_nat_policy, In 2.4 use M(panos_nat_rule) instead.
+* s3 (removed in 2.7), replaced by aws_s3

 #### Removed Deprecated Modules:
 * eos_template (use eos_config instead)
@@ -170,6 +171,7 @@ Ansible Changes By Release
 #### Cloud
 - amazon
   * aws_api_gateway
+  * aws_s3
   * aws_s3_bucket_facts
   * data_pipeline
   * dynamodb_ttl
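For playbooks the rename is a drop-in swap: until the 2.7 removal recorded above, tasks that still call s3 keep working (the symlink added below routes them to the new module) but emit a deprecation warning at runtime. A hypothetical before/after, mirroring the module's own examples:

- name: Upload a file (deprecated module name, works until 2.7)
  s3:
    bucket: mybucket
    object: /my/desired/key.txt
    src: /usr/local/myfile.txt
    mode: put

- name: Upload a file (renamed module)
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    src: /usr/local/myfile.txt
    mode: put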
new file mode 120000
@@ -0,0 +1 @@
+aws_s3.py
\ No newline at end of file
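In git, mode 120000 marks a symbolic link: the one-line blob above is the link target, so checking out this commit produces a symlink pointing at aws_s3.py, which is how tasks using the old module name keep resolving. A minimal sketch of the equivalent filesystem operation; the directory and link name are assumptions, since the diff viewer elides file paths:

import os

# Assumed paths: Ansible's AWS module directory and the legacy entry point.
module_dir = 'lib/ansible/modules/cloud/amazon'
link_path = os.path.join(module_dir, 's3.py')

# git materializes a mode-120000 entry as a relative symlink on checkout.
if not os.path.islink(link_path):
    os.symlink('aws_s3.py', link_path)
assert os.readlink(link_path) == 'aws_s3.py'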
new file mode 100644
@@ -0,0 +1,792 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['stableinterface'],
+                    'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_s3
+short_description: manage objects in S3.
+description:
+    - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
+      deleting both objects and buckets, retrieving objects as files or strings and generating download links.
+      This module has a dependency on boto3 and botocore.
+version_added: "1.1"
+options:
+  aws_access_key:
+    description:
+      - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+    aliases: [ 'ec2_access_key', 'access_key' ]
+  aws_secret_key:
+    description:
+      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+    aliases: ['ec2_secret_key', 'secret_key']
+  bucket:
+    description:
+      - Bucket name.
+    required: true
+  dest:
+    description:
+      - The destination file path when downloading an object/key with a GET operation.
+    version_added: "1.3"
+  encrypt:
+    description:
+      - When set for PUT mode, asks for server-side encryption.
+    default: True
+    version_added: "2.0"
+  expiration:
+    description:
+      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
+    default: 600
+  headers:
+    description:
+      - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+    version_added: "2.0"
+  marker:
+    description:
+      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
+    version_added: "2.0"
+  max_keys:
+    description:
+      - Max number of results to return in list mode. Set this if you want to retrieve fewer than the default 1000 keys.
+    default: 1000
+    version_added: "2.0"
+  metadata:
+    description:
+      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+    version_added: "1.6"
+  mode:
+    description:
+      - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
+        getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
+        and delobj (delete object, Ansible 2.0+).
+    required: true
+    choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
+  object:
+    description:
+      - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
+  permission:
+    description:
+      - This option lets the user set the canned permissions on the object/bucket that are created.
+        The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read' for a bucket or
+        'private', 'public-read', 'public-read-write', 'aws-exec-read', 'authenticated-read', 'bucket-owner-read',
+        'bucket-owner-full-control' for an object. Multiple permissions can be specified as a list.
+    default: private
+    version_added: "2.0"
+  prefix:
+    description:
+      - Limits the response to keys that begin with the specified prefix for list mode.
+    default: ""
+    version_added: "2.0"
+  version:
+    description:
+      - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
+    version_added: "2.0"
+  overwrite:
+    description:
+      - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+        Boolean or one of [always, never, different]; true is equal to 'always' and false is equal to 'never' (new in 2.0).
+    default: 'always'
+    version_added: "1.2"
+  region:
+    description:
+     - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables
+       are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the
+       region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
+    version_added: "1.8"
+  retries:
+    description:
+     - On recoverable failure, how many times to retry before actually failing.
+    default: 0
+    version_added: "2.0"
+  s3_url:
+    description:
+      - S3 URL endpoint for usage with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS.
+    aliases: [ S3_URL ]
+  rgw:
+    description:
+      - Enable Ceph RGW S3 support. This option requires an explicit url via s3_url.
+    default: false
+    version_added: "2.2"
+  src:
+    description:
+      - The source file path when performing a PUT operation.
+    version_added: "1.3"
+  ignore_nonexistent_bucket:
+    description:
+      - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
+        GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
+        ignore_nonexistent_bucket: True."
+    version_added: "2.3"
+
+requirements: [ "boto3", "botocore" ]
+author:
+    - "Lester Wade (@lwade)"
+    - "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+- name: Simple PUT operation
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    src: /usr/local/myfile.txt
+    mode: put
+
+- name: Simple PUT operation in Ceph RGW S3
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    src: /usr/local/myfile.txt
+    mode: put
+    rgw: true
+    s3_url: "http://localhost:8000"
+
+- name: Simple GET operation
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    dest: /usr/local/myfile.txt
+    mode: get
+
+- name: Get a specific version of an object
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    version: 48c9ee5131af7a716edc22df9772aa6f
+    dest: /usr/local/myfile.txt
+    mode: get
+
+- name: PUT/upload with metadata
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    src: /usr/local/myfile.txt
+    mode: put
+    metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
+
+- name: PUT/upload with custom headers
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    src: /usr/local/myfile.txt
+    mode: put
+    headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
+
+- name: List keys simple
+  aws_s3:
+    bucket: mybucket
+    mode: list
+
+- name: List keys all options
+  aws_s3:
+    bucket: mybucket
+    mode: list
+    prefix: /my/desired/
+    marker: /my/desired/0023.txt
+    max_keys: 472
+
+- name: Create an empty bucket
+  aws_s3:
+    bucket: mybucket
+    mode: create
+    permission: public-read
+
+- name: Create a bucket with key as directory, in the EU region
+  aws_s3:
+    bucket: mybucket
+    object: /my/directory/path
+    mode: create
+    region: eu-west-1
+
+- name: Delete a bucket and all contents
+  aws_s3:
+    bucket: mybucket
+    mode: delete
+
+- name: GET an object but don't download if the file checksums match. New in 2.0
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    dest: /usr/local/myfile.txt
+    mode: get
+    overwrite: different
+
+- name: Delete an object from a bucket
+  aws_s3:
+    bucket: mybucket
+    object: /my/desired/key.txt
+    mode: delobj
+'''
+
+import os
+import traceback
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule, to_text, to_native
+from ansible.module_utils.ec2 import ec2_argument_spec, camel_dict_to_snake_dict, get_aws_connection_info, boto3_conn, HAS_BOTO3
+
+try:
+    import botocore
+except ImportError:
+    pass  # will be detected by imported HAS_BOTO3
+
+
+def key_check(module, s3, bucket, obj, version=None, validate=True):
+    exists = True
+    try:
+        if version:
+            s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+        else:
+            s3.head_object(Bucket=bucket, Key=obj)
+    except botocore.exceptions.ClientError as e:
+        # if a client error is thrown, check if it's a 404 error
+        # if it's a 404 error, then the object does not exist
+        error_code = int(e.response['Error']['Code'])
+        if error_code == 404:
+            exists = False
+        elif error_code == 403 and validate is False:
+            pass
+        else:
+            module.fail_json(msg="Failed while looking up object (during key check) %s." % obj,
+                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    return exists
+
+
+def keysum(module, s3, bucket, obj, version=None):
+    if version:
+        key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+    else:
+        key_check = s3.head_object(Bucket=bucket, Key=obj)
+    if not key_check:
+        return None
+    md5_remote = key_check['ETag'][1:-1]
+    if '-' in md5_remote:  # Check for multipart, etag is not md5
+        return None
+    return md5_remote
+
+
+def bucket_check(module, s3, bucket, validate=True):
+    exists = True
+    try:
+        s3.head_bucket(Bucket=bucket)
+    except botocore.exceptions.ClientError as e:
+        # If a client error is thrown, then check that it was a 404 error.
+        # If it was a 404 error, then the bucket does not exist.
+        error_code = int(e.response['Error']['Code'])
+        if error_code == 404:
+            exists = False
+        elif error_code == 403 and validate is False:
+            pass
+        else:
+            module.fail_json(msg="Failed while looking up bucket (during bucket_check) %s." % bucket,
+                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+    except botocore.exceptions.EndpointConnectionError as e:
+        # EndpointConnectionError carries no HTTP response, so there is nothing to unpack here
+        module.fail_json(msg="Invalid endpoint provided: %s" % to_text(e), exception=traceback.format_exc())
+    return exists
+
+
+def create_bucket(module, s3, bucket, location=None):
+    if module.check_mode:
+        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+    configuration = {}
+    if location not in ('us-east-1', None):
+        configuration['LocationConstraint'] = location
+    try:
+        if len(configuration) > 0:
+            s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
+        else:
+            s3.create_bucket(Bucket=bucket)
+        for acl in module.params.get('permission'):
+            s3.put_bucket_acl(ACL=acl, Bucket=bucket)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).",
+                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+    if bucket:
+        return True
+
+
+def paginated_list(s3, **pagination_params):
+    pg = s3.get_paginator('list_objects_v2')
+    for page in pg.paginate(**pagination_params):
+        yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def list_keys(module, s3, bucket, prefix, marker, max_keys):
+    pagination_params = {'Bucket': bucket}
+    for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
+        pagination_params[param_name] = param_value
+    try:
+        keys = sum(paginated_list(s3, **pagination_params), [])
+        module.exit_json(msg="LIST operation complete", s3_keys=keys)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed while listing the keys in the bucket {0}".format(bucket),
+                         exception=traceback.format_exc(),
+                         **camel_dict_to_snake_dict(e.response))
+
+
+def delete_bucket(module, s3, bucket):
+    if module.check_mode:
+        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+    try:
+        exists = bucket_check(module, s3, bucket)
+        if exists is False:
+            return False
+        # if there are contents then we need to delete them before we can delete the bucket
+        for keys in paginated_list(s3, Bucket=bucket):
+            formatted_keys = [{'Key': key} for key in keys]
+            s3.delete_objects(Bucket=bucket, Delete={'Objects': formatted_keys})
+        s3.delete_bucket(Bucket=bucket)
+        return True
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed while deleting bucket %s." % bucket, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def delete_key(module, s3, bucket, obj):
+    if module.check_mode:
+        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+    try:
+        s3.delete_object(Bucket=bucket, Key=obj)
+        module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed while trying to delete %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def create_dirkey(module, s3, bucket, obj):
+    if module.check_mode:
+        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+    try:
+        # s3 is a boto3 client here, so create the empty "directory" key directly
+        # (bucket.new_key/set_contents_from_string are boto2 APIs and would fail)
+        s3.put_object(Bucket=bucket, Key=obj, Body=b'')
+        for acl in module.params.get('permission'):
+            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+        module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed while creating object %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def path_check(path):
+    return os.path.exists(path)
+
+
+def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
+    if module.check_mode:
+        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+    try:
+        if metadata:
+            extra = {'Metadata': dict(metadata)}
+            s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
+        else:
+            s3.upload_file(Filename=src, Bucket=bucket, Key=obj)
+        for acl in module.params.get('permission'):
+            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+        url = s3.generate_presigned_url(ClientMethod='put_object',
+                                        Params={'Bucket': bucket, 'Key': obj},
+                                        ExpiresIn=expiry)
+        module.exit_json(msg="PUT operation complete", url=url, changed=True)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Unable to complete PUT operation.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+    if module.check_mode:
+        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+    # retries is the number of re-attempts; the loop runs retries + 1 times so
+    # there is always one initial attempt.
+    try:
+        if version:
+            key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
+        else:
+            key = s3.get_object(Bucket=bucket, Key=obj)
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] != "404":
+            module.fail_json(msg="Failed while looking up the key %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+    for x in range(0, retries + 1):
+        try:
+            s3.download_file(bucket, obj, dest)
+            module.exit_json(msg="GET operation complete", changed=True)
+        except botocore.exceptions.ClientError as e:
+            # actually fail on last pass through the loop.
+            if x >= retries:
+                module.fail_json(msg="Failed while downloading %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+            # otherwise, try again, this may be a transient timeout.
+        except SSLError as e:  # will ClientError catch SSLError?
+            # actually fail on last pass through the loop.
+            if x >= retries:
+                module.fail_json(msg="s3 download failed: %s." % e, exception=traceback.format_exc())
+            # otherwise, try again, this may be a transient timeout.
+
+
+def download_s3str(module, s3, bucket, obj, version=None, validate=True):
+    if module.check_mode:
+        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+    try:
+        if version:
+            contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
+        else:
+            contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
+        module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed while getting contents of object %s as a string." % obj,
+                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def get_download_url(module, s3, bucket, obj, expiry, changed=True):
+    try:
+        url = s3.generate_presigned_url(ClientMethod='get_object',
+                                        Params={'Bucket': bucket, 'Key': obj},
+                                        ExpiresIn=expiry)
+        module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg="Failed while getting download url.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+
+def is_fakes3(s3_url):
+    """ Return True if s3_url has scheme fakes3:// """
+    if s3_url is not None:
+        return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+    else:
+        return False
+
+
+def is_walrus(s3_url):
+    """ Return True if it's Walrus endpoint, not S3
+
+    We assume anything other than *.amazonaws.com is Walrus"""
+    if s3_url is not None:
+        o = urlparse(s3_url)
+        return not o.netloc.endswith('amazonaws.com')
+    else:
+        return False
+
+
+def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url):
+    if s3_url and rgw:  # TODO - test this
+        rgw = urlparse(s3_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+    elif is_fakes3(s3_url):
+        # drop only the kwargs a fakes3 endpoint cannot accept (the original
+        # `['is_secure', 'host', 'port'] and list(...)` deleted every kwarg)
+        for kw in ('is_secure', 'host', 'port'):
+            aws_connect_kwargs.pop(kw, None)
+        fakes3 = urlparse(s3_url)
+        if fakes3.scheme == 'fakes3s':
+            protocol = "https"
+        else:
+            protocol = "http"
+        # route the fakes3 endpoint through boto3_conn like the other branches
+        params = dict(module=module, conn_type='client', resource='s3',
+                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(fakes3.port)),
+                      use_ssl=fakes3.scheme == 'fakes3s', region=None, **aws_connect_kwargs)
+    elif is_walrus(s3_url):
+        walrus = urlparse(s3_url).hostname
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=walrus, **aws_connect_kwargs)
+    else:
+        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+    return boto3_conn(**params)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            bucket=dict(required=True),
+            dest=dict(default=None),
+            encrypt=dict(default=True, type='bool'),
+            expiry=dict(default=600, type='int', aliases=['expiration']),
+            headers=dict(type='dict'),
+            marker=dict(default=""),
+            max_keys=dict(default=1000, type='int'),
+            metadata=dict(type='dict'),
+            mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
+            object=dict(),
+            permission=dict(type='list', default=['private']),
+            version=dict(default=None),
+            overwrite=dict(aliases=['force'], default='always'),
+            prefix=dict(default=""),
+            retries=dict(aliases=['retry'], type='int', default=0),
+            s3_url=dict(aliases=['S3_URL']),
+            rgw=dict(default='no', type='bool'),
+            src=dict(),
+            ignore_nonexistent_bucket=dict(default=False, type='bool')
+        ),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    if module._name == 's3':
+        module.deprecate("The 's3' module is being renamed 'aws_s3'", version=2.7)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 and botocore are required for this module')
+
+    bucket = module.params.get('bucket')
+    encrypt = module.params.get('encrypt')
+    expiry = module.params.get('expiry')
+    dest = module.params.get('dest', '')
+    headers = module.params.get('headers')
+    marker = module.params.get('marker')
+    max_keys = module.params.get('max_keys')
+    metadata = module.params.get('metadata')
+    mode = module.params.get('mode')
+    obj = module.params.get('object')
+    version = module.params.get('version')
+    overwrite = module.params.get('overwrite')
+    prefix = module.params.get('prefix')
+    retries = module.params.get('retries')
+    s3_url = module.params.get('s3_url')
+    rgw = module.params.get('rgw')
+    src = module.params.get('src')
+    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
+
+    if dest:
+        dest = os.path.expanduser(dest)
+
+    object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
+    bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
+
+    if overwrite not in ['always', 'never', 'different']:
+        if module.boolean(overwrite):
+            overwrite = 'always'
+        else:
+            overwrite = 'never'
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+    if region in ('us-east-1', '', None):
+        # default to US Standard region
+        location = 'us-east-1'
+    else:
+        # Boto uses symbolic names for locations but region strings will
+        # actually work fine for everything except us-east-1 (US Standard)
+        location = region
+
+    if module.params.get('object'):
+        obj = module.params['object']
+
+    # Bucket deletion does not require obj.  Prevents ambiguity with delobj.
+    if obj and mode == "delete":
+        module.fail_json(msg='Parameter obj cannot be used with mode=delete')
+
+    # allow eucarc environment variables to be used if ansible vars aren't set
+    if not s3_url and 'S3_URL' in os.environ:
+        s3_url = os.environ['S3_URL']
+
+    # rgw requires an explicit url
+    if rgw and not s3_url:
+        module.fail_json(msg='rgw flavour requires s3_url')
+
+    # Look at s3_url and tweak connection settings
+    # if connecting to RGW, Walrus or fakes3
+    for key in ['validate_certs', 'security_token', 'profile_name']:
+        aws_connect_kwargs.pop(key, None)
+    try:
+        s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
+    except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
+        # these exceptions carry no HTTP response, so there is nothing to unpack here
+        module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
+                         exception=traceback.format_exc())
+
+    validate = not ignore_nonexistent_bucket
+
+    # separate types of ACLs
+    bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
+    object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
+    error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
+    if error_acl:
+        module.fail_json(msg='Unknown permission specified: %s' % error_acl)
+
+    # First, we check to see if the bucket exists, we get "bucket" returned.
+    bucketrtn = bucket_check(module, s3, bucket, validate=validate)
+
+    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
+        module.fail_json(msg="Source bucket cannot be found.")
+
+    # If our mode is a GET operation (download), go through the procedure as appropriate ...
+    if mode == 'get':
+        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
+        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+        if keyrtn is False:
+            module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+
+        # If the destination path doesn't exist or overwrite is True, no need to do the md5sum etag check, so just download.
+        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
+        if path_check(dest):
+            # Determine if the remote and local object are identical
+            if keysum(module, s3, bucket, obj, version=version) == module.md5(dest):
+                sum_matches = True
+                if overwrite == 'always':
+                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+                else:
+                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
+            else:
+                sum_matches = False
+
+                if overwrite in ('always', 'different'):
+                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+                else:
+                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
+        else:
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+
+    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
+    if mode == 'put':
+
+        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+        # these were separated into the variables bucket_acl and object_acl above
+
+        # Lets check the src path.
+        if not path_check(src):
+            module.fail_json(msg="Local object for PUT does not exist")
+
+        # Lets check to see if bucket exists to get ground truth.
+        if bucketrtn:
+            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+
+        # Lets check key state. Does it exist and if it does, compute the etag md5sum.
+        if bucketrtn and keyrtn:
+            # Compare the local and remote object
+            if module.md5(src) == keysum(module, s3, bucket, obj):
+                sum_matches = True
+                if overwrite == 'always':
+                    # only use valid object acls for the upload_s3file function
+                    module.params['permission'] = object_acl
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
+                else:
+                    get_download_url(module, s3, bucket, obj, expiry, changed=False)
+            else:
+                sum_matches = False
+                if overwrite in ('always', 'different'):
+                    # only use valid object acls for the upload_s3file function
+                    module.params['permission'] = object_acl
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
+                else:
+                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
+
+        # If neither exist (based on bucket existence), we can create both.
+        if not bucketrtn:
+            # only use valid bucket acls for create_bucket function
+            module.params['permission'] = bucket_acl
+            create_bucket(module, s3, bucket, location)
+            # only use valid object acls for the upload_s3file function
+            module.params['permission'] = object_acl
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
+
+        # If bucket exists but key doesn't, just upload.
+        if bucketrtn and not keyrtn:
+            # only use valid object acls for the upload_s3file function
+            module.params['permission'] = object_acl
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
+
+    # Delete an object from a bucket, not the entire bucket
+    if mode == 'delobj':
+        if obj is None:
+            module.fail_json(msg="object parameter is required")
+        if bucket:
+            deletertn = delete_key(module, s3, bucket, obj)
+            if deletertn is True:
+                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
+        else:
+            module.fail_json(msg="Bucket parameter is required.")
+
+    # Delete an entire bucket, including all objects in the bucket
+    if mode == 'delete':
+        if bucket:
+            deletertn = delete_bucket(module, s3, bucket)
+            if deletertn is True:
+                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
+        else:
+            module.fail_json(msg="Bucket parameter is required.")
+
+    # Support for listing a set of keys
+    if mode == 'list':
+        exists = bucket_check(module, s3, bucket)
+
+        # If the bucket does not exist then bail out
+        if not exists:
+            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
+
+        list_keys(module, s3, bucket, prefix, marker, max_keys)
+
+    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
+    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
+    if mode == 'create':
+
+        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
+        # these were separated above into the variables bucket_acl and object_acl
+
+        if bucket and not obj:
+            if bucketrtn:
+                module.exit_json(msg="Bucket already exists.", changed=False)
+            else:
+                # only use valid bucket acls when creating the bucket
+                module.params['permission'] = bucket_acl
+                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
+        if bucket and obj:
+            if obj.endswith('/'):
+                dirobj = obj
+            else:
+                dirobj = obj + "/"
+            if bucketrtn:
+                if key_check(module, s3, bucket, dirobj):
+                    module.exit_json(msg="Bucket %s and key %s already exist." % (bucket, obj), changed=False)
+                else:
+                    # setting valid object acls for the create_dirkey function
+                    module.params['permission'] = object_acl
+                    create_dirkey(module, s3, bucket, dirobj)
+            else:
+                # only use valid bucket acls for the create_bucket function
+                module.params['permission'] = bucket_acl
+                create_bucket(module, s3, bucket, location)
+                # only use valid object acls for the create_dirkey function
+                module.params['permission'] = object_acl
+                create_dirkey(module, s3, bucket, dirobj)
+
+    # Support for grabbing the time-expired URL for an object in S3/Walrus.
+    if mode == 'geturl':
+        if not bucket or not obj:
+            module.fail_json(msg="Bucket and Object parameters must be set")
+
+        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+        if keyrtn:
+            get_download_url(module, s3, bucket, obj, expiry)
+        else:
+            module.fail_json(msg="Key %s does not exist." % obj)
+
+    if mode == 'getstr':
+        if bucket and obj:
+            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+            if keyrtn:
+                download_s3str(module, s3, bucket, obj, version=version)
+            elif version is not None:
+                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+            else:
+                module.fail_json(msg="Key %s does not exist." % obj)
+
+    module.exit_json(failed=False)
+
+
+if __name__ == '__main__':
+    main()
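Worth pulling out of the module above: overwrite=different relies on keysum(), which treats the object's ETag as an MD5 digest and gives up (returns None) when the ETag contains a '-', the marker of a multipart upload whose ETag is not an MD5. A hedged, standalone sketch of that comparison; should_download() and its helpers are illustrative names, not part of the module:

import hashlib

def local_md5(path, chunk_size=64 * 1024):
    # Stream the file so large objects don't load into memory at once.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

def etag_md5(etag):
    md5 = etag.strip('"')  # S3 wraps the ETag in double quotes
    return None if '-' in md5 else md5  # multipart ETag: not an MD5

def should_download(etag, dest, overwrite):
    if overwrite == 'always':
        return True
    remote = etag_md5(etag)
    if remote is None:
        # Mirrors keysum() returning None: the sums "differ", so
        # overwrite=different still downloads and overwrite=never skips.
        return overwrite != 'never'
    return remote != local_md5(dest)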
deleted file mode 100644
... ...
@@ -1,789 +0,0 @@
1
-#!/usr/bin/python
2
-# This file is part of Ansible
3
-#
4
-# Ansible is free software: you can redistribute it and/or modify
5
-# it under the terms of the GNU General Public License as published by
6
-# the Free Software Foundation, either version 3 of the License, or
7
-# (at your option) any later version.
8
-#
9
-# Ansible is distributed in the hope that it will be useful,
10
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
-# GNU General Public License for more details.
13
-#
14
-# You should have received a copy of the GNU General Public License
15
-# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
16
-
17
-ANSIBLE_METADATA = {'metadata_version': '1.1',
18
-                    'status': ['stableinterface'],
19
-                    'supported_by': 'certified'}
20
-
21
-
22
-DOCUMENTATION = '''
23
-module: s3
24
-short_description: manage objects in S3.
25
-description:
26
-    - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
27
-      deleting both objects and buckets, retrieving objects as files or strings and generating download links.
28
-      This module has a dependency on boto3 and botocore.
29
-version_added: "1.1"
30
-options:
31
-  aws_access_key:
32
-    description:
33
-      - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
34
-    aliases: [ 'ec2_access_key', 'access_key' ]
35
-  aws_secret_key:
36
-    description:
37
-      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
38
-    aliases: ['ec2_secret_key', 'secret_key']
39
-  bucket:
40
-    description:
41
-      - Bucket name.
42
-    required: true
43
-  dest:
44
-    description:
45
-      - The destination file path when downloading an object/key with a GET operation.
46
-    version_added: "1.3"
47
-  encrypt:
48
-    description:
49
-      - When set for PUT mode, asks for server-side encryption.
50
-    default: True
51
-    version_added: "2.0"
52
-  expiration:
53
-    description:
54
-      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
55
-    default: 600
56
-  headers:
57
-    description:
58
-      - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
59
-    version_added: "2.0"
60
-  marker:
61
-    description:
62
-      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order.
63
-    version_added: "2.0"
64
-  max_keys:
65
-    description:
66
-      - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
67
-    default: 1000
68
-    version_added: "2.0"
69
-  metadata:
70
-    description:
71
-      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
72
-    version_added: "1.6"
73
-  mode:
74
-    description:
75
-      - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
76
-        getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
77
-        and delobj (delete object, Ansible 2.0+).
78
-    required: true
79
-    choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
80
-  object:
81
-    description:
82
-      - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
83
-  permission:
84
-    description:
85
-      - This option lets the user set the canned permissions on the object/bucket that are created.
86
-        The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read' for a bucket or
87
-        'private', 'public-read', 'public-read-write', 'aws-exec-read', 'authenticated-read', 'bucket-owner-read',
88
-        'bucket-owner-full-control' for an object. Multiple permissions can be specified as a list.
89
-    default: private
90
-    version_added: "2.0"
91
-  prefix:
92
-    description:
93
-      - Limits the response to keys that begin with the specified prefix for list mode
94
-    default: ""
95
-    version_added: "2.0"
96
-  version:
97
-    description:
98
-      - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
99
-    version_added: "2.0"
100
-  overwrite:
101
-    description:
102
-      - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
103
-        Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0
104
-    default: 'always'
105
-    version_added: "1.2"
106
-  region:
107
-    description:
108
-     - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables
109
-       are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the
110
-       region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
111
-    version_added: "1.8"
112
-  retries:
113
-    description:
114
-     - On recoverable failure, how many times to retry before actually failing.
115
-    default: 0
116
-    version_added: "2.0"
117
-  s3_url:
118
-    description:
119
-      - S3 URL endpoint for usage with Ceph, Eucalypus, fakes3, etc.  Otherwise assumes AWS
120
-    aliases: [ S3_URL ]
121
-  rgw:
122
-    description:
123
-      - Enable Ceph RGW S3 support. This option requires an explicit url via s3_url.
124
-    default: false
125
-    version_added: "2.2"
126
-  src:
127
-    description:
128
-      - The source file path when performing a PUT operation.
129
-    version_added: "1.3"
130
-  ignore_nonexistent_bucket:
131
-    description:
132
-      - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
133
-        GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
134
-        ignore_nonexistent_bucket: True."
135
-    version_added: "2.3"
136
-
137
-requirements: [ "boto3", "botocore" ]
138
-author:
139
-    - "Lester Wade (@lwade)"
140
-    - "Sloane Hertel (@s-hertel)"
141
-extends_documentation_fragment: aws
142
-'''
143
-
144
-EXAMPLES = '''
145
-- name: Simple PUT operation
146
-  s3:
147
-    bucket: mybucket
148
-    object: /my/desired/key.txt
149
-    src: /usr/local/myfile.txt
150
-    mode: put
151
-
152
-- name: Simple PUT operation in Ceph RGW S3
153
-  s3:
154
-    bucket: mybucket
155
-    object: /my/desired/key.txt
156
-    src: /usr/local/myfile.txt
157
-    mode: put
158
-    rgw: true
159
-    s3_url: "http://localhost:8000"
160
-
161
-- name: Simple GET operation
162
-  s3:
163
-    bucket: mybucket
164
-    object: /my/desired/key.txt
165
-    dest: /usr/local/myfile.txt
166
-    mode: get
167
-
168
-- name: Get a specific version of an object.
169
-  s3:
170
-    bucket: mybucket
171
-    object: /my/desired/key.txt
172
-    version: 48c9ee5131af7a716edc22df9772aa6f
173
-    dest: /usr/local/myfile.txt
174
-    mode: get
175
-
176
-- name: PUT/upload with metadata
177
-  s3:
178
-    bucket: mybucket
179
-    object: /my/desired/key.txt
180
-    src: /usr/local/myfile.txt
181
-    mode: put
182
-    metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
183
-
184
-- name: PUT/upload with custom headers
185
-  s3:
186
-    bucket: mybucket
187
-    object: /my/desired/key.txt
188
-    src: /usr/local/myfile.txt
189
-    mode: put
190
-    headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
191
-
192
-- name: List keys simple
193
-  s3:
194
-    bucket: mybucket
195
-    mode: list
196
-
197
-- name: List keys all options
198
-  s3:
199
-    bucket: mybucket
200
-    mode: list
201
-    prefix: /my/desired/
202
-    marker: /my/desired/0023.txt
203
-    max_keys: 472
204
-
205
-- name: Create an empty bucket
206
-  s3:
207
-    bucket: mybucket
208
-    mode: create
209
-    permission: public-read
210
-
211
-- name: Create a bucket with key as directory, in the EU region
212
-  s3:
213
-    bucket: mybucket
214
-    object: /my/directory/path
215
-    mode: create
216
-    region: eu-west-1
217
-
218
-- name: Delete a bucket and all contents
219
-  s3:
220
-    bucket: mybucket
221
-    mode: delete
222
-
223
-- name: GET an object but don't download if the file checksums match. New in 2.0
224
-  s3:
225
-    bucket: mybucket
226
-    object: /my/desired/key.txt
227
-    dest: /usr/local/myfile.txt
228
-    mode: get
229
-    overwrite: different
230
-
231
-- name: Delete an object from a bucket
232
-  s3:
233
-    bucket: mybucket
234
-    object: /my/desired/key.txt
235
-    mode: delobj
236
-'''
237
-
238
-import os
239
-import traceback
240
-from ansible.module_utils.six.moves.urllib.parse import urlparse
241
-from ssl import SSLError
242
-from ansible.module_utils.basic import AnsibleModule, to_text, to_native
243
-from ansible.module_utils.ec2 import ec2_argument_spec, camel_dict_to_snake_dict, get_aws_connection_info, boto3_conn, HAS_BOTO3
244
-
245
-try:
246
-    import botocore
247
-except ImportError:
248
-    pass  # will be detected by imported HAS_BOTO3
249
-
250
-
251
-def key_check(module, s3, bucket, obj, version=None, validate=True):
252
-    exists = True
253
-    try:
254
-        if version:
255
-            s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
256
-        else:
257
-            s3.head_object(Bucket=bucket, Key=obj)
258
-    except botocore.exceptions.ClientError as e:
259
-        # if a client error is thrown, check if it's a 404 error
260
-        # if it's a 404 error, then the object does not exist
261
-        error_code = int(e.response['Error']['Code'])
262
-        if error_code == 404:
263
-            exists = False
264
-        elif error_code == 403 and validate is False:
265
-            pass
266
-        else:
267
-            module.fail_json(msg="Failed while looking up object (during key check) %s." % obj,
268
-                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
269
-    return exists
270
-
271
-
272
-def keysum(module, s3, bucket, obj, version=None):
273
-    if version:
274
-        key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
275
-    else:
276
-        key_check = s3.head_object(Bucket=bucket, Key=obj)
277
-    if not key_check:
278
-        return None
279
-    md5_remote = key_check['ETag'][1:-1]
280
-    if '-' in md5_remote:  # Check for multipart, etag is not md5
281
-        return None
282
-    return md5_remote
283
-
284
-
285
-def bucket_check(module, s3, bucket, validate=True):
286
-    exists = True
287
-    try:
288
-        s3.head_bucket(Bucket=bucket)
289
-    except botocore.exceptions.ClientError as e:
290
-        # If a client error is thrown, then check that it was a 404 error.
291
-        # If it was a 404 error, then the bucket does not exist.
292
-        error_code = int(e.response['Error']['Code'])
293
-        if error_code == 404:
294
-            exists = False
295
-        elif error_code == 403 and validate is False:
296
-            pass
297
-        else:
298
-            module.fail_json(msg="Failed while looking up bucket (during bucket_check) %s." % bucket,
299
-                             exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
300
-    except botocore.exceptions.EndpointConnectionError as e:
301
-        module.fail_json(msg="Invalid endpoint provided: %s" % to_text(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
302
-    return exists
303
-
304
-
305
-def create_bucket(module, s3, bucket, location=None):
306
-    if module.check_mode:
307
-        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
308
-    configuration = {}
309
-    if location not in ('us-east-1', None):
310
-        configuration['LocationConstraint'] = location
311
-    try:
312
-        if len(configuration) > 0:
313
-            s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
314
-        else:
315
-            s3.create_bucket(Bucket=bucket)
316
-        for acl in module.params.get('permission'):
317
-            s3.put_bucket_acl(ACL=acl, Bucket=bucket)
318
-    except botocore.exceptions.ClientError as e:
319
-        module.fail_json(msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).",
320
-                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
321
-
322
-    if bucket:
323
-        return True
324
-
325
-
326
-def paginated_list(s3, **pagination_params):
327
-    pg = s3.get_paginator('list_objects_v2')
328
-    for page in pg.paginate(**pagination_params):
329
-        yield [data['Key'] for data in page.get('Contents', [])]
330
-
331
-
332
-def list_keys(module, s3, bucket, prefix, marker, max_keys):
333
-    pagination_params = {'Bucket': bucket}
334
-    for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
335
-        pagination_params[param_name] = param_value
336
-    try:
337
-        keys = sum(paginated_list(s3, **pagination_params), [])
338
-        module.exit_json(msg="LIST operation complete", s3_keys=keys)
339
-    except botocore.exceptions.ClientError as e:
340
-        module.fail_json(msg="Failed while listing the keys in the bucket {0}".format(bucket),
341
-                         exception=traceback.format_exc(),
342
-                         **camel_dict_to_snake_dict(e.response))
343
-
344
-
345
-def delete_bucket(module, s3, bucket):
346
-    if module.check_mode:
347
-        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
348
-    try:
349
-        exists = bucket_check(module, s3, bucket)
350
-        if exists is False:
351
-            return False
352
-        # if there are contents then we need to delete them before we can delete the bucket
353
-        for keys in paginated_list(s3, Bucket=bucket):
354
-            formatted_keys = [{'Key': key} for key in keys]
355
-            s3.delete_objects(Bucket=bucket, Delete={'Objects': formatted_keys})
356
-        s3.delete_bucket(Bucket=bucket)
357
-        return True
358
-    except botocore.exceptions.ClientError as e:
359
-        module.fail_json(msg="Failed while deleting bucket %s.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
360
-
361
-
362
-def delete_key(module, s3, bucket, obj):
363
-    if module.check_mode:
364
-        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
365
-    try:
366
-        s3.delete_object(Bucket=bucket, Key=obj)
367
-        module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
368
-    except botocore.exceptions.ClientError as e:
369
-        module.fail_json(msg="Failed while trying to delete %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
370
-
371
-
372
-def create_dirkey(module, s3, bucket, obj):
373
-    if module.check_mode:
374
-        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
375
-    try:
376
-        bucket = s3.Bucket(bucket)
377
-        key = bucket.new_key(obj)
378
-        key.set_contents_from_string('')
379
-        for acl in module.params.get('permission'):
380
-            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
381
-        module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
382
-    except botocore.exceptions.ClientError as e:
383
-        module.fail_json(msg="Failed while creating object %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
384
-
385
-
386
-def path_check(path):
387
-    if os.path.exists(path):
388
-        return True
389
-    else:
390
-        return False
391
-
392
-
393
-def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
394
-    if module.check_mode:
395
-        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
396
-    try:
397
-        if metadata:
398
-            extra = {'Metadata': dict(metadata)}
399
-            s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
400
-        else:
401
-            s3.upload_file(Filename=src, Bucket=bucket, Key=obj)
402
-        for acl in module.params.get('permission'):
403
-            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
404
-        url = s3.generate_presigned_url(ClientMethod='put_object',
405
-                                        Params={'Bucket': bucket, 'Key': obj},
406
-                                        ExpiresIn=expiry)
407
-        module.exit_json(msg="PUT operation complete", url=url, changed=True)
408
-    except botocore.exceptions.ClientError as e:
409
-        module.fail_json(msg="Unable to complete PUT operation.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
410
-
411
-
412
-def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
-    if module.check_mode:
-        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
-    # retries is the number of retry attempts; range needs one more than
-    # that to get the total number of download attempts.
-    try:
-        if version:
-            key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
-        else:
-            key = s3.get_object(Bucket=bucket, Key=obj)
-    except botocore.exceptions.ClientError as e:
-        if e.response['Error']['Code'] == "404":
-            module.fail_json(msg="Could not find the key %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-    for x in range(0, retries + 1):
-        try:
-            s3.download_file(bucket, obj, dest)
-            module.exit_json(msg="GET operation complete", changed=True)
-        except botocore.exceptions.ClientError as e:
-            # only fail on the last pass through the loop; otherwise retry,
-            # as this may be a transient timeout
-            if x >= retries:
-                module.fail_json(msg="Failed while downloading %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-        except SSLError as e:  # will ClientError catch SSLError?
-            # only fail on the last pass through the loop; otherwise retry
-            if x >= retries:
-                module.fail_json(msg="s3 download failed: %s." % e, exception=traceback.format_exc())
-
-
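-# Retry semantics (illustrative): retries=2 allows up to three download
-# attempts in total, because range(0, retries + 1) yields 0, 1 and 2.
-
-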
-def download_s3str(module, s3, bucket, obj, version=None, validate=True):
-    if module.check_mode:
-        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
-    try:
-        if version:
-            contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
-        else:
-            contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
-        module.exit_json(msg="GET operation complete", contents=contents, changed=True)
-    except botocore.exceptions.ClientError as e:
-        module.fail_json(msg="Failed while getting contents of object %s as a string." % obj,
-                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
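-# Note that the object's Body is read fully into memory and decoded with
-# to_native, so getstr is only suitable for reasonably small objects.
-
-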
-def get_download_url(module, s3, bucket, obj, expiry, changed=True):
-    try:
-        url = s3.generate_presigned_url(ClientMethod='get_object',
-                                        Params={'Bucket': bucket, 'Key': obj},
-                                        ExpiresIn=expiry)
-        module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
-    except botocore.exceptions.ClientError as e:
-        module.fail_json(msg="Failed while getting download url.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
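-# The presigned URL returned above embeds a signature, so any HTTP client can
-# fetch it without further AWS authentication until `expiry` seconds elapse.
-
-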
-def is_fakes3(s3_url):
-    """ Return True if s3_url has scheme fakes3:// or fakes3s:// """
-    if s3_url is not None:
-        return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
-    else:
-        return False
-
-
-def is_walrus(s3_url):
-    """ Return True if it's a Walrus endpoint, not S3
-
-    We assume anything other than *.amazonaws.com is Walrus"""
-    if s3_url is not None:
-        o = urlparse(s3_url)
-        return not o.netloc.endswith('amazonaws.com')
-    else:
-        return False
-
-
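-# Illustrative classification (hostnames are examples only):
-#   is_fakes3('fakes3://localhost:4569')      -> True
-#   is_walrus('https://objects.example.com')  -> True
-#   is_walrus('https://s3.amazonaws.com')     -> False
-
-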
-def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url):
-    if s3_url and rgw:  # TODO - test this
-        rgw = urlparse(s3_url)
-        params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
-    elif is_fakes3(s3_url):
-        # fakes3 does not understand these boto-specific kwargs, so drop them
-        # before connecting
-        for kw in ('is_secure', 'host', 'port'):
-            aws_connect_kwargs.pop(kw, None)
-        fakes3 = urlparse(s3_url)
-        if fakes3.scheme == 'fakes3s':
-            protocol = "https"
-        else:
-            protocol = "http"
-        params = dict(module=module, conn_type='client', resource='s3', region=None,
-                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(fakes3.port)),
-                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
-    elif is_walrus(s3_url):
-        walrus = urlparse(s3_url).hostname
-        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=walrus, **aws_connect_kwargs)
-    else:
-        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
-    return boto3_conn(**params)
-
-
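-# Endpoint selection (illustrative):
-#   s3_url=None                           -> default AWS endpoint for the region
-#   s3_url='http://rgw.example.com', rgw  -> Ceph Rados Gateway over plain HTTP
-#   s3_url='fakes3://localhost:4569'      -> local fakes3 test server
-#   s3_url='https://storage.example.com'  -> treated as a Walrus endpoint
-
-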
-def main():
-    argument_spec = ec2_argument_spec()
-    argument_spec.update(
-        dict(
-            bucket=dict(required=True),
-            dest=dict(default=None),
-            encrypt=dict(default=True, type='bool'),
-            expiry=dict(default=600, type='int', aliases=['expiration']),
-            headers=dict(type='dict'),
-            marker=dict(default=""),
-            max_keys=dict(default=1000, type='int'),
-            metadata=dict(type='dict'),
-            mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
-            object=dict(),
-            permission=dict(type='list', default=['private']),
-            version=dict(default=None),
-            overwrite=dict(aliases=['force'], default='always'),
-            prefix=dict(default=""),
-            retries=dict(aliases=['retry'], type='int', default=0),
-            s3_url=dict(aliases=['S3_URL']),
-            rgw=dict(default=False, type='bool'),
-            src=dict(),
-            ignore_nonexistent_bucket=dict(default=False, type='bool')
-        ),
-    )
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-        supports_check_mode=True,
-    )
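-    # A representative playbook task driving this module (all values are
-    # illustrative, not defaults):
-    #
-    #   - aws_s3:
-    #       bucket: example-bucket
-    #       object: /my/desired/key.txt
-    #       src: /usr/local/myfile.txt
-    #       mode: put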
-
-    if not HAS_BOTO3:
-        module.fail_json(msg='boto3 and botocore required for this module')
-
-    bucket = module.params.get('bucket')
-    encrypt = module.params.get('encrypt')
-    expiry = module.params.get('expiry')
-    dest = module.params.get('dest', '')
-    headers = module.params.get('headers')
-    marker = module.params.get('marker')
-    max_keys = module.params.get('max_keys')
-    metadata = module.params.get('metadata')
-    mode = module.params.get('mode')
-    obj = module.params.get('object')
-    version = module.params.get('version')
-    overwrite = module.params.get('overwrite')
-    prefix = module.params.get('prefix')
-    retries = module.params.get('retries')
-    s3_url = module.params.get('s3_url')
-    rgw = module.params.get('rgw')
-    src = module.params.get('src')
-    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
-
-    if dest:
-        dest = os.path.expanduser(dest)
-
-    object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
-    bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
-
-    if overwrite not in ['always', 'never', 'different']:
-        if module.boolean(overwrite):
-            overwrite = 'always'
-        else:
-            overwrite = 'never'
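-    # Backwards-compatible mapping (illustrative): overwrite=yes becomes
-    # 'always' and overwrite=no becomes 'never'.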
-
-    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
-
-    if region in ('us-east-1', '', None):
-        # default to US Standard region
-        location = 'us-east-1'
-    else:
-        # Boto uses symbolic names for locations but region strings will
-        # actually work fine for everything except us-east-1 (US Standard)
-        location = region
-
-    # Bucket deletion does not require obj.  Prevents ambiguity with delobj.
-    if obj and mode == "delete":
-        module.fail_json(msg='Parameter obj cannot be used with mode=delete')
-
-    # allow eucarc environment variables to be used if ansible vars aren't set
-    if not s3_url and 'S3_URL' in os.environ:
-        s3_url = os.environ['S3_URL']
-
-    # rgw requires an explicit url
-    if rgw and not s3_url:
-        module.fail_json(msg='rgw flavour requires s3_url')
-
-    # Look at s3_url and tweak connection settings
-    # if connecting to RGW, Walrus or fakes3
-    for key in ['validate_certs', 'security_token', 'profile_name']:
-        aws_connect_kwargs.pop(key, None)
-    try:
-        s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
-    except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
-        # these exceptions carry no HTTP response, so only the traceback is reported
-        module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
-                         exception=traceback.format_exc())
-
-    validate = not ignore_nonexistent_bucket
-
-    # separate types of ACLs
-    bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
-    object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
-    error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
-    if error_acl:
-        module.fail_json(msg='Unknown permission specified: %s' % error_acl)
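-    # Example split (illustrative): permission=['private', 'aws-exec-read']
-    # yields bucket_acl=['private'] and object_acl=['private', 'aws-exec-read'],
-    # because 'aws-exec-read' is a valid canned ACL only for objects.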
-
-    # First, check whether the bucket exists.
-    bucketrtn = bucket_check(module, s3, bucket, validate=validate)
-
-    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
-        module.fail_json(msg="Source bucket cannot be found.")
-
-    # If our mode is a GET operation (download), go through the procedure as appropriate ...
-    if mode == 'get':
-        # Next, check whether the key exists in the bucket.
-        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
-        if keyrtn is False:
-            if version:
-                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
-            else:
-                module.fail_json(msg="Key %s does not exist." % obj)
-
-        # If the destination path already exists, compare the remote object's MD5 sum
-        # (etag) with the local file's md5sum; otherwise just download.
-        if path_check(dest):
-            # Determine if the remote and local object are identical
-            if keysum(module, s3, bucket, obj, version=version) == module.md5(dest):
-                if overwrite == 'always':
-                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-                else:
-                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
-            else:
-                if overwrite in ('always', 'different'):
-                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-                else:
-                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
-        else:
-            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-
-    # If our mode is a PUT operation (upload), go through the procedure as appropriate ...
-    if mode == 'put':
-
-        # If putting an object in a bucket yet to be created, ACLs for the bucket and/or the object may be specified.
-        # These were separated into the variables bucket_acl and object_acl above.
-
-        # Check the src path.
-        if not path_check(src):
-            module.fail_json(msg="Local object for PUT does not exist")
-
-        # Check whether the bucket exists to get ground truth.
-        if bucketrtn:
-            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
-
-        # Check the key's state; if it exists, compare the etag md5sum.
-        if bucketrtn and keyrtn:
-            # Compare the local and remote object
-            if module.md5(src) == keysum(module, s3, bucket, obj):
-                if overwrite == 'always':
-                    # only use valid object ACLs for the upload_s3file function
-                    module.params['permission'] = object_acl
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-                else:
-                    get_download_url(module, s3, bucket, obj, expiry, changed=False)
-            else:
-                if overwrite in ('always', 'different'):
-                    # only use valid object ACLs for the upload_s3file function
-                    module.params['permission'] = object_acl
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-                else:
-                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
-
-        # If the bucket does not exist, create both the bucket and the object.
-        if not bucketrtn:
-            # only use valid bucket ACLs for the create_bucket function
-            module.params['permission'] = bucket_acl
-            create_bucket(module, s3, bucket, location)
-            # only use valid object ACLs for the upload_s3file function
-            module.params['permission'] = object_acl
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-
-        # If the bucket exists but the key doesn't, just upload.
-        if bucketrtn and not keyrtn:
-            # only use valid object ACLs for the upload_s3file function
-            module.params['permission'] = object_acl
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-
-    # Delete an object from a bucket, not the entire bucket
-    if mode == 'delobj':
-        if obj is None:
-            module.fail_json(msg="object parameter is required")
-        if bucket:
-            deletertn = delete_key(module, s3, bucket, obj)
-            if deletertn is True:
-                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
-        else:
-            module.fail_json(msg="Bucket parameter is required.")
-
-    # Delete an entire bucket, including all objects in the bucket
-    if mode == 'delete':
-        if bucket:
-            deletertn = delete_bucket(module, s3, bucket)
-            if deletertn is True:
-                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
-        else:
-            module.fail_json(msg="Bucket parameter is required.")
-
-    # Support for listing a set of keys
-    if mode == 'list':
-        exists = bucket_check(module, s3, bucket)
-
-        # If the bucket does not exist then bail out
-        if not exists:
-            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
-
-        list_keys(module, s3, bucket, prefix, marker, max_keys)
-
-    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
-    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
-    if mode == 'create':
-
-        # If both creating a bucket and putting an object in it, ACLs for the bucket and/or the object may be specified.
-        # These were separated above into the variables bucket_acl and object_acl.
-
-        if bucket and not obj:
-            if bucketrtn:
-                module.exit_json(msg="Bucket already exists.", changed=False)
-            else:
-                # only use valid bucket ACLs when creating the bucket
-                module.params['permission'] = bucket_acl
-                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
-        if bucket and obj:
-            # ensure the "directory" key ends with a trailing slash
-            if obj.endswith('/'):
-                dirobj = obj
-            else:
-                dirobj = obj + "/"
-            if bucketrtn:
-                if key_check(module, s3, bucket, dirobj):
-                    module.exit_json(msg="Bucket %s and key %s already exist." % (bucket, obj), changed=False)
-                else:
-                    # only use valid object ACLs for the create_dirkey function
-                    module.params['permission'] = object_acl
-                    create_dirkey(module, s3, bucket, dirobj)
-            else:
-                # only use valid bucket ACLs for the create_bucket function
-                module.params['permission'] = bucket_acl
-                create_bucket(module, s3, bucket, location)
-                # only use valid object ACLs for the create_dirkey function
-                module.params['permission'] = object_acl
-                create_dirkey(module, s3, bucket, dirobj)
-
-    # Support for grabbing the time-expired URL for an object in S3/Walrus.
-    if mode == 'geturl':
-        if not bucket or not obj:
-            module.fail_json(msg="Bucket and Object parameters must be set")
-
-        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
-        if keyrtn:
-            get_download_url(module, s3, bucket, obj, expiry)
-        else:
-            module.fail_json(msg="Key %s does not exist." % obj)
-
-    if mode == 'getstr':
-        if bucket and obj:
-            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
-            if keyrtn:
-                download_s3str(module, s3, bucket, obj, version=version)
-            elif version is not None:
-                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
-            else:
-                module.fail_json(msg="Key %s does not exist." % obj)
-
-    module.exit_json(failed=False)
-
-
-if __name__ == '__main__':
-    main()
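-
-# Quick manual check (illustrative bucket and key): fetch an object with an
-# ad-hoc command, e.g.
-#   ansible localhost -m aws_s3 -a "bucket=example-bucket object=key.txt dest=/tmp/key.txt mode=get"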