[awslogs] Auto-detect region and set user-agent
@@ -2,8 +2,10 @@
 package awslogs
 
 import (
+	"errors"
 	"fmt"
 	"os"
+	"runtime"
 	"sort"
 	"strings"
 	"sync"
@@ -12,8 +14,12 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
 	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/version"
 )
 
 const (
@@ -35,6 +41,8 @@ const (
 	resourceAlreadyExistsCode = "ResourceAlreadyExistsException"
 	dataAlreadyAcceptedCode = "DataAlreadyAcceptedException"
 	invalidSequenceTokenCode = "InvalidSequenceTokenException"
+
+	userAgentHeader = "User-Agent"
 )
 
 type logStream struct {
@@ -52,12 +60,16 @@ type api interface {
 	PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error)
 }
 
+type regionFinder interface {
+	Region() (string, error)
+}
+
 type byTimestamp []*cloudwatchlogs.InputLogEvent
 
 // init registers the awslogs driver and sets the default region, if provided
 func init() {
 	if os.Getenv(regionEnvKey) != "" {
-		aws.DefaultConfig.Region = aws.String(os.Getenv(regionEnvKey))
+		defaults.DefaultConfig.Region = aws.String(os.Getenv(regionEnvKey))
 	}
 	if err := logger.RegisterLogDriver(name, New); err != nil {
 		logrus.Fatal(err)
@@ -79,19 +91,17 @@ func New(ctx logger.Context) (logger.Logger, error) {
 	if ctx.Config[logStreamKey] != "" {
 		logStreamName = ctx.Config[logStreamKey]
 	}
-	config := aws.DefaultConfig
-	if ctx.Config[regionKey] != "" {
-		config = aws.DefaultConfig.Merge(&aws.Config{
-			Region: aws.String(ctx.Config[regionKey]),
-		})
+	client, err := newAWSLogsClient(ctx)
+	if err != nil {
+		return nil, err
 	}
 	containerStream := &logStream{
 		logStreamName: logStreamName,
 		logGroupName: logGroupName,
-		client: cloudwatchlogs.New(config),
+		client: client,
 		messages: make(chan *logger.Message, 4096),
 	}
-	err := containerStream.create()
+	err = containerStream.create()
 	if err != nil {
 		return nil, err
 	}
@@ -100,6 +110,52 @@ func New(ctx logger.Context) (logger.Logger, error) {
 	return containerStream, nil
 }
 
+// newRegionFinder is a variable such that the implementation
+// can be swapped out for unit tests.
+var newRegionFinder = func() regionFinder {
+	return ec2metadata.New(nil)
+}
+
+// newAWSLogsClient creates the service client for Amazon CloudWatch Logs.
+// Customizations to the default client from the SDK include a Docker-specific
+// User-Agent string and automatic region detection using the EC2 Instance
+// Metadata Service when region is otherwise unspecified.
+func newAWSLogsClient(ctx logger.Context) (api, error) {
+	config := defaults.DefaultConfig
+	if ctx.Config[regionKey] != "" {
+		config = defaults.DefaultConfig.Merge(&aws.Config{
+			Region: aws.String(ctx.Config[regionKey]),
+		})
+	}
+	if config.Region == nil || *config.Region == "" {
+		logrus.Info("Trying to get region from EC2 Metadata")
+		ec2MetadataClient := newRegionFinder()
+		region, err := ec2MetadataClient.Region()
+		if err != nil {
+			logrus.WithFields(logrus.Fields{
+				"error": err,
+			}).Error("Could not get region from EC2 metadata, environment, or log option")
+			return nil, errors.New("Cannot determine region for awslogs driver")
+		}
+		config.Region = &region
+	}
+	logrus.WithFields(logrus.Fields{
+		"region": *config.Region,
+	}).Debug("Created awslogs client")
+	client := cloudwatchlogs.New(config)
+
+	client.Handlers.Build.PushBackNamed(request.NamedHandler{
+		Name: "DockerUserAgentHandler",
+		Fn: func(r *request.Request) {
+			currentAgent := r.HTTPRequest.Header.Get(userAgentHeader)
+			r.HTTPRequest.Header.Set(userAgentHeader,
+				fmt.Sprintf("Docker %s (%s) %s",
+					version.VERSION, runtime.GOOS, currentAgent))
+		},
+	})
+	return client, nil
+}
+
 // Name returns the name of the awslogs logging driver
 func (l *logStream) Name() string {
 	return name
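Because `DockerUserAgentHandler` is pushed onto the back of the `Build` handler list, it runs after the SDK's own user-agent handler and prepends the Docker identifier to whatever agent string is already set. A minimal, stdlib-only sketch of that composition (the version and SDK strings below are illustrative placeholders, not values taken from this diff):

```go
package main

import (
	"fmt"
	"net/http"
	"runtime"
)

func main() {
	header := http.Header{}
	// Assume the SDK's default build handler has already set its own agent string.
	header.Set("User-Agent", "aws-sdk-go/0.9.9") // placeholder SDK agent

	// Mirror what DockerUserAgentHandler does: wrap the existing value.
	dockerVersion := "1.9.0-dev" // placeholder for version.VERSION
	current := header.Get("User-Agent")
	header.Set("User-Agent",
		fmt.Sprintf("Docker %s (%s) %s", dockerVersion, runtime.GOOS, current))

	fmt.Println(header.Get("User-Agent")) // e.g. "Docker 1.9.0-dev (linux) aws-sdk-go/0.9.9"
}
```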
@@ -291,12 +347,6 @@ func ValidateLogOpt(cfg map[string]string) error {
 	if cfg[logGroupKey] == "" {
 		return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey)
 	}
-	if cfg[regionKey] == "" && os.Getenv(regionEnvKey) == "" {
-		return fmt.Errorf(
-			"must specify a value for environment variable '%s' or log opt '%s'",
-			regionEnvKey,
-			regionKey)
-	}
 	return nil
 }
 
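With the region requirement removed from validation, `awslogs-group` is the only option that must be present up front; the region is resolved later from the log option, the environment, or instance metadata. A hypothetical caller-side sketch (not part of this diff):

```go
opts := map[string]string{
	"awslogs-group": "myLogGroup", // "awslogs-region" may now be omitted
}
if err := ValidateLogOpt(opts); err != nil {
	// No longer reached merely because the region is unset.
	logrus.Error(err)
}
```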
@@ -2,14 +2,19 @@ package awslogs
 
 import (
 	"errors"
+	"fmt"
+	"net/http"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
 	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/version"
 )
 
 const (
@@ -20,6 +25,56 @@ const (
 	logline = "this is a log line"
 )
 
+func TestNewAWSLogsClientUserAgentHandler(t *testing.T) {
+	ctx := logger.Context{
+		Config: map[string]string{
+			regionKey: "us-east-1",
+		},
+	}
+
+	client, err := newAWSLogsClient(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs)
+	if !ok {
+		t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs")
+	}
+	buildHandlerList := realClient.Handlers.Build
+	request := &request.Request{
+		HTTPRequest: &http.Request{
+			Header: http.Header{},
+		},
+	}
+	buildHandlerList.Run(request)
+	expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s",
+		version.VERSION, runtime.GOOS, aws.SDKName, aws.SDKVersion)
+	userAgent := request.HTTPRequest.Header.Get("User-Agent")
+	if userAgent != expectedUserAgentString {
+		t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"",
+			expectedUserAgentString, userAgent)
+	}
+}
+
+func TestNewAWSLogsClientRegionDetect(t *testing.T) {
+	ctx := logger.Context{
+		Config: map[string]string{},
+	}
+
+	mockMetadata := newMockMetadataClient()
+	newRegionFinder = func() regionFinder {
+		return mockMetadata
+	}
+	mockMetadata.regionResult <- &regionResult{
+		successResult: "us-east-1",
+	}
+
+	_, err := newAWSLogsClient(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
 func TestCreateSuccess(t *testing.T) {
 	mockClient := newMockClient()
 	stream := &logStream{
@@ -49,6 +49,26 @@ func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput)
 	return output.successResult, output.errorResult
 }
 
+type mockmetadataclient struct {
+	regionResult chan *regionResult
+}
+
+type regionResult struct {
+	successResult string
+	errorResult error
+}
+
+func newMockMetadataClient() *mockmetadataclient {
+	return &mockmetadataclient{
+		regionResult: make(chan *regionResult, 1),
+	}
+}
+
+func (m *mockmetadataclient) Region() (string, error) {
+	output := <-m.regionResult
+	return output.successResult, output.errorResult
+}
+
 func test() {
 	_ = &logStream{
 		client: newMockClient(),
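The mock's `errorResult` field also makes the failure branch of `newAWSLogsClient` easy to exercise. The diff only tests the success path; a hypothetical companion test (not part of this change) could look like:

```go
func TestNewAWSLogsClientRegionDetectError(t *testing.T) {
	ctx := logger.Context{Config: map[string]string{}}

	mockMetadata := newMockMetadataClient()
	newRegionFinder = func() regionFinder {
		return mockMetadata
	}
	// Simulate the EC2 Instance Metadata Service being unreachable.
	mockMetadata.regionResult <- &regionResult{
		errorResult: errors.New("metadata unavailable"),
	}

	if _, err := newAWSLogsClient(ctx); err == nil {
		t.Error("Expected an error when no region can be determined")
	}
}
```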
@@ -34,9 +34,10 @@ You can use the `--log-opt NAME=VALUE` flag to specify Amazon CloudWatch Logs lo
 
 ### awslogs-region
 
-You must specify a region for the `awslogs` logging driver. You can specify the
-region with either the `awslogs-region` log option or `AWS_REGION` environment
-variable:
+The `awslogs` logging driver sends your Docker logs to a specific region. Use
+the `awslogs-region` log option or the `AWS_REGION` environment variable to set
+the region. By default, if your Docker daemon is running on an EC2 instance
+and no region is set, the driver uses the instance's region.
 
     docker run --log-driver=awslogs --log-opt awslogs-region=us-east-1 ...
 
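A usage note consistent with the behavior described above (illustrative, not text from the diff): on an EC2 instance the region option can now be omitted entirely, for example

    docker run --log-driver=awslogs --log-opt awslogs-group=myLogGroup ...

and the driver falls back to the instance's own region via the EC2 Instance Metadata Service; `awslogs-region` or `AWS_REGION` still take precedence when set.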
@@ -64,7 +64,7 @@ clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
 clone git gopkg.in/fsnotify.v1 v1.2.0
 
 # awslogs deps
-clone git github.com/aws/aws-sdk-go v0.7.1
+clone git github.com/aws/aws-sdk-go v0.9.9
 clone git github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1
 
 clean
@@ -113,7 +113,7 @@ func newRequestError(err Error, statusCode int, requestID string) *requestError
 // Error returns the string representation of the error.
 // Satisfies the error interface.
 func (r requestError) Error() string {
-	extra := fmt.Sprintf("status code: %d, request id: [%s]",
+	extra := fmt.Sprintf("status code: %d, request id: %s",
 		r.statusCode, r.requestID)
 	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
 }
@@ -2,48 +2,20 @@ package aws
 
 import (
 	"net/http"
-	"os"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws/credentials"
 )
 
-// DefaultChainCredentials is a Credentials which will find the first available
-// credentials Value from the list of Providers.
-//
-// This should be used in the default case. Once the type of credentials are
-// known switching to the specific Credentials will be more efficient.
-var DefaultChainCredentials = credentials.NewChainCredentials(
-	[]credentials.Provider{
-		&credentials.EnvProvider{},
-		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
-		&credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
-	})
-
 // The default number of retries for a service. The value of -1 indicates that
 // the service specific retry default will be used.
 const DefaultRetries = -1
 
-// DefaultConfig is the default all service configuration will be based off of.
-// By default, all clients use this structure for initialization options unless
-// a custom configuration object is passed in.
-//
-// You may modify this global structure to change all default configuration
-// in the SDK. Note that configuration options are copied by value, so any
-// modifications must happen before constructing a client.
-var DefaultConfig = NewConfig().
-	WithCredentials(DefaultChainCredentials).
-	WithRegion(os.Getenv("AWS_REGION")).
-	WithHTTPClient(http.DefaultClient).
-	WithMaxRetries(DefaultRetries).
-	WithLogger(NewDefaultLogger()).
-	WithLogLevel(LogOff)
-
 // A Config provides service configuration for service clients. By default,
-// all clients will use the {DefaultConfig} structure.
+// all clients will use the {defaults.DefaultConfig} structure.
 type Config struct {
 	// The credentials object to use when signing requests. Defaults to
-	// {DefaultChainCredentials}.
+	// {defaults.DefaultChainCredentials}.
 	Credentials *credentials.Credentials
 
 	// An optional endpoint URL (hostname only or fully qualified URI)
@@ -102,6 +74,8 @@ type Config struct {
 	// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
 	// Amazon S3: Virtual Hosting of Buckets
 	S3ForcePathStyle *bool
+
+	SleepDelay func(time.Duration)
 }
 
 // NewConfig returns a new Config pointer that can be chained with builder methods to
@@ -190,6 +164,13 @@ func (c *Config) WithS3ForcePathStyle(force bool) *Config {
 	return c
 }
 
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+	c.SleepDelay = fn
+	return c
+}
+
 // Merge returns a new Config with the other Config's attribute values merged into
 // this Config. If the other Config's attribute is nil it will not be merged into
 // the new Config to be returned.
@@ -244,6 +225,10 @@ func (c Config) Merge(other *Config) *Config {
 		dst.S3ForcePathStyle = other.S3ForcePathStyle
 	}
 
+	if other.SleepDelay != nil {
+		dst.SleepDelay = other.SleepDelay
+	}
+
 	return &dst
 }
 
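The new `SleepDelay` field is the hook that lets callers replace the real retry back-off (the `AfterRetryHandler` later in this diff calls it in place of `time.Sleep`). A minimal usage sketch, assuming the builder methods shown above and the usual `aws` and `time` imports (illustrative, not part of this diff):

```go
// Build a config whose retry back-off does not actually block; handy in tests.
cfg := aws.NewConfig().
	WithRegion("us-east-1").
	WithSleepDelay(func(d time.Duration) {
		// no-op instead of time.Sleep(d)
	})
_ = cfg
```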
| 250 | 254 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,357 @@ |
| 0 |
+package aws |
|
| 1 |
+ |
|
| 2 |
+import "time" |
|
| 3 |
+ |
|
| 4 |
+// String returns a pointer to of the string value passed in. |
|
| 5 |
+func String(v string) *string {
|
|
| 6 |
+ return &v |
|
| 7 |
+} |
|
| 8 |
+ |
|
| 9 |
+// StringValue returns the value of the string pointer passed in or |
|
| 10 |
+// "" if the pointer is nil. |
|
| 11 |
+func StringValue(v *string) string {
|
|
| 12 |
+ if v != nil {
|
|
| 13 |
+ return *v |
|
| 14 |
+ } |
|
| 15 |
+ return "" |
|
| 16 |
+} |
|
| 17 |
+ |
|
| 18 |
+// StringSlice converts a slice of string values into a slice of |
|
| 19 |
+// string pointers |
|
| 20 |
+func StringSlice(src []string) []*string {
|
|
| 21 |
+ dst := make([]*string, len(src)) |
|
| 22 |
+ for i := 0; i < len(src); i++ {
|
|
| 23 |
+ dst[i] = &(src[i]) |
|
| 24 |
+ } |
|
| 25 |
+ return dst |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// StringValueSlice converts a slice of string pointers into a slice of |
|
| 29 |
+// string values |
|
| 30 |
+func StringValueSlice(src []*string) []string {
|
|
| 31 |
+ dst := make([]string, len(src)) |
|
| 32 |
+ for i := 0; i < len(src); i++ {
|
|
| 33 |
+ if src[i] != nil {
|
|
| 34 |
+ dst[i] = *(src[i]) |
|
| 35 |
+ } |
|
| 36 |
+ } |
|
| 37 |
+ return dst |
|
| 38 |
+} |
|
| 39 |
+ |
|
| 40 |
+// StringMap converts a string map of string values into a string |
|
| 41 |
+// map of string pointers |
|
| 42 |
+func StringMap(src map[string]string) map[string]*string {
|
|
| 43 |
+ dst := make(map[string]*string) |
|
| 44 |
+ for k, val := range src {
|
|
| 45 |
+ v := val |
|
| 46 |
+ dst[k] = &v |
|
| 47 |
+ } |
|
| 48 |
+ return dst |
|
| 49 |
+} |
|
| 50 |
+ |
|
| 51 |
+// StringValueMap converts a string map of string pointers into a string |
|
| 52 |
+// map of string values |
|
| 53 |
+func StringValueMap(src map[string]*string) map[string]string {
|
|
| 54 |
+ dst := make(map[string]string) |
|
| 55 |
+ for k, val := range src {
|
|
| 56 |
+ if val != nil {
|
|
| 57 |
+ dst[k] = *val |
|
| 58 |
+ } |
|
| 59 |
+ } |
|
| 60 |
+ return dst |
|
| 61 |
+} |
|
| 62 |
+ |
|
| 63 |
+// Bool returns a pointer to of the bool value passed in. |
|
| 64 |
+func Bool(v bool) *bool {
|
|
| 65 |
+ return &v |
|
| 66 |
+} |
|
| 67 |
+ |
|
| 68 |
+// BoolValue returns the value of the bool pointer passed in or |
|
| 69 |
+// false if the pointer is nil. |
|
| 70 |
+func BoolValue(v *bool) bool {
|
|
| 71 |
+ if v != nil {
|
|
| 72 |
+ return *v |
|
| 73 |
+ } |
|
| 74 |
+ return false |
|
| 75 |
+} |
|
| 76 |
+ |
|
| 77 |
+// BoolSlice converts a slice of bool values into a slice of |
|
| 78 |
+// bool pointers |
|
| 79 |
+func BoolSlice(src []bool) []*bool {
|
|
| 80 |
+ dst := make([]*bool, len(src)) |
|
| 81 |
+ for i := 0; i < len(src); i++ {
|
|
| 82 |
+ dst[i] = &(src[i]) |
|
| 83 |
+ } |
|
| 84 |
+ return dst |
|
| 85 |
+} |
|
| 86 |
+ |
|
| 87 |
+// BoolValueSlice converts a slice of bool pointers into a slice of |
|
| 88 |
+// bool values |
|
| 89 |
+func BoolValueSlice(src []*bool) []bool {
|
|
| 90 |
+ dst := make([]bool, len(src)) |
|
| 91 |
+ for i := 0; i < len(src); i++ {
|
|
| 92 |
+ if src[i] != nil {
|
|
| 93 |
+ dst[i] = *(src[i]) |
|
| 94 |
+ } |
|
| 95 |
+ } |
|
| 96 |
+ return dst |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+// BoolMap converts a string map of bool values into a string |
|
| 100 |
+// map of bool pointers |
|
| 101 |
+func BoolMap(src map[string]bool) map[string]*bool {
|
|
| 102 |
+ dst := make(map[string]*bool) |
|
| 103 |
+ for k, val := range src {
|
|
| 104 |
+ v := val |
|
| 105 |
+ dst[k] = &v |
|
| 106 |
+ } |
|
| 107 |
+ return dst |
|
| 108 |
+} |
|
| 109 |
+ |
|
| 110 |
+// BoolValueMap converts a string map of bool pointers into a string |
|
| 111 |
+// map of bool values |
|
| 112 |
+func BoolValueMap(src map[string]*bool) map[string]bool {
|
|
| 113 |
+ dst := make(map[string]bool) |
|
| 114 |
+ for k, val := range src {
|
|
| 115 |
+ if val != nil {
|
|
| 116 |
+ dst[k] = *val |
|
| 117 |
+ } |
|
| 118 |
+ } |
|
| 119 |
+ return dst |
|
| 120 |
+} |
|
| 121 |
+ |
|
| 122 |
+// Int returns a pointer to of the int value passed in. |
|
| 123 |
+func Int(v int) *int {
|
|
| 124 |
+ return &v |
|
| 125 |
+} |
|
| 126 |
+ |
|
| 127 |
+// IntValue returns the value of the int pointer passed in or |
|
| 128 |
+// 0 if the pointer is nil. |
|
| 129 |
+func IntValue(v *int) int {
|
|
| 130 |
+ if v != nil {
|
|
| 131 |
+ return *v |
|
| 132 |
+ } |
|
| 133 |
+ return 0 |
|
| 134 |
+} |
|
| 135 |
+ |
|
| 136 |
+// IntSlice converts a slice of int values into a slice of |
|
| 137 |
+// int pointers |
|
| 138 |
+func IntSlice(src []int) []*int {
|
|
| 139 |
+ dst := make([]*int, len(src)) |
|
| 140 |
+ for i := 0; i < len(src); i++ {
|
|
| 141 |
+ dst[i] = &(src[i]) |
|
| 142 |
+ } |
|
| 143 |
+ return dst |
|
| 144 |
+} |
|
| 145 |
+ |
|
| 146 |
+// IntValueSlice converts a slice of int pointers into a slice of |
|
| 147 |
+// int values |
|
| 148 |
+func IntValueSlice(src []*int) []int {
|
|
| 149 |
+ dst := make([]int, len(src)) |
|
| 150 |
+ for i := 0; i < len(src); i++ {
|
|
| 151 |
+ if src[i] != nil {
|
|
| 152 |
+ dst[i] = *(src[i]) |
|
| 153 |
+ } |
|
| 154 |
+ } |
|
| 155 |
+ return dst |
|
| 156 |
+} |
|
| 157 |
+ |
|
| 158 |
+// IntMap converts a string map of int values into a string |
|
| 159 |
+// map of int pointers |
|
| 160 |
+func IntMap(src map[string]int) map[string]*int {
|
|
| 161 |
+ dst := make(map[string]*int) |
|
| 162 |
+ for k, val := range src {
|
|
| 163 |
+ v := val |
|
| 164 |
+ dst[k] = &v |
|
| 165 |
+ } |
|
| 166 |
+ return dst |
|
| 167 |
+} |
|
| 168 |
+ |
|
| 169 |
+// IntValueMap converts a string map of int pointers into a string |
|
| 170 |
+// map of int values |
|
| 171 |
+func IntValueMap(src map[string]*int) map[string]int {
|
|
| 172 |
+ dst := make(map[string]int) |
|
| 173 |
+ for k, val := range src {
|
|
| 174 |
+ if val != nil {
|
|
| 175 |
+ dst[k] = *val |
|
| 176 |
+ } |
|
| 177 |
+ } |
|
| 178 |
+ return dst |
|
| 179 |
+} |
|
| 180 |
+ |
|
| 181 |
+// Int64 returns a pointer to of the int64 value passed in. |
|
| 182 |
+func Int64(v int64) *int64 {
|
|
| 183 |
+ return &v |
|
| 184 |
+} |
|
| 185 |
+ |
|
| 186 |
+// Int64Value returns the value of the int64 pointer passed in or |
|
| 187 |
+// 0 if the pointer is nil. |
|
| 188 |
+func Int64Value(v *int64) int64 {
|
|
| 189 |
+ if v != nil {
|
|
| 190 |
+ return *v |
|
| 191 |
+ } |
|
| 192 |
+ return 0 |
|
| 193 |
+} |
|
| 194 |
+ |
|
| 195 |
+// Int64Slice converts a slice of int64 values into a slice of |
|
| 196 |
+// int64 pointers |
|
| 197 |
+func Int64Slice(src []int64) []*int64 {
|
|
| 198 |
+ dst := make([]*int64, len(src)) |
|
| 199 |
+ for i := 0; i < len(src); i++ {
|
|
| 200 |
+ dst[i] = &(src[i]) |
|
| 201 |
+ } |
|
| 202 |
+ return dst |
|
| 203 |
+} |
|
| 204 |
+ |
|
| 205 |
+// Int64ValueSlice converts a slice of int64 pointers into a slice of |
|
| 206 |
+// int64 values |
|
| 207 |
+func Int64ValueSlice(src []*int64) []int64 {
|
|
| 208 |
+ dst := make([]int64, len(src)) |
|
| 209 |
+ for i := 0; i < len(src); i++ {
|
|
| 210 |
+ if src[i] != nil {
|
|
| 211 |
+ dst[i] = *(src[i]) |
|
| 212 |
+ } |
|
| 213 |
+ } |
|
| 214 |
+ return dst |
|
| 215 |
+} |
|
| 216 |
+ |
|
| 217 |
+// Int64Map converts a string map of int64 values into a string |
|
| 218 |
+// map of int64 pointers |
|
| 219 |
+func Int64Map(src map[string]int64) map[string]*int64 {
|
|
| 220 |
+ dst := make(map[string]*int64) |
|
| 221 |
+ for k, val := range src {
|
|
| 222 |
+ v := val |
|
| 223 |
+ dst[k] = &v |
|
| 224 |
+ } |
|
| 225 |
+ return dst |
|
| 226 |
+} |
|
| 227 |
+ |
|
| 228 |
+// Int64ValueMap converts a string map of int64 pointers into a string |
|
| 229 |
+// map of int64 values |
|
| 230 |
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
|
| 231 |
+ dst := make(map[string]int64) |
|
| 232 |
+ for k, val := range src {
|
|
| 233 |
+ if val != nil {
|
|
| 234 |
+ dst[k] = *val |
|
| 235 |
+ } |
|
| 236 |
+ } |
|
| 237 |
+ return dst |
|
| 238 |
+} |
|
| 239 |
+ |
|
| 240 |
+// Float64 returns a pointer to of the float64 value passed in. |
|
| 241 |
+func Float64(v float64) *float64 {
|
|
| 242 |
+ return &v |
|
| 243 |
+} |
|
| 244 |
+ |
|
| 245 |
+// Float64Value returns the value of the float64 pointer passed in or |
|
| 246 |
+// 0 if the pointer is nil. |
|
| 247 |
+func Float64Value(v *float64) float64 {
|
|
| 248 |
+ if v != nil {
|
|
| 249 |
+ return *v |
|
| 250 |
+ } |
|
| 251 |
+ return 0 |
|
| 252 |
+} |
|
| 253 |
+ |
|
| 254 |
+// Float64Slice converts a slice of float64 values into a slice of |
|
| 255 |
+// float64 pointers |
|
| 256 |
+func Float64Slice(src []float64) []*float64 {
|
|
| 257 |
+ dst := make([]*float64, len(src)) |
|
| 258 |
+ for i := 0; i < len(src); i++ {
|
|
| 259 |
+ dst[i] = &(src[i]) |
|
| 260 |
+ } |
|
| 261 |
+ return dst |
|
| 262 |
+} |
|
| 263 |
+ |
|
| 264 |
+// Float64ValueSlice converts a slice of float64 pointers into a slice of |
|
| 265 |
+// float64 values |
|
| 266 |
+func Float64ValueSlice(src []*float64) []float64 {
|
|
| 267 |
+ dst := make([]float64, len(src)) |
|
| 268 |
+ for i := 0; i < len(src); i++ {
|
|
| 269 |
+ if src[i] != nil {
|
|
| 270 |
+ dst[i] = *(src[i]) |
|
| 271 |
+ } |
|
| 272 |
+ } |
|
| 273 |
+ return dst |
|
| 274 |
+} |
|
| 275 |
+ |
|
| 276 |
+// Float64Map converts a string map of float64 values into a string |
|
| 277 |
+// map of float64 pointers |
|
| 278 |
+func Float64Map(src map[string]float64) map[string]*float64 {
|
|
| 279 |
+ dst := make(map[string]*float64) |
|
| 280 |
+ for k, val := range src {
|
|
| 281 |
+ v := val |
|
| 282 |
+ dst[k] = &v |
|
| 283 |
+ } |
|
| 284 |
+ return dst |
|
| 285 |
+} |
|
| 286 |
+ |
|
| 287 |
+// Float64ValueMap converts a string map of float64 pointers into a string |
|
| 288 |
+// map of float64 values |
|
| 289 |
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
|
| 290 |
+ dst := make(map[string]float64) |
|
| 291 |
+ for k, val := range src {
|
|
| 292 |
+ if val != nil {
|
|
| 293 |
+ dst[k] = *val |
|
| 294 |
+ } |
|
| 295 |
+ } |
|
| 296 |
+ return dst |
|
| 297 |
+} |
|
| 298 |
+ |
|
| 299 |
+// Time returns a pointer to of the time.Time value passed in. |
|
| 300 |
+func Time(v time.Time) *time.Time {
|
|
| 301 |
+ return &v |
|
| 302 |
+} |
|
| 303 |
+ |
|
| 304 |
+// TimeValue returns the value of the time.Time pointer passed in or |
|
| 305 |
+// time.Time{} if the pointer is nil.
|
|
| 306 |
+func TimeValue(v *time.Time) time.Time {
|
|
| 307 |
+ if v != nil {
|
|
| 308 |
+ return *v |
|
| 309 |
+ } |
|
| 310 |
+ return time.Time{}
|
|
| 311 |
+} |
|
| 312 |
+ |
|
| 313 |
+// TimeSlice converts a slice of time.Time values into a slice of |
|
| 314 |
+// time.Time pointers |
|
| 315 |
+func TimeSlice(src []time.Time) []*time.Time {
|
|
| 316 |
+ dst := make([]*time.Time, len(src)) |
|
| 317 |
+ for i := 0; i < len(src); i++ {
|
|
| 318 |
+ dst[i] = &(src[i]) |
|
| 319 |
+ } |
|
| 320 |
+ return dst |
|
| 321 |
+} |
|
| 322 |
+ |
|
| 323 |
+// TimeValueSlice converts a slice of time.Time pointers into a slice of |
|
| 324 |
+// time.Time values |
|
| 325 |
+func TimeValueSlice(src []*time.Time) []time.Time {
|
|
| 326 |
+ dst := make([]time.Time, len(src)) |
|
| 327 |
+ for i := 0; i < len(src); i++ {
|
|
| 328 |
+ if src[i] != nil {
|
|
| 329 |
+ dst[i] = *(src[i]) |
|
| 330 |
+ } |
|
| 331 |
+ } |
|
| 332 |
+ return dst |
|
| 333 |
+} |
|
| 334 |
+ |
|
| 335 |
+// TimeMap converts a string map of time.Time values into a string |
|
| 336 |
+// map of time.Time pointers |
|
| 337 |
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
|
| 338 |
+ dst := make(map[string]*time.Time) |
|
| 339 |
+ for k, val := range src {
|
|
| 340 |
+ v := val |
|
| 341 |
+ dst[k] = &v |
|
| 342 |
+ } |
|
| 343 |
+ return dst |
|
| 344 |
+} |
|
| 345 |
+ |
|
| 346 |
+// TimeValueMap converts a string map of time.Time pointers into a string |
|
| 347 |
+// map of time.Time values |
|
| 348 |
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
|
| 349 |
+ dst := make(map[string]time.Time) |
|
| 350 |
+ for k, val := range src {
|
|
| 351 |
+ if val != nil {
|
|
| 352 |
+ dst[k] = *val |
|
| 353 |
+ } |
|
| 354 |
+ } |
|
| 355 |
+ return dst |
|
| 356 |
+} |
| 0 | 357 |
deleted file mode 100644 |
| ... | ... |
@@ -1,357 +0,0 @@ |
| 1 |
-package aws |
|
| 2 |
- |
|
| 3 |
-import "time" |
|
| 4 |
- |
|
| 5 |
-// String returns a pointer to of the string value passed in. |
|
| 6 |
-func String(v string) *string {
|
|
| 7 |
- return &v |
|
| 8 |
-} |
|
| 9 |
- |
|
| 10 |
-// StringValue returns the value of the string pointer passed in or |
|
| 11 |
-// "" if the pointer is nil. |
|
| 12 |
-func StringValue(v *string) string {
|
|
| 13 |
- if v != nil {
|
|
| 14 |
- return *v |
|
| 15 |
- } |
|
| 16 |
- return "" |
|
| 17 |
-} |
|
| 18 |
- |
|
| 19 |
-// StringSlice converts a slice of string values into a slice of |
|
| 20 |
-// string pointers |
|
| 21 |
-func StringSlice(src []string) []*string {
|
|
| 22 |
- dst := make([]*string, len(src)) |
|
| 23 |
- for i := 0; i < len(src); i++ {
|
|
| 24 |
- dst[i] = &(src[i]) |
|
| 25 |
- } |
|
| 26 |
- return dst |
|
| 27 |
-} |
|
| 28 |
- |
|
| 29 |
-// StringValueSlice converts a slice of string pointers into a slice of |
|
| 30 |
-// string values |
|
| 31 |
-func StringValueSlice(src []*string) []string {
|
|
| 32 |
- dst := make([]string, len(src)) |
|
| 33 |
- for i := 0; i < len(src); i++ {
|
|
| 34 |
- if src[i] != nil {
|
|
| 35 |
- dst[i] = *(src[i]) |
|
| 36 |
- } |
|
| 37 |
- } |
|
| 38 |
- return dst |
|
| 39 |
-} |
|
| 40 |
- |
|
| 41 |
-// StringMap converts a string map of string values into a string |
|
| 42 |
-// map of string pointers |
|
| 43 |
-func StringMap(src map[string]string) map[string]*string {
|
|
| 44 |
- dst := make(map[string]*string) |
|
| 45 |
- for k, val := range src {
|
|
| 46 |
- v := val |
|
| 47 |
- dst[k] = &v |
|
| 48 |
- } |
|
| 49 |
- return dst |
|
| 50 |
-} |
|
| 51 |
- |
|
| 52 |
-// StringValueMap converts a string map of string pointers into a string |
|
| 53 |
-// map of string values |
|
| 54 |
-func StringValueMap(src map[string]*string) map[string]string {
|
|
| 55 |
- dst := make(map[string]string) |
|
| 56 |
- for k, val := range src {
|
|
| 57 |
- if val != nil {
|
|
| 58 |
- dst[k] = *val |
|
| 59 |
- } |
|
| 60 |
- } |
|
| 61 |
- return dst |
|
| 62 |
-} |
|
| 63 |
- |
|
| 64 |
-// Bool returns a pointer to of the bool value passed in. |
|
| 65 |
-func Bool(v bool) *bool {
|
|
| 66 |
- return &v |
|
| 67 |
-} |
|
| 68 |
- |
|
| 69 |
-// BoolValue returns the value of the bool pointer passed in or |
|
| 70 |
-// false if the pointer is nil. |
|
| 71 |
-func BoolValue(v *bool) bool {
|
|
| 72 |
- if v != nil {
|
|
| 73 |
- return *v |
|
| 74 |
- } |
|
| 75 |
- return false |
|
| 76 |
-} |
|
| 77 |
- |
|
| 78 |
-// BoolSlice converts a slice of bool values into a slice of |
|
| 79 |
-// bool pointers |
|
| 80 |
-func BoolSlice(src []bool) []*bool {
|
|
| 81 |
- dst := make([]*bool, len(src)) |
|
| 82 |
- for i := 0; i < len(src); i++ {
|
|
| 83 |
- dst[i] = &(src[i]) |
|
| 84 |
- } |
|
| 85 |
- return dst |
|
| 86 |
-} |
|
| 87 |
- |
|
| 88 |
-// BoolValueSlice converts a slice of bool pointers into a slice of |
|
| 89 |
-// bool values |
|
| 90 |
-func BoolValueSlice(src []*bool) []bool {
|
|
| 91 |
- dst := make([]bool, len(src)) |
|
| 92 |
- for i := 0; i < len(src); i++ {
|
|
| 93 |
- if src[i] != nil {
|
|
| 94 |
- dst[i] = *(src[i]) |
|
| 95 |
- } |
|
| 96 |
- } |
|
| 97 |
- return dst |
|
| 98 |
-} |
|
| 99 |
- |
|
| 100 |
-// BoolMap converts a string map of bool values into a string |
|
| 101 |
-// map of bool pointers |
|
| 102 |
-func BoolMap(src map[string]bool) map[string]*bool {
|
|
| 103 |
- dst := make(map[string]*bool) |
|
| 104 |
- for k, val := range src {
|
|
| 105 |
- v := val |
|
| 106 |
- dst[k] = &v |
|
| 107 |
- } |
|
| 108 |
- return dst |
|
| 109 |
-} |
|
| 110 |
- |
|
| 111 |
-// BoolValueMap converts a string map of bool pointers into a string |
|
| 112 |
-// map of bool values |
|
| 113 |
-func BoolValueMap(src map[string]*bool) map[string]bool {
|
|
| 114 |
- dst := make(map[string]bool) |
|
| 115 |
- for k, val := range src {
|
|
| 116 |
- if val != nil {
|
|
| 117 |
- dst[k] = *val |
|
| 118 |
- } |
|
| 119 |
- } |
|
| 120 |
- return dst |
|
| 121 |
-} |
|
| 122 |
- |
|
| 123 |
-// Int returns a pointer to of the int value passed in. |
|
| 124 |
-func Int(v int) *int {
|
|
| 125 |
- return &v |
|
| 126 |
-} |
|
| 127 |
- |
|
| 128 |
-// IntValue returns the value of the int pointer passed in or |
|
| 129 |
-// 0 if the pointer is nil. |
|
| 130 |
-func IntValue(v *int) int {
|
|
| 131 |
- if v != nil {
|
|
| 132 |
- return *v |
|
| 133 |
- } |
|
| 134 |
- return 0 |
|
| 135 |
-} |
|
| 136 |
- |
|
| 137 |
-// IntSlice converts a slice of int values into a slice of |
|
| 138 |
-// int pointers |
|
| 139 |
-func IntSlice(src []int) []*int {
|
|
| 140 |
- dst := make([]*int, len(src)) |
|
| 141 |
- for i := 0; i < len(src); i++ {
|
|
| 142 |
- dst[i] = &(src[i]) |
|
| 143 |
- } |
|
| 144 |
- return dst |
|
| 145 |
-} |
|
| 146 |
- |
|
| 147 |
-// IntValueSlice converts a slice of int pointers into a slice of |
|
| 148 |
-// int values |
|
| 149 |
-func IntValueSlice(src []*int) []int {
|
|
| 150 |
- dst := make([]int, len(src)) |
|
| 151 |
- for i := 0; i < len(src); i++ {
|
|
| 152 |
- if src[i] != nil {
|
|
| 153 |
- dst[i] = *(src[i]) |
|
| 154 |
- } |
|
| 155 |
- } |
|
| 156 |
- return dst |
|
| 157 |
-} |
|
| 158 |
- |
|
| 159 |
-// IntMap converts a string map of int values into a string |
|
| 160 |
-// map of int pointers |
|
| 161 |
-func IntMap(src map[string]int) map[string]*int {
|
|
| 162 |
- dst := make(map[string]*int) |
|
| 163 |
- for k, val := range src {
|
|
| 164 |
- v := val |
|
| 165 |
- dst[k] = &v |
|
| 166 |
- } |
|
| 167 |
- return dst |
|
| 168 |
-} |
|
| 169 |
- |
|
| 170 |
-// IntValueMap converts a string map of int pointers into a string |
|
| 171 |
-// map of int values |
|
| 172 |
-func IntValueMap(src map[string]*int) map[string]int {
|
|
| 173 |
- dst := make(map[string]int) |
|
| 174 |
- for k, val := range src {
|
|
| 175 |
- if val != nil {
|
|
| 176 |
- dst[k] = *val |
|
| 177 |
- } |
|
| 178 |
- } |
|
| 179 |
- return dst |
|
| 180 |
-} |
|
| 181 |
- |
|
| 182 |
-// Int64 returns a pointer to of the int64 value passed in. |
|
| 183 |
-func Int64(v int64) *int64 {
|
|
| 184 |
- return &v |
|
| 185 |
-} |
|
| 186 |
- |
|
| 187 |
-// Int64Value returns the value of the int64 pointer passed in or |
|
| 188 |
-// 0 if the pointer is nil. |
|
| 189 |
-func Int64Value(v *int64) int64 {
|
|
| 190 |
- if v != nil {
|
|
| 191 |
- return *v |
|
| 192 |
- } |
|
| 193 |
- return 0 |
|
| 194 |
-} |
|
| 195 |
- |
|
| 196 |
-// Int64Slice converts a slice of int64 values into a slice of |
|
| 197 |
-// int64 pointers |
|
| 198 |
-func Int64Slice(src []int64) []*int64 {
|
|
| 199 |
- dst := make([]*int64, len(src)) |
|
| 200 |
- for i := 0; i < len(src); i++ {
|
|
| 201 |
- dst[i] = &(src[i]) |
|
| 202 |
- } |
|
| 203 |
- return dst |
|
| 204 |
-} |
|
| 205 |
- |
|
| 206 |
-// Int64ValueSlice converts a slice of int64 pointers into a slice of |
|
| 207 |
-// int64 values |
|
| 208 |
-func Int64ValueSlice(src []*int64) []int64 {
|
|
| 209 |
- dst := make([]int64, len(src)) |
|
| 210 |
- for i := 0; i < len(src); i++ {
|
|
| 211 |
- if src[i] != nil {
|
|
| 212 |
- dst[i] = *(src[i]) |
|
| 213 |
- } |
|
| 214 |
- } |
|
| 215 |
- return dst |
|
| 216 |
-} |
|
| 217 |
- |
|
| 218 |
-// Int64Map converts a string map of int64 values into a string |
|
| 219 |
-// map of int64 pointers |
|
| 220 |
-func Int64Map(src map[string]int64) map[string]*int64 {
|
|
| 221 |
- dst := make(map[string]*int64) |
|
| 222 |
- for k, val := range src {
|
|
| 223 |
- v := val |
|
| 224 |
- dst[k] = &v |
|
| 225 |
- } |
|
| 226 |
- return dst |
|
| 227 |
-} |
|
| 228 |
- |
|
| 229 |
-// Int64ValueMap converts a string map of int64 pointers into a string |
|
| 230 |
-// map of int64 values |
|
| 231 |
-func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
|
| 232 |
- dst := make(map[string]int64) |
|
| 233 |
- for k, val := range src {
|
|
| 234 |
- if val != nil {
|
|
| 235 |
- dst[k] = *val |
|
| 236 |
- } |
|
| 237 |
- } |
|
| 238 |
- return dst |
|
| 239 |
-} |
|
| 240 |
- |
|
| 241 |
-// Float64 returns a pointer to of the float64 value passed in. |
|
| 242 |
-func Float64(v float64) *float64 {
|
|
| 243 |
- return &v |
|
| 244 |
-} |
|
| 245 |
- |
|
| 246 |
-// Float64Value returns the value of the float64 pointer passed in or |
|
| 247 |
-// 0 if the pointer is nil. |
|
| 248 |
-func Float64Value(v *float64) float64 {
|
|
| 249 |
- if v != nil {
|
|
| 250 |
- return *v |
|
| 251 |
- } |
|
| 252 |
- return 0 |
|
| 253 |
-} |
|
| 254 |
- |
|
| 255 |
-// Float64Slice converts a slice of float64 values into a slice of |
|
| 256 |
-// float64 pointers |
|
| 257 |
-func Float64Slice(src []float64) []*float64 {
|
|
| 258 |
- dst := make([]*float64, len(src)) |
|
| 259 |
- for i := 0; i < len(src); i++ {
|
|
| 260 |
- dst[i] = &(src[i]) |
|
| 261 |
- } |
|
| 262 |
- return dst |
|
| 263 |
-} |
|
| 264 |
- |
|
| 265 |
-// Float64ValueSlice converts a slice of float64 pointers into a slice of |
|
| 266 |
-// float64 values |
|
| 267 |
-func Float64ValueSlice(src []*float64) []float64 {
|
|
| 268 |
- dst := make([]float64, len(src)) |
|
| 269 |
- for i := 0; i < len(src); i++ {
|
|
| 270 |
- if src[i] != nil {
|
|
| 271 |
- dst[i] = *(src[i]) |
|
| 272 |
- } |
|
| 273 |
- } |
|
| 274 |
- return dst |
|
| 275 |
-} |
|
| 276 |
- |
|
| 277 |
-// Float64Map converts a string map of float64 values into a string |
|
| 278 |
-// map of float64 pointers |
|
| 279 |
-func Float64Map(src map[string]float64) map[string]*float64 {
|
|
| 280 |
- dst := make(map[string]*float64) |
|
| 281 |
- for k, val := range src {
|
|
| 282 |
- v := val |
|
| 283 |
- dst[k] = &v |
|
| 284 |
- } |
|
| 285 |
- return dst |
|
| 286 |
-} |
|
| 287 |
- |
|
| 288 |
-// Float64ValueMap converts a string map of float64 pointers into a string |
|
| 289 |
-// map of float64 values |
|
| 290 |
-func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
|
| 291 |
- dst := make(map[string]float64) |
|
| 292 |
- for k, val := range src {
|
|
| 293 |
- if val != nil {
|
|
| 294 |
- dst[k] = *val |
|
| 295 |
- } |
|
| 296 |
- } |
|
| 297 |
- return dst |
|
| 298 |
-} |
|
| 299 |
- |
|
| 300 |
-// Time returns a pointer to of the time.Time value passed in. |
|
| 301 |
-func Time(v time.Time) *time.Time {
|
|
| 302 |
- return &v |
|
| 303 |
-} |
|
| 304 |
- |
|
| 305 |
-// TimeValue returns the value of the time.Time pointer passed in or |
|
| 306 |
-// time.Time{} if the pointer is nil.
|
|
| 307 |
-func TimeValue(v *time.Time) time.Time {
|
|
| 308 |
- if v != nil {
|
|
| 309 |
- return *v |
|
| 310 |
- } |
|
| 311 |
- return time.Time{}
|
|
| 312 |
-} |
|
| 313 |
- |
|
| 314 |
-// TimeSlice converts a slice of time.Time values into a slice of |
|
| 315 |
-// time.Time pointers |
|
| 316 |
-func TimeSlice(src []time.Time) []*time.Time {
|
|
| 317 |
- dst := make([]*time.Time, len(src)) |
|
| 318 |
- for i := 0; i < len(src); i++ {
|
|
| 319 |
- dst[i] = &(src[i]) |
|
| 320 |
- } |
|
| 321 |
- return dst |
|
| 322 |
-} |
|
| 323 |
- |
|
| 324 |
-// TimeValueSlice converts a slice of time.Time pointers into a slice of |
|
| 325 |
-// time.Time values |
|
| 326 |
-func TimeValueSlice(src []*time.Time) []time.Time {
|
|
| 327 |
- dst := make([]time.Time, len(src)) |
|
| 328 |
- for i := 0; i < len(src); i++ {
|
|
| 329 |
- if src[i] != nil {
|
|
| 330 |
- dst[i] = *(src[i]) |
|
| 331 |
- } |
|
| 332 |
- } |
|
| 333 |
- return dst |
|
| 334 |
-} |
|
| 335 |
- |
|
| 336 |
-// TimeMap converts a string map of time.Time values into a string |
|
| 337 |
-// map of time.Time pointers |
|
| 338 |
-func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
|
| 339 |
- dst := make(map[string]*time.Time) |
|
| 340 |
- for k, val := range src {
|
|
| 341 |
- v := val |
|
| 342 |
- dst[k] = &v |
|
| 343 |
- } |
|
| 344 |
- return dst |
|
| 345 |
-} |
|
| 346 |
- |
|
| 347 |
-// TimeValueMap converts a string map of time.Time pointers into a string |
|
| 348 |
-// map of time.Time values |
|
| 349 |
-func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
|
| 350 |
- dst := make(map[string]time.Time) |
|
| 351 |
- for k, val := range src {
|
|
| 352 |
- if val != nil {
|
|
| 353 |
- dst[k] = *val |
|
| 354 |
- } |
|
| 355 |
- } |
|
| 356 |
- return dst |
|
| 357 |
-} |
| 358 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,136 @@ |
| 0 |
+package corehandlers |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "io" |
|
| 6 |
+ "io/ioutil" |
|
| 7 |
+ "net/http" |
|
| 8 |
+ "net/url" |
|
| 9 |
+ "regexp" |
|
| 10 |
+ "strconv" |
|
| 11 |
+ |
|
| 12 |
+ "github.com/aws/aws-sdk-go/aws" |
|
| 13 |
+ "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 14 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 15 |
+) |
|
| 16 |
+ |
|
| 17 |
+// Interface for matching types which also have a Len method. |
|
| 18 |
+type lener interface {
|
|
| 19 |
+ Len() int |
|
| 20 |
+} |
|
| 21 |
+ |
|
| 22 |
+// BuildContentLength builds the content length of a request based on the body, |
|
| 23 |
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable |
|
| 24 |
+// to determine request body length and no "Content-Length" was specified it will panic. |
|
| 25 |
+var BuildContentLengthHandler = request.NamedHandler{"core.BuildContentLengthHandler", func(r *request.Request) {
|
|
| 26 |
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
|
| 27 |
+ length, _ := strconv.ParseInt(slength, 10, 64) |
|
| 28 |
+ r.HTTPRequest.ContentLength = length |
|
| 29 |
+ return |
|
| 30 |
+ } |
|
| 31 |
+ |
|
| 32 |
+ var length int64 |
|
| 33 |
+ switch body := r.Body.(type) {
|
|
| 34 |
+ case nil: |
|
| 35 |
+ length = 0 |
|
| 36 |
+ case lener: |
|
| 37 |
+ length = int64(body.Len()) |
|
| 38 |
+ case io.Seeker: |
|
| 39 |
+ r.BodyStart, _ = body.Seek(0, 1) |
|
| 40 |
+ end, _ := body.Seek(0, 2) |
|
| 41 |
+ body.Seek(r.BodyStart, 0) // make sure to seek back to original location |
|
| 42 |
+ length = end - r.BodyStart |
|
| 43 |
+ default: |
|
| 44 |
+ panic("Cannot get length of body, must provide `ContentLength`")
|
|
| 45 |
+ } |
|
| 46 |
+ |
|
| 47 |
+ r.HTTPRequest.ContentLength = length |
|
| 48 |
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
|
| 49 |
+}} |
|
| 50 |
+ |
|
| 51 |
+// UserAgentHandler is a request handler for injecting User agent into requests. |
|
| 52 |
+var UserAgentHandler = request.NamedHandler{"core.UserAgentHandler", func(r *request.Request) {
|
|
| 53 |
+ r.HTTPRequest.Header.Set("User-Agent", aws.SDKName+"/"+aws.SDKVersion)
|
|
| 54 |
+}} |
|
| 55 |
+ |
|
| 56 |
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
|
|
| 57 |
+ |
|
| 58 |
+// SendHandler is a request handler to send service request using HTTP client. |
|
| 59 |
+var SendHandler = request.NamedHandler{"core.SendHandler", func(r *request.Request) {
|
|
| 60 |
+ var err error |
|
| 61 |
+ r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest) |
|
| 62 |
+ if err != nil {
|
|
| 63 |
+ // Capture the case where url.Error is returned for error processing |
|
| 64 |
+ // response. e.g. 301 without location header comes back as string |
|
| 65 |
+ // error and r.HTTPResponse is nil. Other url redirect errors will |
|
| 66 |
+ // comeback in a similar method. |
|
| 67 |
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
|
|
| 68 |
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
|
|
| 69 |
+ code, _ := strconv.ParseInt(s[1], 10, 64) |
|
| 70 |
+ r.HTTPResponse = &http.Response{
|
|
| 71 |
+ StatusCode: int(code), |
|
| 72 |
+ Status: http.StatusText(int(code)), |
|
| 73 |
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
|
| 74 |
+ } |
|
| 75 |
+ return |
|
| 76 |
+ } |
|
| 77 |
+ } |
|
| 78 |
+ if r.HTTPResponse == nil {
|
|
| 79 |
+ // Add a dummy request response object to ensure the HTTPResponse |
|
| 80 |
+ // value is consistent. |
|
| 81 |
+ r.HTTPResponse = &http.Response{
|
|
| 82 |
+ StatusCode: int(0), |
|
| 83 |
+ Status: http.StatusText(int(0)), |
|
| 84 |
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
|
| 85 |
+ } |
|
| 86 |
+ } |
|
| 87 |
+ // Catch all other request errors. |
|
| 88 |
+ r.Error = awserr.New("RequestError", "send request failed", err)
|
|
| 89 |
+ r.Retryable = aws.Bool(true) // network errors are retryable |
|
| 90 |
+ } |
|
| 91 |
+}} |
|
| 92 |
+ |
|
| 93 |
+// ValidateResponseHandler is a request handler to validate service response. |
|
| 94 |
+var ValidateResponseHandler = request.NamedHandler{"core.ValidateResponseHandler", func(r *request.Request) {
|
|
| 95 |
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
|
|
| 96 |
+ // this may be replaced by an UnmarshalError handler |
|
| 97 |
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
|
|
| 98 |
+ } |
|
| 99 |
+}} |
|
| 100 |
+ |
|
| 101 |
+// AfterRetryHandler performs final checks to determine if the request should |
|
| 102 |
+// be retried and how long to delay. |
|
| 103 |
+var AfterRetryHandler = request.NamedHandler{"core.AfterRetryHandler", func(r *request.Request) {
|
|
| 104 |
+ // If one of the other handlers already set the retry state |
|
| 105 |
+ // we don't want to override it based on the service's state |
|
| 106 |
+ if r.Retryable == nil {
|
|
| 107 |
+ r.Retryable = aws.Bool(r.ShouldRetry(r)) |
|
| 108 |
+ } |
|
| 109 |
+ |
|
| 110 |
+ if r.WillRetry() {
|
|
| 111 |
+ r.RetryDelay = r.RetryRules(r) |
|
| 112 |
+ r.Service.Config.SleepDelay(r.RetryDelay) |
|
| 113 |
+ |
|
| 114 |
+ // when the expired token exception occurs the credentials |
|
| 115 |
+ // need to be expired locally so that the next request to |
|
| 116 |
+ // get credentials will trigger a credentials refresh. |
|
| 117 |
+ if r.IsErrorExpired() {
|
|
| 118 |
+ r.Service.Config.Credentials.Expire() |
|
| 119 |
+ } |
|
| 120 |
+ |
|
| 121 |
+ r.RetryCount++ |
|
| 122 |
+ r.Error = nil |
|
| 123 |
+ } |
|
| 124 |
+}} |
|
| 125 |
+ |
|
| 126 |
+// ValidateEndpointHandler is a request handler to validate a request had the |
|
| 127 |
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or |
|
| 128 |
+// region is not valid. |
|
| 129 |
+var ValidateEndpointHandler = request.NamedHandler{"core.ValidateEndpointHandler", func(r *request.Request) {
|
|
| 130 |
+ if r.Service.SigningRegion == "" && aws.StringValue(r.Service.Config.Region) == "" {
|
|
| 131 |
+ r.Error = aws.ErrMissingRegion |
|
| 132 |
+ } else if r.Service.Endpoint == "" {
|
|
| 133 |
+ r.Error = aws.ErrMissingEndpoint |
|
| 134 |
+ } |
|
| 135 |
+}} |
| 0 | 136 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,144 @@ |
| 0 |
+package corehandlers |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "reflect" |
|
| 5 |
+ "strconv" |
|
| 6 |
+ "strings" |
|
| 7 |
+ |
|
| 8 |
+ "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 9 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+// ValidateParameters is a request handler to validate the input parameters. |
|
| 13 |
+// Validating parameters only has meaning if done prior to the request being sent. |
|
| 14 |
+var ValidateParametersHandler = request.NamedHandler{"core.ValidateParametersHandler", func(r *request.Request) {
|
|
| 15 |
+ if r.ParamsFilled() {
|
|
| 16 |
+ v := validator{errors: []string{}}
|
|
| 17 |
+ v.validateAny(reflect.ValueOf(r.Params), "") |
|
| 18 |
+ |
|
| 19 |
+ if count := len(v.errors); count > 0 {
|
|
| 20 |
+ format := "%d validation errors:\n- %s" |
|
| 21 |
+ msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) |
|
| 22 |
+ r.Error = awserr.New("InvalidParameter", msg, nil)
|
|
| 23 |
+ } |
|
| 24 |
+ } |
|
| 25 |
+}} |
|
| 26 |
+ |
|
| 27 |
+// A validator validates values. Collects validations errors which occurs. |
|
| 28 |
+type validator struct {
|
|
| 29 |
+ errors []string |
|
| 30 |
+} |
|
| 31 |
+ |
|
| 32 |
+// validateAny will validate any struct, slice or map type. All validations |
|
| 33 |
+// are also performed recursively for nested types. |
|
| 34 |
+func (v *validator) validateAny(value reflect.Value, path string) {
|
|
| 35 |
+ value = reflect.Indirect(value) |
|
| 36 |
+ if !value.IsValid() {
|
|
| 37 |
+ return |
|
| 38 |
+ } |
|
| 39 |
+ |
|
| 40 |
+ switch value.Kind() {
|
|
| 41 |
+ case reflect.Struct: |
|
| 42 |
+ v.validateStruct(value, path) |
|
| 43 |
+ case reflect.Slice: |
|
| 44 |
+ for i := 0; i < value.Len(); i++ {
|
|
| 45 |
+ v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
|
|
| 46 |
+ } |
|
| 47 |
+ case reflect.Map: |
|
| 48 |
+ for _, n := range value.MapKeys() {
|
|
| 49 |
+ v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
|
|
| 50 |
+ } |
|
| 51 |
+ } |
|
| 52 |
+} |
|
| 53 |
+ |
|
| 54 |
+// validateStruct will validate the struct value's fields. If the structure has |
|
| 55 |
+// nested types those types will be validated also. |
|
| 56 |
+func (v *validator) validateStruct(value reflect.Value, path string) {
|
|
| 57 |
+ prefix := "." |
|
| 58 |
+ if path == "" {
|
|
| 59 |
+ prefix = "" |
|
| 60 |
+ } |
|
| 61 |
+ |
|
| 62 |
+ for i := 0; i < value.Type().NumField(); i++ {
|
|
| 63 |
+ f := value.Type().Field(i) |
|
| 64 |
+ if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
|
|
| 65 |
+ continue |
|
| 66 |
+ } |
|
| 67 |
+ fvalue := value.FieldByName(f.Name) |
|
| 68 |
+ |
|
| 69 |
+ err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) |
|
| 70 |
+ if err != nil {
|
|
| 71 |
+ v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name))
|
|
| 72 |
+ continue |
|
| 73 |
+ } |
|
| 74 |
+ |
|
| 75 |
+ v.validateAny(fvalue, path+prefix+f.Name) |
|
| 76 |
+ } |
|
| 77 |
+} |
|
| 78 |
+ |
|
| 79 |
+type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error |
|
| 80 |
+ |
|
| 81 |
+func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error {
|
|
| 82 |
+ for _, fn := range funcs {
|
|
| 83 |
+ if err := fn(f, fvalue); err != nil {
|
|
| 84 |
+ return err |
|
| 85 |
+ } |
|
| 86 |
+ } |
|
| 87 |
+ return nil |
|
| 88 |
+} |
|
| 89 |
+ |
|
| 90 |
+// Validates that a field has a valid value provided for required fields. |
|
| 91 |
+func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error {
|
|
| 92 |
+ if f.Tag.Get("required") == "" {
|
|
| 93 |
+ return nil |
|
| 94 |
+ } |
|
| 95 |
+ |
|
| 96 |
+ switch fvalue.Kind() {
|
|
| 97 |
+ case reflect.Ptr, reflect.Slice, reflect.Map: |
|
| 98 |
+ if fvalue.IsNil() {
|
|
| 99 |
+ return fmt.Errorf("missing required parameter")
|
|
| 100 |
+ } |
|
| 101 |
+ default: |
|
| 102 |
+ if !fvalue.IsValid() {
|
|
| 103 |
+ return fmt.Errorf("missing required parameter")
|
|
| 104 |
+ } |
|
| 105 |
+ } |
|
| 106 |
+ return nil |
|
| 107 |
+} |
|
| 108 |
+ |
|
| 109 |
+// Validates that if a value is provided for a field, that value must be at |
|
| 110 |
+// least a minimum length. |
|
| 111 |
+func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error {
|
|
| 112 |
+ minStr := f.Tag.Get("min")
|
|
| 113 |
+ if minStr == "" {
|
|
| 114 |
+ return nil |
|
| 115 |
+ } |
|
| 116 |
+ min, _ := strconv.ParseInt(minStr, 10, 64) |
|
| 117 |
+ |
|
| 118 |
+ kind := fvalue.Kind() |
|
| 119 |
+ if kind == reflect.Ptr {
|
|
| 120 |
+ if fvalue.IsNil() {
|
|
| 121 |
+ return nil |
|
| 122 |
+ } |
|
| 123 |
+ fvalue = fvalue.Elem() |
|
| 124 |
+ } |
|
| 125 |
+ |
|
| 126 |
+ switch fvalue.Kind() {
|
|
| 127 |
+ case reflect.String: |
|
| 128 |
+ if int64(fvalue.Len()) < min {
|
|
| 129 |
+ return fmt.Errorf("field too short, minimum length %d", min)
|
|
| 130 |
+ } |
|
| 131 |
+ case reflect.Slice, reflect.Map: |
|
| 132 |
+ if fvalue.IsNil() {
|
|
| 133 |
+ return nil |
|
| 134 |
+ } |
|
| 135 |
+ if int64(fvalue.Len()) < min {
|
|
| 136 |
+ return fmt.Errorf("field too short, minimum length %d", min)
|
|
| 137 |
+ } |
|
| 138 |
+ |
|
| 139 |
+ // TODO min can also apply to number minimum value. |
|
| 140 |
+ |
|
| 141 |
+ } |
|
| 142 |
+ return nil |
|
| 143 |
+} |
| 0 | 144 |
deleted file mode 100644 |
| ... | ... |
@@ -1,162 +0,0 @@ |
| 1 |
-package credentials |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bufio" |
|
| 5 |
- "encoding/json" |
|
| 6 |
- "fmt" |
|
| 7 |
- "net/http" |
|
| 8 |
- "time" |
|
| 9 |
- |
|
| 10 |
- "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 11 |
-) |
|
| 12 |
- |
|
| 13 |
-const metadataCredentialsEndpoint = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" |
|
| 14 |
- |
|
| 15 |
-// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if |
|
| 16 |
-// those credentials are expired. |
|
| 17 |
-// |
|
| 18 |
-// Example how to configure the EC2RoleProvider with custom http Client, Endpoint |
|
| 19 |
-// or ExpiryWindow |
|
| 20 |
-// |
|
| 21 |
-// p := &credentials.EC2RoleProvider{
|
|
| 22 |
-// // Pass in a custom timeout to be used when requesting |
|
| 23 |
-// // IAM EC2 Role credentials. |
|
| 24 |
-// Client: &http.Client{
|
|
| 25 |
-// Timeout: 10 * time.Second, |
|
| 26 |
-// }, |
|
| 27 |
-// // Use default EC2 Role metadata endpoint, Alternate endpoints can be |
|
| 28 |
-// // specified setting Endpoint to something else. |
|
| 29 |
-// Endpoint: "", |
|
| 30 |
-// // Do not use early expiry of credentials. If a non zero value is |
|
| 31 |
-// // specified the credentials will be expired early |
|
| 32 |
-// ExpiryWindow: 0, |
|
| 33 |
-// } |
|
| 34 |
-type EC2RoleProvider struct {
|
|
| 35 |
- Expiry |
|
| 36 |
- |
|
| 37 |
- // Endpoint must be fully quantified URL |
|
| 38 |
- Endpoint string |
|
| 39 |
- |
|
| 40 |
- // HTTP client to use when connecting to EC2 service |
|
| 41 |
- Client *http.Client |
|
| 42 |
- |
|
| 43 |
- // ExpiryWindow will allow the credentials to trigger refreshing prior to |
|
| 44 |
- // the credentials actually expiring. This is beneficial so race conditions |
|
| 45 |
- // with expiring credentials do not cause request to fail unexpectedly |
|
| 46 |
- // due to ExpiredTokenException exceptions. |
|
| 47 |
- // |
|
| 48 |
- // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true |
|
| 49 |
- // 10 seconds before the credentials are actually expired. |
|
| 50 |
- // |
|
| 51 |
- // If ExpiryWindow is 0 or less it will be ignored. |
|
| 52 |
- ExpiryWindow time.Duration |
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 |
-// NewEC2RoleCredentials returns a pointer to a new Credentials object |
|
| 56 |
-// wrapping the EC2RoleProvider. |
|
| 57 |
-// |
|
| 58 |
-// Takes a custom http.Client which can be configured for custom handling of |
|
| 59 |
-// things such as timeout. |
|
| 60 |
-// |
|
| 61 |
-// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving |
|
| 62 |
-// role and credentials. |
|
| 63 |
-// |
|
| 64 |
-// Window is the expiry window that will be subtracted from the expiry returned |
|
| 65 |
-// by the role credential request. This is done so that the credentials will |
|
| 66 |
-// expire sooner than their actual lifespan. |
|
| 67 |
-func NewEC2RoleCredentials(client *http.Client, endpoint string, window time.Duration) *Credentials {
|
|
| 68 |
- return NewCredentials(&EC2RoleProvider{
|
|
| 69 |
- Endpoint: endpoint, |
|
| 70 |
- Client: client, |
|
| 71 |
- ExpiryWindow: window, |
|
| 72 |
- }) |
|
| 73 |
-} |
|
| 74 |
- |
|
| 75 |
-// Retrieve retrieves credentials from the EC2 service. |
|
| 76 |
-// Error will be returned if the request fails, or unable to extract |
|
| 77 |
-// the desired credentials. |
|
| 78 |
-func (m *EC2RoleProvider) Retrieve() (Value, error) {
|
|
| 79 |
- if m.Client == nil {
|
|
| 80 |
- m.Client = http.DefaultClient |
|
| 81 |
- } |
|
| 82 |
- if m.Endpoint == "" {
|
|
| 83 |
- m.Endpoint = metadataCredentialsEndpoint |
|
| 84 |
- } |
|
| 85 |
- |
|
| 86 |
- credsList, err := requestCredList(m.Client, m.Endpoint) |
|
| 87 |
- if err != nil {
|
|
| 88 |
- return Value{}, err
|
|
| 89 |
- } |
|
| 90 |
- |
|
| 91 |
- if len(credsList) == 0 {
|
|
| 92 |
- return Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
|
|
| 93 |
- } |
|
| 94 |
- credsName := credsList[0] |
|
| 95 |
- |
|
| 96 |
- roleCreds, err := requestCred(m.Client, m.Endpoint, credsName) |
|
| 97 |
- if err != nil {
|
|
| 98 |
- return Value{}, err
|
|
| 99 |
- } |
|
| 100 |
- |
|
| 101 |
- m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) |
|
| 102 |
- |
|
| 103 |
- return Value{
|
|
| 104 |
- AccessKeyID: roleCreds.AccessKeyID, |
|
| 105 |
- SecretAccessKey: roleCreds.SecretAccessKey, |
|
| 106 |
- SessionToken: roleCreds.Token, |
|
| 107 |
- }, nil |
|
| 108 |
-} |
|
| 109 |
- |
|
| 110 |
-// A ec2RoleCredRespBody provides the shape for deserializing credential |
|
| 111 |
-// request responses. |
|
| 112 |
-type ec2RoleCredRespBody struct {
|
|
| 113 |
- Expiration time.Time |
|
| 114 |
- AccessKeyID string |
|
| 115 |
- SecretAccessKey string |
|
| 116 |
- Token string |
|
| 117 |
-} |
|
| 118 |
- |
|
| 119 |
-// requestCredList requests a list of credentials from the EC2 service. |
|
| 120 |
-// If there are no credentials, or there is an error making or receiving the request |
|
| 121 |
-func requestCredList(client *http.Client, endpoint string) ([]string, error) {
|
|
| 122 |
- resp, err := client.Get(endpoint) |
|
| 123 |
- if err != nil {
|
|
| 124 |
- return nil, awserr.New("ListEC2Role", "failed to list EC2 Roles", err)
|
|
| 125 |
- } |
|
| 126 |
- defer resp.Body.Close() |
|
| 127 |
- |
|
| 128 |
- credsList := []string{}
|
|
| 129 |
- s := bufio.NewScanner(resp.Body) |
|
| 130 |
- for s.Scan() {
|
|
| 131 |
- credsList = append(credsList, s.Text()) |
|
| 132 |
- } |
|
| 133 |
- |
|
| 134 |
- if err := s.Err(); err != nil {
|
|
| 135 |
- return nil, awserr.New("ReadEC2Role", "failed to read list of EC2 Roles", err)
|
|
| 136 |
- } |
|
| 137 |
- |
|
| 138 |
- return credsList, nil |
|
| 139 |
-} |
|
| 140 |
- |
|
| 141 |
-// requestCred requests the credentials for a specific credentials from the EC2 service. |
|
| 142 |
-// |
|
| 143 |
-// If the credentials cannot be found, or there is an error reading the response |
|
| 144 |
-// and error will be returned. |
|
| 145 |
-func requestCred(client *http.Client, endpoint, credsName string) (*ec2RoleCredRespBody, error) {
|
|
| 146 |
- resp, err := client.Get(endpoint + credsName) |
|
| 147 |
- if err != nil {
|
|
| 148 |
- return nil, awserr.New("GetEC2RoleCredentials",
|
|
| 149 |
- fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
|
|
| 150 |
- err) |
|
| 151 |
- } |
|
| 152 |
- defer resp.Body.Close() |
|
| 153 |
- |
|
| 154 |
- respCreds := &ec2RoleCredRespBody{}
|
|
| 155 |
- if err := json.NewDecoder(resp.Body).Decode(respCreds); err != nil {
|
|
| 156 |
- return nil, awserr.New("DecodeEC2RoleCredentials",
|
|
| 157 |
- fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
|
|
| 158 |
- err) |
|
| 159 |
- } |
|
| 160 |
- |
|
| 161 |
- return respCreds, nil |
|
| 162 |
-} |
| 163 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,168 @@ |
| 0 |
+package ec2rolecreds |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bufio" |
|
| 4 |
+ "encoding/json" |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "path" |
|
| 7 |
+ "strings" |
|
| 8 |
+ "time" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 11 |
+ "github.com/aws/aws-sdk-go/aws/credentials" |
|
| 12 |
+ "github.com/aws/aws-sdk-go/aws/ec2metadata" |
|
| 13 |
+) |
|
| 14 |
+ |
|
| 15 |
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track of |
|
| 16 |
+// whether those credentials are expired. |
|
| 17 |
+// |
|
| 18 |
+// Example of how to configure the EC2RoleProvider with a custom http Client, Endpoint |
|
| 19 |
+// or ExpiryWindow |
|
| 20 |
+// |
|
| 21 |
+// p := &ec2rolecreds.EC2RoleProvider{
|
|
| 22 |
+// // Pass in a custom timeout to be used when requesting |
|
| 23 |
+// // IAM EC2 Role credentials. |
|
| 24 |
+// Client: &http.Client{
|
|
| 25 |
+// Timeout: 10 * time.Second, |
|
| 26 |
+// }, |
|
| 27 |
+// // Use default EC2 Role metadata endpoint, Alternate endpoints can be |
|
| 28 |
+// // specified setting Endpoint to something else. |
|
| 29 |
+// Endpoint: "", |
|
| 30 |
+// // Do not use early expiry of credentials. If a non zero value is |
|
| 31 |
+// // specified the credentials will be expired early |
|
| 32 |
+// ExpiryWindow: 0, |
|
| 33 |
+// } |
|
| 34 |
+type EC2RoleProvider struct {
|
|
| 35 |
+ credentials.Expiry |
|
| 36 |
+ |
|
| 37 |
+ // EC2Metadata client to use when connecting to EC2 metadata service |
|
| 38 |
+ Client *ec2metadata.Client |
|
| 39 |
+ |
|
| 40 |
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to |
|
| 41 |
+ // the credentials actually expiring. This is beneficial so race conditions |
|
| 42 |
+ // with expiring credentials do not cause request to fail unexpectedly |
|
| 43 |
+ // due to ExpiredTokenException exceptions. |
|
| 44 |
+ // |
|
| 45 |
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true |
|
| 46 |
+ // 10 seconds before the credentials are actually expired. |
|
| 47 |
+ // |
|
| 48 |
+ // If ExpiryWindow is 0 or less it will be ignored. |
|
| 49 |
+ ExpiryWindow time.Duration |
|
| 50 |
+} |
|
| 51 |
+ |
|
| 52 |
+// NewCredentials returns a pointer to a new Credentials object |
|
| 53 |
+// wrapping the EC2RoleProvider. |
|
| 54 |
+// |
|
| 55 |
+// Takes an ec2metadata.Client which can be configured with a custom |
|
| 56 |
+// http.Client for handling of things such as timeout. |
|
| 57 |
+// |
|
| 58 |
+// The client is used by the EC2RoleProvider to connect to the EC2 metadata |
|
| 59 |
+// service when retrieving role credentials. |
|
| 60 |
+// |
|
| 61 |
+// Window is the expiry window that will be subtracted from the expiry returned |
|
| 62 |
+// by the role credential request. This is done so that the credentials will |
|
| 63 |
+// expire sooner than their actual lifespan. |
|
| 64 |
+func NewCredentials(client *ec2metadata.Client, window time.Duration) *credentials.Credentials {
|
|
| 65 |
+ return credentials.NewCredentials(&EC2RoleProvider{
|
|
| 66 |
+ Client: client, |
|
| 67 |
+ ExpiryWindow: window, |
|
| 68 |
+ }) |
|
| 69 |
+} |
|
| 70 |
+ |
|
| 71 |
+// Retrieve retrieves credentials from the EC2 service. |
|
| 72 |
+// Error will be returned if the request fails, or unable to extract |
|
| 73 |
+// the desired credentials. |
|
| 74 |
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
|
|
| 75 |
+ if m.Client == nil {
|
|
| 76 |
+ m.Client = ec2metadata.New(nil) |
|
| 77 |
+ } |
|
| 78 |
+ |
|
| 79 |
+ credsList, err := requestCredList(m.Client) |
|
| 80 |
+ if err != nil {
|
|
| 81 |
+ return credentials.Value{}, err
|
|
| 82 |
+ } |
|
| 83 |
+ |
|
| 84 |
+ if len(credsList) == 0 {
|
|
| 85 |
+ return credentials.Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
|
|
| 86 |
+ } |
|
| 87 |
+ credsName := credsList[0] |
|
| 88 |
+ |
|
| 89 |
+ roleCreds, err := requestCred(m.Client, credsName) |
|
| 90 |
+ if err != nil {
|
|
| 91 |
+ return credentials.Value{}, err
|
|
| 92 |
+ } |
|
| 93 |
+ |
|
| 94 |
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) |
|
| 95 |
+ |
|
| 96 |
+ return credentials.Value{
|
|
| 97 |
+ AccessKeyID: roleCreds.AccessKeyID, |
|
| 98 |
+ SecretAccessKey: roleCreds.SecretAccessKey, |
|
| 99 |
+ SessionToken: roleCreds.Token, |
|
| 100 |
+ }, nil |
|
| 101 |
+} |
|
| 102 |
+ |
|
| 103 |
+// An ec2RoleCredRespBody provides the shape for deserializing credential |
|
| 104 |
+// request responses. |
|
| 105 |
+type ec2RoleCredRespBody struct {
|
|
| 106 |
+ // Success State |
|
| 107 |
+ Expiration time.Time |
|
| 108 |
+ AccessKeyID string |
|
| 109 |
+ SecretAccessKey string |
|
| 110 |
+ Token string |
|
| 111 |
+ |
|
| 112 |
+ // Error state |
|
| 113 |
+ Code string |
|
| 114 |
+ Message string |
|
| 115 |
+} |
|
| 116 |
+ |
|
| 117 |
+const iamSecurityCredsPath = "/iam/security-credentials" |
|
| 118 |
+ |
|
| 119 |
+// requestCredList requests a list of credentials from the EC2 service. |
|
| 120 |
+// If there are no credentials, or there is an error making or receiving the request, an error will be returned. |
|
| 121 |
+func requestCredList(client *ec2metadata.Client) ([]string, error) {
|
|
| 122 |
+ resp, err := client.GetMetadata(iamSecurityCredsPath) |
|
| 123 |
+ if err != nil {
|
|
| 124 |
+ return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
|
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ credsList := []string{}
|
|
| 128 |
+ s := bufio.NewScanner(strings.NewReader(resp)) |
|
| 129 |
+ for s.Scan() {
|
|
| 130 |
+ credsList = append(credsList, s.Text()) |
|
| 131 |
+ } |
|
| 132 |
+ |
|
| 133 |
+ if err := s.Err(); err != nil {
|
|
| 134 |
+ return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err)
|
|
| 135 |
+ } |
|
| 136 |
+ |
|
| 137 |
+ return credsList, nil |
|
| 138 |
+} |
|
| 139 |
+ |
|
| 140 |
+// requestCred requests the credentials for a specific credential name from the EC2 service. |
|
| 141 |
+// |
|
| 142 |
+// If the credentials cannot be found, or there is an error reading the response |
|
| 143 |
+// an error will be returned. |
|
| 144 |
+func requestCred(client *ec2metadata.Client, credsName string) (ec2RoleCredRespBody, error) {
|
|
| 145 |
+ resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) |
|
| 146 |
+ if err != nil {
|
|
| 147 |
+ return ec2RoleCredRespBody{},
|
|
| 148 |
+ awserr.New("EC2RoleRequestError",
|
|
| 149 |
+ fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
|
|
| 150 |
+ err) |
|
| 151 |
+ } |
|
| 152 |
+ |
|
| 153 |
+ respCreds := ec2RoleCredRespBody{}
|
|
| 154 |
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
|
|
| 155 |
+ return ec2RoleCredRespBody{},
|
|
| 156 |
+ awserr.New("SerializationError",
|
|
| 157 |
+ fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
|
|
| 158 |
+ err) |
|
| 159 |
+ } |
|
| 160 |
+ |
|
| 161 |
+ if respCreds.Code != "Success" {
|
|
| 162 |
+ // If an error code was returned something failed requesting the role. |
|
| 163 |
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
|
|
| 164 |
+ } |
|
| 165 |
+ |
|
| 166 |
+ return respCreds, nil |
|
| 167 |
+} |
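The new ec2rolecreds package is consumed through the credentials interfaces it imports. A minimal sketch of wiring it up by hand, assuming the vendored ec2metadata and credentials packages above; the five-minute expiry window is only an example value, not code from this change:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
)

func main() {
	// Build the provider on top of the shared EC2 metadata client; the expiry
	// window refreshes credentials slightly before they actually lapse.
	creds := ec2rolecreds.NewCredentials(ec2metadata.New(nil), 5*time.Minute)

	// Resolve a credential Value; this only succeeds on an EC2 instance
	// that has an IAM role attached.
	v, err := creds.Get()
	if err != nil {
		fmt.Println("no EC2 role credentials:", err)
		return
	}
	fmt.Println("retrieved credentials for access key", v.AccessKeyID)
}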
| ... | ... |
@@ -22,8 +22,12 @@ var ( |
| 22 | 22 |
// |
| 23 | 23 |
// Profile ini file example: $HOME/.aws/credentials |
| 24 | 24 |
type SharedCredentialsProvider struct {
|
| 25 |
- // Path to the shared credentials file. If empty will default to current user's |
|
| 26 |
- // home directory. |
|
| 25 |
+ // Path to the shared credentials file. |
|
| 26 |
+ // |
|
| 27 |
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the |
|
| 28 |
+ // env value is empty will default to current user's home directory. |
|
| 29 |
+ // Linux/OSX: "$HOME/.aws/credentials" |
|
| 30 |
+ // Windows: "%USERPROFILE%\.aws\credentials" |
|
| 27 | 31 |
Filename string |
| 28 | 32 |
|
| 29 | 33 |
// AWS Profile to extract credentials from the shared credentials file. If empty |
| ... | ... |
@@ -106,6 +110,10 @@ func loadProfile(filename, profile string) (Value, error) {
|
| 106 | 106 |
// Will return an error if the user's home directory path cannot be found. |
| 107 | 107 |
func (p *SharedCredentialsProvider) filename() (string, error) {
|
| 108 | 108 |
if p.Filename == "" {
|
| 109 |
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
|
|
| 110 |
+ return p.Filename, nil |
|
| 111 |
+ } |
|
| 112 |
+ |
|
| 109 | 113 |
homeDir := os.Getenv("HOME") // *nix
|
| 110 | 114 |
if homeDir == "" { // Windows
|
| 111 | 115 |
homeDir = os.Getenv("USERPROFILE")
|
| 112 | 116 |
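The updated lookup order can be exercised by pointing the provider at a non-default file through the environment. A rough sketch of that, where the file path is only a placeholder:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// With Filename left empty, the provider now consults this variable
	// before falling back to $HOME/.aws/credentials (or the %USERPROFILE%
	// equivalent on Windows). The path here is just an example.
	os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "/etc/docker/aws-credentials")

	p := &credentials.SharedCredentialsProvider{Profile: "default"}
	v, err := p.Retrieve()
	if err != nil {
		fmt.Println("could not load shared credentials:", err)
		return
	}
	fmt.Println("loaded credentials for access key", v.AccessKeyID)
}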
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,39 @@ |
| 0 |
+package defaults |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "net/http" |
|
| 4 |
+ "os" |
|
| 5 |
+ "time" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/aws/aws-sdk-go/aws" |
|
| 8 |
+ "github.com/aws/aws-sdk-go/aws/credentials" |
|
| 9 |
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+// DefaultChainCredentials is a Credentials which will find the first available |
|
| 13 |
+// credentials Value from the list of Providers. |
|
| 14 |
+// |
|
| 15 |
+// This should be used in the default case. Once the type of credentials are |
|
| 16 |
+// known switching to the specific Credentials will be more efficient. |
|
| 17 |
+var DefaultChainCredentials = credentials.NewChainCredentials( |
|
| 18 |
+ []credentials.Provider{
|
|
| 19 |
+ &credentials.EnvProvider{},
|
|
| 20 |
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
|
| 21 |
+ &ec2rolecreds.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
|
|
| 22 |
+ }) |
|
| 23 |
+ |
|
| 24 |
+// DefaultConfig is the default configuration that all service clients will be based off of. |
|
| 25 |
+// By default, all clients use this structure for initialization options unless |
|
| 26 |
+// a custom configuration object is passed in. |
|
| 27 |
+// |
|
| 28 |
+// You may modify this global structure to change all default configuration |
|
| 29 |
+// in the SDK. Note that configuration options are copied by value, so any |
|
| 30 |
+// modifications must happen before constructing a client. |
|
| 31 |
+var DefaultConfig = aws.NewConfig(). |
|
| 32 |
+ WithCredentials(DefaultChainCredentials). |
|
| 33 |
+ WithRegion(os.Getenv("AWS_REGION")).
|
|
| 34 |
+ WithHTTPClient(http.DefaultClient). |
|
| 35 |
+ WithMaxRetries(aws.DefaultRetries). |
|
| 36 |
+ WithLogger(aws.NewDefaultLogger()). |
|
| 37 |
+ WithLogLevel(aws.LogOff). |
|
| 38 |
+ WithSleepDelay(time.Sleep) |
| 0 | 39 |
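Because configuration options are copied by value, any override of DefaultConfig has to happen before service clients are constructed. A hedged sketch of that pattern; the region and retry values are illustrative only:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Adjust the global defaults before any service client is created;
	// clients copy this configuration at construction time.
	defaults.DefaultConfig.Region = aws.String("us-west-2")
	defaults.DefaultConfig.MaxRetries = aws.Int(5)

	fmt.Println("default region:", aws.StringValue(defaults.DefaultConfig.Region))
}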
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,43 @@ |
| 0 |
+package ec2metadata |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "path" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 6 |
+) |
|
| 7 |
+ |
|
| 8 |
+// GetMetadata uses the path provided to request data from the EC2 metadata service and returns the response content. |
|
| 9 |
+func (c *Client) GetMetadata(p string) (string, error) {
|
|
| 10 |
+ op := &request.Operation{
|
|
| 11 |
+ Name: "GetMetadata", |
|
| 12 |
+ HTTPMethod: "GET", |
|
| 13 |
+ HTTPPath: path.Join("/", "meta-data", p),
|
|
| 14 |
+ } |
|
| 15 |
+ |
|
| 16 |
+ output := &metadataOutput{}
|
|
| 17 |
+ req := request.New(c.Service.ServiceInfo, c.Service.Handlers, c.Service.Retryer, op, nil, output) |
|
| 18 |
+ |
|
| 19 |
+ return output.Content, req.Send() |
|
| 20 |
+} |
|
| 21 |
+ |
|
| 22 |
+// Region returns the region the instance is running in. |
|
| 23 |
+func (c *Client) Region() (string, error) {
|
|
| 24 |
+ resp, err := c.GetMetadata("placement/availability-zone")
|
|
| 25 |
+ if err != nil {
|
|
| 26 |
+ return "", err |
|
| 27 |
+ } |
|
| 28 |
+ |
|
| 29 |
+ // returns the region without the availability zone letter. Eg: us-west-2a becomes us-west-2 |
|
| 30 |
+ return resp[:len(resp)-1], nil |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+// Available returns if the application has access to the EC2 Metadata service. |
|
| 34 |
+// Can be used to determine if application is running within an EC2 Instance and |
|
| 35 |
+// the metadata service is available. |
|
| 36 |
+func (c *Client) Available() bool {
|
|
| 37 |
+ if _, err := c.GetMetadata("instance-id"); err != nil {
|
|
| 38 |
+ return false |
|
| 39 |
+ } |
|
| 40 |
+ |
|
| 41 |
+ return true |
|
| 42 |
+} |
| 0 | 43 |
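One way these helpers might be combined for region auto-detection is sketched below; it is an illustration of the API surface above, not code from this change:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
)

func main() {
	client := ec2metadata.New(nil)

	// Available probes the metadata service with a cheap instance-id lookup,
	// so callers can bail out quickly when running outside EC2.
	if !client.Available() {
		fmt.Println("not running on EC2, falling back to explicit configuration")
		return
	}

	// Region strips the availability zone letter from the placement data,
	// e.g. us-west-2a becomes us-west-2.
	region, err := client.Region()
	if err != nil {
		fmt.Println("could not determine region:", err)
		return
	}
	fmt.Println("detected region:", region)
}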
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,135 @@ |
| 0 |
+package ec2metadata |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "io/ioutil" |
|
| 4 |
+ "net/http" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/aws/aws-sdk-go/aws" |
|
| 7 |
+ "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 8 |
+ "github.com/aws/aws-sdk-go/aws/credentials" |
|
| 9 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 10 |
+ "github.com/aws/aws-sdk-go/aws/service" |
|
| 11 |
+ "github.com/aws/aws-sdk-go/aws/service/serviceinfo" |
|
| 12 |
+) |
|
| 13 |
+ |
|
| 14 |
+// DefaultRetries states the default number of times the service client will |
|
| 15 |
+// attempt to retry a failed request before failing. |
|
| 16 |
+const DefaultRetries = 3 |
|
| 17 |
+ |
|
| 18 |
+// A Config provides the configuration for the EC2 Metadata service. |
|
| 19 |
+type Config struct {
|
|
| 20 |
+ // An optional endpoint URL (hostname only or fully qualified URI) |
|
| 21 |
+ // that overrides the default service endpoint for a client. Set this |
|
| 22 |
+ // to nil, or `""` to use the default service endpoint. |
|
| 23 |
+ Endpoint *string |
|
| 24 |
+ |
|
| 25 |
+ // The HTTP client to use when sending requests. Defaults to |
|
| 26 |
+ // `http.DefaultClient`. |
|
| 27 |
+ HTTPClient *http.Client |
|
| 28 |
+ |
|
| 29 |
+ // The logger writer interface to write logging messages to. Defaults to |
|
| 30 |
+ // standard out. |
|
| 31 |
+ Logger aws.Logger |
|
| 32 |
+ |
|
| 33 |
+ // An integer value representing the logging level. The default log level |
|
| 34 |
+ // is zero (LogOff), which represents no logging. To enable logging set |
|
| 35 |
+ // to a LogLevel Value. |
|
| 36 |
+ LogLevel *aws.LogLevelType |
|
| 37 |
+ |
|
| 38 |
+ // The maximum number of times that a request will be retried for failures. |
|
| 39 |
+ // Defaults to DefaultRetries for the number of retries to be performed |
|
| 40 |
+ // per request. |
|
| 41 |
+ MaxRetries *int |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+// A Client is an EC2 Metadata service Client. |
|
| 45 |
+type Client struct {
|
|
| 46 |
+ *service.Service |
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+// New creates a new instance of the EC2 Metadata service client. |
|
| 50 |
+// |
|
| 51 |
+// In the general use case the configuration for this service client should not |
|
| 52 |
+// be needed and `nil` can be provided. Configuration is only needed if the |
|
| 53 |
+// `ec2metadata.Config` defaults need to be overridden. Eg. Setting LogLevel. |
|
| 54 |
+// |
|
| 55 |
+// @note This configuration will NOT be merged with the default AWS service |
|
| 56 |
+// client configuration `defaults.DefaultConfig`, due to circular dependencies |
|
| 57 |
+// between the defaults package and the credentials EC2 Role Provider. |
|
| 58 |
+func New(config *Config) *Client {
|
|
| 59 |
+ service := &service.Service{
|
|
| 60 |
+ ServiceInfo: serviceinfo.ServiceInfo{
|
|
| 61 |
+ Config: copyConfig(config), |
|
| 62 |
+ ServiceName: "Client", |
|
| 63 |
+ Endpoint: "http://169.254.169.254/latest", |
|
| 64 |
+ APIVersion: "latest", |
|
| 65 |
+ }, |
|
| 66 |
+ } |
|
| 67 |
+ service.Initialize() |
|
| 68 |
+ service.Handlers.Unmarshal.PushBack(unmarshalHandler) |
|
| 69 |
+ service.Handlers.UnmarshalError.PushBack(unmarshalError) |
|
| 70 |
+ service.Handlers.Validate.Clear() |
|
| 71 |
+ service.Handlers.Validate.PushBack(validateEndpointHandler) |
|
| 72 |
+ |
|
| 73 |
+ return &Client{service}
|
|
| 74 |
+} |
|
| 75 |
+ |
|
| 76 |
+func copyConfig(config *Config) *aws.Config {
|
|
| 77 |
+ if config == nil {
|
|
| 78 |
+ config = &Config{}
|
|
| 79 |
+ } |
|
| 80 |
+ c := &aws.Config{
|
|
| 81 |
+ Credentials: credentials.AnonymousCredentials, |
|
| 82 |
+ Endpoint: config.Endpoint, |
|
| 83 |
+ HTTPClient: config.HTTPClient, |
|
| 84 |
+ Logger: config.Logger, |
|
| 85 |
+ LogLevel: config.LogLevel, |
|
| 86 |
+ MaxRetries: config.MaxRetries, |
|
| 87 |
+ } |
|
| 88 |
+ |
|
| 89 |
+ if c.HTTPClient == nil {
|
|
| 90 |
+ c.HTTPClient = http.DefaultClient |
|
| 91 |
+ } |
|
| 92 |
+ if c.Logger == nil {
|
|
| 93 |
+ c.Logger = aws.NewDefaultLogger() |
|
| 94 |
+ } |
|
| 95 |
+ if c.LogLevel == nil {
|
|
| 96 |
+ c.LogLevel = aws.LogLevel(aws.LogOff) |
|
| 97 |
+ } |
|
| 98 |
+ if c.MaxRetries == nil {
|
|
| 99 |
+ c.MaxRetries = aws.Int(DefaultRetries) |
|
| 100 |
+ } |
|
| 101 |
+ |
|
| 102 |
+ return c |
|
| 103 |
+} |
|
| 104 |
+ |
|
| 105 |
+type metadataOutput struct {
|
|
| 106 |
+ Content string |
|
| 107 |
+} |
|
| 108 |
+ |
|
| 109 |
+func unmarshalHandler(r *request.Request) {
|
|
| 110 |
+ defer r.HTTPResponse.Body.Close() |
|
| 111 |
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body) |
|
| 112 |
+ if err != nil {
|
|
| 113 |
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
|
|
| 114 |
+ } |
|
| 115 |
+ |
|
| 116 |
+ data := r.Data.(*metadataOutput) |
|
| 117 |
+ data.Content = string(b) |
|
| 118 |
+} |
|
| 119 |
+ |
|
| 120 |
+func unmarshalError(r *request.Request) {
|
|
| 121 |
+ defer r.HTTPResponse.Body.Close() |
|
| 122 |
+ _, err := ioutil.ReadAll(r.HTTPResponse.Body) |
|
| 123 |
+ if err != nil {
|
|
| 124 |
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
|
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ // TODO extract the error... |
|
| 128 |
+} |
|
| 129 |
+ |
|
| 130 |
+func validateEndpointHandler(r *request.Request) {
|
|
| 131 |
+ if r.Service.Endpoint == "" {
|
|
| 132 |
+ r.Error = aws.ErrMissingEndpoint |
|
| 133 |
+ } |
|
| 134 |
+} |
| 0 | 135 |
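A hedged sketch of constructing the metadata client with overridden settings; the timeout and retry values are only examples:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
)

func main() {
	// The metadata endpoint is link-local, so a short timeout keeps callers
	// responsive when the service is unreachable. Values are illustrative.
	client := ec2metadata.New(&ec2metadata.Config{
		HTTPClient: &http.Client{Timeout: 2 * time.Second},
		MaxRetries: aws.Int(1),
	})

	id, err := client.GetMetadata("instance-id")
	if err != nil {
		fmt.Println("metadata lookup failed:", err)
		return
	}
	fmt.Println("instance id:", id)
}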
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,17 @@ |
| 0 |
+package aws |
|
| 1 |
+ |
|
| 2 |
+import "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 3 |
+ |
|
| 4 |
+var ( |
|
| 5 |
+ // ErrMissingRegion is an error that is returned if region configuration is |
|
| 6 |
+ // not found. |
|
| 7 |
+ // |
|
| 8 |
+ // @readonly |
|
| 9 |
+ ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil)
|
|
| 10 |
+ |
|
| 11 |
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be |
|
| 12 |
+ // resolved for a service. |
|
| 13 |
+ // |
|
| 14 |
+ // @readonly |
|
| 15 |
+ ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
|
|
| 16 |
+) |
| 0 | 17 |
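Since these are plain package-level sentinel values, callers can detect the conditions with a direct comparison. The helper below is hypothetical and only illustrates that pattern:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

// checkRegion is a hypothetical helper that mirrors how validation handlers
// surface a missing region as the shared sentinel error.
func checkRegion(region *string) error {
	if aws.StringValue(region) == "" {
		return aws.ErrMissingRegion
	}
	return nil
}

func main() {
	err := checkRegion(nil)
	// Because ErrMissingRegion is a package-level value, a direct comparison
	// is enough to detect this condition.
	if err == aws.ErrMissingRegion {
		fmt.Println("region must be configured, e.g. via AWS_REGION")
	}
}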
deleted file mode 100644 |
| ... | ... |
@@ -1,157 +0,0 @@ |
| 1 |
-package aws |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "fmt" |
|
| 6 |
- "io" |
|
| 7 |
- "io/ioutil" |
|
| 8 |
- "net/http" |
|
| 9 |
- "net/url" |
|
| 10 |
- "regexp" |
|
| 11 |
- "strconv" |
|
| 12 |
- "time" |
|
| 13 |
- |
|
| 14 |
- "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 15 |
-) |
|
| 16 |
- |
|
| 17 |
-var sleepDelay = func(delay time.Duration) {
|
|
| 18 |
- time.Sleep(delay) |
|
| 19 |
-} |
|
| 20 |
- |
|
| 21 |
-// Interface for matching types which also have a Len method. |
|
| 22 |
-type lener interface {
|
|
| 23 |
- Len() int |
|
| 24 |
-} |
|
| 25 |
- |
|
| 26 |
-// BuildContentLength builds the content length of a request based on the body, |
|
| 27 |
-// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable |
|
| 28 |
-// to determine request body length and no "Content-Length" was specified it will panic. |
|
| 29 |
-func BuildContentLength(r *Request) {
|
|
| 30 |
- if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
|
| 31 |
- length, _ := strconv.ParseInt(slength, 10, 64) |
|
| 32 |
- r.HTTPRequest.ContentLength = length |
|
| 33 |
- return |
|
| 34 |
- } |
|
| 35 |
- |
|
| 36 |
- var length int64 |
|
| 37 |
- switch body := r.Body.(type) {
|
|
| 38 |
- case nil: |
|
| 39 |
- length = 0 |
|
| 40 |
- case lener: |
|
| 41 |
- length = int64(body.Len()) |
|
| 42 |
- case io.Seeker: |
|
| 43 |
- r.bodyStart, _ = body.Seek(0, 1) |
|
| 44 |
- end, _ := body.Seek(0, 2) |
|
| 45 |
- body.Seek(r.bodyStart, 0) // make sure to seek back to original location |
|
| 46 |
- length = end - r.bodyStart |
|
| 47 |
- default: |
|
| 48 |
- panic("Cannot get length of body, must provide `ContentLength`")
|
|
| 49 |
- } |
|
| 50 |
- |
|
| 51 |
- r.HTTPRequest.ContentLength = length |
|
| 52 |
- r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 |
-// UserAgentHandler is a request handler for injecting User agent into requests. |
|
| 56 |
-func UserAgentHandler(r *Request) {
|
|
| 57 |
- r.HTTPRequest.Header.Set("User-Agent", SDKName+"/"+SDKVersion)
|
|
| 58 |
-} |
|
| 59 |
- |
|
| 60 |
-var reStatusCode = regexp.MustCompile(`^(\d+)`) |
|
| 61 |
- |
|
| 62 |
-// SendHandler is a request handler to send service request using HTTP client. |
|
| 63 |
-func SendHandler(r *Request) {
|
|
| 64 |
- var err error |
|
| 65 |
- r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest) |
|
| 66 |
- if err != nil {
|
|
| 67 |
- // Capture the case where url.Error is returned for error processing |
|
| 68 |
- // response. e.g. 301 without location header comes back as string |
|
| 69 |
- // error and r.HTTPResponse is nil. Other url redirect errors will |
|
| 70 |
- // comeback in a similar method. |
|
| 71 |
- if e, ok := err.(*url.Error); ok {
|
|
| 72 |
- if s := reStatusCode.FindStringSubmatch(e.Error()); s != nil {
|
|
| 73 |
- code, _ := strconv.ParseInt(s[1], 10, 64) |
|
| 74 |
- r.HTTPResponse = &http.Response{
|
|
| 75 |
- StatusCode: int(code), |
|
| 76 |
- Status: http.StatusText(int(code)), |
|
| 77 |
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
|
| 78 |
- } |
|
| 79 |
- return |
|
| 80 |
- } |
|
| 81 |
- } |
|
| 82 |
- if r.HTTPRequest == nil {
|
|
| 83 |
- // Add a dummy request response object to ensure the HTTPResponse |
|
| 84 |
- // value is consistent. |
|
| 85 |
- r.HTTPResponse = &http.Response{
|
|
| 86 |
- StatusCode: int(0), |
|
| 87 |
- Status: http.StatusText(int(0)), |
|
| 88 |
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
|
| 89 |
- } |
|
| 90 |
- } |
|
| 91 |
- // Catch all other request errors. |
|
| 92 |
- r.Error = awserr.New("RequestError", "send request failed", err)
|
|
| 93 |
- r.Retryable = Bool(true) // network errors are retryable |
|
| 94 |
- } |
|
| 95 |
-} |
|
| 96 |
- |
|
| 97 |
-// ValidateResponseHandler is a request handler to validate service response. |
|
| 98 |
-func ValidateResponseHandler(r *Request) {
|
|
| 99 |
- if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
|
|
| 100 |
- // this may be replaced by an UnmarshalError handler |
|
| 101 |
- r.Error = awserr.New("UnknownError", "unknown error", nil)
|
|
| 102 |
- } |
|
| 103 |
-} |
|
| 104 |
- |
|
| 105 |
-// AfterRetryHandler performs final checks to determine if the request should |
|
| 106 |
-// be retried and how long to delay. |
|
| 107 |
-func AfterRetryHandler(r *Request) {
|
|
| 108 |
- // If one of the other handlers already set the retry state |
|
| 109 |
- // we don't want to override it based on the service's state |
|
| 110 |
- if r.Retryable == nil {
|
|
| 111 |
- r.Retryable = Bool(r.Service.ShouldRetry(r)) |
|
| 112 |
- } |
|
| 113 |
- |
|
| 114 |
- if r.WillRetry() {
|
|
| 115 |
- r.RetryDelay = r.Service.RetryRules(r) |
|
| 116 |
- sleepDelay(r.RetryDelay) |
|
| 117 |
- |
|
| 118 |
- // when the expired token exception occurs the credentials |
|
| 119 |
- // need to be expired locally so that the next request to |
|
| 120 |
- // get credentials will trigger a credentials refresh. |
|
| 121 |
- if r.Error != nil {
|
|
| 122 |
- if err, ok := r.Error.(awserr.Error); ok {
|
|
| 123 |
- if isCodeExpiredCreds(err.Code()) {
|
|
| 124 |
- r.Config.Credentials.Expire() |
|
| 125 |
- } |
|
| 126 |
- } |
|
| 127 |
- } |
|
| 128 |
- |
|
| 129 |
- r.RetryCount++ |
|
| 130 |
- r.Error = nil |
|
| 131 |
- } |
|
| 132 |
-} |
|
| 133 |
- |
|
| 134 |
-var ( |
|
| 135 |
- // ErrMissingRegion is an error that is returned if region configuration is |
|
| 136 |
- // not found. |
|
| 137 |
- // |
|
| 138 |
- // @readonly |
|
| 139 |
- ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil)
|
|
| 140 |
- |
|
| 141 |
- // ErrMissingEndpoint is an error that is returned if an endpoint cannot be |
|
| 142 |
- // resolved for a service. |
|
| 143 |
- // |
|
| 144 |
- // @readonly |
|
| 145 |
- ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
|
|
| 146 |
-) |
|
| 147 |
- |
|
| 148 |
-// ValidateEndpointHandler is a request handler to validate a request had the |
|
| 149 |
-// appropriate Region and Endpoint set. Will set r.Error if the endpoint or |
|
| 150 |
-// region is not valid. |
|
| 151 |
-func ValidateEndpointHandler(r *Request) {
|
|
| 152 |
- if r.Service.SigningRegion == "" && StringValue(r.Service.Config.Region) == "" {
|
|
| 153 |
- r.Error = ErrMissingRegion |
|
| 154 |
- } else if r.Service.Endpoint == "" {
|
|
| 155 |
- r.Error = ErrMissingEndpoint |
|
| 156 |
- } |
|
| 157 |
-} |
| 158 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,85 +0,0 @@ |
| 1 |
-package aws |
|
| 2 |
- |
|
| 3 |
-// A Handlers provides a collection of request handlers for various |
|
| 4 |
-// stages of handling requests. |
|
| 5 |
-type Handlers struct {
|
|
| 6 |
- Validate HandlerList |
|
| 7 |
- Build HandlerList |
|
| 8 |
- Sign HandlerList |
|
| 9 |
- Send HandlerList |
|
| 10 |
- ValidateResponse HandlerList |
|
| 11 |
- Unmarshal HandlerList |
|
| 12 |
- UnmarshalMeta HandlerList |
|
| 13 |
- UnmarshalError HandlerList |
|
| 14 |
- Retry HandlerList |
|
| 15 |
- AfterRetry HandlerList |
|
| 16 |
-} |
|
| 17 |
- |
|
| 18 |
-// copy returns of this handler's lists. |
|
| 19 |
-func (h *Handlers) copy() Handlers {
|
|
| 20 |
- return Handlers{
|
|
| 21 |
- Validate: h.Validate.copy(), |
|
| 22 |
- Build: h.Build.copy(), |
|
| 23 |
- Sign: h.Sign.copy(), |
|
| 24 |
- Send: h.Send.copy(), |
|
| 25 |
- ValidateResponse: h.ValidateResponse.copy(), |
|
| 26 |
- Unmarshal: h.Unmarshal.copy(), |
|
| 27 |
- UnmarshalError: h.UnmarshalError.copy(), |
|
| 28 |
- UnmarshalMeta: h.UnmarshalMeta.copy(), |
|
| 29 |
- Retry: h.Retry.copy(), |
|
| 30 |
- AfterRetry: h.AfterRetry.copy(), |
|
| 31 |
- } |
|
| 32 |
-} |
|
| 33 |
- |
|
| 34 |
-// Clear removes callback functions for all handlers |
|
| 35 |
-func (h *Handlers) Clear() {
|
|
| 36 |
- h.Validate.Clear() |
|
| 37 |
- h.Build.Clear() |
|
| 38 |
- h.Send.Clear() |
|
| 39 |
- h.Sign.Clear() |
|
| 40 |
- h.Unmarshal.Clear() |
|
| 41 |
- h.UnmarshalMeta.Clear() |
|
| 42 |
- h.UnmarshalError.Clear() |
|
| 43 |
- h.ValidateResponse.Clear() |
|
| 44 |
- h.Retry.Clear() |
|
| 45 |
- h.AfterRetry.Clear() |
|
| 46 |
-} |
|
| 47 |
- |
|
| 48 |
-// A HandlerList manages zero or more handlers in a list. |
|
| 49 |
-type HandlerList struct {
|
|
| 50 |
- list []func(*Request) |
|
| 51 |
-} |
|
| 52 |
- |
|
| 53 |
-// copy creates a copy of the handler list. |
|
| 54 |
-func (l *HandlerList) copy() HandlerList {
|
|
| 55 |
- var n HandlerList |
|
| 56 |
- n.list = append([]func(*Request){}, l.list...)
|
|
| 57 |
- return n |
|
| 58 |
-} |
|
| 59 |
- |
|
| 60 |
-// Clear clears the handler list. |
|
| 61 |
-func (l *HandlerList) Clear() {
|
|
| 62 |
- l.list = []func(*Request){}
|
|
| 63 |
-} |
|
| 64 |
- |
|
| 65 |
-// Len returns the number of handlers in the list. |
|
| 66 |
-func (l *HandlerList) Len() int {
|
|
| 67 |
- return len(l.list) |
|
| 68 |
-} |
|
| 69 |
- |
|
| 70 |
-// PushBack pushes handlers f to the back of the handler list. |
|
| 71 |
-func (l *HandlerList) PushBack(f ...func(*Request)) {
|
|
| 72 |
- l.list = append(l.list, f...) |
|
| 73 |
-} |
|
| 74 |
- |
|
| 75 |
-// PushFront pushes handlers f to the front of the handler list. |
|
| 76 |
-func (l *HandlerList) PushFront(f ...func(*Request)) {
|
|
| 77 |
- l.list = append(f, l.list...) |
|
| 78 |
-} |
|
| 79 |
- |
|
| 80 |
-// Run executes all handlers in the list with a given request object. |
|
| 81 |
-func (l *HandlerList) Run(r *Request) {
|
|
| 82 |
- for _, f := range l.list {
|
|
| 83 |
- f(r) |
|
| 84 |
- } |
|
| 85 |
-} |
| ... | ... |
@@ -62,6 +62,15 @@ const ( |
| 62 | 62 |
// see the body content of requests and responses made while using the SDK |
| 63 | 63 |
// Will also enable LogDebug. |
| 64 | 64 |
LogDebugWithHTTPBody |
| 65 |
+ |
|
| 66 |
+ // LogDebugWithRequestRetries states the SDK should log when service requests will |
|
| 67 |
+ // be retried. This should be used when you want to log when service |
|
| 68 |
+ // requests are being retried. Will also enable LogDebug. |
|
| 69 |
+ LogDebugWithRequestRetries |
|
| 70 |
+ |
|
| 71 |
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail |
|
| 72 |
+ // to build, send, validate, or unmarshal. |
|
| 73 |
+ LogDebugWithRequestErrors |
|
| 65 | 74 |
) |
| 66 | 75 |
|
| 67 | 76 |
// A Logger is a minimalistic interface for the SDK to log messages to. Should |
| 68 | 77 |
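A hedged sketch of turning on the new retry logging through the shared defaults; LogDebugWithRequestErrors can be enabled the same way:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Turn on retry logging for every client built from the shared defaults.
	// LogDebugWithRequestRetries implies LogDebug, so general debug output
	// is emitted as well.
	defaults.DefaultConfig.LogLevel = aws.LogLevel(aws.LogDebugWithRequestRetries)

	// ... construct service clients here; they copy DefaultConfig and will
	// log each retried attempt through the configured Logger.
}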
deleted file mode 100644 |
| ... | ... |
@@ -1,89 +0,0 @@ |
| 1 |
-package aws |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "reflect" |
|
| 6 |
- "strings" |
|
| 7 |
- |
|
| 8 |
- "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-// ValidateParameters is a request handler to validate the input parameters. |
|
| 12 |
-// Validating parameters only has meaning if done prior to the request being sent. |
|
| 13 |
-func ValidateParameters(r *Request) {
|
|
| 14 |
- if r.ParamsFilled() {
|
|
| 15 |
- v := validator{errors: []string{}}
|
|
| 16 |
- v.validateAny(reflect.ValueOf(r.Params), "") |
|
| 17 |
- |
|
| 18 |
- if count := len(v.errors); count > 0 {
|
|
| 19 |
- format := "%d validation errors:\n- %s" |
|
| 20 |
- msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) |
|
| 21 |
- r.Error = awserr.New("InvalidParameter", msg, nil)
|
|
| 22 |
- } |
|
| 23 |
- } |
|
| 24 |
-} |
|
| 25 |
- |
|
| 26 |
-// A validator validates values. Collects validations errors which occurs. |
|
| 27 |
-type validator struct {
|
|
| 28 |
- errors []string |
|
| 29 |
-} |
|
| 30 |
- |
|
| 31 |
-// validateAny will validate any struct, slice or map type. All validations |
|
| 32 |
-// are also performed recursively for nested types. |
|
| 33 |
-func (v *validator) validateAny(value reflect.Value, path string) {
|
|
| 34 |
- value = reflect.Indirect(value) |
|
| 35 |
- if !value.IsValid() {
|
|
| 36 |
- return |
|
| 37 |
- } |
|
| 38 |
- |
|
| 39 |
- switch value.Kind() {
|
|
| 40 |
- case reflect.Struct: |
|
| 41 |
- v.validateStruct(value, path) |
|
| 42 |
- case reflect.Slice: |
|
| 43 |
- for i := 0; i < value.Len(); i++ {
|
|
| 44 |
- v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
|
|
| 45 |
- } |
|
| 46 |
- case reflect.Map: |
|
| 47 |
- for _, n := range value.MapKeys() {
|
|
| 48 |
- v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
|
|
| 49 |
- } |
|
| 50 |
- } |
|
| 51 |
-} |
|
| 52 |
- |
|
| 53 |
-// validateStruct will validate the struct value's fields. If the structure has |
|
| 54 |
-// nested types those types will be validated also. |
|
| 55 |
-func (v *validator) validateStruct(value reflect.Value, path string) {
|
|
| 56 |
- prefix := "." |
|
| 57 |
- if path == "" {
|
|
| 58 |
- prefix = "" |
|
| 59 |
- } |
|
| 60 |
- |
|
| 61 |
- for i := 0; i < value.Type().NumField(); i++ {
|
|
| 62 |
- f := value.Type().Field(i) |
|
| 63 |
- if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
|
|
| 64 |
- continue |
|
| 65 |
- } |
|
| 66 |
- fvalue := value.FieldByName(f.Name) |
|
| 67 |
- |
|
| 68 |
- notset := false |
|
| 69 |
- if f.Tag.Get("required") != "" {
|
|
| 70 |
- switch fvalue.Kind() {
|
|
| 71 |
- case reflect.Ptr, reflect.Slice, reflect.Map: |
|
| 72 |
- if fvalue.IsNil() {
|
|
| 73 |
- notset = true |
|
| 74 |
- } |
|
| 75 |
- default: |
|
| 76 |
- if !fvalue.IsValid() {
|
|
| 77 |
- notset = true |
|
| 78 |
- } |
|
| 79 |
- } |
|
| 80 |
- } |
|
| 81 |
- |
|
| 82 |
- if notset {
|
|
| 83 |
- msg := "missing required parameter: " + path + prefix + f.Name |
|
| 84 |
- v.errors = append(v.errors, msg) |
|
| 85 |
- } else {
|
|
| 86 |
- v.validateAny(fvalue, path+prefix+f.Name) |
|
| 87 |
- } |
|
| 88 |
- } |
|
| 89 |
-} |
| 90 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,312 +0,0 @@ |
| 1 |
-package aws |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "io" |
|
| 6 |
- "io/ioutil" |
|
| 7 |
- "net/http" |
|
| 8 |
- "net/url" |
|
| 9 |
- "reflect" |
|
| 10 |
- "strings" |
|
| 11 |
- "time" |
|
| 12 |
- |
|
| 13 |
- "github.com/aws/aws-sdk-go/aws/awsutil" |
|
| 14 |
-) |
|
| 15 |
- |
|
| 16 |
-// A Request is the service request to be made. |
|
| 17 |
-type Request struct {
|
|
| 18 |
- *Service |
|
| 19 |
- Handlers Handlers |
|
| 20 |
- Time time.Time |
|
| 21 |
- ExpireTime time.Duration |
|
| 22 |
- Operation *Operation |
|
| 23 |
- HTTPRequest *http.Request |
|
| 24 |
- HTTPResponse *http.Response |
|
| 25 |
- Body io.ReadSeeker |
|
| 26 |
- bodyStart int64 // offset from beginning of Body that the request body starts |
|
| 27 |
- Params interface{}
|
|
| 28 |
- Error error |
|
| 29 |
- Data interface{}
|
|
| 30 |
- RequestID string |
|
| 31 |
- RetryCount uint |
|
| 32 |
- Retryable *bool |
|
| 33 |
- RetryDelay time.Duration |
|
| 34 |
- |
|
| 35 |
- built bool |
|
| 36 |
-} |
|
| 37 |
- |
|
| 38 |
-// An Operation is the service API operation to be made. |
|
| 39 |
-type Operation struct {
|
|
| 40 |
- Name string |
|
| 41 |
- HTTPMethod string |
|
| 42 |
- HTTPPath string |
|
| 43 |
- *Paginator |
|
| 44 |
-} |
|
| 45 |
- |
|
| 46 |
-// Paginator keeps track of pagination configuration for an API operation. |
|
| 47 |
-type Paginator struct {
|
|
| 48 |
- InputTokens []string |
|
| 49 |
- OutputTokens []string |
|
| 50 |
- LimitToken string |
|
| 51 |
- TruncationToken string |
|
| 52 |
-} |
|
| 53 |
- |
|
| 54 |
-// NewRequest returns a new Request pointer for the service API |
|
| 55 |
-// operation and parameters. |
|
| 56 |
-// |
|
| 57 |
-// Params is any value of input parameters to be the request payload. |
|
| 58 |
-// Data is pointer value to an object which the request's response |
|
| 59 |
-// payload will be deserialized to. |
|
| 60 |
-func NewRequest(service *Service, operation *Operation, params interface{}, data interface{}) *Request {
|
|
| 61 |
- method := operation.HTTPMethod |
|
| 62 |
- if method == "" {
|
|
| 63 |
- method = "POST" |
|
| 64 |
- } |
|
| 65 |
- p := operation.HTTPPath |
|
| 66 |
- if p == "" {
|
|
| 67 |
- p = "/" |
|
| 68 |
- } |
|
| 69 |
- |
|
| 70 |
- httpReq, _ := http.NewRequest(method, "", nil) |
|
| 71 |
- httpReq.URL, _ = url.Parse(service.Endpoint + p) |
|
| 72 |
- |
|
| 73 |
- r := &Request{
|
|
| 74 |
- Service: service, |
|
| 75 |
- Handlers: service.Handlers.copy(), |
|
| 76 |
- Time: time.Now(), |
|
| 77 |
- ExpireTime: 0, |
|
| 78 |
- Operation: operation, |
|
| 79 |
- HTTPRequest: httpReq, |
|
| 80 |
- Body: nil, |
|
| 81 |
- Params: params, |
|
| 82 |
- Error: nil, |
|
| 83 |
- Data: data, |
|
| 84 |
- } |
|
| 85 |
- r.SetBufferBody([]byte{})
|
|
| 86 |
- |
|
| 87 |
- return r |
|
| 88 |
-} |
|
| 89 |
- |
|
| 90 |
-// WillRetry returns if the request's can be retried. |
|
| 91 |
-func (r *Request) WillRetry() bool {
|
|
| 92 |
- return r.Error != nil && BoolValue(r.Retryable) && r.RetryCount < r.Service.MaxRetries() |
|
| 93 |
-} |
|
| 94 |
- |
|
| 95 |
-// ParamsFilled returns if the request's parameters have been populated |
|
| 96 |
-// and the parameters are valid. False is returned if no parameters are |
|
| 97 |
-// provided or invalid. |
|
| 98 |
-func (r *Request) ParamsFilled() bool {
|
|
| 99 |
- return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() |
|
| 100 |
-} |
|
| 101 |
- |
|
| 102 |
-// DataFilled returns true if the request's data for response deserialization |
|
| 103 |
-// target has been set and is a valid. False is returned if data is not |
|
| 104 |
-// set, or is invalid. |
|
| 105 |
-func (r *Request) DataFilled() bool {
|
|
| 106 |
- return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() |
|
| 107 |
-} |
|
| 108 |
- |
|
| 109 |
-// SetBufferBody will set the request's body bytes that will be sent to |
|
| 110 |
-// the service API. |
|
| 111 |
-func (r *Request) SetBufferBody(buf []byte) {
|
|
| 112 |
- r.SetReaderBody(bytes.NewReader(buf)) |
|
| 113 |
-} |
|
| 114 |
- |
|
| 115 |
-// SetStringBody sets the body of the request to be backed by a string. |
|
| 116 |
-func (r *Request) SetStringBody(s string) {
|
|
| 117 |
- r.SetReaderBody(strings.NewReader(s)) |
|
| 118 |
-} |
|
| 119 |
- |
|
| 120 |
-// SetReaderBody will set the request's body reader. |
|
| 121 |
-func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
|
| 122 |
- r.HTTPRequest.Body = ioutil.NopCloser(reader) |
|
| 123 |
- r.Body = reader |
|
| 124 |
-} |
|
| 125 |
- |
|
| 126 |
-// Presign returns the request's signed URL. Error will be returned |
|
| 127 |
-// if the signing fails. |
|
| 128 |
-func (r *Request) Presign(expireTime time.Duration) (string, error) {
|
|
| 129 |
- r.ExpireTime = expireTime |
|
| 130 |
- r.Sign() |
|
| 131 |
- if r.Error != nil {
|
|
| 132 |
- return "", r.Error |
|
| 133 |
- } |
|
| 134 |
- return r.HTTPRequest.URL.String(), nil |
|
| 135 |
-} |
|
| 136 |
- |
|
| 137 |
-// Build will build the request's object so it can be signed and sent |
|
| 138 |
-// to the service. Build will also validate all the request's parameters. |
|
| 139 |
-// Anny additional build Handlers set on this request will be run |
|
| 140 |
-// in the order they were set. |
|
| 141 |
-// |
|
| 142 |
-// The request will only be built once. Multiple calls to build will have |
|
| 143 |
-// no effect. |
|
| 144 |
-// |
|
| 145 |
-// If any Validate or Build errors occur the build will stop and the error |
|
| 146 |
-// which occurred will be returned. |
|
| 147 |
-func (r *Request) Build() error {
|
|
| 148 |
- if !r.built {
|
|
| 149 |
- r.Error = nil |
|
| 150 |
- r.Handlers.Validate.Run(r) |
|
| 151 |
- if r.Error != nil {
|
|
| 152 |
- return r.Error |
|
| 153 |
- } |
|
| 154 |
- r.Handlers.Build.Run(r) |
|
| 155 |
- r.built = true |
|
| 156 |
- } |
|
| 157 |
- |
|
| 158 |
- return r.Error |
|
| 159 |
-} |
|
| 160 |
- |
|
| 161 |
-// Sign will sign the request retuning error if errors are encountered. |
|
| 162 |
-// |
|
| 163 |
-// Send will build the request prior to signing. All Sign Handlers will |
|
| 164 |
-// be executed in the order they were set. |
|
| 165 |
-func (r *Request) Sign() error {
|
|
| 166 |
- r.Build() |
|
| 167 |
- if r.Error != nil {
|
|
| 168 |
- return r.Error |
|
| 169 |
- } |
|
| 170 |
- |
|
| 171 |
- r.Handlers.Sign.Run(r) |
|
| 172 |
- return r.Error |
|
| 173 |
-} |
|
| 174 |
- |
|
| 175 |
-// Send will send the request returning error if errors are encountered. |
|
| 176 |
-// |
|
| 177 |
-// Send will sign the request prior to sending. All Send Handlers will |
|
| 178 |
-// be executed in the order they were set. |
|
| 179 |
-func (r *Request) Send() error {
|
|
| 180 |
- for {
|
|
| 181 |
- r.Sign() |
|
| 182 |
- if r.Error != nil {
|
|
| 183 |
- return r.Error |
|
| 184 |
- } |
|
| 185 |
- |
|
| 186 |
- if BoolValue(r.Retryable) {
|
|
| 187 |
- // Re-seek the body back to the original point in for a retry so that |
|
| 188 |
- // send will send the body's contents again in the upcoming request. |
|
| 189 |
- r.Body.Seek(r.bodyStart, 0) |
|
| 190 |
- } |
|
| 191 |
- r.Retryable = nil |
|
| 192 |
- |
|
| 193 |
- r.Handlers.Send.Run(r) |
|
| 194 |
- if r.Error != nil {
|
|
| 195 |
- r.Handlers.Retry.Run(r) |
|
| 196 |
- r.Handlers.AfterRetry.Run(r) |
|
| 197 |
- if r.Error != nil {
|
|
| 198 |
- return r.Error |
|
| 199 |
- } |
|
| 200 |
- continue |
|
| 201 |
- } |
|
| 202 |
- |
|
| 203 |
- r.Handlers.UnmarshalMeta.Run(r) |
|
| 204 |
- r.Handlers.ValidateResponse.Run(r) |
|
| 205 |
- if r.Error != nil {
|
|
| 206 |
- r.Handlers.UnmarshalError.Run(r) |
|
| 207 |
- r.Handlers.Retry.Run(r) |
|
| 208 |
- r.Handlers.AfterRetry.Run(r) |
|
| 209 |
- if r.Error != nil {
|
|
| 210 |
- return r.Error |
|
| 211 |
- } |
|
| 212 |
- continue |
|
| 213 |
- } |
|
| 214 |
- |
|
| 215 |
- r.Handlers.Unmarshal.Run(r) |
|
| 216 |
- if r.Error != nil {
|
|
| 217 |
- r.Handlers.Retry.Run(r) |
|
| 218 |
- r.Handlers.AfterRetry.Run(r) |
|
| 219 |
- if r.Error != nil {
|
|
| 220 |
- return r.Error |
|
| 221 |
- } |
|
| 222 |
- continue |
|
| 223 |
- } |
|
| 224 |
- |
|
| 225 |
- break |
|
| 226 |
- } |
|
| 227 |
- |
|
| 228 |
- return nil |
|
| 229 |
-} |
|
| 230 |
- |
|
| 231 |
-// HasNextPage returns true if this request has more pages of data available. |
|
| 232 |
-func (r *Request) HasNextPage() bool {
|
|
| 233 |
- return r.nextPageTokens() != nil |
|
| 234 |
-} |
|
| 235 |
- |
|
| 236 |
-// nextPageTokens returns the tokens to use when asking for the next page of |
|
| 237 |
-// data. |
|
| 238 |
-func (r *Request) nextPageTokens() []interface{} {
|
|
| 239 |
- if r.Operation.Paginator == nil {
|
|
| 240 |
- return nil |
|
| 241 |
- } |
|
| 242 |
- |
|
| 243 |
- if r.Operation.TruncationToken != "" {
|
|
| 244 |
- tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken) |
|
| 245 |
- if tr == nil || len(tr) == 0 {
|
|
| 246 |
- return nil |
|
| 247 |
- } |
|
| 248 |
- switch v := tr[0].(type) {
|
|
| 249 |
- case bool: |
|
| 250 |
- if v == false {
|
|
| 251 |
- return nil |
|
| 252 |
- } |
|
| 253 |
- } |
|
| 254 |
- } |
|
| 255 |
- |
|
| 256 |
- found := false |
|
| 257 |
- tokens := make([]interface{}, len(r.Operation.OutputTokens))
|
|
| 258 |
- |
|
| 259 |
- for i, outtok := range r.Operation.OutputTokens {
|
|
| 260 |
- v := awsutil.ValuesAtAnyPath(r.Data, outtok) |
|
| 261 |
- if v != nil && len(v) > 0 {
|
|
| 262 |
- found = true |
|
| 263 |
- tokens[i] = v[0] |
|
| 264 |
- } |
|
| 265 |
- } |
|
| 266 |
- |
|
| 267 |
- if found {
|
|
| 268 |
- return tokens |
|
| 269 |
- } |
|
| 270 |
- return nil |
|
| 271 |
-} |
|
| 272 |
- |
|
| 273 |
-// NextPage returns a new Request that can be executed to return the next |
|
| 274 |
-// page of result data. Call .Send() on this request to execute it. |
|
| 275 |
-func (r *Request) NextPage() *Request {
|
|
| 276 |
- tokens := r.nextPageTokens() |
|
| 277 |
- if tokens == nil {
|
|
| 278 |
- return nil |
|
| 279 |
- } |
|
| 280 |
- |
|
| 281 |
- data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() |
|
| 282 |
- nr := NewRequest(r.Service, r.Operation, awsutil.CopyOf(r.Params), data) |
|
| 283 |
- for i, intok := range nr.Operation.InputTokens {
|
|
| 284 |
- awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i]) |
|
| 285 |
- } |
|
| 286 |
- return nr |
|
| 287 |
-} |
|
| 288 |
- |
|
| 289 |
-// EachPage iterates over each page of a paginated request object. The fn |
|
| 290 |
-// parameter should be a function with the following sample signature: |
|
| 291 |
-// |
|
| 292 |
-// func(page *T, lastPage bool) bool {
|
|
| 293 |
-// return true // return false to stop iterating |
|
| 294 |
-// } |
|
| 295 |
-// |
|
| 296 |
-// Where "T" is the structure type matching the output structure of the given |
|
| 297 |
-// operation. For example, a request object generated by |
|
| 298 |
-// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput |
|
| 299 |
-// as the structure "T". The lastPage value represents whether the page is |
|
| 300 |
-// the last page of data or not. The return value of this function should |
|
| 301 |
-// return true to keep iterating or false to stop. |
|
| 302 |
-func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
|
|
| 303 |
- for page := r; page != nil; page = page.NextPage() {
|
|
| 304 |
- page.Send() |
|
| 305 |
- shouldContinue := fn(page.Data, !page.HasNextPage()) |
|
| 306 |
- if page.Error != nil || !shouldContinue {
|
|
| 307 |
- return page.Error |
|
| 308 |
- } |
|
| 309 |
- } |
|
| 310 |
- |
|
| 311 |
- return nil |
|
| 312 |
-} |
| 313 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,112 @@ |
| 0 |
+package request |
|
| 1 |
+ |
|
| 2 |
+// A Handlers provides a collection of request handlers for various |
|
| 3 |
+// stages of handling requests. |
|
| 4 |
+type Handlers struct {
|
|
| 5 |
+ Validate HandlerList |
|
| 6 |
+ Build HandlerList |
|
| 7 |
+ Sign HandlerList |
|
| 8 |
+ Send HandlerList |
|
| 9 |
+ ValidateResponse HandlerList |
|
| 10 |
+ Unmarshal HandlerList |
|
| 11 |
+ UnmarshalMeta HandlerList |
|
| 12 |
+ UnmarshalError HandlerList |
|
| 13 |
+ Retry HandlerList |
|
| 14 |
+ AfterRetry HandlerList |
|
| 15 |
+} |
|
| 16 |
+ |
|
| 17 |
+// Copy returns a copy of this handler's lists. |
|
| 18 |
+func (h *Handlers) Copy() Handlers {
|
|
| 19 |
+ return Handlers{
|
|
| 20 |
+ Validate: h.Validate.copy(), |
|
| 21 |
+ Build: h.Build.copy(), |
|
| 22 |
+ Sign: h.Sign.copy(), |
|
| 23 |
+ Send: h.Send.copy(), |
|
| 24 |
+ ValidateResponse: h.ValidateResponse.copy(), |
|
| 25 |
+ Unmarshal: h.Unmarshal.copy(), |
|
| 26 |
+ UnmarshalError: h.UnmarshalError.copy(), |
|
| 27 |
+ UnmarshalMeta: h.UnmarshalMeta.copy(), |
|
| 28 |
+ Retry: h.Retry.copy(), |
|
| 29 |
+ AfterRetry: h.AfterRetry.copy(), |
|
| 30 |
+ } |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+// Clear removes callback functions for all handlers |
|
| 34 |
+func (h *Handlers) Clear() {
|
|
| 35 |
+ h.Validate.Clear() |
|
| 36 |
+ h.Build.Clear() |
|
| 37 |
+ h.Send.Clear() |
|
| 38 |
+ h.Sign.Clear() |
|
| 39 |
+ h.Unmarshal.Clear() |
|
| 40 |
+ h.UnmarshalMeta.Clear() |
|
| 41 |
+ h.UnmarshalError.Clear() |
|
| 42 |
+ h.ValidateResponse.Clear() |
|
| 43 |
+ h.Retry.Clear() |
|
| 44 |
+ h.AfterRetry.Clear() |
|
| 45 |
+} |
|
| 46 |
+ |
|
| 47 |
+// A HandlerList manages zero or more handlers in a list. |
|
| 48 |
+type HandlerList struct {
|
|
| 49 |
+ list []NamedHandler |
|
| 50 |
+} |
|
| 51 |
+ |
|
| 52 |
+// A NamedHandler is a struct that contains a name and function callback. |
|
| 53 |
+type NamedHandler struct {
|
|
| 54 |
+ Name string |
|
| 55 |
+ Fn func(*Request) |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
+// copy creates a copy of the handler list. |
|
| 59 |
+func (l *HandlerList) copy() HandlerList {
|
|
| 60 |
+ var n HandlerList |
|
| 61 |
+ n.list = append([]NamedHandler{}, l.list...)
|
|
| 62 |
+ return n |
|
| 63 |
+} |
|
| 64 |
+ |
|
| 65 |
+// Clear clears the handler list. |
|
| 66 |
+func (l *HandlerList) Clear() {
|
|
| 67 |
+ l.list = []NamedHandler{}
|
|
| 68 |
+} |
|
| 69 |
+ |
|
| 70 |
+// Len returns the number of handlers in the list. |
|
| 71 |
+func (l *HandlerList) Len() int {
|
|
| 72 |
+ return len(l.list) |
|
| 73 |
+} |
|
| 74 |
+ |
|
| 75 |
+// PushBack pushes handler f to the back of the handler list. |
|
| 76 |
+func (l *HandlerList) PushBack(f func(*Request)) {
|
|
| 77 |
+ l.list = append(l.list, NamedHandler{"__anonymous", f})
|
|
| 78 |
+} |
|
| 79 |
+ |
|
| 80 |
+// PushFront pushes handler f to the front of the handler list. |
|
| 81 |
+func (l *HandlerList) PushFront(f func(*Request)) {
|
|
| 82 |
+ l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
|
|
| 83 |
+} |
|
| 84 |
+ |
|
| 85 |
+// PushBackNamed pushes named handler f to the back of the handler list. |
|
| 86 |
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
|
|
| 87 |
+ l.list = append(l.list, n) |
|
| 88 |
+} |
|
| 89 |
+ |
|
| 90 |
+// PushFrontNamed pushes named handler n to the front of the handler list. |
|
| 91 |
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
|
|
| 92 |
+ l.list = append([]NamedHandler{n}, l.list...)
|
|
| 93 |
+} |
|
| 94 |
+ |
|
| 95 |
+// Remove removes a NamedHandler n |
|
| 96 |
+func (l *HandlerList) Remove(n NamedHandler) {
|
|
| 97 |
+ newlist := []NamedHandler{}
|
|
| 98 |
+ for _, m := range l.list {
|
|
| 99 |
+ if m.Name != n.Name {
|
|
| 100 |
+ newlist = append(newlist, m) |
|
| 101 |
+ } |
|
| 102 |
+ } |
|
| 103 |
+ l.list = newlist |
|
| 104 |
+} |
|
| 105 |
+ |
|
| 106 |
+// Run executes all handlers in the list with a given request object. |
|
| 107 |
+func (l *HandlerList) Run(r *Request) {
|
|
| 108 |
+ for _, f := range l.list {
|
|
| 109 |
+ f.Fn(r) |
|
| 110 |
+ } |
|
| 111 |
+} |
| 0 | 112 |
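Named handlers allow a specific callback to be registered and later removed without clearing the whole list. A sketch of the pattern, where the handler name and User-Agent value are purely illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var handlers request.Handlers

	// Register a build-stage handler under a stable name so it can be
	// found and removed later. The name and header value are examples.
	ua := request.NamedHandler{
		Name: "example.UserAgentHandler",
		Fn: func(r *request.Request) {
			r.HTTPRequest.Header.Set("User-Agent", "docker-logger-example/0.0.1")
		},
	}
	handlers.Build.PushBackNamed(ua)
	fmt.Println("build handlers registered:", handlers.Build.Len())

	// Removing by name only drops handlers registered under that name;
	// anonymous handlers pushed with PushBack are left in place.
	handlers.Build.Remove(ua)
	fmt.Println("build handlers after removal:", handlers.Build.Len())
}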
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,348 @@ |
| 0 |
+package request |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "io" |
|
| 6 |
+ "io/ioutil" |
|
| 7 |
+ "net/http" |
|
| 8 |
+ "net/url" |
|
| 9 |
+ "reflect" |
|
| 10 |
+ "strings" |
|
| 11 |
+ "time" |
|
| 12 |
+ |
|
| 13 |
+ "github.com/aws/aws-sdk-go/aws" |
|
| 14 |
+ "github.com/aws/aws-sdk-go/aws/awsutil" |
|
| 15 |
+ "github.com/aws/aws-sdk-go/aws/service/serviceinfo" |
|
| 16 |
+) |
|
| 17 |
+ |
|
| 18 |
+// A Request is the service request to be made. |
|
| 19 |
+type Request struct {
|
|
| 20 |
+ Retryer |
|
| 21 |
+ Service serviceinfo.ServiceInfo |
|
| 22 |
+ Handlers Handlers |
|
| 23 |
+ Time time.Time |
|
| 24 |
+ ExpireTime time.Duration |
|
| 25 |
+ Operation *Operation |
|
| 26 |
+ HTTPRequest *http.Request |
|
| 27 |
+ HTTPResponse *http.Response |
|
| 28 |
+ Body io.ReadSeeker |
|
| 29 |
+ BodyStart int64 // offset from beginning of Body that the request body starts |
|
| 30 |
+ Params interface{}
|
|
| 31 |
+ Error error |
|
| 32 |
+ Data interface{}
|
|
| 33 |
+ RequestID string |
|
| 34 |
+ RetryCount uint |
|
| 35 |
+ Retryable *bool |
|
| 36 |
+ RetryDelay time.Duration |
|
| 37 |
+ |
|
| 38 |
+ built bool |
|
| 39 |
+} |
|
| 40 |
+ |
|
| 41 |
+// An Operation is the service API operation to be made. |
|
| 42 |
+type Operation struct {
|
|
| 43 |
+ Name string |
|
| 44 |
+ HTTPMethod string |
|
| 45 |
+ HTTPPath string |
|
| 46 |
+ *Paginator |
|
| 47 |
+} |
|
| 48 |
+ |
|
| 49 |
+// Paginator keeps track of pagination configuration for an API operation. |
|
| 50 |
+type Paginator struct {
|
|
| 51 |
+ InputTokens []string |
|
| 52 |
+ OutputTokens []string |
|
| 53 |
+ LimitToken string |
|
| 54 |
+ TruncationToken string |
|
| 55 |
+} |
|
| 56 |
+ |
|
| 57 |
+// New returns a new Request pointer for the service API |
|
| 58 |
+// operation and parameters. |
|
| 59 |
+// |
|
| 60 |
+// Params is any value of input parameters to be the request payload. |
|
| 61 |
+// Data is pointer value to an object which the request's response |
|
| 62 |
+// payload will be deserialized to. |
|
| 63 |
+func New(service serviceinfo.ServiceInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
|
|
| 64 |
+ method := operation.HTTPMethod |
|
| 65 |
+ if method == "" {
|
|
| 66 |
+ method = "POST" |
|
| 67 |
+ } |
|
| 68 |
+ p := operation.HTTPPath |
|
| 69 |
+ if p == "" {
|
|
| 70 |
+ p = "/" |
|
| 71 |
+ } |
|
| 72 |
+ |
|
| 73 |
+ httpReq, _ := http.NewRequest(method, "", nil) |
|
| 74 |
+ httpReq.URL, _ = url.Parse(service.Endpoint + p) |
|
| 75 |
+ |
|
| 76 |
+ r := &Request{
|
|
| 77 |
+ Retryer: retryer, |
|
| 78 |
+ Service: service, |
|
| 79 |
+ Handlers: handlers.Copy(), |
|
| 80 |
+ Time: time.Now(), |
|
| 81 |
+ ExpireTime: 0, |
|
| 82 |
+ Operation: operation, |
|
| 83 |
+ HTTPRequest: httpReq, |
|
| 84 |
+ Body: nil, |
|
| 85 |
+ Params: params, |
|
| 86 |
+ Error: nil, |
|
| 87 |
+ Data: data, |
|
| 88 |
+ } |
|
| 89 |
+ r.SetBufferBody([]byte{})
|
|
| 90 |
+ |
|
| 91 |
+ return r |
|
| 92 |
+} |
|
| 93 |
+ |
|
| 94 |
+// WillRetry returns whether the request can be retried. |
|
| 95 |
+func (r *Request) WillRetry() bool {
|
|
| 96 |
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+// ParamsFilled returns if the request's parameters have been populated |
|
| 100 |
+// and the parameters are valid. False is returned if no parameters are |
|
| 101 |
+// provided or invalid. |
|
| 102 |
+func (r *Request) ParamsFilled() bool {
|
|
| 103 |
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() |
|
| 104 |
+} |
|
| 105 |
+ |
|
| 106 |
+// DataFilled returns true if the request's data for response deserialization |
|
| 107 |
+// target has been set and is valid. False is returned if data is not |
|
| 108 |
+// set, or is invalid. |
|
| 109 |
+func (r *Request) DataFilled() bool {
|
|
| 110 |
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() |
|
| 111 |
+} |
|
| 112 |
+ |
|
| 113 |
+// SetBufferBody will set the request's body bytes that will be sent to |
|
| 114 |
+// the service API. |
|
| 115 |
+func (r *Request) SetBufferBody(buf []byte) {
|
|
| 116 |
+ r.SetReaderBody(bytes.NewReader(buf)) |
|
| 117 |
+} |
|
| 118 |
+ |
|
| 119 |
+// SetStringBody sets the body of the request to be backed by a string. |
|
| 120 |
+func (r *Request) SetStringBody(s string) {
|
|
| 121 |
+ r.SetReaderBody(strings.NewReader(s)) |
|
| 122 |
+} |
|
| 123 |
+ |
|
| 124 |
+// SetReaderBody will set the request's body reader. |
|
| 125 |
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
|
| 126 |
+ r.HTTPRequest.Body = ioutil.NopCloser(reader) |
|
| 127 |
+ r.Body = reader |
|
| 128 |
+} |
|
| 129 |
+ |
|
| 130 |
+// Presign returns the request's signed URL. Error will be returned |
|
| 131 |
+// if the signing fails. |
|
| 132 |
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
|
|
| 133 |
+ r.ExpireTime = expireTime |
|
| 134 |
+ r.Sign() |
|
| 135 |
+ if r.Error != nil {
|
|
| 136 |
+ return "", r.Error |
|
| 137 |
+ } |
|
| 138 |
+ return r.HTTPRequest.URL.String(), nil |
|
| 139 |
+} |
|
| 140 |
+ |
|
| 141 |
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
|
|
| 142 |
+ if !r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
|
|
| 143 |
+ return |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ retryStr := "not retrying" |
|
| 147 |
+ if retrying {
|
|
| 148 |
+ retryStr = "will retry" |
|
| 149 |
+ } |
|
| 150 |
+ |
|
| 151 |
+ r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
|
|
| 152 |
+ stage, r.Service.ServiceName, r.Operation.Name, retryStr, err)) |
|
| 153 |
+} |
|
| 154 |
+ |
|
| 155 |
+// Build will build the request's object so it can be signed and sent |
|
| 156 |
+// to the service. Build will also validate all the request's parameters. |
|
| 157 |
+// Any additional build Handlers set on this request will be run |
|
| 158 |
+// in the order they were set. |
|
| 159 |
+// |
|
| 160 |
+// The request will only be built once. Multiple calls to build will have |
|
| 161 |
+// no effect. |
|
| 162 |
+// |
|
| 163 |
+// If any Validate or Build errors occur the build will stop and the error |
|
| 164 |
+// which occurred will be returned. |
|
| 165 |
+func (r *Request) Build() error {
|
|
| 166 |
+ if !r.built {
|
|
| 167 |
+ r.Error = nil |
|
| 168 |
+ r.Handlers.Validate.Run(r) |
|
| 169 |
+ if r.Error != nil {
|
|
| 170 |
+ debugLogReqError(r, "Validate Request", false, r.Error) |
|
| 171 |
+ return r.Error |
|
| 172 |
+ } |
|
| 173 |
+ r.Handlers.Build.Run(r) |
|
| 174 |
+ r.built = true |
|
| 175 |
+ } |
|
| 176 |
+ |
|
| 177 |
+ return r.Error |
|
| 178 |
+} |
|
| 179 |
+ |
|
| 180 |
+// Sign will sign the request, returning an error if errors are encountered. |
|
| 181 |
+// |
|
| 182 |
+// Sign will build the request prior to signing. All Sign Handlers will |
|
| 183 |
+// be executed in the order they were set. |
|
| 184 |
+func (r *Request) Sign() error {
|
|
| 185 |
+ r.Build() |
|
| 186 |
+ if r.Error != nil {
|
|
| 187 |
+ debugLogReqError(r, "Build Request", false, r.Error) |
|
| 188 |
+ return r.Error |
|
| 189 |
+ } |
|
| 190 |
+ |
|
| 191 |
+ r.Handlers.Sign.Run(r) |
|
| 192 |
+ return r.Error |
|
| 193 |
+} |
|
| 194 |
+ |
|
| 195 |
+// Send will send the request returning error if errors are encountered. |
|
| 196 |
+// |
|
| 197 |
+// Send will sign the request prior to sending. All Send Handlers will |
|
| 198 |
+// be executed in the order they were set. |
|
| 199 |
+func (r *Request) Send() error {
|
|
| 200 |
+ for {
|
|
| 201 |
+ r.Sign() |
|
| 202 |
+ if r.Error != nil {
|
|
| 203 |
+ return r.Error |
|
| 204 |
+ } |
|
| 205 |
+ |
|
| 206 |
+ if aws.BoolValue(r.Retryable) {
|
|
| 207 |
+ if r.Service.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
|
| 208 |
+ r.Service.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
|
| 209 |
+ r.Service.ServiceName, r.Operation.Name, r.RetryCount)) |
|
| 210 |
+ } |
|
| 211 |
+ |
|
| 212 |
+ // Re-seek the body back to its original position for a retry so that |
|
| 213 |
+ // send will send the body's contents again in the upcoming request. |
|
| 214 |
+ r.Body.Seek(r.BodyStart, 0) |
|
| 215 |
+ r.HTTPRequest.Body = ioutil.NopCloser(r.Body) |
|
| 216 |
+ } |
|
| 217 |
+ r.Retryable = nil |
|
| 218 |
+ |
|
| 219 |
+ r.Handlers.Send.Run(r) |
|
| 220 |
+ if r.Error != nil {
|
|
| 221 |
+ err := r.Error |
|
| 222 |
+ r.Handlers.Retry.Run(r) |
|
| 223 |
+ r.Handlers.AfterRetry.Run(r) |
|
| 224 |
+ if r.Error != nil {
|
|
| 225 |
+ debugLogReqError(r, "Send Request", false, r.Error) |
|
| 226 |
+ return r.Error |
|
| 227 |
+ } |
|
| 228 |
+ debugLogReqError(r, "Send Request", true, err) |
|
| 229 |
+ continue |
|
| 230 |
+ } |
|
| 231 |
+ |
|
| 232 |
+ r.Handlers.UnmarshalMeta.Run(r) |
|
| 233 |
+ r.Handlers.ValidateResponse.Run(r) |
|
| 234 |
+ if r.Error != nil {
|
|
| 235 |
+ err := r.Error |
|
| 236 |
+ r.Handlers.UnmarshalError.Run(r) |
|
| 237 |
+ r.Handlers.Retry.Run(r) |
|
| 238 |
+ r.Handlers.AfterRetry.Run(r) |
|
| 239 |
+ if r.Error != nil {
|
|
| 240 |
+ debugLogReqError(r, "Validate Response", false, r.Error) |
|
| 241 |
+ return r.Error |
|
| 242 |
+ } |
|
| 243 |
+ debugLogReqError(r, "Validate Response", true, err) |
|
| 244 |
+ continue |
|
| 245 |
+ } |
|
| 246 |
+ |
|
| 247 |
+ r.Handlers.Unmarshal.Run(r) |
|
| 248 |
+ if r.Error != nil {
|
|
| 249 |
+ err := r.Error |
|
| 250 |
+ r.Handlers.Retry.Run(r) |
|
| 251 |
+ r.Handlers.AfterRetry.Run(r) |
|
| 252 |
+ if r.Error != nil {
|
|
| 253 |
+ debugLogReqError(r, "Unmarshal Response", false, r.Error) |
|
| 254 |
+ return r.Error |
|
| 255 |
+ } |
|
| 256 |
+ debugLogReqError(r, "Unmarshal Response", true, err) |
|
| 257 |
+ continue |
|
| 258 |
+ } |
|
| 259 |
+ |
|
| 260 |
+ break |
|
| 261 |
+ } |
|
| 262 |
+ |
|
| 263 |
+ return nil |
|
| 264 |
+} |
|
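Send is the loop every generated operation method ends up in. For callers that want the request object itself (for example to inspect it before execution) rather than the one-shot wrapper such as CreateLogGroup, the flow looks roughly like this; the client configuration and input values are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	// Illustrative client; the region is an assumption for the example.
	svc := cloudwatchlogs.New(&aws.Config{Region: aws.String("us-east-1")})

	// Build the request object without sending it yet.
	req, out := svc.CreateLogGroupRequest(&cloudwatchlogs.CreateLogGroupInput{
		LogGroupName: aws.String("example-group"),
	})

	// Send runs Sign (and therefore Build) plus the Send/Unmarshal/retry
	// handler chains shown above.
	if err := req.Send(); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("response:", out)
}
```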
| 265 |
+ |
|
| 266 |
+// HasNextPage returns true if this request has more pages of data available. |
|
| 267 |
+func (r *Request) HasNextPage() bool {
|
|
| 268 |
+ return r.nextPageTokens() != nil |
|
| 269 |
+} |
|
| 270 |
+ |
|
| 271 |
+// nextPageTokens returns the tokens to use when asking for the next page of |
|
| 272 |
+// data. |
|
| 273 |
+func (r *Request) nextPageTokens() []interface{} {
|
|
| 274 |
+ if r.Operation.Paginator == nil {
|
|
| 275 |
+ return nil |
|
| 276 |
+ } |
|
| 277 |
+ |
|
| 278 |
+ if r.Operation.TruncationToken != "" {
|
|
| 279 |
+ tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken) |
|
| 280 |
+ if tr == nil || len(tr) == 0 {
|
|
| 281 |
+ return nil |
|
| 282 |
+ } |
|
| 283 |
+ switch v := tr[0].(type) {
|
|
| 284 |
+ case bool: |
|
| 285 |
+ if v == false {
|
|
| 286 |
+ return nil |
|
| 287 |
+ } |
|
| 288 |
+ } |
|
| 289 |
+ } |
|
| 290 |
+ |
|
| 291 |
+ found := false |
|
| 292 |
+ tokens := make([]interface{}, len(r.Operation.OutputTokens))
|
|
| 293 |
+ |
|
| 294 |
+ for i, outtok := range r.Operation.OutputTokens {
|
|
| 295 |
+ v := awsutil.ValuesAtAnyPath(r.Data, outtok) |
|
| 296 |
+ if v != nil && len(v) > 0 {
|
|
| 297 |
+ found = true |
|
| 298 |
+ tokens[i] = v[0] |
|
| 299 |
+ } |
|
| 300 |
+ } |
|
| 301 |
+ |
|
| 302 |
+ if found {
|
|
| 303 |
+ return tokens |
|
| 304 |
+ } |
|
| 305 |
+ return nil |
|
| 306 |
+} |
|
| 307 |
+ |
|
| 308 |
+// NextPage returns a new Request that can be executed to return the next |
|
| 309 |
+// page of result data. Call .Send() on this request to execute it. |
|
| 310 |
+func (r *Request) NextPage() *Request {
|
|
| 311 |
+ tokens := r.nextPageTokens() |
|
| 312 |
+ if tokens == nil {
|
|
| 313 |
+ return nil |
|
| 314 |
+ } |
|
| 315 |
+ |
|
| 316 |
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() |
|
| 317 |
+ nr := New(r.Service, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) |
|
| 318 |
+ for i, intok := range nr.Operation.InputTokens {
|
|
| 319 |
+ awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i]) |
|
| 320 |
+ } |
|
| 321 |
+ return nr |
|
| 322 |
+} |
|
| 323 |
+ |
|
| 324 |
+// EachPage iterates over each page of a paginated request object. The fn |
|
| 325 |
+// parameter should be a function with the following sample signature: |
|
| 326 |
+// |
|
| 327 |
+// func(page *T, lastPage bool) bool {
|
|
| 328 |
+// return true // return false to stop iterating |
|
| 329 |
+// } |
|
| 330 |
+// |
|
| 331 |
+// Where "T" is the structure type matching the output structure of the given |
|
| 332 |
+// operation. For example, a request object generated by |
|
| 333 |
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput |
|
| 334 |
+// as the structure "T". The lastPage value represents whether the page is |
|
| 335 |
+// the last page of data or not. The return value of this function should |
|
| 336 |
+// return true to keep iterating or false to stop. |
|
| 337 |
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
|
|
| 338 |
+ for page := r; page != nil; page = page.NextPage() {
|
|
| 339 |
+ page.Send() |
|
| 340 |
+ shouldContinue := fn(page.Data, !page.HasNextPage()) |
|
| 341 |
+ if page.Error != nil || !shouldContinue {
|
|
| 342 |
+ return page.Error |
|
| 343 |
+ } |
|
| 344 |
+ } |
|
| 345 |
+ |
|
| 346 |
+ return nil |
|
| 347 |
+} |
| 0 | 348 |
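EachPage is what drives the generated *Pages helpers added later in this diff (DescribeLogGroupsPages, FilterLogEventsPages, and so on). A minimal sketch of consuming one of them; the client configuration and the per-page handling are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(&aws.Config{Region: aws.String("us-east-1")})

	// DescribeLogGroupsPages builds one request and lets EachPage walk the
	// nextToken chain, invoking the callback once per page.
	err := svc.DescribeLogGroupsPages(&cloudwatchlogs.DescribeLogGroupsInput{},
		func(page *cloudwatchlogs.DescribeLogGroupsOutput, lastPage bool) bool {
			for _, g := range page.LogGroups {
				fmt.Println(aws.StringValue(g.LogGroupName))
			}
			return true // returning false would stop the iteration early
		})
	if err != nil {
		fmt.Println("pagination failed:", err)
	}
}
```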
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,71 @@ |
| 0 |
+package request |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "time" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 6 |
+) |
|
| 7 |
+ |
|
| 8 |
+// Retryer is an interface to control retry logic for a given service. |
|
| 9 |
+// The default implementation used by most services is the service.DefaultRetryer |
|
| 10 |
+// structure, which contains basic retry logic using exponential backoff. |
|
| 11 |
+type Retryer interface {
|
|
| 12 |
+ RetryRules(*Request) time.Duration |
|
| 13 |
+ ShouldRetry(*Request) bool |
|
| 14 |
+ MaxRetries() uint |
|
| 15 |
+} |
|
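Those three methods are the whole contract a custom retry policy has to meet. A minimal from-scratch sketch, assuming a fixed delay and a fixed retry budget are acceptable (the type and its values are hypothetical):

```go
package customretry

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/request"
)

// fixedRetryer is a hypothetical retry policy with a constant backoff.
type fixedRetryer struct {
	delay time.Duration
	max   uint
}

// RetryRules returns the same delay for every attempt.
func (f fixedRetryer) RetryRules(*request.Request) time.Duration { return f.delay }

// ShouldRetry defers to the error-code classification added below in this
// file (it skips DefaultRetryer's extra HTTP 5xx check).
func (f fixedRetryer) ShouldRetry(r *request.Request) bool { return r.IsErrorRetryable() }

// MaxRetries caps the number of retry attempts.
func (f fixedRetryer) MaxRetries() uint { return f.max }

// Compile-time check that the interface is satisfied.
var _ request.Retryer = fixedRetryer{}
```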
| 16 |
+ |
|
| 17 |
+// retryableCodes is a collection of service response codes which are retry-able |
|
| 18 |
+// without any further action. |
|
| 19 |
+var retryableCodes = map[string]struct{}{
|
|
| 20 |
+ "RequestError": {},
|
|
| 21 |
+ "ProvisionedThroughputExceededException": {},
|
|
| 22 |
+ "Throttling": {},
|
|
| 23 |
+ "ThrottlingException": {},
|
|
| 24 |
+ "RequestLimitExceeded": {},
|
|
| 25 |
+ "RequestThrottled": {},
|
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// credsExpiredCodes is a collection of error codes which signify the credentials |
|
| 29 |
+// need to be refreshed. Expired tokens require refreshing of credentials, and |
|
| 30 |
+// resigning before the request can be retried. |
|
| 31 |
+var credsExpiredCodes = map[string]struct{}{
|
|
| 32 |
+ "ExpiredToken": {},
|
|
| 33 |
+ "ExpiredTokenException": {},
|
|
| 34 |
+ "RequestExpired": {}, // EC2 Only
|
|
| 35 |
+} |
|
| 36 |
+ |
|
| 37 |
+func isCodeRetryable(code string) bool {
|
|
| 38 |
+ if _, ok := retryableCodes[code]; ok {
|
|
| 39 |
+ return true |
|
| 40 |
+ } |
|
| 41 |
+ |
|
| 42 |
+ return isCodeExpiredCreds(code) |
|
| 43 |
+} |
|
| 44 |
+ |
|
| 45 |
+func isCodeExpiredCreds(code string) bool {
|
|
| 46 |
+ _, ok := credsExpiredCodes[code] |
|
| 47 |
+ return ok |
|
| 48 |
+} |
|
| 49 |
+ |
|
| 50 |
+// IsErrorRetryable returns whether the error is retryable, based on its Code. |
|
| 51 |
+// Returns false if the request has no Error set. |
|
| 52 |
+func (r *Request) IsErrorRetryable() bool {
|
|
| 53 |
+ if r.Error != nil {
|
|
| 54 |
+ if err, ok := r.Error.(awserr.Error); ok {
|
|
| 55 |
+ return isCodeRetryable(err.Code()) |
|
| 56 |
+ } |
|
| 57 |
+ } |
|
| 58 |
+ return false |
|
| 59 |
+} |
|
| 60 |
+ |
|
| 61 |
+// IsErrorExpired returns whether the error code is a credential expiry error. |
|
| 62 |
+// Returns false if the request has no Error set. |
|
| 63 |
+func (r *Request) IsErrorExpired() bool {
|
|
| 64 |
+ if r.Error != nil {
|
|
| 65 |
+ if err, ok := r.Error.(awserr.Error); ok {
|
|
| 66 |
+ return isCodeExpiredCreds(err.Code()) |
|
| 67 |
+ } |
|
| 68 |
+ } |
|
| 69 |
+ return false |
|
| 70 |
+} |
| 0 | 71 |
deleted file mode 100644 |
| ... | ... |
@@ -1,194 +0,0 @@ |
| 1 |
-package aws |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "math" |
|
| 6 |
- "math/rand" |
|
| 7 |
- "net/http" |
|
| 8 |
- "net/http/httputil" |
|
| 9 |
- "regexp" |
|
| 10 |
- "time" |
|
| 11 |
- |
|
| 12 |
- "github.com/aws/aws-sdk-go/aws/awserr" |
|
| 13 |
- "github.com/aws/aws-sdk-go/internal/endpoints" |
|
| 14 |
-) |
|
| 15 |
- |
|
| 16 |
-// A Service implements the base service request and response handling |
|
| 17 |
-// used by all services. |
|
| 18 |
-type Service struct {
|
|
| 19 |
- Config *Config |
|
| 20 |
- Handlers Handlers |
|
| 21 |
- ServiceName string |
|
| 22 |
- APIVersion string |
|
| 23 |
- Endpoint string |
|
| 24 |
- SigningName string |
|
| 25 |
- SigningRegion string |
|
| 26 |
- JSONVersion string |
|
| 27 |
- TargetPrefix string |
|
| 28 |
- RetryRules func(*Request) time.Duration |
|
| 29 |
- ShouldRetry func(*Request) bool |
|
| 30 |
- DefaultMaxRetries uint |
|
| 31 |
-} |
|
| 32 |
- |
|
| 33 |
-var schemeRE = regexp.MustCompile("^([^:]+)://")
|
|
| 34 |
- |
|
| 35 |
-// NewService will return a pointer to a new Server object initialized. |
|
| 36 |
-func NewService(config *Config) *Service {
|
|
| 37 |
- svc := &Service{Config: config}
|
|
| 38 |
- svc.Initialize() |
|
| 39 |
- return svc |
|
| 40 |
-} |
|
| 41 |
- |
|
| 42 |
-// Initialize initializes the service. |
|
| 43 |
-func (s *Service) Initialize() {
|
|
| 44 |
- if s.Config == nil {
|
|
| 45 |
- s.Config = &Config{}
|
|
| 46 |
- } |
|
| 47 |
- if s.Config.HTTPClient == nil {
|
|
| 48 |
- s.Config.HTTPClient = http.DefaultClient |
|
| 49 |
- } |
|
| 50 |
- |
|
| 51 |
- if s.RetryRules == nil {
|
|
| 52 |
- s.RetryRules = retryRules |
|
| 53 |
- } |
|
| 54 |
- |
|
| 55 |
- if s.ShouldRetry == nil {
|
|
| 56 |
- s.ShouldRetry = shouldRetry |
|
| 57 |
- } |
|
| 58 |
- |
|
| 59 |
- s.DefaultMaxRetries = 3 |
|
| 60 |
- s.Handlers.Validate.PushBack(ValidateEndpointHandler) |
|
| 61 |
- s.Handlers.Build.PushBack(UserAgentHandler) |
|
| 62 |
- s.Handlers.Sign.PushBack(BuildContentLength) |
|
| 63 |
- s.Handlers.Send.PushBack(SendHandler) |
|
| 64 |
- s.Handlers.AfterRetry.PushBack(AfterRetryHandler) |
|
| 65 |
- s.Handlers.ValidateResponse.PushBack(ValidateResponseHandler) |
|
| 66 |
- s.AddDebugHandlers() |
|
| 67 |
- s.buildEndpoint() |
|
| 68 |
- |
|
| 69 |
- if !BoolValue(s.Config.DisableParamValidation) {
|
|
| 70 |
- s.Handlers.Validate.PushBack(ValidateParameters) |
|
| 71 |
- } |
|
| 72 |
-} |
|
| 73 |
- |
|
| 74 |
-// buildEndpoint builds the endpoint values the service will use to make requests with. |
|
| 75 |
-func (s *Service) buildEndpoint() {
|
|
| 76 |
- if StringValue(s.Config.Endpoint) != "" {
|
|
| 77 |
- s.Endpoint = *s.Config.Endpoint |
|
| 78 |
- } else {
|
|
| 79 |
- s.Endpoint, s.SigningRegion = |
|
| 80 |
- endpoints.EndpointForRegion(s.ServiceName, StringValue(s.Config.Region)) |
|
| 81 |
- } |
|
| 82 |
- |
|
| 83 |
- if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
|
|
| 84 |
- scheme := "https" |
|
| 85 |
- if BoolValue(s.Config.DisableSSL) {
|
|
| 86 |
- scheme = "http" |
|
| 87 |
- } |
|
| 88 |
- s.Endpoint = scheme + "://" + s.Endpoint |
|
| 89 |
- } |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-// AddDebugHandlers injects debug logging handlers into the service to log request |
|
| 93 |
-// debug information. |
|
| 94 |
-func (s *Service) AddDebugHandlers() {
|
|
| 95 |
- if !s.Config.LogLevel.AtLeast(LogDebug) {
|
|
| 96 |
- return |
|
| 97 |
- } |
|
| 98 |
- |
|
| 99 |
- s.Handlers.Send.PushFront(logRequest) |
|
| 100 |
- s.Handlers.Send.PushBack(logResponse) |
|
| 101 |
-} |
|
| 102 |
- |
|
| 103 |
-const logReqMsg = `DEBUG: Request %s/%s Details: |
|
| 104 |
-%s |
|
| 105 |
- |
|
| 106 |
-func logRequest(r *Request) {
|
|
| 107 |
- logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody) |
|
| 108 |
- dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) |
|
| 109 |
- |
|
| 110 |
- r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ServiceName, r.Operation.Name, string(dumpedBody))) |
|
| 111 |
-} |
|
| 112 |
- |
|
| 113 |
-const logRespMsg = `DEBUG: Response %s/%s Details: |
|
| 114 |
-%s |
|
| 115 |
- |
|
| 116 |
-func logResponse(r *Request) {
|
|
| 117 |
- var msg = "no reponse data" |
|
| 118 |
- if r.HTTPResponse != nil {
|
|
| 119 |
- logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody) |
|
| 120 |
- dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) |
|
| 121 |
- msg = string(dumpedBody) |
|
| 122 |
- } else if r.Error != nil {
|
|
| 123 |
- msg = r.Error.Error() |
|
| 124 |
- } |
|
| 125 |
- r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ServiceName, r.Operation.Name, msg)) |
|
| 126 |
-} |
|
| 127 |
- |
|
| 128 |
-// MaxRetries returns the number of maximum returns the service will use to make |
|
| 129 |
-// an individual API request. |
|
| 130 |
-func (s *Service) MaxRetries() uint {
|
|
| 131 |
- if IntValue(s.Config.MaxRetries) < 0 {
|
|
| 132 |
- return s.DefaultMaxRetries |
|
| 133 |
- } |
|
| 134 |
- return uint(IntValue(s.Config.MaxRetries)) |
|
| 135 |
-} |
|
| 136 |
- |
|
| 137 |
-var seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) |
|
| 138 |
- |
|
| 139 |
-// retryRules returns the delay duration before retrying this request again |
|
| 140 |
-func retryRules(r *Request) time.Duration {
|
|
| 141 |
- |
|
| 142 |
- delay := int(math.Pow(2, float64(r.RetryCount))) * (seededRand.Intn(30) + 30) |
|
| 143 |
- return time.Duration(delay) * time.Millisecond |
|
| 144 |
-} |
|
| 145 |
- |
|
| 146 |
-// retryableCodes is a collection of service response codes which are retry-able |
|
| 147 |
-// without any further action. |
|
| 148 |
-var retryableCodes = map[string]struct{}{
|
|
| 149 |
- "RequestError": {},
|
|
| 150 |
- "ProvisionedThroughputExceededException": {},
|
|
| 151 |
- "Throttling": {},
|
|
| 152 |
- "ThrottlingException": {},
|
|
| 153 |
- "RequestLimitExceeded": {},
|
|
| 154 |
- "RequestThrottled": {},
|
|
| 155 |
-} |
|
| 156 |
- |
|
| 157 |
-// credsExpiredCodes is a collection of error codes which signify the credentials |
|
| 158 |
-// need to be refreshed. Expired tokens require refreshing of credentials, and |
|
| 159 |
-// resigning before the request can be retried. |
|
| 160 |
-var credsExpiredCodes = map[string]struct{}{
|
|
| 161 |
- "ExpiredToken": {},
|
|
| 162 |
- "ExpiredTokenException": {},
|
|
| 163 |
- "RequestExpired": {}, // EC2 Only
|
|
| 164 |
-} |
|
| 165 |
- |
|
| 166 |
-func isCodeRetryable(code string) bool {
|
|
| 167 |
- if _, ok := retryableCodes[code]; ok {
|
|
| 168 |
- return true |
|
| 169 |
- } |
|
| 170 |
- |
|
| 171 |
- return isCodeExpiredCreds(code) |
|
| 172 |
-} |
|
| 173 |
- |
|
| 174 |
-func isCodeExpiredCreds(code string) bool {
|
|
| 175 |
- _, ok := credsExpiredCodes[code] |
|
| 176 |
- return ok |
|
| 177 |
-} |
|
| 178 |
- |
|
| 179 |
-// shouldRetry returns if the request should be retried. |
|
| 180 |
-func shouldRetry(r *Request) bool {
|
|
| 181 |
- if r.HTTPResponse.StatusCode >= 500 {
|
|
| 182 |
- return true |
|
| 183 |
- } |
|
| 184 |
- if r.Error != nil {
|
|
| 185 |
- if err, ok := r.Error.(awserr.Error); ok {
|
|
| 186 |
- return isCodeRetryable(err.Code()) |
|
| 187 |
- } |
|
| 188 |
- } |
|
| 189 |
- return false |
|
| 190 |
-} |
| 191 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,51 @@ |
| 0 |
+package service |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "math" |
|
| 4 |
+ "math/rand" |
|
| 5 |
+ "time" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/aws/aws-sdk-go/aws" |
|
| 8 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 9 |
+) |
|
| 10 |
+ |
|
| 11 |
+// DefaultRetryer implements basic retry logic using exponential backoff for |
|
| 12 |
+// most services. If you want to implement custom retry logic, implement the |
|
| 13 |
+// request.Retryer interface or create a structure type that composes this |
|
| 14 |
+// struct and override the specific methods. For example, to override only |
|
| 15 |
+// the MaxRetries method: |
|
| 16 |
+// |
|
| 17 |
+// type retryer struct {
|
|
| 18 |
+// service.DefaultRetryer |
|
| 19 |
+// } |
|
| 20 |
+// |
|
| 21 |
+// // This implementation always has 100 max retries |
|
| 22 |
+// func (d retryer) MaxRetries() uint { return 100 }
|
|
| 23 |
+type DefaultRetryer struct {
|
|
| 24 |
+ *Service |
|
| 25 |
+} |
|
| 26 |
+ |
|
| 27 |
+// MaxRetries returns the maximum number of retries the service will use to make |
|
| 28 |
+// an individual API request. |
|
| 29 |
+func (d DefaultRetryer) MaxRetries() uint {
|
|
| 30 |
+ if aws.IntValue(d.Service.Config.MaxRetries) < 0 {
|
|
| 31 |
+ return d.DefaultMaxRetries |
|
| 32 |
+ } |
|
| 33 |
+ return uint(aws.IntValue(d.Service.Config.MaxRetries)) |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+var seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) |
|
| 37 |
+ |
|
| 38 |
+// RetryRules returns the delay duration before retrying this request again |
|
| 39 |
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
|
| 40 |
+ delay := int(math.Pow(2, float64(r.RetryCount))) * (seededRand.Intn(30) + 30) |
|
| 41 |
+ return time.Duration(delay) * time.Millisecond |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+// ShouldRetry returns if the request should be retried. |
|
| 45 |
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
|
| 46 |
+ if r.HTTPResponse.StatusCode >= 500 {
|
|
| 47 |
+ return true |
|
| 48 |
+ } |
|
| 49 |
+ return r.IsErrorRetryable() |
|
| 50 |
+} |
| 0 | 51 |
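The DefaultRetryer comment above suggests composing the struct to override individual methods. A sketch of doing that and installing the result on a client; the wiring through the embedded *service.Service field is an assumption based on the types introduced in this diff:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/service"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// patientRetryer keeps DefaultRetryer's backoff and retry classification
// but allows many more attempts.
type patientRetryer struct {
	service.DefaultRetryer
}

// MaxRetries overrides only the retry budget.
func (p patientRetryer) MaxRetries() uint { return 10 }

func main() {
	svc := cloudwatchlogs.New(&aws.Config{Region: aws.String("us-east-1")})

	// Replace the retryer that Service.Initialize installed.
	svc.Retryer = patientRetryer{DefaultRetryer: service.DefaultRetryer{Service: svc.Service}}
}
```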
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,133 @@ |
| 0 |
+package service |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "io/ioutil" |
|
| 5 |
+ "net/http" |
|
| 6 |
+ "net/http/httputil" |
|
| 7 |
+ "regexp" |
|
| 8 |
+ "time" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/aws/aws-sdk-go/aws" |
|
| 11 |
+ "github.com/aws/aws-sdk-go/aws/corehandlers" |
|
| 12 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 13 |
+ "github.com/aws/aws-sdk-go/aws/service/serviceinfo" |
|
| 14 |
+ "github.com/aws/aws-sdk-go/internal/endpoints" |
|
| 15 |
+) |
|
| 16 |
+ |
|
| 17 |
+// A Service implements the base service request and response handling |
|
| 18 |
+// used by all services. |
|
| 19 |
+type Service struct {
|
|
| 20 |
+ serviceinfo.ServiceInfo |
|
| 21 |
+ request.Retryer |
|
| 22 |
+ DefaultMaxRetries uint |
|
| 23 |
+ Handlers request.Handlers |
|
| 24 |
+} |
|
| 25 |
+ |
|
| 26 |
+var schemeRE = regexp.MustCompile("^([^:]+)://")
|
|
| 27 |
+ |
|
| 28 |
+// New will return a pointer to a new, initialized Service object. |
|
| 29 |
+func New(config *aws.Config) *Service {
|
|
| 30 |
+ svc := &Service{ServiceInfo: serviceinfo.ServiceInfo{Config: config}}
|
|
| 31 |
+ svc.Initialize() |
|
| 32 |
+ return svc |
|
| 33 |
+} |
|
| 34 |
+ |
|
| 35 |
+// Initialize initializes the service. |
|
| 36 |
+func (s *Service) Initialize() {
|
|
| 37 |
+ if s.Config == nil {
|
|
| 38 |
+ s.Config = &aws.Config{}
|
|
| 39 |
+ } |
|
| 40 |
+ if s.Config.HTTPClient == nil {
|
|
| 41 |
+ s.Config.HTTPClient = http.DefaultClient |
|
| 42 |
+ } |
|
| 43 |
+ if s.Config.SleepDelay == nil {
|
|
| 44 |
+ s.Config.SleepDelay = time.Sleep |
|
| 45 |
+ } |
|
| 46 |
+ |
|
| 47 |
+ s.Retryer = DefaultRetryer{s}
|
|
| 48 |
+ s.DefaultMaxRetries = 3 |
|
| 49 |
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) |
|
| 50 |
+ s.Handlers.Build.PushBackNamed(corehandlers.UserAgentHandler) |
|
| 51 |
+ s.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) |
|
| 52 |
+ s.Handlers.Send.PushBackNamed(corehandlers.SendHandler) |
|
| 53 |
+ s.Handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) |
|
| 54 |
+ s.Handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) |
|
| 55 |
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
|
|
| 56 |
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) |
|
| 57 |
+ } |
|
| 58 |
+ s.AddDebugHandlers() |
|
| 59 |
+ s.buildEndpoint() |
|
| 60 |
+} |
|
| 61 |
+ |
|
| 62 |
+// NewRequest returns a new Request pointer for the service API |
|
| 63 |
+// operation and parameters. |
|
| 64 |
+func (s *Service) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
|
|
| 65 |
+ return request.New(s.ServiceInfo, s.Handlers, s.Retryer, operation, params, data) |
|
| 66 |
+} |
|
| 67 |
+ |
|
| 68 |
+// buildEndpoint builds the endpoint values the service will use to make requests with. |
|
| 69 |
+func (s *Service) buildEndpoint() {
|
|
| 70 |
+ if aws.StringValue(s.Config.Endpoint) != "" {
|
|
| 71 |
+ s.Endpoint = *s.Config.Endpoint |
|
| 72 |
+ } else if s.Endpoint == "" {
|
|
| 73 |
+ s.Endpoint, s.SigningRegion = |
|
| 74 |
+ endpoints.EndpointForRegion(s.ServiceName, aws.StringValue(s.Config.Region)) |
|
| 75 |
+ } |
|
| 76 |
+ |
|
| 77 |
+ if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
|
|
| 78 |
+ scheme := "https" |
|
| 79 |
+ if aws.BoolValue(s.Config.DisableSSL) {
|
|
| 80 |
+ scheme = "http" |
|
| 81 |
+ } |
|
| 82 |
+ s.Endpoint = scheme + "://" + s.Endpoint |
|
| 83 |
+ } |
|
| 84 |
+} |
|
| 85 |
+ |
|
| 86 |
+// AddDebugHandlers injects debug logging handlers into the service to log request |
|
| 87 |
+// debug information. |
|
| 88 |
+func (s *Service) AddDebugHandlers() {
|
|
| 89 |
+ if !s.Config.LogLevel.AtLeast(aws.LogDebug) {
|
|
| 90 |
+ return |
|
| 91 |
+ } |
|
| 92 |
+ |
|
| 93 |
+ s.Handlers.Send.PushFront(logRequest) |
|
| 94 |
+ s.Handlers.Send.PushBack(logResponse) |
|
| 95 |
+} |
|
| 96 |
+ |
|
| 97 |
+const logReqMsg = `DEBUG: Request %s/%s Details: |
|
| 98 |
+---[ REQUEST POST-SIGN ]----------------------------- |
|
| 99 |
+%s |
|
| 100 |
+-----------------------------------------------------` |
|
| 101 |
+ |
|
| 102 |
+func logRequest(r *request.Request) {
|
|
| 103 |
+ logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) |
|
| 104 |
+ dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) |
|
| 105 |
+ |
|
| 106 |
+ if logBody {
|
|
| 107 |
+ // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's |
|
| 108 |
+ // Body as a NoOpCloser and will not be reset after read by the HTTP |
|
| 109 |
+ // client reader. |
|
| 110 |
+ r.Body.Seek(r.BodyStart, 0) |
|
| 111 |
+ r.HTTPRequest.Body = ioutil.NopCloser(r.Body) |
|
| 112 |
+ } |
|
| 113 |
+ |
|
| 114 |
+ r.Service.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.Service.ServiceName, r.Operation.Name, string(dumpedBody))) |
|
| 115 |
+} |
|
| 116 |
+ |
|
| 117 |
+const logRespMsg = `DEBUG: Response %s/%s Details: |
|
| 118 |
+---[ RESPONSE ]-------------------------------------- |
|
| 119 |
+%s |
|
| 120 |
+-----------------------------------------------------` |
|
| 121 |
+ |
|
| 122 |
+func logResponse(r *request.Request) {
|
|
| 123 |
+ var msg = "no reponse data" |
|
| 124 |
+ if r.HTTPResponse != nil {
|
|
| 125 |
+ logBody := r.Service.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) |
|
| 126 |
+ dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) |
|
| 127 |
+ msg = string(dumpedBody) |
|
| 128 |
+ } else if r.Error != nil {
|
|
| 129 |
+ msg = r.Error.Error() |
|
| 130 |
+ } |
|
| 131 |
+ r.Service.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.Service.ServiceName, r.Operation.Name, msg)) |
|
| 132 |
+} |
| 0 | 133 |
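buildEndpoint above only consults the region/endpoint table when no explicit endpoint is configured, and picks the scheme from DisableSSL. That makes pointing a client at a non-standard endpoint purely a configuration change; a sketch (the host and port are hypothetical):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	// With an explicit Endpoint, buildEndpoint skips the region lookup and,
	// because DisableSSL is set, prefixes "http://" instead of "https://".
	svc := cloudwatchlogs.New(&aws.Config{
		Region:     aws.String("us-east-1"),
		Endpoint:   aws.String("localhost:5000"), // hypothetical local endpoint
		DisableSSL: aws.Bool(true),
	})
	_ = svc
}
```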
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,15 @@ |
| 0 |
+package serviceinfo |
|
| 1 |
+ |
|
| 2 |
+import "github.com/aws/aws-sdk-go/aws" |
|
| 3 |
+ |
|
| 4 |
+// ServiceInfo wraps immutable data from the service.Service structure. |
|
| 5 |
+type ServiceInfo struct {
|
|
| 6 |
+ Config *aws.Config |
|
| 7 |
+ ServiceName string |
|
| 8 |
+ APIVersion string |
|
| 9 |
+ Endpoint string |
|
| 10 |
+ SigningName string |
|
| 11 |
+ SigningRegion string |
|
| 12 |
+ JSONVersion string |
|
| 13 |
+ TargetPrefix string |
|
| 14 |
+} |
| ... | ... |
@@ -2,6 +2,7 @@ package aws |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"io" |
| 5 |
+ "sync" |
|
| 5 | 6 |
) |
| 6 | 7 |
|
| 7 | 8 |
// ReadSeekCloser wraps a io.Reader returning a ReaderSeakerCloser |
| ... | ... |
@@ -53,3 +54,35 @@ func (r ReaderSeekerCloser) Close() error {
|
| 53 | 53 |
} |
| 54 | 54 |
return nil |
| 55 | 55 |
} |
| 56 |
+ |
|
| 57 |
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface. |
|
| 58 |
+// Can be used with the s3manager.Downloader to download content to a buffer |
|
| 59 |
+// in memory. Safe to use concurrently. |
|
| 60 |
+type WriteAtBuffer struct {
|
|
| 61 |
+ buf []byte |
|
| 62 |
+ m sync.Mutex |
|
| 63 |
+} |
|
| 64 |
+ |
|
| 65 |
+// WriteAt writes a slice of bytes to a buffer starting at the position provided. |
|
| 66 |
+// The number of bytes written is returned, or an error. It can overwrite previously |
|
| 67 |
+// written slices if the writes overlap. |
|
| 68 |
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
|
|
| 69 |
+ b.m.Lock() |
|
| 70 |
+ defer b.m.Unlock() |
|
| 71 |
+ |
|
| 72 |
+ expLen := pos + int64(len(p)) |
|
| 73 |
+ if int64(len(b.buf)) < expLen {
|
|
| 74 |
+ newBuf := make([]byte, expLen) |
|
| 75 |
+ copy(newBuf, b.buf) |
|
| 76 |
+ b.buf = newBuf |
|
| 77 |
+ } |
|
| 78 |
+ copy(b.buf[pos:], p) |
|
| 79 |
+ return len(p), nil |
|
| 80 |
+} |
|
| 81 |
+ |
|
| 82 |
+// Bytes returns a slice of bytes written to the buffer. |
|
| 83 |
+func (b *WriteAtBuffer) Bytes() []byte {
|
|
| 84 |
+ b.m.Lock() |
|
| 85 |
+ defer b.m.Unlock() |
|
| 86 |
+ return b.buf[:len(b.buf):len(b.buf)] |
|
| 87 |
+} |
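A short sketch of the WriteAtBuffer added above, exercising out-of-order WriteAt calls the way a concurrent multipart download would; the values are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	buf := &aws.WriteAtBuffer{}

	// Chunks may arrive in any order; the buffer grows as needed.
	buf.WriteAt([]byte("world"), 6)
	buf.WriteAt([]byte("hello "), 0)

	fmt.Printf("%q\n", string(buf.Bytes())) // "hello world"
}
```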
| ... | ... |
@@ -66,6 +66,17 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) |
| 66 | 66 |
return nil |
| 67 | 67 |
} |
| 68 | 68 |
|
| 69 |
+ // unwrap payloads |
|
| 70 |
+ if payload := tag.Get("payload"); payload != "" {
|
|
| 71 |
+ field, _ := value.Type().FieldByName(payload) |
|
| 72 |
+ tag = field.Tag |
|
| 73 |
+ value = elemOf(value.FieldByName(payload)) |
|
| 74 |
+ |
|
| 75 |
+ if !value.IsValid() {
|
|
| 76 |
+ return nil |
|
| 77 |
+ } |
|
| 78 |
+ } |
|
| 79 |
+ |
|
| 69 | 80 |
buf.WriteString("{")
|
| 70 | 81 |
|
| 71 | 82 |
t, fields := value.Type(), []*reflect.StructField{}
|
| ... | ... |
@@ -197,3 +208,11 @@ func writeString(s string, buf *bytes.Buffer) {
|
| 197 | 197 |
} |
| 198 | 198 |
buf.WriteByte('"')
|
| 199 | 199 |
} |
| 200 |
+ |
|
| 201 |
+// Returns the reflection element of a value, if it is a pointer. |
|
| 202 |
+func elemOf(value reflect.Value) reflect.Value {
|
|
| 203 |
+ for value.Kind() == reflect.Ptr {
|
|
| 204 |
+ value = value.Elem() |
|
| 205 |
+ } |
|
| 206 |
+ return value |
|
| 207 |
+} |
| ... | ... |
@@ -7,7 +7,6 @@ import ( |
| 7 | 7 |
"io" |
| 8 | 8 |
"io/ioutil" |
| 9 | 9 |
"reflect" |
| 10 |
- "strings" |
|
| 11 | 10 |
"time" |
| 12 | 11 |
) |
| 13 | 12 |
|
| ... | ... |
@@ -99,7 +98,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa
|
| 99 | 99 |
|
| 100 | 100 |
for i := 0; i < t.NumField(); i++ {
|
| 101 | 101 |
field := t.Field(i) |
| 102 |
- if c := field.Name[0:1]; strings.ToLower(c) == c {
|
|
| 102 |
+ if field.PkgPath != "" {
|
|
| 103 | 103 |
continue // ignore unexported fields |
| 104 | 104 |
} |
| 105 | 105 |
|
| ... | ... |
@@ -10,15 +10,16 @@ import ( |
| 10 | 10 |
"io/ioutil" |
| 11 | 11 |
"strings" |
| 12 | 12 |
|
| 13 |
- "github.com/aws/aws-sdk-go/aws" |
|
| 14 | 13 |
"github.com/aws/aws-sdk-go/aws/awserr" |
| 14 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 15 | 15 |
"github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil" |
| 16 |
+ "github.com/aws/aws-sdk-go/internal/protocol/rest" |
|
| 16 | 17 |
) |
| 17 | 18 |
|
| 18 | 19 |
var emptyJSON = []byte("{}")
|
| 19 | 20 |
|
| 20 | 21 |
// Build builds a JSON payload for a JSON RPC request. |
| 21 |
-func Build(req *aws.Request) {
|
|
| 22 |
+func Build(req *request.Request) {
|
|
| 22 | 23 |
var buf []byte |
| 23 | 24 |
var err error |
| 24 | 25 |
if req.ParamsFilled() {
|
| ... | ... |
@@ -46,7 +47,7 @@ func Build(req *aws.Request) {
|
| 46 | 46 |
} |
| 47 | 47 |
|
| 48 | 48 |
// Unmarshal unmarshals a response for a JSON RPC service. |
| 49 |
-func Unmarshal(req *aws.Request) {
|
|
| 49 |
+func Unmarshal(req *request.Request) {
|
|
| 50 | 50 |
defer req.HTTPResponse.Body.Close() |
| 51 | 51 |
if req.DataFilled() {
|
| 52 | 52 |
err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) |
| ... | ... |
@@ -58,12 +59,12 @@ func Unmarshal(req *aws.Request) {
|
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 | 60 |
// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. |
| 61 |
-func UnmarshalMeta(req *aws.Request) {
|
|
| 62 |
- req.RequestID = req.HTTPResponse.Header.Get("x-amzn-requestid")
|
|
| 61 |
+func UnmarshalMeta(req *request.Request) {
|
|
| 62 |
+ rest.UnmarshalMeta(req) |
|
| 63 | 63 |
} |
| 64 | 64 |
|
| 65 | 65 |
// UnmarshalError unmarshals an error response for a JSON RPC service. |
| 66 |
-func UnmarshalError(req *aws.Request) {
|
|
| 66 |
+func UnmarshalError(req *request.Request) {
|
|
| 67 | 67 |
defer req.HTTPResponse.Body.Close() |
| 68 | 68 |
bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) |
| 69 | 69 |
if err != nil {
|
| ... | ... |
@@ -88,7 +89,7 @@ func UnmarshalError(req *aws.Request) {
|
| 88 | 88 |
req.Error = awserr.NewRequestFailure( |
| 89 | 89 |
awserr.New(codes[len(codes)-1], jsonErr.Message, nil), |
| 90 | 90 |
req.HTTPResponse.StatusCode, |
| 91 |
- "", |
|
| 91 |
+ req.RequestID, |
|
| 92 | 92 |
) |
| 93 | 93 |
} |
| 94 | 94 |
|
| ... | ... |
@@ -1,4 +1,4 @@ |
| 1 |
-// Package rest provides RESTful serialisation of AWS requests and responses. |
|
| 1 |
+// Package rest provides RESTful serialization of AWS requests and responses. |
|
| 2 | 2 |
package rest |
| 3 | 3 |
|
| 4 | 4 |
import ( |
| ... | ... |
@@ -13,8 +13,8 @@ import ( |
| 13 | 13 |
"strings" |
| 14 | 14 |
"time" |
| 15 | 15 |
|
| 16 |
- "github.com/aws/aws-sdk-go/aws" |
|
| 17 | 16 |
"github.com/aws/aws-sdk-go/aws/awserr" |
| 17 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 18 | 18 |
) |
| 19 | 19 |
|
| 20 | 20 |
// RFC822 returns an RFC822 formatted timestamp for AWS protocols |
| ... | ... |
@@ -37,7 +37,7 @@ func init() {
|
| 37 | 37 |
} |
| 38 | 38 |
|
| 39 | 39 |
// Build builds the REST component of a service request. |
| 40 |
-func Build(r *aws.Request) {
|
|
| 40 |
+func Build(r *request.Request) {
|
|
| 41 | 41 |
if r.ParamsFilled() {
|
| 42 | 42 |
v := reflect.ValueOf(r.Params).Elem() |
| 43 | 43 |
buildLocationElements(r, v) |
| ... | ... |
@@ -45,7 +45,7 @@ func Build(r *aws.Request) {
|
| 45 | 45 |
} |
| 46 | 46 |
} |
| 47 | 47 |
|
| 48 |
-func buildLocationElements(r *aws.Request, v reflect.Value) {
|
|
| 48 |
+func buildLocationElements(r *request.Request, v reflect.Value) {
|
|
| 49 | 49 |
query := r.HTTPRequest.URL.Query() |
| 50 | 50 |
|
| 51 | 51 |
for i := 0; i < v.NumField(); i++ {
|
| ... | ... |
@@ -87,7 +87,7 @@ func buildLocationElements(r *aws.Request, v reflect.Value) {
|
| 87 | 87 |
updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) |
| 88 | 88 |
} |
| 89 | 89 |
|
| 90 |
-func buildBody(r *aws.Request, v reflect.Value) {
|
|
| 90 |
+func buildBody(r *request.Request, v reflect.Value) {
|
|
| 91 | 91 |
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
|
| 92 | 92 |
if payloadName := field.Tag.Get("payload"); payloadName != "" {
|
| 93 | 93 |
pfield, _ := v.Type().FieldByName(payloadName) |
| ... | ... |
@@ -112,7 +112,7 @@ func buildBody(r *aws.Request, v reflect.Value) {
|
| 112 | 112 |
} |
| 113 | 113 |
} |
| 114 | 114 |
|
| 115 |
-func buildHeader(r *aws.Request, v reflect.Value, name string) {
|
|
| 115 |
+func buildHeader(r *request.Request, v reflect.Value, name string) {
|
|
| 116 | 116 |
str, err := convertType(v) |
| 117 | 117 |
if err != nil {
|
| 118 | 118 |
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
|
| ... | ... |
@@ -121,7 +121,7 @@ func buildHeader(r *aws.Request, v reflect.Value, name string) {
|
| 121 | 121 |
} |
| 122 | 122 |
} |
| 123 | 123 |
|
| 124 |
-func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
|
|
| 124 |
+func buildHeaderMap(r *request.Request, v reflect.Value, prefix string) {
|
|
| 125 | 125 |
for _, key := range v.MapKeys() {
|
| 126 | 126 |
str, err := convertType(v.MapIndex(key)) |
| 127 | 127 |
if err != nil {
|
| ... | ... |
@@ -132,7 +132,7 @@ func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
|
| 132 | 132 |
} |
| 133 | 133 |
} |
| 134 | 134 |
|
| 135 |
-func buildURI(r *aws.Request, v reflect.Value, name string) {
|
|
| 135 |
+func buildURI(r *request.Request, v reflect.Value, name string) {
|
|
| 136 | 136 |
value, err := convertType(v) |
| 137 | 137 |
if err != nil {
|
| 138 | 138 |
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
|
| ... | ... |
@@ -144,7 +144,7 @@ func buildURI(r *aws.Request, v reflect.Value, name string) {
|
| 144 | 144 |
} |
| 145 | 145 |
} |
| 146 | 146 |
|
| 147 |
-func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) {
|
|
| 147 |
+func buildQueryString(r *request.Request, v reflect.Value, name string, query url.Values) {
|
|
| 148 | 148 |
str, err := convertType(v) |
| 149 | 149 |
if err != nil {
|
| 150 | 150 |
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
|
| ... | ... |
@@ -156,8 +156,13 @@ func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Va |
| 156 | 156 |
func updatePath(url *url.URL, urlPath string) {
|
| 157 | 157 |
scheme, query := url.Scheme, url.RawQuery |
| 158 | 158 |
|
| 159 |
+ hasSlash := strings.HasSuffix(urlPath, "/") |
|
| 160 |
+ |
|
| 159 | 161 |
// clean up path |
| 160 | 162 |
urlPath = path.Clean(urlPath) |
| 163 |
+ if hasSlash && !strings.HasSuffix(urlPath, "/") {
|
|
| 164 |
+ urlPath += "/" |
|
| 165 |
+ } |
|
| 161 | 166 |
|
| 162 | 167 |
// get formatted URL minus scheme so we can build this into Opaque |
| 163 | 168 |
url.Scheme, url.Path, url.RawQuery = "", "", "" |
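The hasSlash guard added here exists because path.Clean drops a trailing slash, which can change the meaning of a request path (an S3-style key with and without the slash are different resources). A standalone sketch of the behavior the new code preserves:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	urlPath := "/bucket/some/key/"

	hasSlash := strings.HasSuffix(urlPath, "/")
	cleaned := path.Clean(urlPath) // "/bucket/some/key" — trailing slash dropped
	if hasSlash && !strings.HasSuffix(cleaned, "/") {
		cleaned += "/" // restore it, as updatePath now does
	}

	fmt.Println(cleaned) // "/bucket/some/key/"
}
```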
| ... | ... |
@@ -12,18 +12,27 @@ import ( |
| 12 | 12 |
|
| 13 | 13 |
"github.com/aws/aws-sdk-go/aws" |
| 14 | 14 |
"github.com/aws/aws-sdk-go/aws/awserr" |
| 15 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 15 | 16 |
) |
| 16 | 17 |
|
| 17 | 18 |
// Unmarshal unmarshals the REST component of a response in a REST service. |
| 18 |
-func Unmarshal(r *aws.Request) {
|
|
| 19 |
+func Unmarshal(r *request.Request) {
|
|
| 19 | 20 |
if r.DataFilled() {
|
| 20 | 21 |
v := reflect.Indirect(reflect.ValueOf(r.Data)) |
| 21 | 22 |
unmarshalBody(r, v) |
| 23 |
+ } |
|
| 24 |
+} |
|
| 25 |
+ |
|
| 26 |
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service. |
|
| 27 |
+func UnmarshalMeta(r *request.Request) {
|
|
| 28 |
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
|
|
| 29 |
+ if r.DataFilled() {
|
|
| 30 |
+ v := reflect.Indirect(reflect.ValueOf(r.Data)) |
|
| 22 | 31 |
unmarshalLocationElements(r, v) |
| 23 | 32 |
} |
| 24 | 33 |
} |
| 25 | 34 |
|
| 26 |
-func unmarshalBody(r *aws.Request, v reflect.Value) {
|
|
| 35 |
+func unmarshalBody(r *request.Request, v reflect.Value) {
|
|
| 27 | 36 |
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
|
| 28 | 37 |
if payloadName := field.Tag.Get("payload"); payloadName != "" {
|
| 29 | 38 |
pfield, _ := v.Type().FieldByName(payloadName) |
| ... | ... |
@@ -64,7 +73,7 @@ func unmarshalBody(r *aws.Request, v reflect.Value) {
|
| 64 | 64 |
} |
| 65 | 65 |
} |
| 66 | 66 |
|
| 67 |
-func unmarshalLocationElements(r *aws.Request, v reflect.Value) {
|
|
| 67 |
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
|
|
| 68 | 68 |
for i := 0; i < v.NumField(); i++ {
|
| 69 | 69 |
m, field := v.Field(i), v.Type().Field(i) |
| 70 | 70 |
if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
|
| ... | ... |
@@ -16,6 +16,7 @@ import ( |
| 16 | 16 |
|
| 17 | 17 |
"github.com/aws/aws-sdk-go/aws" |
| 18 | 18 |
"github.com/aws/aws-sdk-go/aws/credentials" |
| 19 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 19 | 20 |
"github.com/aws/aws-sdk-go/internal/protocol/rest" |
| 20 | 21 |
) |
| 21 | 22 |
|
| ... | ... |
@@ -63,7 +64,7 @@ type signer struct {
|
| 63 | 63 |
// Will sign the requests with the service config's Credentials object |
| 64 | 64 |
// Signing is skipped if the credentials is the credentials.AnonymousCredentials |
| 65 | 65 |
// object. |
| 66 |
-func Sign(req *aws.Request) {
|
|
| 66 |
+func Sign(req *request.Request) {
|
|
| 67 | 67 |
// If the request does not need to be signed ignore the signing of the |
| 68 | 68 |
// request if the AnonymousCredentials object is used. |
| 69 | 69 |
if req.Service.Config.Credentials == credentials.AnonymousCredentials {
|
| ... | ... |
@@ -4,15 +4,74 @@ |
| 4 | 4 |
package cloudwatchlogs |
| 5 | 5 |
|
| 6 | 6 |
import ( |
| 7 |
- "github.com/aws/aws-sdk-go/aws" |
|
| 8 | 7 |
"github.com/aws/aws-sdk-go/aws/awsutil" |
| 8 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 9 | 9 |
) |
| 10 | 10 |
|
| 11 |
+const opCancelExportTask = "CancelExportTask" |
|
| 12 |
+ |
|
| 13 |
+// CancelExportTaskRequest generates a request for the CancelExportTask operation. |
|
| 14 |
+func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) {
|
|
| 15 |
+ op := &request.Operation{
|
|
| 16 |
+ Name: opCancelExportTask, |
|
| 17 |
+ HTTPMethod: "POST", |
|
| 18 |
+ HTTPPath: "/", |
|
| 19 |
+ } |
|
| 20 |
+ |
|
| 21 |
+ if input == nil {
|
|
| 22 |
+ input = &CancelExportTaskInput{}
|
|
| 23 |
+ } |
|
| 24 |
+ |
|
| 25 |
+ req = c.newRequest(op, input, output) |
|
| 26 |
+ output = &CancelExportTaskOutput{}
|
|
| 27 |
+ req.Data = output |
|
| 28 |
+ return |
|
| 29 |
+} |
|
| 30 |
+ |
|
| 31 |
+// Cancels an export task if it is in PENDING or RUNNING state. |
|
| 32 |
+func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) {
|
|
| 33 |
+ req, out := c.CancelExportTaskRequest(input) |
|
| 34 |
+ err := req.Send() |
|
| 35 |
+ return out, err |
|
| 36 |
+} |
|
| 37 |
+ |
|
| 38 |
+const opCreateExportTask = "CreateExportTask" |
|
| 39 |
+ |
|
| 40 |
+// CreateExportTaskRequest generates a request for the CreateExportTask operation. |
|
| 41 |
+func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) {
|
|
| 42 |
+ op := &request.Operation{
|
|
| 43 |
+ Name: opCreateExportTask, |
|
| 44 |
+ HTTPMethod: "POST", |
|
| 45 |
+ HTTPPath: "/", |
|
| 46 |
+ } |
|
| 47 |
+ |
|
| 48 |
+ if input == nil {
|
|
| 49 |
+ input = &CreateExportTaskInput{}
|
|
| 50 |
+ } |
|
| 51 |
+ |
|
| 52 |
+ req = c.newRequest(op, input, output) |
|
| 53 |
+ output = &CreateExportTaskOutput{}
|
|
| 54 |
+ req.Data = output |
|
| 55 |
+ return |
|
| 56 |
+} |
|
| 57 |
+ |
|
| 58 |
+// Creates an ExportTask which allows you to efficiently export data from a |
|
| 59 |
+// Log Group to your Amazon S3 bucket. |
|
| 60 |
+// |
|
| 61 |
+// This is an asynchronous call. If all the required information is provided, |
|
| 62 |
+// this API will initiate an export task and respond with the task Id. Once |
|
| 63 |
+// started, DescribeExportTasks can be used to get the status of an export task. |
|
| 64 |
+func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) {
|
|
| 65 |
+ req, out := c.CreateExportTaskRequest(input) |
|
| 66 |
+ err := req.Send() |
|
| 67 |
+ return out, err |
|
| 68 |
+} |
|
| 69 |
+ |
|
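The comment above describes an asynchronous workflow: CreateExportTask returns a task identifier and DescribeExportTasks is used to check on it later. A rough sketch of that flow, assuming the input struct carries the usual log-group, destination-bucket, and millisecond time-range fields (those field names are assumptions, not taken from this diff):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(&aws.Config{Region: aws.String("us-east-1")})

	// Start the export; only a task identifier comes back, not the data.
	created, err := svc.CreateExportTask(&cloudwatchlogs.CreateExportTaskInput{
		LogGroupName: aws.String("example-group"),  // assumed field name
		Destination:  aws.String("example-bucket"), // assumed field name (S3 bucket)
		From:         aws.Int64(0),                 // assumed field name (start, ms)
		To:           aws.Int64(1441000000000),     // assumed field name (end, ms)
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("started:", created)

	// Later, poll the task list to see whether the export finished.
	tasks, err := svc.DescribeExportTasks(nil)
	if err != nil {
		fmt.Println("describe failed:", err)
		return
	}
	fmt.Println(tasks)
}
```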
| 11 | 70 |
const opCreateLogGroup = "CreateLogGroup" |
| 12 | 71 |
|
| 13 | 72 |
// CreateLogGroupRequest generates a request for the CreateLogGroup operation. |
| 14 |
-func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *aws.Request, output *CreateLogGroupOutput) {
|
|
| 15 |
- op := &aws.Operation{
|
|
| 73 |
+func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) {
|
|
| 74 |
+ op := &request.Operation{
|
|
| 16 | 75 |
Name: opCreateLogGroup, |
| 17 | 76 |
HTTPMethod: "POST", |
| 18 | 77 |
HTTPPath: "/", |
| ... | ... |
@@ -44,8 +103,8 @@ func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogG |
| 44 | 44 |
const opCreateLogStream = "CreateLogStream" |
| 45 | 45 |
|
| 46 | 46 |
// CreateLogStreamRequest generates a request for the CreateLogStream operation. |
| 47 |
-func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *aws.Request, output *CreateLogStreamOutput) {
|
|
| 48 |
- op := &aws.Operation{
|
|
| 47 |
+func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) {
|
|
| 48 |
+ op := &request.Operation{
|
|
| 49 | 49 |
Name: opCreateLogStream, |
| 50 | 50 |
HTTPMethod: "POST", |
| 51 | 51 |
HTTPPath: "/", |
| ... | ... |
@@ -77,8 +136,8 @@ func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLo |
| 77 | 77 |
const opDeleteDestination = "DeleteDestination" |
| 78 | 78 |
|
| 79 | 79 |
// DeleteDestinationRequest generates a request for the DeleteDestination operation. |
| 80 |
-func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *aws.Request, output *DeleteDestinationOutput) {
|
|
| 81 |
- op := &aws.Operation{
|
|
| 80 |
+func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) {
|
|
| 81 |
+ op := &request.Operation{
|
|
| 82 | 82 |
Name: opDeleteDestination, |
| 83 | 83 |
HTTPMethod: "POST", |
| 84 | 84 |
HTTPPath: "/", |
| ... | ... |
@@ -106,8 +165,8 @@ func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*Dele |
| 106 | 106 |
const opDeleteLogGroup = "DeleteLogGroup" |
| 107 | 107 |
|
| 108 | 108 |
// DeleteLogGroupRequest generates a request for the DeleteLogGroup operation. |
| 109 |
-func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *aws.Request, output *DeleteLogGroupOutput) {
|
|
| 110 |
- op := &aws.Operation{
|
|
| 109 |
+func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) {
|
|
| 110 |
+ op := &request.Operation{
|
|
| 111 | 111 |
Name: opDeleteLogGroup, |
| 112 | 112 |
HTTPMethod: "POST", |
| 113 | 113 |
HTTPPath: "/", |
| ... | ... |
@@ -134,8 +193,8 @@ func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogG |
| 134 | 134 |
const opDeleteLogStream = "DeleteLogStream" |
| 135 | 135 |
|
| 136 | 136 |
// DeleteLogStreamRequest generates a request for the DeleteLogStream operation. |
| 137 |
-func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *aws.Request, output *DeleteLogStreamOutput) {
|
|
| 138 |
- op := &aws.Operation{
|
|
| 137 |
+func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) {
|
|
| 138 |
+ op := &request.Operation{
|
|
| 139 | 139 |
Name: opDeleteLogStream, |
| 140 | 140 |
HTTPMethod: "POST", |
| 141 | 141 |
HTTPPath: "/", |
| ... | ... |
@@ -162,8 +221,8 @@ func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLo |
| 162 | 162 |
const opDeleteMetricFilter = "DeleteMetricFilter" |
| 163 | 163 |
|
| 164 | 164 |
// DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation. |
| 165 |
-func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *aws.Request, output *DeleteMetricFilterOutput) {
|
|
| 166 |
- op := &aws.Operation{
|
|
| 165 |
+func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) {
|
|
| 166 |
+ op := &request.Operation{
|
|
| 167 | 167 |
Name: opDeleteMetricFilter, |
| 168 | 168 |
HTTPMethod: "POST", |
| 169 | 169 |
HTTPPath: "/", |
| ... | ... |
@@ -189,8 +248,8 @@ func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*De |
| 189 | 189 |
const opDeleteRetentionPolicy = "DeleteRetentionPolicy" |
| 190 | 190 |
|
| 191 | 191 |
// DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation. |
| 192 |
-func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *aws.Request, output *DeleteRetentionPolicyOutput) {
|
|
| 193 |
- op := &aws.Operation{
|
|
| 192 |
+func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) {
|
|
| 193 |
+ op := &request.Operation{
|
|
| 194 | 194 |
Name: opDeleteRetentionPolicy, |
| 195 | 195 |
HTTPMethod: "POST", |
| 196 | 196 |
HTTPPath: "/", |
| ... | ... |
@@ -217,8 +276,8 @@ func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput |
| 217 | 217 |
const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" |
| 218 | 218 |
|
| 219 | 219 |
// DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation. |
| 220 |
-func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *aws.Request, output *DeleteSubscriptionFilterOutput) {
|
|
| 221 |
- op := &aws.Operation{
|
|
| 220 |
+func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) {
|
|
| 221 |
+ op := &request.Operation{
|
|
| 222 | 222 |
Name: opDeleteSubscriptionFilter, |
| 223 | 223 |
HTTPMethod: "POST", |
| 224 | 224 |
HTTPPath: "/", |
| ... | ... |
@@ -244,11 +303,17 @@ func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilte |
| 244 | 244 |
const opDescribeDestinations = "DescribeDestinations" |
| 245 | 245 |
|
| 246 | 246 |
// DescribeDestinationsRequest generates a request for the DescribeDestinations operation. |
| 247 |
-func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *aws.Request, output *DescribeDestinationsOutput) {
|
|
| 248 |
- op := &aws.Operation{
|
|
| 247 |
+func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) {
|
|
| 248 |
+ op := &request.Operation{
|
|
| 249 | 249 |
Name: opDescribeDestinations, |
| 250 | 250 |
HTTPMethod: "POST", |
| 251 | 251 |
HTTPPath: "/", |
| 252 |
+ Paginator: &request.Paginator{
|
|
| 253 |
+ InputTokens: []string{"nextToken"},
|
|
| 254 |
+ OutputTokens: []string{"nextToken"},
|
|
| 255 |
+ LimitToken: "limit", |
|
| 256 |
+ TruncationToken: "", |
|
| 257 |
+ }, |
|
| 252 | 258 |
} |
| 253 | 259 |
|
| 254 | 260 |
if input == nil {
|
| ... | ... |
@@ -275,15 +340,56 @@ func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) |
| 275 | 275 |
return out, err |
| 276 | 276 |
} |
| 277 | 277 |
|
| 278 |
+func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error {
|
|
| 279 |
+ page, _ := c.DescribeDestinationsRequest(input) |
|
| 280 |
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
|
|
| 281 |
+ return fn(p.(*DescribeDestinationsOutput), lastPage) |
|
| 282 |
+ }) |
|
| 283 |
+} |
|
| 284 |
+ |
|
| 285 |
+const opDescribeExportTasks = "DescribeExportTasks" |
|
| 286 |
+ |
|
| 287 |
+// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. |
|
| 288 |
+func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) {
|
|
| 289 |
+ op := &request.Operation{
|
|
| 290 |
+ Name: opDescribeExportTasks, |
|
| 291 |
+ HTTPMethod: "POST", |
|
| 292 |
+ HTTPPath: "/", |
|
| 293 |
+ } |
|
| 294 |
+ |
|
| 295 |
+ if input == nil {
|
|
| 296 |
+ input = &DescribeExportTasksInput{}
|
|
| 297 |
+ } |
|
| 298 |
+ |
|
| 299 |
+ req = c.newRequest(op, input, output) |
|
| 300 |
+ output = &DescribeExportTasksOutput{}
|
|
| 301 |
+ req.Data = output |
|
| 302 |
+ return |
|
| 303 |
+} |
|
| 304 |
+ |
|
| 305 |
+// Returns all the export tasks that are associated with the AWS account making |
|
| 306 |
+// the request. The export tasks can be filtered based on TaskId or TaskStatus. |
|
| 307 |
+// |
|
| 308 |
+// By default, this operation returns up to 50 export tasks that satisfy the |
|
| 309 |
+// specified filters. If there are more export tasks to list, the response would |
|
| 310 |
+// contain a nextToken value in the response body. You can also limit the number |
|
| 311 |
+// of export tasks returned in the response by specifying the limit parameter |
|
| 312 |
+// in the request. |
|
| 313 |
+func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) {
|
|
| 314 |
+ req, out := c.DescribeExportTasksRequest(input) |
|
| 315 |
+ err := req.Send() |
|
| 316 |
+ return out, err |
|
| 317 |
+} |
|
| 318 |
+ |
|
| 278 | 319 |
const opDescribeLogGroups = "DescribeLogGroups" |
| 279 | 320 |
|
| 280 | 321 |
// DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation. |
| 281 |
-func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *aws.Request, output *DescribeLogGroupsOutput) {
|
|
| 282 |
- op := &aws.Operation{
|
|
| 322 |
+func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) {
|
|
| 323 |
+ op := &request.Operation{
|
|
| 283 | 324 |
Name: opDescribeLogGroups, |
| 284 | 325 |
HTTPMethod: "POST", |
| 285 | 326 |
HTTPPath: "/", |
| 286 |
- Paginator: &aws.Paginator{
|
|
| 327 |
+ Paginator: &request.Paginator{
|
|
| 287 | 328 |
InputTokens: []string{"nextToken"},
|
| 288 | 329 |
OutputTokens: []string{"nextToken"},
|
| 289 | 330 |
LimitToken: "limit", |
| ... | ... |
@@ -325,12 +431,12 @@ func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, f |
| 325 | 325 |
const opDescribeLogStreams = "DescribeLogStreams" |
| 326 | 326 |
|
| 327 | 327 |
// DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation. |
| 328 |
-func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *aws.Request, output *DescribeLogStreamsOutput) {
|
|
| 329 |
- op := &aws.Operation{
|
|
| 328 |
+func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) {
|
|
| 329 |
+ op := &request.Operation{
|
|
| 330 | 330 |
Name: opDescribeLogStreams, |
| 331 | 331 |
HTTPMethod: "POST", |
| 332 | 332 |
HTTPPath: "/", |
| 333 |
- Paginator: &aws.Paginator{
|
|
| 333 |
+ Paginator: &request.Paginator{
|
|
| 334 | 334 |
InputTokens: []string{"nextToken"},
|
| 335 | 335 |
OutputTokens: []string{"nextToken"},
|
| 336 | 336 |
LimitToken: "limit", |
| ... | ... |
@@ -373,12 +479,12 @@ func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, |
| 373 | 373 |
const opDescribeMetricFilters = "DescribeMetricFilters" |
| 374 | 374 |
|
| 375 | 375 |
// DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation. |
| 376 |
-func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *aws.Request, output *DescribeMetricFiltersOutput) {
|
|
| 377 |
- op := &aws.Operation{
|
|
| 376 |
+func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) {
|
|
| 377 |
+ op := &request.Operation{
|
|
| 378 | 378 |
Name: opDescribeMetricFilters, |
| 379 | 379 |
HTTPMethod: "POST", |
| 380 | 380 |
HTTPPath: "/", |
| 381 |
- Paginator: &aws.Paginator{
|
|
| 381 |
+ Paginator: &request.Paginator{
|
|
| 382 | 382 |
InputTokens: []string{"nextToken"},
|
| 383 | 383 |
OutputTokens: []string{"nextToken"},
|
| 384 | 384 |
LimitToken: "limit", |
| ... | ... |
@@ -419,11 +525,17 @@ func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFilters |
| 419 | 419 |
const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" |
| 420 | 420 |
|
| 421 | 421 |
// DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation. |
| 422 |
-func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *aws.Request, output *DescribeSubscriptionFiltersOutput) {
|
|
| 423 |
- op := &aws.Operation{
|
|
| 422 |
+func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) {
|
|
| 423 |
+ op := &request.Operation{
|
|
| 424 | 424 |
Name: opDescribeSubscriptionFilters, |
| 425 | 425 |
HTTPMethod: "POST", |
| 426 | 426 |
HTTPPath: "/", |
| 427 |
+ Paginator: &request.Paginator{
|
|
| 428 |
+ InputTokens: []string{"nextToken"},
|
|
| 429 |
+ OutputTokens: []string{"nextToken"},
|
|
| 430 |
+ LimitToken: "limit", |
|
| 431 |
+ TruncationToken: "", |
|
| 432 |
+ }, |
|
| 427 | 433 |
} |
| 428 | 434 |
|
| 429 | 435 |
if input == nil {
|
| ... | ... |
@@ -450,14 +562,27 @@ func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscription |
| 450 | 450 |
return out, err |
| 451 | 451 |
} |
| 452 | 452 |
|
| 453 |
+func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error {
|
|
| 454 |
+ page, _ := c.DescribeSubscriptionFiltersRequest(input) |
|
| 455 |
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
|
|
| 456 |
+ return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage) |
|
| 457 |
+ }) |
|
| 458 |
+} |
|
| 459 |
+ |
|
| 453 | 460 |
const opFilterLogEvents = "FilterLogEvents" |
| 454 | 461 |
|
| 455 | 462 |
// FilterLogEventsRequest generates a request for the FilterLogEvents operation. |
| 456 |
-func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *aws.Request, output *FilterLogEventsOutput) {
| 457 |
- op := &aws.Operation{
| 463 |
+func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) {
| 464 |
+ op := &request.Operation{
| 458 | 465 |
Name: opFilterLogEvents, |
| 459 | 466 |
HTTPMethod: "POST", |
| 460 | 467 |
HTTPPath: "/", |
| 468 |
+ Paginator: &request.Paginator{
| 469 |
+ InputTokens: []string{"nextToken"}, |
| 470 |
+ OutputTokens: []string{"nextToken"}, |
| 471 |
+ LimitToken: "limit", |
| 472 |
+ TruncationToken: "", |
| 473 |
+ }, |
| 461 | 474 |
} |
| 462 | 475 |
|
| 463 | 476 |
if input == nil {
| ... | ... |
@@ -490,15 +615,22 @@ func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLo |
| 490 | 490 |
return out, err |
| 491 | 491 |
} |
| 492 | 492 |
|
| 493 |
+func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error {
| 494 |
+ page, _ := c.FilterLogEventsRequest(input) |
| 495 |
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
| 496 |
+ return fn(p.(*FilterLogEventsOutput), lastPage) |
| 497 |
+ }) |
| 498 |
+} |
| 499 |
+ |
| 493 | 500 |
const opGetLogEvents = "GetLogEvents" |
| 494 | 501 |
|
| 495 | 502 |
// GetLogEventsRequest generates a request for the GetLogEvents operation. |
| 496 |
-func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *aws.Request, output *GetLogEventsOutput) {
| 497 |
- op := &aws.Operation{
| 503 |
+func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) {
| 504 |
+ op := &request.Operation{
| 498 | 505 |
Name: opGetLogEvents, |
| 499 | 506 |
HTTPMethod: "POST", |
| 500 | 507 |
HTTPPath: "/", |
| 501 |
- Paginator: &aws.Paginator{
| 508 |
+ Paginator: &request.Paginator{
| 502 | 509 |
InputTokens: []string{"nextToken"}, |
| 503 | 510 |
OutputTokens: []string{"nextForwardToken"}, |
| 504 | 511 |
LimitToken: "limit", |
| ... | ... |
@@ -542,8 +674,8 @@ func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p * |
| 542 | 542 |
const opPutDestination = "PutDestination" |
| 543 | 543 |
|
| 544 | 544 |
// PutDestinationRequest generates a request for the PutDestination operation. |
| 545 |
-func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *aws.Request, output *PutDestinationOutput) {
| 546 |
- op := &aws.Operation{
| 545 |
+func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) {
| 546 |
+ op := &request.Operation{
| 547 | 547 |
Name: opPutDestination, |
| 548 | 548 |
HTTPMethod: "POST", |
| 549 | 549 |
HTTPPath: "/", |
| ... | ... |
@@ -579,8 +711,8 @@ func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestina |
| 579 | 579 |
const opPutDestinationPolicy = "PutDestinationPolicy" |
| 580 | 580 |
|
| 581 | 581 |
// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation. |
| 582 |
-func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *aws.Request, output *PutDestinationPolicyOutput) {
| 583 |
- op := &aws.Operation{
| 582 |
+func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) {
| 583 |
+ op := &request.Operation{
| 584 | 584 |
Name: opPutDestinationPolicy, |
| 585 | 585 |
HTTPMethod: "POST", |
| 586 | 586 |
HTTPPath: "/", |
| ... | ... |
@@ -609,8 +741,8 @@ func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) |
| 609 | 609 |
const opPutLogEvents = "PutLogEvents" |
| 610 | 610 |
|
| 611 | 611 |
// PutLogEventsRequest generates a request for the PutLogEvents operation. |
| 612 |
-func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *aws.Request, output *PutLogEventsOutput) {
| 613 |
- op := &aws.Operation{
| 612 |
+func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) {
| 613 |
+ op := &request.Operation{
| 614 | 614 |
Name: opPutLogEvents, |
| 615 | 615 |
HTTPMethod: "POST", |
| 616 | 616 |
HTTPPath: "/", |
| ... | ... |
@@ -648,8 +780,8 @@ func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOu |
| 648 | 648 |
const opPutMetricFilter = "PutMetricFilter" |
| 649 | 649 |
|
| 650 | 650 |
// PutMetricFilterRequest generates a request for the PutMetricFilter operation. |
| 651 |
-func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *aws.Request, output *PutMetricFilterOutput) {
| 652 |
- op := &aws.Operation{
| 651 |
+func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) {
| 652 |
+ op := &request.Operation{
| 653 | 653 |
Name: opPutMetricFilter, |
| 654 | 654 |
HTTPMethod: "POST", |
| 655 | 655 |
HTTPPath: "/", |
| ... | ... |
@@ -680,8 +812,8 @@ func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetri |
| 680 | 680 |
const opPutRetentionPolicy = "PutRetentionPolicy" |
| 681 | 681 |
|
| 682 | 682 |
// PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation. |
| 683 |
-func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *aws.Request, output *PutRetentionPolicyOutput) {
| 684 |
- op := &aws.Operation{
| 683 |
+func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) {
| 684 |
+ op := &request.Operation{
| 685 | 685 |
Name: opPutRetentionPolicy, |
| 686 | 686 |
HTTPMethod: "POST", |
| 687 | 687 |
HTTPPath: "/", |
| ... | ... |
@@ -709,8 +841,8 @@ func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*Pu |
| 709 | 709 |
const opPutSubscriptionFilter = "PutSubscriptionFilter" |
| 710 | 710 |
|
| 711 | 711 |
// PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation. |
| 712 |
-func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *aws.Request, output *PutSubscriptionFilterOutput) {
| 713 |
- op := &aws.Operation{
| 712 |
+func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) {
| 713 |
+ op := &request.Operation{
| 714 | 714 |
Name: opPutSubscriptionFilter, |
| 715 | 715 |
HTTPMethod: "POST", |
| 716 | 716 |
HTTPPath: "/", |
| ... | ... |
@@ -745,8 +877,8 @@ func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput |
| 745 | 745 |
const opTestMetricFilter = "TestMetricFilter" |
| 746 | 746 |
|
| 747 | 747 |
// TestMetricFilterRequest generates a request for the TestMetricFilter operation. |
| 748 |
-func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *aws.Request, output *TestMetricFilterOutput) {
| 749 |
- op := &aws.Operation{
| 748 |
+func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) {
| 749 |
+ op := &request.Operation{
| 750 | 750 |
Name: opTestMetricFilter, |
| 751 | 751 |
HTTPMethod: "POST", |
| 752 | 752 |
HTTPPath: "/", |
| ... | ... |
@@ -771,9 +903,113 @@ func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMe |
| 771 | 771 |
return out, err |
| 772 | 772 |
} |
| 773 | 773 |
|
| 774 |
+type CancelExportTaskInput struct {
| 775 |
+ // Id of the export task to cancel. |
| 776 |
+ TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` |
| 777 |
+ |
| 778 |
+ metadataCancelExportTaskInput `json:"-" xml:"-"` |
| 779 |
+} |
| 780 |
+ |
| 781 |
+type metadataCancelExportTaskInput struct {
| 782 |
+ SDKShapeTraits bool `type:"structure"` |
| 783 |
+} |
| 784 |
+ |
| 785 |
+// String returns the string representation |
| 786 |
+func (s CancelExportTaskInput) String() string {
| 787 |
+ return awsutil.Prettify(s) |
| 788 |
+} |
| 789 |
+ |
| 790 |
+// GoString returns the string representation |
| 791 |
+func (s CancelExportTaskInput) GoString() string {
| 792 |
+ return s.String() |
| 793 |
+} |
| 794 |
+ |
| 795 |
+type CancelExportTaskOutput struct {
| 796 |
+ metadataCancelExportTaskOutput `json:"-" xml:"-"` |
| 797 |
+} |
| 798 |
+ |
| 799 |
+type metadataCancelExportTaskOutput struct {
| 800 |
+ SDKShapeTraits bool `type:"structure"` |
| 801 |
+} |
| 802 |
+ |
| 803 |
+// String returns the string representation |
| 804 |
+func (s CancelExportTaskOutput) String() string {
| 805 |
+ return awsutil.Prettify(s) |
| 806 |
+} |
| 807 |
+ |
| 808 |
+// GoString returns the string representation |
| 809 |
+func (s CancelExportTaskOutput) GoString() string {
| 810 |
+ return s.String() |
| 811 |
+} |
| 812 |
+ |
| 813 |
+type CreateExportTaskInput struct {
| 814 |
+ // Name of Amazon S3 bucket to which the log data will be exported. NOTE: Only |
| 815 |
+ // buckets in the same AWS region are supported |
| 816 |
+ Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` |
| 817 |
+ |
| 818 |
+ // Prefix that will be used as the start of Amazon S3 key for every object exported. |
| 819 |
+ // If not specified, this defaults to 'exportedlogs'. |
| 820 |
+ DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` |
| 821 |
+ |
| 822 |
+ // A unix timestamp indicating the start time of the range for the request. |
| 823 |
+ // Events with a timestamp prior to this time will not be exported. |
| 824 |
+ From *int64 `locationName:"from" type:"long" required:"true"` |
| 825 |
+ |
| 826 |
+ // The name of the log group to export. |
| 827 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
| 828 |
+ |
| 829 |
+ // Will only export log streams that match the provided logStreamNamePrefix. |
| 830 |
+ // If you don't specify a value, no prefix filter is applied. |
| 831 |
+ LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` |
| 832 |
+ |
| 833 |
+ // The name of the export task. |
| 834 |
+ TaskName *string `locationName:"taskName" min:"1" type:"string"` |
| 835 |
+ |
| 836 |
+ // A unix timestamp indicating the end time of the range for the request. Events |
| 837 |
+ // with a timestamp later than this time will not be exported. |
| 838 |
+ To *int64 `locationName:"to" type:"long" required:"true"` |
| 839 |
+ |
| 840 |
+ metadataCreateExportTaskInput `json:"-" xml:"-"` |
| 841 |
+} |
| 842 |
+ |
| 843 |
+type metadataCreateExportTaskInput struct {
| 844 |
+ SDKShapeTraits bool `type:"structure"` |
| 845 |
+} |
| 846 |
+ |
| 847 |
+// String returns the string representation |
| 848 |
+func (s CreateExportTaskInput) String() string {
| 849 |
+ return awsutil.Prettify(s) |
| 850 |
+} |
| 851 |
+ |
| 852 |
+// GoString returns the string representation |
| 853 |
+func (s CreateExportTaskInput) GoString() string {
| 854 |
+ return s.String() |
| 855 |
+} |
| 856 |
+ |
| 857 |
+type CreateExportTaskOutput struct {
| 858 |
+ // Id of the export task that got created. |
| 859 |
+ TaskId *string `locationName:"taskId" min:"1" type:"string"` |
| 860 |
+ |
| 861 |
+ metadataCreateExportTaskOutput `json:"-" xml:"-"` |
| 862 |
+} |
| 863 |
+ |
| 864 |
+type metadataCreateExportTaskOutput struct {
| 865 |
+ SDKShapeTraits bool `type:"structure"` |
| 866 |
+} |
| 867 |
+ |
| 868 |
+// String returns the string representation |
| 869 |
+func (s CreateExportTaskOutput) String() string {
| 870 |
+ return awsutil.Prettify(s) |
| 871 |
+} |
| 872 |
+ |
| 873 |
+// GoString returns the string representation |
| 874 |
+func (s CreateExportTaskOutput) GoString() string {
| 875 |
+ return s.String() |
| 876 |
+} |
| 877 |
+ |
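The new export-task shapes above are easiest to read with a concrete caller in mind. A hedged sketch (not part of the diff): the bucket, log group, and task names are placeholders, the timestamps follow the usual CloudWatch Logs millisecond convention, and the matching CreateExportTask client method is assumed to be added elsewhere in this vendoring update.

package cloudwatchlogsexample

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func exportLastDay(c *cloudwatchlogs.CloudWatchLogs) (*cloudwatchlogs.CreateExportTaskOutput, error) {
	now := time.Now()
	in := &cloudwatchlogs.CreateExportTaskInput{
		Destination:  aws.String("my-export-bucket"), // placeholder; must be in the same region
		LogGroupName: aws.String("my-log-group"),     // placeholder
		TaskName:     aws.String("daily-export"),     // optional
		// From/To are unix timestamps in milliseconds.
		From: aws.Int64(now.Add(-24*time.Hour).UnixNano() / int64(time.Millisecond)),
		To:   aws.Int64(now.UnixNano() / int64(time.Millisecond)),
	}
	return c.CreateExportTask(in) // assumed to exist in this SDK revision
}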
| 774 | 878 |
type CreateLogGroupInput struct {
|
| 775 | 879 |
// The name of the log group to create. |
| 776 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 880 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 777 | 881 |
|
| 778 | 882 |
metadataCreateLogGroupInput `json:"-" xml:"-"` |
| 779 | 883 |
} |
| ... | ... |
@@ -812,10 +1048,10 @@ func (s CreateLogGroupOutput) GoString() string {
|
| 812 | 812 |
|
| 813 | 813 |
type CreateLogStreamInput struct {
|
| 814 | 814 |
// The name of the log group under which the log stream is to be created. |
| 815 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 815 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 816 | 816 |
|
| 817 | 817 |
// The name of the log stream to create. |
| 818 |
- LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"` |
|
| 818 |
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` |
|
| 819 | 819 |
|
| 820 | 820 |
metadataCreateLogStreamInput `json:"-" xml:"-"` |
| 821 | 821 |
} |
| ... | ... |
@@ -854,7 +1090,7 @@ func (s CreateLogStreamOutput) GoString() string {
|
| 854 | 854 |
|
| 855 | 855 |
type DeleteDestinationInput struct {
|
| 856 | 856 |
// The name of destination to delete. |
| 857 |
- DestinationName *string `locationName:"destinationName" type:"string" required:"true"` |
|
| 857 |
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` |
|
| 858 | 858 |
|
| 859 | 859 |
metadataDeleteDestinationInput `json:"-" xml:"-"` |
| 860 | 860 |
} |
| ... | ... |
@@ -893,7 +1129,7 @@ func (s DeleteDestinationOutput) GoString() string {
|
| 893 | 893 |
|
| 894 | 894 |
type DeleteLogGroupInput struct {
|
| 895 | 895 |
// The name of the log group to delete. |
| 896 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 896 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 897 | 897 |
|
| 898 | 898 |
metadataDeleteLogGroupInput `json:"-" xml:"-"` |
| 899 | 899 |
} |
| ... | ... |
@@ -932,10 +1168,10 @@ func (s DeleteLogGroupOutput) GoString() string {
|
| 932 | 932 |
|
| 933 | 933 |
type DeleteLogStreamInput struct {
|
| 934 | 934 |
// The name of the log group under which the log stream to delete belongs. |
| 935 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 935 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 936 | 936 |
|
| 937 | 937 |
// The name of the log stream to delete. |
| 938 |
- LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"` |
|
| 938 |
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` |
|
| 939 | 939 |
|
| 940 | 940 |
metadataDeleteLogStreamInput `json:"-" xml:"-"` |
| 941 | 941 |
} |
| ... | ... |
@@ -974,10 +1210,10 @@ func (s DeleteLogStreamOutput) GoString() string {
|
| 974 | 974 |
|
| 975 | 975 |
type DeleteMetricFilterInput struct {
|
| 976 | 976 |
// The name of the metric filter to delete. |
| 977 |
- FilterName *string `locationName:"filterName" type:"string" required:"true"` |
|
| 977 |
+ FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` |
|
| 978 | 978 |
|
| 979 | 979 |
// The name of the log group that is associated with the metric filter to delete. |
| 980 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 980 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 981 | 981 |
|
| 982 | 982 |
metadataDeleteMetricFilterInput `json:"-" xml:"-"` |
| 983 | 983 |
} |
| ... | ... |
@@ -1017,7 +1253,7 @@ func (s DeleteMetricFilterOutput) GoString() string {
|
| 1017 | 1017 |
type DeleteRetentionPolicyInput struct {
|
| 1018 | 1018 |
// The name of the log group that is associated with the retention policy to |
| 1019 | 1019 |
// delete. |
| 1020 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1020 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1021 | 1021 |
|
| 1022 | 1022 |
metadataDeleteRetentionPolicyInput `json:"-" xml:"-"` |
| 1023 | 1023 |
} |
| ... | ... |
@@ -1056,11 +1292,11 @@ func (s DeleteRetentionPolicyOutput) GoString() string {
|
| 1056 | 1056 |
|
| 1057 | 1057 |
type DeleteSubscriptionFilterInput struct {
|
| 1058 | 1058 |
// The name of the subscription filter to delete. |
| 1059 |
- FilterName *string `locationName:"filterName" type:"string" required:"true"` |
|
| 1059 |
+ FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` |
|
| 1060 | 1060 |
|
| 1061 | 1061 |
// The name of the log group that is associated with the subscription filter |
| 1062 | 1062 |
// to delete. |
| 1063 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1063 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1064 | 1064 |
|
| 1065 | 1065 |
metadataDeleteSubscriptionFilterInput `json:"-" xml:"-"` |
| 1066 | 1066 |
} |
| ... | ... |
@@ -1100,15 +1336,15 @@ func (s DeleteSubscriptionFilterOutput) GoString() string {
|
| 1100 | 1100 |
type DescribeDestinationsInput struct {
|
| 1101 | 1101 |
// Will only return destinations that match the provided destinationNamePrefix. |
| 1102 | 1102 |
// If you don't specify a value, no prefix is applied. |
| 1103 |
- DestinationNamePrefix *string `type:"string"` |
|
| 1103 |
+ DestinationNamePrefix *string `min:"1" type:"string"` |
|
| 1104 | 1104 |
|
| 1105 | 1105 |
// The maximum number of results to return. |
| 1106 |
- Limit *int64 `locationName:"limit" type:"integer"` |
|
| 1106 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
|
| 1107 | 1107 |
|
| 1108 | 1108 |
// A string token used for pagination that points to the next page of results. |
| 1109 | 1109 |
// It must be a value obtained from the response of the previous request. The |
| 1110 | 1110 |
// token expires after 24 hours. |
| 1111 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1111 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1112 | 1112 |
|
| 1113 | 1113 |
metadataDescribeDestinationsInput `json:"-" xml:"-"` |
| 1114 | 1114 |
} |
| ... | ... |
@@ -1133,7 +1369,7 @@ type DescribeDestinationsOutput struct {
|
| 1133 | 1133 |
// A string token used for pagination that points to the next page of results. |
| 1134 | 1134 |
// It must be a value obtained from the response of the previous request. The |
| 1135 | 1135 |
// token expires after 24 hours. |
| 1136 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1136 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1137 | 1137 |
|
| 1138 | 1138 |
metadataDescribeDestinationsOutput `json:"-" xml:"-"` |
| 1139 | 1139 |
} |
| ... | ... |
@@ -1152,19 +1388,80 @@ func (s DescribeDestinationsOutput) GoString() string {
|
| 1152 | 1152 |
return s.String() |
| 1153 | 1153 |
} |
| 1154 | 1154 |
|
| 1155 |
+type DescribeExportTasksInput struct {
| 1156 |
+ // The maximum number of items returned in the response. If you don't specify |
| 1157 |
+ // a value, the request would return up to 50 items. |
| 1158 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
| 1159 |
+ |
| 1160 |
+ // A string token used for pagination that points to the next page of results. |
| 1161 |
+ // It must be a value obtained from the response of the previous DescribeExportTasks |
| 1162 |
+ // request. |
| 1163 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
| 1164 |
+ |
| 1165 |
+ // All export tasks that matches the specified status code will be returned. |
| 1166 |
+ // This can return zero or more export tasks. |
| 1167 |
+ StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"` |
| 1168 |
+ |
| 1169 |
+ // Export task that matches the specified task Id will be returned. This can |
| 1170 |
+ // result in zero or one export task. |
| 1171 |
+ TaskId *string `locationName:"taskId" min:"1" type:"string"` |
| 1172 |
+ |
| 1173 |
+ metadataDescribeExportTasksInput `json:"-" xml:"-"` |
| 1174 |
+} |
| 1175 |
+ |
| 1176 |
+type metadataDescribeExportTasksInput struct {
| 1177 |
+ SDKShapeTraits bool `type:"structure"` |
| 1178 |
+} |
| 1179 |
+ |
| 1180 |
+// String returns the string representation |
| 1181 |
+func (s DescribeExportTasksInput) String() string {
| 1182 |
+ return awsutil.Prettify(s) |
| 1183 |
+} |
| 1184 |
+ |
| 1185 |
+// GoString returns the string representation |
| 1186 |
+func (s DescribeExportTasksInput) GoString() string {
| 1187 |
+ return s.String() |
| 1188 |
+} |
| 1189 |
+ |
| 1190 |
+type DescribeExportTasksOutput struct {
| 1191 |
+ // A list of export tasks. |
| 1192 |
+ ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"` |
| 1193 |
+ |
| 1194 |
+ // A string token used for pagination that points to the next page of results. |
| 1195 |
+ // It must be a value obtained from the response of the previous request. The |
| 1196 |
+ // token expires after 24 hours. |
| 1197 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
| 1198 |
+ |
| 1199 |
+ metadataDescribeExportTasksOutput `json:"-" xml:"-"` |
| 1200 |
+} |
| 1201 |
+ |
| 1202 |
+type metadataDescribeExportTasksOutput struct {
| 1203 |
+ SDKShapeTraits bool `type:"structure"` |
| 1204 |
+} |
| 1205 |
+ |
| 1206 |
+// String returns the string representation |
| 1207 |
+func (s DescribeExportTasksOutput) String() string {
| 1208 |
+ return awsutil.Prettify(s) |
| 1209 |
+} |
| 1210 |
+ |
| 1211 |
+// GoString returns the string representation |
| 1212 |
+func (s DescribeExportTasksOutput) GoString() string {
| 1213 |
+ return s.String() |
| 1214 |
+} |
| 1215 |
+ |
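A minimal sketch (not part of the diff) of reading these shapes back, assuming the matching DescribeExportTasks client method is part of the same update; the task id is a placeholder:

package cloudwatchlogsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// exportTaskStatusCode returns the status code (e.g. "COMPLETED") of a single
// export task, or "" if the task was not found.
func exportTaskStatusCode(c *cloudwatchlogs.CloudWatchLogs, taskID string) (string, error) {
	out, err := c.DescribeExportTasks(&cloudwatchlogs.DescribeExportTasksInput{
		TaskId: aws.String(taskID), // filtering by task id yields zero or one task
	})
	if err != nil {
		return "", err
	}
	if len(out.ExportTasks) == 0 || out.ExportTasks[0].Status == nil || out.ExportTasks[0].Status.Code == nil {
		return "", nil
	}
	return *out.ExportTasks[0].Status.Code, nil
}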
| 1155 | 1216 |
type DescribeLogGroupsInput struct {
|
| 1156 | 1217 |
// The maximum number of items returned in the response. If you don't specify |
| 1157 | 1218 |
// a value, the request would return up to 50 items. |
| 1158 |
- Limit *int64 `locationName:"limit" type:"integer"` |
|
| 1219 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
|
| 1159 | 1220 |
|
| 1160 | 1221 |
// Will only return log groups that match the provided logGroupNamePrefix. If |
| 1161 | 1222 |
// you don't specify a value, no prefix filter is applied. |
| 1162 |
- LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" type:"string"` |
|
| 1223 |
+ LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"` |
|
| 1163 | 1224 |
|
| 1164 | 1225 |
// A string token used for pagination that points to the next page of results. |
| 1165 | 1226 |
// It must be a value obtained from the response of the previous DescribeLogGroups |
| 1166 | 1227 |
// request. |
| 1167 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1228 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1168 | 1229 |
|
| 1169 | 1230 |
metadataDescribeLogGroupsInput `json:"-" xml:"-"` |
| 1170 | 1231 |
} |
| ... | ... |
@@ -1190,7 +1487,7 @@ type DescribeLogGroupsOutput struct {
|
| 1190 | 1190 |
// A string token used for pagination that points to the next page of results. |
| 1191 | 1191 |
// It must be a value obtained from the response of the previous request. The |
| 1192 | 1192 |
// token expires after 24 hours. |
| 1193 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1193 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1194 | 1194 |
|
| 1195 | 1195 |
metadataDescribeLogGroupsOutput `json:"-" xml:"-"` |
| 1196 | 1196 |
} |
| ... | ... |
@@ -1216,19 +1513,19 @@ type DescribeLogStreamsInput struct {
|
| 1216 | 1216 |
|
| 1217 | 1217 |
// The maximum number of items returned in the response. If you don't specify |
| 1218 | 1218 |
// a value, the request would return up to 50 items. |
| 1219 |
- Limit *int64 `locationName:"limit" type:"integer"` |
|
| 1219 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
|
| 1220 | 1220 |
|
| 1221 | 1221 |
// The log group name for which log streams are to be listed. |
| 1222 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1222 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1223 | 1223 |
|
| 1224 | 1224 |
// Will only return log streams that match the provided logStreamNamePrefix. |
| 1225 | 1225 |
// If you don't specify a value, no prefix filter is applied. |
| 1226 |
- LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" type:"string"` |
|
| 1226 |
+ LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` |
|
| 1227 | 1227 |
|
| 1228 | 1228 |
// A string token used for pagination that points to the next page of results. |
| 1229 | 1229 |
// It must be a value obtained from the response of the previous DescribeLogStreams |
| 1230 | 1230 |
// request. |
| 1231 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1231 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1232 | 1232 |
|
| 1233 | 1233 |
// Specifies what to order the returned log streams by. Valid arguments are |
| 1234 | 1234 |
// 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results |
| ... | ... |
@@ -1260,7 +1557,7 @@ type DescribeLogStreamsOutput struct {
|
| 1260 | 1260 |
// A string token used for pagination that points to the next page of results. |
| 1261 | 1261 |
// It must be a value obtained from the response of the previous request. The |
| 1262 | 1262 |
// token expires after 24 hours. |
| 1263 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1263 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1264 | 1264 |
|
| 1265 | 1265 |
metadataDescribeLogStreamsOutput `json:"-" xml:"-"` |
| 1266 | 1266 |
} |
| ... | ... |
@@ -1282,19 +1579,19 @@ func (s DescribeLogStreamsOutput) GoString() string {
|
| 1282 | 1282 |
type DescribeMetricFiltersInput struct {
|
| 1283 | 1283 |
// Will only return metric filters that match the provided filterNamePrefix. |
| 1284 | 1284 |
// If you don't specify a value, no prefix filter is applied. |
| 1285 |
- FilterNamePrefix *string `locationName:"filterNamePrefix" type:"string"` |
|
| 1285 |
+ FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` |
|
| 1286 | 1286 |
|
| 1287 | 1287 |
// The maximum number of items returned in the response. If you don't specify |
| 1288 | 1288 |
// a value, the request would return up to 50 items. |
| 1289 |
- Limit *int64 `locationName:"limit" type:"integer"` |
|
| 1289 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
|
| 1290 | 1290 |
|
| 1291 | 1291 |
// The log group name for which metric filters are to be listed. |
| 1292 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1292 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1293 | 1293 |
|
| 1294 | 1294 |
// A string token used for pagination that points to the next page of results. |
| 1295 | 1295 |
// It must be a value obtained from the response of the previous DescribeMetricFilters |
| 1296 | 1296 |
// request. |
| 1297 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1297 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1298 | 1298 |
|
| 1299 | 1299 |
metadataDescribeMetricFiltersInput `json:"-" xml:"-"` |
| 1300 | 1300 |
} |
| ... | ... |
@@ -1319,7 +1616,7 @@ type DescribeMetricFiltersOutput struct {
|
| 1319 | 1319 |
// A string token used for pagination that points to the next page of results. |
| 1320 | 1320 |
// It must be a value obtained from the response of the previous request. The |
| 1321 | 1321 |
// token expires after 24 hours. |
| 1322 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1322 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1323 | 1323 |
|
| 1324 | 1324 |
metadataDescribeMetricFiltersOutput `json:"-" xml:"-"` |
| 1325 | 1325 |
} |
| ... | ... |
@@ -1341,18 +1638,18 @@ func (s DescribeMetricFiltersOutput) GoString() string {
|
| 1341 | 1341 |
type DescribeSubscriptionFiltersInput struct {
|
| 1342 | 1342 |
// Will only return subscription filters that match the provided filterNamePrefix. |
| 1343 | 1343 |
// If you don't specify a value, no prefix filter is applied. |
| 1344 |
- FilterNamePrefix *string `locationName:"filterNamePrefix" type:"string"` |
|
| 1344 |
+ FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` |
|
| 1345 | 1345 |
|
| 1346 | 1346 |
// The maximum number of results to return. |
| 1347 |
- Limit *int64 `locationName:"limit" type:"integer"` |
|
| 1347 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
|
| 1348 | 1348 |
|
| 1349 | 1349 |
// The log group name for which subscription filters are to be listed. |
| 1350 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1350 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1351 | 1351 |
|
| 1352 | 1352 |
// A string token used for pagination that points to the next page of results. |
| 1353 | 1353 |
// It must be a value obtained from the response of the previous request. The |
| 1354 | 1354 |
// token expires after 24 hours. |
| 1355 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1355 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1356 | 1356 |
|
| 1357 | 1357 |
metadataDescribeSubscriptionFiltersInput `json:"-" xml:"-"` |
| 1358 | 1358 |
} |
| ... | ... |
@@ -1375,7 +1672,7 @@ type DescribeSubscriptionFiltersOutput struct {
|
| 1375 | 1375 |
// A string token used for pagination that points to the next page of results. |
| 1376 | 1376 |
// It must be a value obtained from the response of the previous request. The |
| 1377 | 1377 |
// token expires after 24 hours. |
| 1378 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1378 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1379 | 1379 |
|
| 1380 | 1380 |
SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"` |
| 1381 | 1381 |
|
| ... | ... |
@@ -1396,20 +1693,28 @@ func (s DescribeSubscriptionFiltersOutput) GoString() string {
|
| 1396 | 1396 |
return s.String() |
| 1397 | 1397 |
} |
| 1398 | 1398 |
|
| 1399 |
+// A cross account destination that is the recipient of subscription log events. |
| 1399 | 1400 |
type Destination struct {
| 1400 |
- ARN *string `locationName:"arn" type:"string"` |
| 1401 |
+ // An IAM policy document that governs which AWS accounts can create subscription |
| 1402 |
+ // filters against this destination. |
| 1403 |
+ AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"` |
| 1401 | 1404 |
|
| 1402 |
- AccessPolicy *string `locationName:"accessPolicy" type:"string"` |
| 1405 |
+ // ARN of this destination. |
| 1406 |
+ Arn *string `locationName:"arn" type:"string"` |
| 1403 | 1407 |
|
| 1404 | 1408 |
// A point in time expressed as the number of milliseconds since Jan 1, 1970 |
| 1405 |
- // 00:00:00 UTC. |
| 1409 |
+ // 00:00:00 UTC specifying when this destination was created. |
| 1406 | 1410 |
CreationTime *int64 `locationName:"creationTime" type:"long"` |
| 1407 | 1411 |
|
| 1408 |
- DestinationName *string `locationName:"destinationName" type:"string"` |
| 1412 |
+ // Name of the destination. |
| 1413 |
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string"` |
| 1409 | 1414 |
|
| 1410 |
- RoleARN *string `locationName:"roleArn" type:"string"` |
| 1415 |
+ // A role for impersonation for delivering log events to the target. |
| 1416 |
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"` |
| 1411 | 1417 |
|
| 1412 |
- TargetARN *string `locationName:"targetArn" type:"string"` |
| 1418 |
+ // ARN of the physical target where the log events will be delivered (eg. ARN |
| 1419 |
+ // of a Kinesis stream). |
| 1420 |
+ TargetArn *string `locationName:"targetArn" min:"1" type:"string"` |
| 1413 | 1421 |
|
| 1414 | 1422 |
metadataDestination `json:"-" xml:"-"` |
| 1415 | 1423 |
} |
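Beyond the added documentation, the switch from ARN-style to Arn-style field names here and in the hunks below (ARN, RoleARN, TargetARN, DestinationARN become Arn, RoleArn, TargetArn, DestinationArn) is a breaking change for any code that sets or reads these structs. A hedged sketch of the adjustment, using placeholder ARNs:

package cloudwatchlogsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func newDestinationInput() *cloudwatchlogs.PutDestinationInput {
	return &cloudwatchlogs.PutDestinationInput{
		DestinationName: aws.String("my-destination"), // placeholder
		// Field was RoleARN before this update.
		RoleArn: aws.String("arn:aws:iam::123456789012:role/CWLtoKinesisRole"), // placeholder
		// Field was TargetARN before this update.
		TargetArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"), // placeholder
	}
}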
| ... | ... |
@@ -1428,6 +1733,104 @@ func (s Destination) GoString() string {
|
| 1428 | 1428 |
return s.String() |
| 1429 | 1429 |
} |
| 1430 | 1430 |
|
| 1431 |
+// Represents an export task. |
|
| 1432 |
+type ExportTask struct {
|
|
| 1433 |
+ // Name of Amazon S3 bucket to which the log data was exported. |
|
| 1434 |
+ Destination *string `locationName:"destination" min:"1" type:"string"` |
|
| 1435 |
+ |
|
| 1436 |
+ // Prefix that was used as the start of Amazon S3 key for every object exported. |
|
| 1437 |
+ DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` |
|
| 1438 |
+ |
|
| 1439 |
+ // Execution info about the export task. |
|
| 1440 |
+ ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"` |
|
| 1441 |
+ |
|
| 1442 |
+ // A unix timestamp indicating the start time of the range for the request. |
|
| 1443 |
+ // Events with a timestamp prior to this time were not exported. |
|
| 1444 |
+ From *int64 `locationName:"from" type:"long"` |
|
| 1445 |
+ |
|
| 1446 |
+ // The name of the log group from which logs data was exported. |
|
| 1447 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` |
|
| 1448 |
+ |
|
| 1449 |
+ // Status of the export task. |
|
| 1450 |
+ Status *ExportTaskStatus `locationName:"status" type:"structure"` |
|
| 1451 |
+ |
|
| 1452 |
+ // Id of the export task. |
|
| 1453 |
+ TaskId *string `locationName:"taskId" min:"1" type:"string"` |
|
| 1454 |
+ |
|
| 1455 |
+ // The name of the export task. |
|
| 1456 |
+ TaskName *string `locationName:"taskName" min:"1" type:"string"` |
|
| 1457 |
+ |
|
| 1458 |
+ // A unix timestamp indicating the end time of the range for the request. Events |
|
| 1459 |
+ // with a timestamp later than this time were not exported. |
|
| 1460 |
+ To *int64 `locationName:"to" type:"long"` |
|
| 1461 |
+ |
|
| 1462 |
+ metadataExportTask `json:"-" xml:"-"` |
|
| 1463 |
+} |
|
| 1464 |
+ |
|
| 1465 |
+type metadataExportTask struct {
|
|
| 1466 |
+ SDKShapeTraits bool `type:"structure"` |
|
| 1467 |
+} |
|
| 1468 |
+ |
|
| 1469 |
+// String returns the string representation |
|
| 1470 |
+func (s ExportTask) String() string {
|
|
| 1471 |
+ return awsutil.Prettify(s) |
|
| 1472 |
+} |
|
| 1473 |
+ |
|
| 1474 |
+// GoString returns the string representation |
|
| 1475 |
+func (s ExportTask) GoString() string {
|
|
| 1476 |
+ return s.String() |
|
| 1477 |
+} |
|
| 1478 |
+ |
|
| 1479 |
+// Represents the status of an export task. |
|
| 1480 |
+type ExportTaskExecutionInfo struct {
|
|
| 1481 |
+ // A point in time when the export task got completed. |
|
| 1482 |
+ CompletionTime *int64 `locationName:"completionTime" type:"long"` |
|
| 1483 |
+ |
|
| 1484 |
+ // A point in time when the export task got created. |
|
| 1485 |
+ CreationTime *int64 `locationName:"creationTime" type:"long"` |
|
| 1486 |
+ |
|
| 1487 |
+ metadataExportTaskExecutionInfo `json:"-" xml:"-"` |
|
| 1488 |
+} |
|
| 1489 |
+ |
|
| 1490 |
+type metadataExportTaskExecutionInfo struct {
|
|
| 1491 |
+ SDKShapeTraits bool `type:"structure"` |
|
| 1492 |
+} |
|
| 1493 |
+ |
|
| 1494 |
+// String returns the string representation |
|
| 1495 |
+func (s ExportTaskExecutionInfo) String() string {
|
|
| 1496 |
+ return awsutil.Prettify(s) |
|
| 1497 |
+} |
|
| 1498 |
+ |
|
| 1499 |
+// GoString returns the string representation |
|
| 1500 |
+func (s ExportTaskExecutionInfo) GoString() string {
|
|
| 1501 |
+ return s.String() |
|
| 1502 |
+} |
|
| 1503 |
+ |
|
| 1504 |
+// Represents the status of an export task. |
|
| 1505 |
+type ExportTaskStatus struct {
|
|
| 1506 |
+ // Status code of the export task. |
|
| 1507 |
+ Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"` |
|
| 1508 |
+ |
|
| 1509 |
+ // Status message related to the code. |
|
| 1510 |
+ Message *string `locationName:"message" type:"string"` |
|
| 1511 |
+ |
|
| 1512 |
+ metadataExportTaskStatus `json:"-" xml:"-"` |
|
| 1513 |
+} |
|
| 1514 |
+ |
|
| 1515 |
+type metadataExportTaskStatus struct {
|
|
| 1516 |
+ SDKShapeTraits bool `type:"structure"` |
|
| 1517 |
+} |
|
| 1518 |
+ |
|
| 1519 |
+// String returns the string representation |
|
| 1520 |
+func (s ExportTaskStatus) String() string {
|
|
| 1521 |
+ return awsutil.Prettify(s) |
|
| 1522 |
+} |
|
| 1523 |
+ |
|
| 1524 |
+// GoString returns the string representation |
|
| 1525 |
+func (s ExportTaskStatus) GoString() string {
|
|
| 1526 |
+ return s.String() |
|
| 1527 |
+} |
|
| 1528 |
+ |
|
| 1431 | 1529 |
type FilterLogEventsInput struct {
|
| 1432 | 1530 |
// A unix timestamp indicating the end time of the range for the request. If |
| 1433 | 1531 |
// provided, events with a timestamp later than this time will not be returned. |
| ... | ... |
@@ -1445,18 +1848,18 @@ type FilterLogEventsInput struct {
|
| 1445 | 1445 |
|
| 1446 | 1446 |
// The maximum number of events to return in a page of results. Default is 10,000 |
| 1447 | 1447 |
// events. |
| 1448 |
- Limit *int64 `locationName:"limit" type:"integer"` |
|
| 1448 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
|
| 1449 | 1449 |
|
| 1450 | 1450 |
// The name of the log group to query. |
| 1451 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1451 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1452 | 1452 |
|
| 1453 | 1453 |
// Optional list of log stream names within the specified log group to search. |
| 1454 | 1454 |
// Defaults to all the log streams in the log group. |
| 1455 |
- LogStreamNames []*string `locationName:"logStreamNames" type:"list"` |
|
| 1455 |
+ LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` |
|
| 1456 | 1456 |
|
| 1457 | 1457 |
// A pagination token obtained from a FilterLogEvents response to continue paginating |
| 1458 | 1458 |
// the FilterLogEvents results. |
| 1459 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1459 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1460 | 1460 |
|
| 1461 | 1461 |
// A unix timestamp indicating the start time of the range for the request. |
| 1462 | 1462 |
// If provided, events with a timestamp prior to this time will not be returned. |
| ... | ... |
@@ -1486,7 +1889,7 @@ type FilterLogEventsOutput struct {
|
| 1486 | 1486 |
|
| 1487 | 1487 |
// A pagination token obtained from a FilterLogEvents response to continue paginating |
| 1488 | 1488 |
// the FilterLogEvents results. |
| 1489 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1489 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1490 | 1490 |
|
| 1491 | 1491 |
// A list of SearchedLogStream objects indicating which log streams have been |
| 1492 | 1492 |
// searched in this request and whether each has been searched completely or |
| ... | ... |
@@ -1513,17 +1916,17 @@ func (s FilterLogEventsOutput) GoString() string {
|
| 1513 | 1513 |
// Represents a matched event from a FilterLogEvents request. |
| 1514 | 1514 |
type FilteredLogEvent struct {
|
| 1515 | 1515 |
// A unique identifier for this event. |
| 1516 |
- EventID *string `locationName:"eventId" type:"string"` |
|
| 1516 |
+ EventId *string `locationName:"eventId" type:"string"` |
|
| 1517 | 1517 |
|
| 1518 | 1518 |
// A point in time expressed as the number of milliseconds since Jan 1, 1970 |
| 1519 | 1519 |
// 00:00:00 UTC. |
| 1520 | 1520 |
IngestionTime *int64 `locationName:"ingestionTime" type:"long"` |
| 1521 | 1521 |
|
| 1522 | 1522 |
// The name of the log stream this event belongs to. |
| 1523 |
- LogStreamName *string `locationName:"logStreamName" type:"string"` |
|
| 1523 |
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` |
|
| 1524 | 1524 |
|
| 1525 | 1525 |
// The data contained in the log event. |
| 1526 |
- Message *string `locationName:"message" type:"string"` |
|
| 1526 |
+ Message *string `locationName:"message" min:"1" type:"string"` |
|
| 1527 | 1527 |
|
| 1528 | 1528 |
// A point in time expressed as the number of milliseconds since Jan 1, 1970 |
| 1529 | 1529 |
// 00:00:00 UTC. |
| ... | ... |
@@ -1554,18 +1957,18 @@ type GetLogEventsInput struct {
|
| 1554 | 1554 |
// The maximum number of log events returned in the response. If you don't specify |
| 1555 | 1555 |
// a value, the request would return as many log events as can fit in a response |
| 1556 | 1556 |
// size of 1MB, up to 10,000 log events. |
| 1557 |
- Limit *int64 `locationName:"limit" type:"integer"` |
|
| 1557 |
+ Limit *int64 `locationName:"limit" min:"1" type:"integer"` |
|
| 1558 | 1558 |
|
| 1559 | 1559 |
// The name of the log group to query. |
| 1560 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1560 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1561 | 1561 |
|
| 1562 | 1562 |
// The name of the log stream to query. |
| 1563 |
- LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"` |
|
| 1563 |
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` |
|
| 1564 | 1564 |
|
| 1565 | 1565 |
// A string token used for pagination that points to the next page of results. |
| 1566 | 1566 |
// It must be a value obtained from the nextForwardToken or nextBackwardToken |
| 1567 | 1567 |
// fields in the response of the previous GetLogEvents request. |
| 1568 |
- NextToken *string `locationName:"nextToken" type:"string"` |
|
| 1568 |
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` |
|
| 1569 | 1569 |
|
| 1570 | 1570 |
// If set to true, the earliest log events would be returned first. The default |
| 1571 | 1571 |
// is false (the latest log events are returned first). |
| ... | ... |
@@ -1598,12 +2001,12 @@ type GetLogEventsOutput struct {
|
| 1598 | 1598 |
// A string token used for pagination that points to the next page of results. |
| 1599 | 1599 |
// It must be a value obtained from the response of the previous request. The |
| 1600 | 1600 |
// token expires after 24 hours. |
| 1601 |
- NextBackwardToken *string `locationName:"nextBackwardToken" type:"string"` |
|
| 1601 |
+ NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` |
|
| 1602 | 1602 |
|
| 1603 | 1603 |
// A string token used for pagination that points to the next page of results. |
| 1604 | 1604 |
// It must be a value obtained from the response of the previous request. The |
| 1605 | 1605 |
// token expires after 24 hours. |
| 1606 |
- NextForwardToken *string `locationName:"nextForwardToken" type:"string"` |
|
| 1606 |
+ NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` |
|
| 1607 | 1607 |
|
| 1608 | 1608 |
metadataGetLogEventsOutput `json:"-" xml:"-"` |
| 1609 | 1609 |
} |
| ... | ... |
@@ -1627,7 +2030,7 @@ func (s GetLogEventsOutput) GoString() string {
|
| 1627 | 1627 |
// Logs understands contains two properties: the timestamp of when the event |
| 1628 | 1628 |
// occurred, and the raw event message. |
| 1629 | 1629 |
type InputLogEvent struct {
|
| 1630 |
- Message *string `locationName:"message" type:"string" required:"true"` |
|
| 1630 |
+ Message *string `locationName:"message" min:"1" type:"string" required:"true"` |
|
| 1631 | 1631 |
|
| 1632 | 1632 |
// A point in time expressed as the number of milliseconds since Jan 1, 1970 |
| 1633 | 1633 |
// 00:00:00 UTC. |
| ... | ... |
@@ -1651,13 +2054,13 @@ func (s InputLogEvent) GoString() string {
|
| 1651 | 1651 |
} |
| 1652 | 1652 |
|
| 1653 | 1653 |
type LogGroup struct {
|
| 1654 |
- ARN *string `locationName:"arn" type:"string"` |
|
| 1654 |
+ Arn *string `locationName:"arn" type:"string"` |
|
| 1655 | 1655 |
|
| 1656 | 1656 |
// A point in time expressed as the number of milliseconds since Jan 1, 1970 |
| 1657 | 1657 |
// 00:00:00 UTC. |
| 1658 | 1658 |
CreationTime *int64 `locationName:"creationTime" type:"long"` |
| 1659 | 1659 |
|
| 1660 |
- LogGroupName *string `locationName:"logGroupName" type:"string"` |
|
| 1660 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` |
|
| 1661 | 1661 |
|
| 1662 | 1662 |
// The number of metric filters associated with the log group. |
| 1663 | 1663 |
MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"` |
| ... | ... |
@@ -1688,7 +2091,7 @@ func (s LogGroup) GoString() string {
|
| 1688 | 1688 |
|
| 1689 | 1689 |
// A log stream is sequence of log events from a single emitter of logs. |
| 1690 | 1690 |
type LogStream struct {
|
| 1691 |
- ARN *string `locationName:"arn" type:"string"` |
|
| 1691 |
+ Arn *string `locationName:"arn" type:"string"` |
|
| 1692 | 1692 |
|
| 1693 | 1693 |
// A point in time expressed as the number of milliseconds since Jan 1, 1970 |
| 1694 | 1694 |
// 00:00:00 UTC. |
| ... | ... |
@@ -1706,14 +2109,14 @@ type LogStream struct {
|
| 1706 | 1706 |
// 00:00:00 UTC. |
| 1707 | 1707 |
LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` |
| 1708 | 1708 |
|
| 1709 |
- LogStreamName *string `locationName:"logStreamName" type:"string"` |
|
| 1709 |
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` |
|
| 1710 | 1710 |
|
| 1711 | 1711 |
StoredBytes *int64 `locationName:"storedBytes" type:"long"` |
| 1712 | 1712 |
|
| 1713 | 1713 |
// A string token used for making PutLogEvents requests. A sequenceToken can |
| 1714 | 1714 |
// only be used once, and PutLogEvents requests must include the sequenceToken |
| 1715 | 1715 |
// obtained from the response of the previous request. |
| 1716 |
- UploadSequenceToken *string `locationName:"uploadSequenceToken" type:"string"` |
|
| 1716 |
+ UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` |
|
| 1717 | 1717 |
|
| 1718 | 1718 |
metadataLogStream `json:"-" xml:"-"` |
| 1719 | 1719 |
} |
| ... | ... |
@@ -1741,7 +2144,7 @@ type MetricFilter struct {
|
| 1741 | 1741 |
CreationTime *int64 `locationName:"creationTime" type:"long"` |
| 1742 | 1742 |
|
| 1743 | 1743 |
// A name for a metric or subscription filter. |
| 1744 |
- FilterName *string `locationName:"filterName" type:"string"` |
|
| 1744 |
+ FilterName *string `locationName:"filterName" min:"1" type:"string"` |
|
| 1745 | 1745 |
|
| 1746 | 1746 |
// A symbolic description of how Amazon CloudWatch Logs should interpret the |
| 1747 | 1747 |
// data in each log event. For example, a log event may contain timestamps, |
| ... | ... |
@@ -1749,7 +2152,7 @@ type MetricFilter struct {
|
| 1749 | 1749 |
// to look for in the log event message. |
| 1750 | 1750 |
FilterPattern *string `locationName:"filterPattern" type:"string"` |
| 1751 | 1751 |
|
| 1752 |
- MetricTransformations []*MetricTransformation `locationName:"metricTransformations" type:"list"` |
|
| 1752 |
+ MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` |
|
| 1753 | 1753 |
|
| 1754 | 1754 |
metadataMetricFilter `json:"-" xml:"-"` |
| 1755 | 1755 |
} |
| ... | ... |
@@ -1769,7 +2172,7 @@ func (s MetricFilter) GoString() string {
|
| 1769 | 1769 |
} |
| 1770 | 1770 |
|
| 1771 | 1771 |
type MetricFilterMatchRecord struct {
|
| 1772 |
- EventMessage *string `locationName:"eventMessage" type:"string"` |
|
| 1772 |
+ EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` |
|
| 1773 | 1773 |
|
| 1774 | 1774 |
EventNumber *int64 `locationName:"eventNumber" type:"long"` |
| 1775 | 1775 |
|
| ... | ... |
@@ -1828,7 +2231,7 @@ type OutputLogEvent struct {
|
| 1828 | 1828 |
// 00:00:00 UTC. |
| 1829 | 1829 |
IngestionTime *int64 `locationName:"ingestionTime" type:"long"` |
| 1830 | 1830 |
|
| 1831 |
- Message *string `locationName:"message" type:"string"` |
|
| 1831 |
+ Message *string `locationName:"message" min:"1" type:"string"` |
|
| 1832 | 1832 |
|
| 1833 | 1833 |
// A point in time expressed as the number of milliseconds since Jan 1, 1970 |
| 1834 | 1834 |
// 00:00:00 UTC. |
| ... | ... |
@@ -1853,14 +2256,14 @@ func (s OutputLogEvent) GoString() string {
|
| 1853 | 1853 |
|
| 1854 | 1854 |
type PutDestinationInput struct {
|
| 1855 | 1855 |
// A name for the destination. |
| 1856 |
- DestinationName *string `locationName:"destinationName" type:"string" required:"true"` |
|
| 1856 |
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` |
|
| 1857 | 1857 |
|
| 1858 | 1858 |
// The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to |
| 1859 | 1859 |
// do Amazon Kinesis PutRecord requests on the desitnation stream. |
| 1860 |
- RoleARN *string `locationName:"roleArn" type:"string" required:"true"` |
|
| 1860 |
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` |
|
| 1861 | 1861 |
|
| 1862 | 1862 |
// The ARN of an Amazon Kinesis stream to deliver matching log events to. |
| 1863 |
- TargetARN *string `locationName:"targetArn" type:"string" required:"true"` |
|
| 1863 |
+ TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"` |
|
| 1864 | 1864 |
|
| 1865 | 1865 |
metadataPutDestinationInput `json:"-" xml:"-"` |
| 1866 | 1866 |
} |
| ... | ... |
@@ -1880,6 +2283,7 @@ func (s PutDestinationInput) GoString() string {
|
| 1880 | 1880 |
} |
| 1881 | 1881 |
|
| 1882 | 1882 |
type PutDestinationOutput struct {
|
| 1883 |
+ // A cross account destination that is the recipient of subscription log events. |
|
| 1883 | 1884 |
Destination *Destination `locationName:"destination" type:"structure"` |
| 1884 | 1885 |
|
| 1885 | 1886 |
metadataPutDestinationOutput `json:"-" xml:"-"` |
| ... | ... |
@@ -1902,10 +2306,10 @@ func (s PutDestinationOutput) GoString() string {
|
| 1902 | 1902 |
type PutDestinationPolicyInput struct {
|
| 1903 | 1903 |
// An IAM policy document that authorizes cross-account users to deliver their |
| 1904 | 1904 |
// log events to associated destination. |
| 1905 |
- AccessPolicy *string `locationName:"accessPolicy" type:"string" required:"true"` |
|
| 1905 |
+ AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` |
|
| 1906 | 1906 |
|
| 1907 | 1907 |
// A name for an existing destination. |
| 1908 |
- DestinationName *string `locationName:"destinationName" type:"string" required:"true"` |
|
| 1908 |
+ DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` |
|
| 1909 | 1909 |
|
| 1910 | 1910 |
metadataPutDestinationPolicyInput `json:"-" xml:"-"` |
| 1911 | 1911 |
} |
| ... | ... |
@@ -1944,17 +2348,17 @@ func (s PutDestinationPolicyOutput) GoString() string {
|
| 1944 | 1944 |
|
| 1945 | 1945 |
type PutLogEventsInput struct {
|
| 1946 | 1946 |
// A list of log events belonging to a log stream. |
| 1947 |
- LogEvents []*InputLogEvent `locationName:"logEvents" type:"list" required:"true"` |
|
| 1947 |
+ LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` |
|
| 1948 | 1948 |
|
| 1949 | 1949 |
// The name of the log group to put log events to. |
| 1950 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 1950 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 1951 | 1951 |
|
| 1952 | 1952 |
// The name of the log stream to put log events to. |
| 1953 |
- LogStreamName *string `locationName:"logStreamName" type:"string" required:"true"` |
|
| 1953 |
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` |
|
| 1954 | 1954 |
|
| 1955 | 1955 |
// A string token that must be obtained from the response of the previous PutLogEvents |
| 1956 | 1956 |
// request. |
| 1957 |
- SequenceToken *string `locationName:"sequenceToken" type:"string"` |
|
| 1957 |
+ SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` |
|
| 1958 | 1958 |
|
| 1959 | 1959 |
metadataPutLogEventsInput `json:"-" xml:"-"` |
| 1960 | 1960 |
} |
| ... | ... |
@@ -1977,7 +2381,7 @@ type PutLogEventsOutput struct {
|
| 1977 | 1977 |
// A string token used for making PutLogEvents requests. A sequenceToken can |
| 1978 | 1978 |
// only be used once, and PutLogEvents requests must include the sequenceToken |
| 1979 | 1979 |
// obtained from the response of the previous request. |
| 1980 |
- NextSequenceToken *string `locationName:"nextSequenceToken" type:"string"` |
|
| 1980 |
+ NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` |
|
| 1981 | 1981 |
|
| 1982 | 1982 |
RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` |
| 1983 | 1983 |
|
| ... | ... |
@@ -2000,17 +2404,17 @@ func (s PutLogEventsOutput) GoString() string {
|
| 2000 | 2000 |
|
| 2001 | 2001 |
type PutMetricFilterInput struct {
|
| 2002 | 2002 |
// A name for the metric filter. |
| 2003 |
- FilterName *string `locationName:"filterName" type:"string" required:"true"` |
|
| 2003 |
+ FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` |
|
| 2004 | 2004 |
|
| 2005 | 2005 |
// A valid CloudWatch Logs filter pattern for extracting metric data out of |
| 2006 | 2006 |
// ingested log events. |
| 2007 | 2007 |
FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` |
| 2008 | 2008 |
|
| 2009 | 2009 |
// The name of the log group to associate the metric filter with. |
| 2010 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 2010 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 2011 | 2011 |
|
| 2012 | 2012 |
// A collection of information needed to define how metric data gets emitted. |
| 2013 |
- MetricTransformations []*MetricTransformation `locationName:"metricTransformations" type:"list" required:"true"` |
|
| 2013 |
+ MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` |
|
| 2014 | 2014 |
|
| 2015 | 2015 |
metadataPutMetricFilterInput `json:"-" xml:"-"` |
| 2016 | 2016 |
} |
| ... | ... |
@@ -2049,7 +2453,7 @@ func (s PutMetricFilterOutput) GoString() string {
|
| 2049 | 2049 |
|
| 2050 | 2050 |
type PutRetentionPolicyInput struct {
|
| 2051 | 2051 |
// The name of the log group to associate the retention policy with. |
| 2052 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 2052 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 2053 | 2053 |
|
| 2054 | 2054 |
// Specifies the number of days you want to retain log events in the specified |
| 2055 | 2055 |
// log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, |
| ... | ... |
@@ -2097,23 +2501,23 @@ type PutSubscriptionFilterInput struct {
|
| 2097 | 2097 |
// same account as the subscription filter, for same-account delivery. A logical |
| 2098 | 2098 |
// destination (used via an ARN of Destination) belonging to a different account, |
| 2099 | 2099 |
// for cross-account delivery. |
| 2100 |
- DestinationARN *string `locationName:"destinationArn" type:"string" required:"true"` |
|
| 2100 |
+ DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` |
|
| 2101 | 2101 |
|
| 2102 | 2102 |
// A name for the subscription filter. |
| 2103 |
- FilterName *string `locationName:"filterName" type:"string" required:"true"` |
|
| 2103 |
+ FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` |
|
| 2104 | 2104 |
|
| 2105 | 2105 |
// A valid CloudWatch Logs filter pattern for subscribing to a filtered stream |
| 2106 | 2106 |
// of log events. |
| 2107 | 2107 |
FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` |
| 2108 | 2108 |
|
| 2109 | 2109 |
// The name of the log group to associate the subscription filter with. |
| 2110 |
- LogGroupName *string `locationName:"logGroupName" type:"string" required:"true"` |
|
| 2110 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` |
|
| 2111 | 2111 |
|
| 2112 | 2112 |
// The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to |
| 2113 | 2113 |
// deliver ingested log events to the destination stream. You don't need to |
| 2114 | 2114 |
// provide the ARN when you are working with a logical destination (used via |
| 2115 | 2115 |
// an ARN of Destination) for cross-account delivery. |
| 2116 |
- RoleARN *string `locationName:"roleArn" type:"string"` |
|
| 2116 |
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"` |
|
| 2117 | 2117 |
|
| 2118 | 2118 |
metadataPutSubscriptionFilterInput `json:"-" xml:"-"` |
| 2119 | 2119 |
} |
| ... | ... |
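Beyond the `min:"1"` tags, this hunk renames the exported fields `DestinationARN` and `RoleARN` to `DestinationArn` and `RoleArn`, so code built against the previous generation of this package needs the same rename. A hedged sketch of the call with the new field names; the ARNs, filter name, and log group are placeholders:

```go
// Assumes the imports and svc client from the PutMetricFilter sketch above.
func putSubscriptionFilterExample(svc *cloudwatchlogs.CloudWatchLogs) error {
	_, err := svc.PutSubscriptionFilter(&cloudwatchlogs.PutSubscriptionFilterInput{
		// Renamed from DestinationARN in the previous generation of this package.
		DestinationArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
		FilterName:     aws.String("AllEvents"),
		FilterPattern:  aws.String(""), // an empty pattern subscribes to every event
		LogGroupName:   aws.String("/docker/example"),
		// Renamed from RoleARN; optional for logical destinations, per the comment above.
		RoleArn: aws.String("arn:aws:iam::123456789012:role/CWLToKinesisRole"),
	})
	return err
}
```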
@@ -2178,7 +2582,7 @@ func (s RejectedLogEventsInfo) GoString() string {
|
| 2178 | 2178 |
// request. |
| 2179 | 2179 |
type SearchedLogStream struct {
|
| 2180 | 2180 |
// The name of the log stream. |
| 2181 |
- LogStreamName *string `locationName:"logStreamName" type:"string"` |
|
| 2181 |
+ LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` |
|
| 2182 | 2182 |
|
| 2183 | 2183 |
// Indicates whether all the events in this log stream were searched or more |
| 2184 | 2184 |
// data exists to search by paginating further. |
| ... | ... |
@@ -2206,10 +2610,10 @@ type SubscriptionFilter struct {
|
| 2206 | 2206 |
// 00:00:00 UTC. |
| 2207 | 2207 |
CreationTime *int64 `locationName:"creationTime" type:"long"` |
| 2208 | 2208 |
|
| 2209 |
- DestinationARN *string `locationName:"destinationArn" type:"string"` |
|
| 2209 |
+ DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` |
|
| 2210 | 2210 |
|
| 2211 | 2211 |
// A name for a metric or subscription filter. |
| 2212 |
- FilterName *string `locationName:"filterName" type:"string"` |
|
| 2212 |
+ FilterName *string `locationName:"filterName" min:"1" type:"string"` |
|
| 2213 | 2213 |
|
| 2214 | 2214 |
// A symbolic description of how Amazon CloudWatch Logs should interpret the |
| 2215 | 2215 |
// data in each log event. For example, a log event may contain timestamps, |
| ... | ... |
@@ -2217,9 +2621,9 @@ type SubscriptionFilter struct {
|
| 2217 | 2217 |
// to look for in the log event message. |
| 2218 | 2218 |
FilterPattern *string `locationName:"filterPattern" type:"string"` |
| 2219 | 2219 |
|
| 2220 |
- LogGroupName *string `locationName:"logGroupName" type:"string"` |
|
| 2220 |
+ LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` |
|
| 2221 | 2221 |
|
| 2222 |
- RoleARN *string `locationName:"roleArn" type:"string"` |
|
| 2222 |
+ RoleArn *string `locationName:"roleArn" min:"1" type:"string"` |
|
| 2223 | 2223 |
|
| 2224 | 2224 |
metadataSubscriptionFilter `json:"-" xml:"-"` |
| 2225 | 2225 |
} |
| ... | ... |
@@ -2246,7 +2650,7 @@ type TestMetricFilterInput struct {
|
| 2246 | 2246 |
FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` |
| 2247 | 2247 |
|
| 2248 | 2248 |
// A list of log event messages to test. |
| 2249 |
- LogEventMessages []*string `locationName:"logEventMessages" type:"list" required:"true"` |
|
| 2249 |
+ LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` |
|
| 2250 | 2250 |
|
| 2251 | 2251 |
metadataTestMetricFilterInput `json:"-" xml:"-"` |
| 2252 | 2252 |
} |
| ... | ... |
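`LogEventMessages` now carries `min:"1"`, so at least one sample message must be supplied when testing a pattern. A small sketch, with a placeholder pattern and message, again reusing the earlier client:

```go
// Assumes the imports and svc client from the PutMetricFilter sketch above.
func testMetricFilterExample(svc *cloudwatchlogs.CloudWatchLogs) error {
	out, err := svc.TestMetricFilter(&cloudwatchlogs.TestMetricFilterInput{
		FilterPattern: aws.String("ERROR"),
		// The list must contain at least one message now that it carries min:"1".
		LogEventMessages: []*string{
			aws.String("2015-09-01T00:00:00Z ERROR something went wrong"),
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("%d sample message(s) matched the pattern\n", len(out.Matches))
	return nil
}
```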
@@ -2286,6 +2690,21 @@ func (s TestMetricFilterOutput) GoString() string {
|
| 2286 | 2286 |
} |
| 2287 | 2287 |
|
| 2288 | 2288 |
const ( |
| 2289 |
+ // @enum ExportTaskStatusCode |
|
| 2290 |
+ ExportTaskStatusCodeCancelled = "CANCELLED" |
|
| 2291 |
+ // @enum ExportTaskStatusCode |
|
| 2292 |
+ ExportTaskStatusCodeCompleted = "COMPLETED" |
|
| 2293 |
+ // @enum ExportTaskStatusCode |
|
| 2294 |
+ ExportTaskStatusCodeFailed = "FAILED" |
|
| 2295 |
+ // @enum ExportTaskStatusCode |
|
| 2296 |
+ ExportTaskStatusCodePending = "PENDING" |
|
| 2297 |
+ // @enum ExportTaskStatusCode |
|
| 2298 |
+ ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" |
|
| 2299 |
+ // @enum ExportTaskStatusCode |
|
| 2300 |
+ ExportTaskStatusCodeRunning = "RUNNING" |
|
| 2301 |
+) |
|
| 2302 |
+ |
|
| 2303 |
+const ( |
|
| 2289 | 2304 |
// @enum OrderBy |
| 2290 | 2305 |
OrderByLogStreamName = "LogStreamName" |
| 2291 | 2306 |
// @enum OrderBy |
| ... | ... |
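The `ExportTaskStatusCode` constants added in the hunk above give symbolic names for the export-task states. A small helper sketch that classifies a status code into terminal and non-terminal states; how the status is obtained (for example from a DescribeExportTasks response) is outside this hunk:

```go
// Assumes the cloudwatchlogs import from the earlier sketches.
// isExportTaskDone reports whether a status code represents a terminal state.
func isExportTaskDone(code string) bool {
	switch code {
	case cloudwatchlogs.ExportTaskStatusCodeCompleted,
		cloudwatchlogs.ExportTaskStatusCodeCancelled,
		cloudwatchlogs.ExportTaskStatusCodeFailed:
		return true
	case cloudwatchlogs.ExportTaskStatusCodePending,
		cloudwatchlogs.ExportTaskStatusCodePendingCancel,
		cloudwatchlogs.ExportTaskStatusCodeRunning:
		return false
	}
	return false
}
```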
@@ -4,6 +4,10 @@ package cloudwatchlogs |
| 4 | 4 |
|
| 5 | 5 |
import ( |
| 6 | 6 |
"github.com/aws/aws-sdk-go/aws" |
| 7 |
+ "github.com/aws/aws-sdk-go/aws/defaults" |
|
| 8 |
+ "github.com/aws/aws-sdk-go/aws/request" |
|
| 9 |
+ "github.com/aws/aws-sdk-go/aws/service" |
|
| 10 |
+ "github.com/aws/aws-sdk-go/aws/service/serviceinfo" |
|
| 7 | 11 |
"github.com/aws/aws-sdk-go/internal/protocol/jsonrpc" |
| 8 | 12 |
"github.com/aws/aws-sdk-go/internal/signer/v4" |
| 9 | 13 |
) |
| ... | ... |
@@ -41,23 +45,25 @@ import ( |
| 41 | 41 |
// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET |
| 42 | 42 |
// Developer Center (http://aws.amazon.com/net/) |
| 43 | 43 |
type CloudWatchLogs struct {
|
| 44 |
- *aws.Service |
|
| 44 |
+ *service.Service |
|
| 45 | 45 |
} |
| 46 | 46 |
|
| 47 | 47 |
// Used for custom service initialization logic |
| 48 |
-var initService func(*aws.Service) |
|
| 48 |
+var initService func(*service.Service) |
|
| 49 | 49 |
|
| 50 | 50 |
// Used for custom request initialization logic |
| 51 |
-var initRequest func(*aws.Request) |
|
| 51 |
+var initRequest func(*request.Request) |
|
| 52 | 52 |
|
| 53 | 53 |
// New returns a new CloudWatchLogs client. |
| 54 | 54 |
func New(config *aws.Config) *CloudWatchLogs {
|
| 55 |
- service := &aws.Service{
|
| 56 |
- Config: aws.DefaultConfig.Merge(config), |
|
| 57 |
- ServiceName: "logs", |
|
| 58 |
- APIVersion: "2014-03-28", |
|
| 59 |
- JSONVersion: "1.1", |
|
| 60 |
- TargetPrefix: "Logs_20140328", |
|
| 55 |
+ service := &service.Service{
|
| 56 |
+ ServiceInfo: serviceinfo.ServiceInfo{
|
| 57 |
+ Config: defaults.DefaultConfig.Merge(config), |
|
| 58 |
+ ServiceName: "logs", |
|
| 59 |
+ APIVersion: "2014-03-28", |
|
| 60 |
+ JSONVersion: "1.1", |
|
| 61 |
+ TargetPrefix: "Logs_20140328", |
|
| 62 |
+ }, |
|
| 61 | 63 |
} |
| 62 | 64 |
service.Initialize() |
| 63 | 65 |
|
| ... | ... |
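The constructor keeps its `New(config *aws.Config)` signature, so existing callers are unaffected by the move to `service.Service` and `serviceinfo.ServiceInfo`; a partial config passed in is still merged with `defaults.DefaultConfig`. A sketch of constructing a client with an explicit region, where the region value is only an example:

```go
// Assumes the aws and cloudwatchlogs imports from the earlier sketches.
func newClientExample() *cloudwatchlogs.CloudWatchLogs {
	// Fields not set here (credentials, endpoint, ...) are filled in by the
	// defaults.DefaultConfig.Merge call inside New, as shown in the hunk above.
	return cloudwatchlogs.New(&aws.Config{Region: aws.String("us-west-2")})
}
```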
@@ -78,8 +84,8 @@ func New(config *aws.Config) *CloudWatchLogs {
|
| 78 | 78 |
|
| 79 | 79 |
// newRequest creates a new request for a CloudWatchLogs operation and runs any |
| 80 | 80 |
// custom request initialization. |
| 81 |
-func (c *CloudWatchLogs) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
|
| 82 |
- req := aws.NewRequest(c.Service, op, params, data) |
|
| 81 |
+func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request {
|
| 82 |
+ req := c.NewRequest(op, params, data) |
|
| 83 | 83 |
|
| 84 | 84 |
// Run custom request initialization if present |
| 85 | 85 |
if initRequest != nil {
|