diff -Nru libamazon-s3-perl-0.58/ChangeLog libamazon-s3-perl-0.60/ChangeLog --- libamazon-s3-perl-0.58/ChangeLog 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/ChangeLog 2023-02-10 17:46:16.000000000 +0000 @@ -1,3 +1,42 @@ +Fri Feb 10 07:47:22 2023 Rob Lauer > + + [0.60 - logging]: + * VERSION: bump + * NEWS.md: updated + * bootstrap: support M.rr style versions + * src/main/perl/lib/Amazon/S3.pm.in + - removed all end of block indicators inserted by perltidy + (new) + - only consider 'debug' flag when internal logger used + * src/main/perl/lib/Amazon/S3.pm.in + (new): new + * configure.ac + - fix email addres + - remove -Wall to prevent warning during configure + * s3-perl.pl: new + * src/main/perl/test.localstack: new + * .gitignore: added some of the files created by `make cpan` + +Wed Jan 25 11:54:59 2023 Rob Lauer + + [0.59 - copy_object]: + * VERSION: bump + * README.md: generated + * src/main/perl/lib/Amazon/S3.pm.in + - minor pod changes + * src/main/perl/lib/Amazon/S3/Bucket.pm.in + (copy_object): new + * src/main/perl/Makefile.am: corrected comments re: make test + * cpan/Makefile.am: PROJECT_HOME + + [unit tests]: + * src/main/perl/t/01-api.t + - added unit test for copy_object() + * src/main/perl/t/04-list-buckets.t + - use AMAZON_S3_HOST from environment not S3_HOST + * README-TESTING.md + - corrected way make test invoked + Mon Dec 19 09:25:04 2022 Rob Lauer [0.58 - min perl required]: diff -Nru libamazon-s3-perl-0.58/debian/changelog libamazon-s3-perl-0.60/debian/changelog --- libamazon-s3-perl-0.58/debian/changelog 2023-01-14 23:55:31.000000000 +0000 +++ libamazon-s3-perl-0.60/debian/changelog 2023-02-10 20:18:04.000000000 +0000 @@ -1,3 +1,11 @@ +libamazon-s3-perl (0.60-1) unstable; urgency=medium + + * Team upload. + * Import upstream version 0.60. + * Refresh spellings.patch (offset). + + -- gregor herrmann Fri, 10 Feb 2023 21:18:04 +0100 + libamazon-s3-perl (0.58-1) unstable; urgency=medium * Team upload. 
diff -Nru libamazon-s3-perl-0.58/debian/patches/spellings.patch libamazon-s3-perl-0.60/debian/patches/spellings.patch --- libamazon-s3-perl-0.58/debian/patches/spellings.patch 2023-01-14 23:55:31.000000000 +0000 +++ libamazon-s3-perl-0.60/debian/patches/spellings.patch 2023-02-10 20:18:04.000000000 +0000 @@ -3,7 +3,7 @@ Forwarded: https://rt.cpan.org/Public/Bug/Display.html?id=119229 Author: Christopher Hoskin Reviewed-by: gregor herrmann -Last-Update: 2023-01-15 +Last-Update: 2023-02-10 --- a/lib/Amazon/S3.pm +++ b/lib/Amazon/S3.pm @@ -18,7 +18,7 @@ --- a/lib/Amazon/S3/Bucket.pm +++ b/lib/Amazon/S3/Bucket.pm -@@ -1103,9 +1103,9 @@ +@@ -1162,9 +1162,9 @@ =item acl_short (optional) diff -Nru libamazon-s3-perl-0.58/lib/Amazon/S3/Bucket.pm libamazon-s3-perl-0.60/lib/Amazon/S3/Bucket.pm --- libamazon-s3-perl-0.58/lib/Amazon/S3/Bucket.pm 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/lib/Amazon/S3/Bucket.pm 2023-02-10 17:46:16.000000000 +0000 @@ -19,7 +19,7 @@ use parent qw{Class::Accessor::Fast}; -our $VERSION = '0.58'; ## no critic +our $VERSION = '0.60'; ## no critic __PACKAGE__->mk_accessors( qw{bucket creation_date account buffer_size region logger verify_region }); @@ -689,7 +689,66 @@ return $self->get_key( $key, $method, \$filename ); } ## end sub get_key_filename -# returns bool +######################################################################## +# See: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +# +# Note that in this request the bucket object is the destination you +# specify the source bucket in the key (bucket-name/source-key) or the +# header x-amz-copy-source +######################################################################## +sub copy_object { +######################################################################## + my ( $self, %parameters ) = @_; + + my ( $source, $key, $bucket, $headers_in ) + = @parameters{qw(source key bucket headers)}; + + $headers_in //= {}; + + my %request_headers; + + if ( reftype($headers_in) eq 'ARRAY' ) { + %request_headers = @{$headers_in}; + } + elsif ( reftype($headers_in) eq 'HASH' ) { + %request_headers = %{$headers_in}; + } + else { + croak 'headers must be hash or array' + if !ref($headers_in) || reftype($headers_in) ne 'HASH'; + } + + croak 'source or x-amz-copy-source must be specified' + if !$source && !exists $request_headers{'x-amz-copy-source'}; + + croak 'no key' + if !$key; + + my $acct = $self->account; + + if ( !$request_headers{'x-amz-copy-source'} ) { + + $request_headers{'x-amz-copy-source'} = sprintf '%s/%s', + $bucket // $self->{bucket}, + $acct->_urlencode($source); + } + + $request_headers{'x-amz-tagging-directive'} //= 'COPY'; + + $key = $self->_uri($key); + + my $request = $acct->_make_request( 'PUT', $key, \%request_headers, ); + + my $response = $acct->_do_http($request); + + if ( $response->code !~ /\A2\d\d\z/xsm ) { + $acct->_remember_errors( $response->content, 1 ); + croak $response->status_line; + } + + return $acct->_xpc_of_content( $response->content ); +} ## end sub copy_key + ######################################################################## sub delete_key { ######################################################################## @@ -1128,6 +1187,69 @@ to be a filename on the local file system. The file will be streamed rather then loaded into memory in one big chunk. +=head2 copy_object %parameters + +Copies an object from one bucket to another bucket. I Returns a +hash reference to the response object (C). 
+ +Headers returned from the request can be obtained using the +C method. + + my $headers = { $bucket->last_response->headers->flatten }; + +Throws an exception if the response code is not 2xx. You can get an +extended error message using the C method. + + my $result = eval { return $s3->copy_object( key => 'foo.jpg', + source => 'boo.jpg' ); }; + + if ($@) { + die $s3->errstr; + } + +Examples: + + $bucket->copy_object( key => 'foo.jpg', source => 'boo.jpg' ); + + $bucket->copy_object( + key => 'foo.jpg', + source => 'boo.jpg', + bucket => 'my-source-bucket' + ); + + $bucket->copy_object( + key => 'foo.jpg', + headers => { 'x-amz-copy-source' => 'my-source-bucket/boo.jpg' + ); + +See L +for more details. + +C<%parameters> is a list of key/value pairs described below: + +=over + +=item key (required) + +Name of the destination key in the bucket represented by the bucket object. + +=item headers (optional) + +Hash or array reference of headers to send in the request. + +=item bucket (optional) + +Name of the source bucket. Default is the same bucket as the destination. + +=item source (optional) + +Name of the source key in the source bucket. If not provided, you must +provide the source in the `x-amz-copy-source` header. + +=back + =head2 head_key $key_name Returns a configuration HASH of the given key. If a key does @@ -1500,4 +1622,11 @@ Please see the L manpage for author, copyright, and license information. +=head1 CONTRIBUTORS + +Rob Lauer +Jojess Fournier +Tim Mullin +Todd Rinaldo + =cut diff -Nru libamazon-s3-perl-0.58/lib/Amazon/S3/Constants.pm libamazon-s3-perl-0.60/lib/Amazon/S3/Constants.pm --- libamazon-s3-perl-0.58/lib/Amazon/S3/Constants.pm 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/lib/Amazon/S3/Constants.pm 2023-02-10 17:46:16.000000000 +0000 @@ -7,7 +7,7 @@ use Readonly; -our $VERSION = '0.58'; +our $VERSION = '0.60'; # defaults Readonly our $AMAZON_HEADER_PREFIX => 'x-amz-'; diff -Nru libamazon-s3-perl-0.58/lib/Amazon/S3/Logger.pm libamazon-s3-perl-0.60/lib/Amazon/S3/Logger.pm --- libamazon-s3-perl-0.58/lib/Amazon/S3/Logger.pm 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/lib/Amazon/S3/Logger.pm 2023-02-10 17:46:16.000000000 +0000 @@ -10,7 +10,7 @@ use Readonly; use Scalar::Util qw{ reftype }; -our $VERSION = '0.58'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars) +our $VERSION = '0.60'; ## no critic (RequireInterpolationOfMetachars) Readonly::Hash our %LOG_LEVELS => ( trace => 5, @@ -22,7 +22,7 @@ ); { - no strict 'refs'; ## no critic (TestingAndDebugging::ProhibitNoStrict) + no strict 'refs'; ## no critic (ProhibitNoStrict) foreach my $level (qw{fatal error warn info debug trace}) { @@ -30,7 +30,17 @@ my ( $self, @message ) = @_; $self->_log_message( $level, @message ); }; - } ## end foreach my $level (qw{fatal error warn info debug trace}) + } +} + +######################################################################## +sub new { +######################################################################## + my ( $class, @args ) = @_; + + my $options = ref $args[0] ? 
$args[0] : {@args}; + + return bless $options, $class; } ######################################################################## @@ -40,10 +50,10 @@ if (@args) { $self->{log_level} = $args[0]; - } ## end if (@args) + } return $self->{log_level}; -} ## end sub level +} ######################################################################## sub _log_message { @@ -59,10 +69,10 @@ && ref $message[0] && reftype( $message[0] ) eq 'CODE' ) { $log_message = $message[0]->(); - } ## end if ( defined $message[...]) + } else { $log_message = join $EMPTY, @message; - } ## end else [ if ( defined $message[...])] + } chomp $log_message; @@ -72,6 +82,6 @@ return print {*STDERR} sprintf qq{%s: %s %s %s\n}, uc $level, $timestamp, $PROCESS_ID, $log_message; -} ## end sub _log_message +} 1; diff -Nru libamazon-s3-perl-0.58/lib/Amazon/S3.pm libamazon-s3-perl-0.60/lib/Amazon/S3.pm --- libamazon-s3-perl-0.58/lib/Amazon/S3.pm 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/lib/Amazon/S3.pm 2023-02-10 17:46:16.000000000 +0000 @@ -51,7 +51,7 @@ } ); -our $VERSION = '0.58'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars) +our $VERSION = '0.60'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars) ######################################################################## sub new { @@ -70,32 +70,26 @@ $options{_region} = delete $options{region}; $options{_signer} = delete $options{signer}; - # convenience for level => 'debug' & for consistency with Amazon::Credentials - if ( delete $options{debug} ) { - $options{level} = 'debug'; - } - - # save this for later - my $level = $options{level}; - $options{log_level} = delete $options{level}; + # convenience for level => 'debug' & for consistency with + # Amazon::Credentials only do this if we are using internal logger, + # call should NOT use debug flag but rather use their own logger's + # level to turn on higher levels of logging... + + if ( !$options{logger} ) { + if ( delete $options{debug} ) { + $options{level} = 'debug'; + } - my $self = $class->SUPER::new( \%options ); + $options{log_level} = delete $options{level}; + $options{log_level} //= $DEFAULT_LOG_LEVEL; - # setup logger - if ( blessed( $self->logger ) ) { + $options{logger} + = Amazon::S3::Logger->new( log_level => $options{log_level} ); + } - # get level from your logger, if you didn't pass one - if ( $self->get_logger->can('level') ) { - if ( !$level ) { - $level = $self->get_logger->level(); - } ## end if ( !$level ) - } ## end if ( $self->get_logger...) - } ## end if ( blessed( $self->logger...)) - else { + my $self = $class->SUPER::new( \%options ); - $self->logger( bless { log_level => $level // $DEFAULT_LOG_LEVEL }, - 'Amazon::S3::Logger' ); - } ## end else [ if ( blessed( $self->logger...))] + # setup logger internal logging $self->get_logger->debug( sub { @@ -104,7 +98,7 @@ if ( $safe_options{aws_secret_access_key} ) { $safe_options{aws_secret_access_key} = '****'; $safe_options{aws_access_key_id} = '****'; - } ## end if ( $safe_options{aws_secret_access_key...}) + } return Dumper( [ 'options: ', \%safe_options ] ); } @@ -122,7 +116,7 @@ $self->aws_access_key_id( _encrypt( $self->aws_access_key_id ) ); $self->aws_secret_access_key( _encrypt( $self->aws_secret_access_key ) ); $self->token( _encrypt( $self->token ) ); - } ## end if ( !$self->credentials) + } my $ua; @@ -133,13 +127,13 @@ ); $ua->timing( join $COMMA, map { 2**$_ } 0 .. 
5 ); - } ## end if ( $self->retry ) + } else { $ua = LWP::UserAgent->new( keep_alive => $KEEP_ALIVE_CACHESIZE, requests_redirectable => [qw(GET HEAD DELETE)], ); - } ## end else [ if ( $self->retry ) ] + } $ua->timeout( $self->timeout ); $ua->env_proxy; @@ -154,7 +148,7 @@ $self->turn_on_special_retry(); return $self; -} ## end sub new +} ######################################################################## { @@ -324,7 +318,7 @@ if (@args) { $self->_region( $args[0] ); - } ## end if (@args) + } $self->get_logger->debug( sub { return 'region: ' . ( $self->_region // $EMPTY ) } ); @@ -335,11 +329,11 @@ if ( $host =~ /\As3[.](.*)?amazonaws/xsm ) { $self->host( sprintf 's3.%s.amazonaws.com', $self->_region ); - } ## end if ( $host =~ /\As3[.](.*)?amazonaws/xsm) - } ## end if ( $self->_region ) + } + } return $self->_region; -} ## end sub region +} ######################################################################## sub buckets { @@ -377,7 +371,7 @@ if ( !ref $buckets || reftype($buckets) ne 'ARRAY' ) { $buckets = [$buckets]; - } ## end if ( !ref $buckets || ...) + } foreach my $node ( @{$buckets} ) { push @buckets, @@ -390,8 +384,8 @@ } ); - } ## end foreach my $node ( @{$buckets...}) - } ## end if ( ref $r->{Buckets}) + } + } $self->reset_signer_region($region); # restore original region @@ -402,7 +396,7 @@ }; return $bucket_list; -} ## end sub buckets +} ######################################################################## sub reset_signer_region { @@ -449,7 +443,7 @@ $self->_validate_acl_short( $conf->{acl_short} ); $header_ref{'x-amz-acl'} = $conf->{acl_short}; - } ## end if ( $conf->{acl_short...}) + } my $xml = <<'XML'; @@ -471,7 +465,7 @@ my $bucket_obj = $retval ? $self->bucket($bucket) : undef; return $bucket_obj; -} ## end sub add_bucket +} ######################################################################## sub bucket { @@ -501,7 +495,7 @@ verify_region => $verify_region, } ); -} ## end sub bucket +} ######################################################################## sub delete_bucket { @@ -514,11 +508,11 @@ if ( eval { return $conf->isa('Amazon::S3::Bucket'); } ) { $bucket = $conf->bucket; $region = $conf->region; - } ## end if ( eval { return $conf...}) + } else { $bucket = $conf->{bucket}; $region = $conf->{region} || $self->get_bucket_location($bucket); - } ## end else [ if ( eval { return $conf...})] + } croak 'must specify bucket' if !$bucket; @@ -530,7 +524,7 @@ region => $region, } ); -} ## end sub delete_bucket +} ######################################################################## sub list_bucket_v2 { @@ -540,7 +534,7 @@ $conf->{'list-type'} = '2'; goto &list_bucket; -} ## end sub list_bucket_v2 +} ######################################################################## sub list_bucket { @@ -572,7 +566,7 @@ $path .= $query_string; - } ## end if ( %{$conf} ) + } my $r = $self->_send_request( { method => 'GET', @@ -592,7 +586,7 @@ if ( $conf->{'list-type'} && $conf->{'list-type'} eq '2' ) { $marker = 'ContinuationToken'; $next_marker = 'NextContinuationToken'; - } ## end if ( $conf->{'list-type'...}) + } $bucket_list = { bucket => $r->{Name}, @@ -614,7 +608,7 @@ if ( defined $etag ) { $etag =~ s{(^"|"$)}{}gxsm; - } ## end if ( defined $etag ) + } push @keys, { @@ -626,7 +620,7 @@ owner_id => $node->{Owner}{ID}, owner_displayname => $node->{Owner}{DisplayName}, }; - } ## end foreach my $node ( @{ $r->{...}}) + } $bucket_list->{keys} = \@keys; if ( $conf->{delimiter} ) { @@ -636,7 +630,7 @@ foreach my $node ( $r->{CommonPrefixes} ) { if ( ref $node ne 
'ARRAY' ) { $node = [$node]; - } ## end if ( ref $node ne 'ARRAY') + } foreach my $n ( @{$node} ) { next if !exists $n->{Prefix}; @@ -645,16 +639,16 @@ # strip delimiter from end of prefix if ($prefix) { $prefix =~ s/$strip_delim//xsm; - } ## end if ($prefix) + } push @common_prefixes, $prefix; - } ## end foreach my $n ( @{$node} ) - } ## end foreach my $node ( $r->{CommonPrefixes...}) + } + } $bucket_list->{common_prefixes} = \@common_prefixes; - } ## end if ( $conf->{delimiter...}) + } return $bucket_list; -} ## end sub list_bucket +} ######################################################################## sub list_bucket_all_v2 { @@ -665,7 +659,7 @@ $conf->{'list-type'} = '2'; return $self->list_bucket_all($conf); -} ## end sub list_bucket_all_v2 +} ######################################################################## sub list_bucket_all { @@ -701,13 +695,13 @@ push @{ $all->{keys} }, @{ $response->{keys} }; last if !$response->{is_truncated}; - } ## end while ($TRUE) + } delete $all->{is_truncated}; delete $all->{next_marker}; return $all; -} ## end sub list_bucket_all +} ######################################################################## sub get_credentials { @@ -722,15 +716,15 @@ $aws_access_key_id = $self->credentials->get_aws_access_key_id; $aws_secret_access_key = $self->credentials->get_aws_secret_access_key; $token = $self->credentials->get_token; - } ## end if ( $self->credentials) + } else { $aws_access_key_id = $self->aws_access_key_id; $aws_secret_access_key = $self->aws_secret_access_key; $token = $self->token; - } ## end else [ if ( $self->credentials)] + } return ( $aws_access_key_id, $aws_secret_access_key, $token ); -} ## end sub get_credentials +} # Log::Log4perl compatibility routines ######################################################################## @@ -739,7 +733,7 @@ my ($self) = @_; return $self->logger; -} ## end sub get_logger +} ######################################################################## sub level { @@ -750,10 +744,10 @@ $self->log_level( $args[0] ); $self->get_logger->level( uc $args[0] ); - } ## end if (@args) + } return $self->get_logger->level; -} ## end sub level +} ######################################################################## sub signer { @@ -789,10 +783,10 @@ if ( !any { $policy_name eq $_ } qw(private public-read public-read-write authenticated-read) ) { croak "$policy_name is not a supported canned access policy"; - } ## end if ( !any { $policy_name...}) + } return; -} ## end sub _validate_acl_short +} # Determine if a bucket can used as subdomain for the host # Specifying the bucket in the URL path is being deprecated @@ -806,7 +800,7 @@ if ( length $bucketname > $MAX_BUCKET_NAME_LENGTH - 1 ) { return $FALSE; - } ## end if ( length $bucketname...) + } if ( length $bucketname < $MIN_BUCKET_NAME_LENGTH ) { return $FALSE; @@ -898,7 +892,7 @@ $self->get_logger->trace( sub { return Dumper( [$request] ); } ); return $request; -} ## end sub _make_request +} # $self->_send_request($HTTP::Request) # $self->_send_request(@params_to_make_request) @@ -930,10 +924,10 @@ } elsif ( $content && $response->content_type eq 'application/xml' ) { $content = $self->_xpc_of_content($content); - } ## end if ( $content && $response...) 
+ } return $content; -} ## end sub _send_request +} # # This is the necessary to find the region for a specific bucket @@ -1093,7 +1087,7 @@ $self->last_response($response); return $response; -} ## end sub _do_http +} ######################################################################## sub _send_request_expect_nothing { @@ -1114,7 +1108,7 @@ $self->_remember_errors( $response->content, $TRUE ); return $FALSE; -} ## end sub _send_request_expect_nothing +} # Send a HEAD request first, to find out if we'll be hit with a 307 redirect. # Since currently LWP does not have true support for 100 Continue, it simply @@ -1158,7 +1152,7 @@ if ( $response->code =~ /^3/xsm ) { if ( defined $response->header('Location') ) { $override_uri = $response->header('Location'); - } ## end if ( $response->code =~...) + } else { $self->_croak_if_response_error($response); } @@ -1177,7 +1171,7 @@ if ( defined $override_uri ) { $request->uri($override_uri); - } ## end if ( defined $override_uri) + } $response = $self->_do_http_no_redirect($request); @@ -1192,7 +1186,7 @@ $self->_remember_errors( $response->content, $TRUE ); return $FALSE; -} ## end sub _send_request_expect_nothing_probed +} ######################################################################## sub _croak_if_response_error { @@ -1206,10 +1200,10 @@ croak sprintf 'Amazon::S3: Amazon responded with %s ', $response->status_line; - } ## end if ( $response->code !~...) + } return; -} ## end sub _croak_if_response_error +} ######################################################################## sub _xpc_of_content { @@ -1232,7 +1226,7 @@ } return $xml_hr; -} ## end sub _xpc_of_content +} # returns 1 if errors were found ######################################################################## @@ -1249,7 +1243,7 @@ $self->errstr($src); return $TRUE; - } ## end if ( !ref $src && $src...) + } my $r = ref $src ? $src : $self->_xpc_of_content( $src, $keep_root ); @@ -1258,17 +1252,17 @@ # apparently buckets() does not keep_root if ( $r->{Error} ) { $r = $r->{Error}; - } ## end if ( $r->{Error} ) + } if ( $r->{Code} ) { $self->err( $r->{Code} ); $self->errstr( $r->{Message} ); return $TRUE; - } ## end if ( $r->{Code} ) + } return $FALSE; -} ## end sub _remember_errors +} # # Deprecated - this adds a header for the old V2 auth signatures @@ -1283,11 +1277,11 @@ if ( not $headers->header('Date') ) { $headers->header( Date => time2str(time) ); - } ## end if ( not $headers->header...) + } if ($token) { $headers->header( $AMAZON_HEADER_PREFIX . 'security-token', $token ); - } ## end if ($token) + } my $canonical_string = $self->_canonical_string( $method, $path, $headers ); $self->get_logger->trace( Dumper( [$headers] ) ); @@ -1300,7 +1294,7 @@ Authorization => "AWS $aws_access_key_id:$encoded_canonical" ); return; -} ## end sub _add_auth_header +} # generates an HTTP::Headers objects given one hash that represents http # headers to set and another hash that represents an object's metadata. @@ -1322,10 +1316,10 @@ foreach my $p ( pairs %{$metadata} ) { my ( $k, $v ) = @{$p}; $http_header->header( "$METADATA_PREFIX$k" => $v ); - } ## end while ( my ( $k, $v ) = each...) + } return $http_header; -} ## end sub _merge_meta +} # generate a canonical string for the given parameters. expires is optional and is # only used by query string authentication. @@ -1348,8 +1342,8 @@ or $lk eq 'date' or $lk =~ /^$AMAZON_HEADER_PREFIX/xsm ) { $interesting_headers{$lk} = $self->_trim($value); - } ## end if ( $lk eq 'content-md5'...) 
- } ## end while ( my ( $key, $value...)) + } + } # these keys get empty strings if they don't exist $interesting_headers{'content-type'} ||= $EMPTY; @@ -1358,24 +1352,24 @@ # just in case someone used this. it's not necessary in this lib. if ( $interesting_headers{'x-amz-date'} ) { $interesting_headers{'date'} = $EMPTY; - } ## end if ( $interesting_headers...) + } # if you're using expires for query string auth, then it trumps date # (and x-amz-date) if ($expires) { $interesting_headers{'date'} = $expires; - } ## end if ($expires) + } my $buf = "$method\n"; foreach my $key ( sort keys %interesting_headers ) { if ( $key =~ /^$AMAZON_HEADER_PREFIX/xsm ) { $buf .= "$key:$interesting_headers{$key}\n"; - } ## end if ( $key =~ /^$AMAZON_HEADER_PREFIX/xsm) + } else { $buf .= "$interesting_headers{$key}\n"; - } ## end else [ if ( $key =~ /^$AMAZON_HEADER_PREFIX/xsm)] - } ## end foreach my $key ( sort keys...) + } + } # don't include anything after the first ? in the resource... # $path =~ /^([^?]*)/xsm; @@ -1387,7 +1381,7 @@ if ( $path =~ /[&?](acl|torrent|location|uploads|delete)($|=|&)/xsm ) { # if ( $path =~ /[&?](acl|torrent|location|uploads|delete)([=&])?/xsm ) { $buf .= "?$1"; - } ## end if ( $path =~ ...) + } elsif ( my %query_params = URI->new($path)->query_form ) { # see if the remaining parsed query string provides us with any # query string or upload id @@ -1399,14 +1393,14 @@ $buf .= sprintf '?partNumber=%s&uploadId=%s', $query_params{partNumber}, $query_params{uploadId}; - } ## end if ( $query_params{partNumber...}) + } elsif ( $query_params{uploadId} ) { $buf .= sprintf '?uploadId=%s', $query_params{uploadId}; - } ## end elsif ( $query_params{uploadId...}) - } ## end elsif ( my %query_params ...) + } + } return $buf; -} ## end sub _canonical_string +} ######################################################################## sub _trim { @@ -1417,7 +1411,7 @@ $value =~ s/\s+$//xsm; return $value; -} ## end sub _trim +} # finds the hmac-sha1 hash of the canonical string and the aws secret access key and then # base64 encodes the result (optionally urlencoding after that). @@ -1432,7 +1426,7 @@ my $b64 = encode_base64( $hmac->digest, $EMPTY ); return $urlencode ? $self->_urlencode($b64) : return $b64; -} ## end sub _encode +} ######################################################################## sub _urlencode { @@ -1440,7 +1434,7 @@ my ( $self, $unencoded ) = @_; return uri_escape_utf8( $unencoded, '^A-Za-z0-9\-\._~\x2f' ); -} ## end sub _urlencode +} 1; @@ -1495,7 +1489,13 @@ 'x-amz-meta-colour' => 'orange', } ); - + + # copy an object + $bucket->copy_object( + source => $source, + key => $new_keyname + ); + # list keys in the bucket $response = $bucket->list or die $s3->err . ": " . $s3->errstr; @@ -1865,7 +1865,7 @@ =back -Returns a HASHREF containging the metadata for all of the buckets +Returns a HASHREF containing the metadata for all of the buckets owned by the accout or (see below) or C on error. @@ -2190,6 +2190,8 @@ Because of this, the application's test suite skips anything approaching a real test unless you set these environment variables: +For more on testing this module see L + =over =item AMAZON_S3_EXPENSIVE_TESTS @@ -2310,6 +2312,10 @@ For other issues, contact the author. 
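The signing helpers touched above (`_canonical_string`, `_encode`, `_urlencode`) implement the legacy V2 request signature: an HMAC-SHA1 digest of a canonical string, base64-encoded and sent in the `Authorization` header. The sketch below is an illustration of that calculation only; it assumes `Digest::HMAC_SHA1` as the HMAC backend, uses a simplified canonical-string layout, and the bucket, key and credential variables are placeholders rather than names taken from the module.

```
use strict;
use warnings;

use Digest::HMAC_SHA1;
use MIME::Base64 qw(encode_base64);
use HTTP::Date   qw(time2str);

my ( $bucket, $key )      = ( 'my-bucket', 'foo.jpg' );    # placeholders
my $aws_access_key_id     = $ENV{AWS_ACCESS_KEY_ID};
my $aws_secret_access_key = $ENV{AWS_ACCESS_KEY_SECRET};
my $date                  = time2str(time);

# simplified V2 canonical string: verb, content-md5, content-type, date, resource
my $canonical = join "\n", 'GET', '', '', $date, "/$bucket/$key";

# HMAC-SHA1 over the canonical string, keyed with the AWS secret key
my $hmac = Digest::HMAC_SHA1->new($aws_secret_access_key);
$hmac->add($canonical);

# base64-encode the digest without a trailing newline (what _encode does with $EMPTY)
my $signature = encode_base64( $hmac->digest, q{} );

# the header sent is then: Authorization: AWS $aws_access_key_id:$signature
print "AWS $aws_access_key_id:$signature\n";
```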
+=head1 REPOSITORY + +L + =head1 AUTHOR Original author: Timothy Appnel diff -Nru libamazon-s3-perl-0.58/Makefile.PL libamazon-s3-perl-0.60/Makefile.PL --- libamazon-s3-perl-0.58/Makefile.PL 2022-12-19 14:28:29.000000000 +0000 +++ libamazon-s3-perl-0.60/Makefile.PL 2023-02-10 17:46:17.000000000 +0000 @@ -1,4 +1,4 @@ -# autogenerated by /usr/local/libexec/make-cpan-dist.pl on Mon Dec 19 09:28:28 2022 +# autogenerated by /usr/local/libexec/make-cpan-dist.pl on Fri Feb 10 12:46:17 2023 use strict; use warnings; @@ -56,19 +56,19 @@ 'provides' => { 'Amazon::S3' => { 'file' => 'lib/Amazon/S3.pm', - 'version' => '0.58' + 'version' => '0.60' }, 'Amazon::S3::Bucket' => { 'file' => 'lib/Amazon/S3/Bucket.pm', - 'version' => '0.58' + 'version' => '0.60' }, 'Amazon::S3::Constants' => { 'file' => 'lib/Amazon/S3/Constants.pm', - 'version' => '0.58' + 'version' => '0.60' }, 'Amazon::S3::Logger' => { 'file' => 'lib/Amazon/S3/Logger.pm', - 'version' => '0.58' + 'version' => '0.60' }, 'Amazon::S3::Signature::V4' => { 'file' => 'lib/Amazon/S3/Signature/V4.pm', diff -Nru libamazon-s3-perl-0.58/META.json libamazon-s3-perl-0.60/META.json --- libamazon-s3-perl-0.58/META.json 2022-12-19 14:28:29.000000000 +0000 +++ libamazon-s3-perl-0.60/META.json 2023-02-10 17:46:17.000000000 +0000 @@ -4,7 +4,7 @@ "Rob Lauer " ], "dynamic_config" : 1, - "generated_by" : "ExtUtils::MakeMaker version 7.64, CPAN::Meta::Converter version 2.150010", + "generated_by" : "ExtUtils::MakeMaker version 7.44, CPAN::Meta::Converter version 2.150010", "license" : [ "perl_5" ], @@ -65,19 +65,19 @@ "provides" : { "Amazon::S3" : { "file" : "lib/Amazon/S3.pm", - "version" : "0.58" + "version" : "0.60" }, "Amazon::S3::Bucket" : { "file" : "lib/Amazon/S3/Bucket.pm", - "version" : "0.58" + "version" : "0.60" }, "Amazon::S3::Constants" : { "file" : "lib/Amazon/S3/Constants.pm", - "version" : "0.58" + "version" : "0.60" }, "Amazon::S3::Logger" : { "file" : "lib/Amazon/S3/Logger.pm", - "version" : "0.58" + "version" : "0.60" }, "Amazon::S3::Signature::V4" : { "file" : "lib/Amazon/S3/Signature/V4.pm", @@ -97,6 +97,6 @@ "web" : "http://github.com/rlauer6/perl-amazon-s3" } }, - "version" : "0.58", - "x_serialization_backend" : "JSON::PP version 4.07" + "version" : "0.60", + "x_serialization_backend" : "JSON::PP version 4.10" } diff -Nru libamazon-s3-perl-0.58/META.yml libamazon-s3-perl-0.60/META.yml --- libamazon-s3-perl-0.58/META.yml 2022-12-19 14:28:29.000000000 +0000 +++ libamazon-s3-perl-0.60/META.yml 2023-02-10 17:46:17.000000000 +0000 @@ -12,7 +12,7 @@ ExtUtils::MakeMaker: '6.64' File::ShareDir::Install: '0' dynamic_config: 1 -generated_by: 'ExtUtils::MakeMaker version 7.64, CPAN::Meta::Converter version 2.150010' +generated_by: 'ExtUtils::MakeMaker version 7.44, CPAN::Meta::Converter version 2.150010' license: perl meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html @@ -25,16 +25,16 @@ provides: Amazon::S3: file: lib/Amazon/S3.pm - version: '0.58' + version: '0.60' Amazon::S3::Bucket: file: lib/Amazon/S3/Bucket.pm - version: '0.58' + version: '0.60' Amazon::S3::Constants: file: lib/Amazon/S3/Constants.pm - version: '0.58' + version: '0.60' Amazon::S3::Logger: file: lib/Amazon/S3/Logger.pm - version: '0.58' + version: '0.60' Amazon::S3::Signature::V4: file: lib/Amazon/S3/Signature/V4.pm version: '0' @@ -61,5 +61,5 @@ bugtracker: http://github.com/rlauer6/perl-amazon-s3/issues homepage: http://github.com/rlauer6/perl-amazon-s3 repository: git://github.com/rlauer6/perl-amazon-s3.git -version: '0.58' +version: '0.60' 
x_serialization_backend: 'CPAN::Meta::YAML version 0.018' diff -Nru libamazon-s3-perl-0.58/README.md libamazon-s3-perl-0.60/README.md --- libamazon-s3-perl-0.58/README.md 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/README.md 2023-02-10 17:46:16.000000000 +0000 @@ -41,7 +41,13 @@ 'x-amz-meta-colour' => 'orange', } ); - + + # copy an object + $bucket->copy_object( + source => $source, + key => $new_keyname + ); + # list keys in the bucket $response = $bucket->list or die $s3->err . ": " . $s3->errstr; @@ -387,7 +393,7 @@ default: false -Returns a HASHREF containging the metadata for all of the buckets +Returns a HASHREF containing the metadata for all of the buckets owned by the accout or (see below) or `undef` on error. @@ -687,6 +693,8 @@ Because of this, the application's test suite skips anything approaching a real test unless you set these environment variables: +For more on testing this module see [README-TESTING.md](https://github.com/rlauer6/perl-amazon-s3/blob/master/README-TESTING.md) + - AMAZON\_S3\_EXPENSIVE\_TESTS Doesn't matter what you set it to. Just has to be set @@ -795,6 +803,10 @@ For other issues, contact the author. +# REPOSITORY + +[https://github.com/rlauer6/perl-amazon-s3](https://github.com/rlauer6/perl-amazon-s3) + # AUTHOR Original author: Timothy Appnel diff -Nru libamazon-s3-perl-0.58/README-TESTING.md libamazon-s3-perl-0.60/README-TESTING.md --- libamazon-s3-perl-0.58/README-TESTING.md 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/README-TESTING.md 2023-02-10 17:46:16.000000000 +0000 @@ -20,7 +20,7 @@ | `AWS_ACCESS_KEY_ID` | Your AWS access key | | `AWS_ACCESS_KEY_SECRET` | Your AWS sekkr1t passkey. Be forewarned that setting this environment variable on a shared system might leak that information to another user. Be careful. | | `AWS_SESSION_TOKEN` | Optional session token. | -| `S3_HOST` | Defaults to s3.amazonaws.com. Set this for example if you want to test the module against an API compatible service like minio. | +| `AMAZONS3_HOST` | Defaults to s3.amazonaws.com. Set this for example if you want to test the module against an API compatible service like minio. | | `AMAZON_S3_SKIP_ACL_TESTS` | Doesn't matter what you set it to. Just has to be set if you want to skip ACLs tests. | | `AMAZON_S3_SKIP_REGION_CONSTRAINT_TEST` | Doesn't matter what you set it to. Just has to be set if you want to skip region constraint test. | | `AMAZON_S3_MINIO` | Doesn't matter what you set it to. Just has to be set if you want to skip tests that would fail on minio. | @@ -72,7 +72,7 @@ # Credentials for Testing You should set the environment variables `AWS_ACCESS_KEY_ID` and -AWS_ACCESS_SECRET_KEY` to your AWS credential values that have the +`AWS_ACCESS_SECRET_KEY` to your AWS credential values that have the ability to create and write to buckets. If you set environment variable `AMAZON_S3_CREDENTIAL` to any value, @@ -95,11 +95,23 @@ through the tests, try setting one or more of the environment variables above which will selectively skip some test. +If you are using a mocking service, you might find it useful to set +the environment variable AWS_EC2_METADATA_DISABLED to a true value. + +``` +export AWS_EC2_METADATA_DISABLED=true +``` + +This will prevent the AWS CLI from looking for metadata when you are +not actually running on an EC2 instance or container. Without this +variable set, the CLI attempts to access the metadata service at +http://169.254.169.254/latest/meta-data/ until it eventually times out. 
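When the tests are pointed at a mock endpoint, the client also needs to skip TLS and virtual-host (DNS) bucket names. The sketch below builds such a client using only constructor options the test suite itself passes (`host`, `secure`, `dns_bucket_names`); the endpoint and the `test` credentials are the LocalStack defaults from the table further down, not values the module requires.

```
use strict;
use warnings;

use Amazon::S3;

my $s3 = Amazon::S3->new(
  { aws_access_key_id     => 'test',            # LocalStack accepts any value
    aws_secret_access_key => 'test',
    host                  => 'localhost:4566',  # mock endpoint, no scheme
    secure                => 0,                 # plain http against the mock
    dns_bucket_names      => 0,                 # force path-style requests
  }
);

# quick smoke test against the mock
my $buckets = $s3->buckets;
print 'owner: ', $buckets->{owner_displayname} // '', "\n";
```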
+ ## Testing with LocalStack LocalStack seems to be the easiest to work with and supports a number of AWS APIs besides S3. It does not implement the full suite of APIs -however. In particular, LocalStack does not envorce ACLs. Accordingly, +however. In particular, LocalStack does not enforce ACLs. Accordingly, those tests are skipped if the environment variable AMAZON_S3_LOCALSTACK is set to any value. @@ -141,7 +153,7 @@ Environment Variable | Value | Description -------------------- | ----- | ----------- AMAZON_EXPENSIVE_TESTS | 1 | enables testing of S3 API -S3_HOST | localhost:4566 +AMAZONS3_HOST | localhost:4566 AMAZON_S3_LOCALSTACK | any | skips some tests that will fail on LocalStack AWS_ACCESS_KEY_ID | test | AWS access key for LocalStack AWS_ACCESS_KEY_SECRET | test | AWS secret access key for LocalStack @@ -157,12 +169,11 @@ To run tests using LocalStack... ``` -make test \ - AMAZON_S3_EXPENSIVE_TESTS=1 - S3_HOST=s3.localhost.localstack.cloud:4566 \ - AMAZON_S3_LOCALSTACK=1 \ - AWS_ACCESS_KEY_ID=test \ - AWS_ACCESS_SECRET_KEY=test \ - AMAZON_S3_DOMAIN_BUCKET_NAMES=1 + AMAZON_S3_EXPENSIVE_TESTS=1 \ + AMAZON_S3_HOST=s3.localhost.localstack.cloud:4566 \ + AMAZON_S3_LOCALSTACK=1 \ + AWS_ACCESS_KEY_ID=test \ + AWS_ACCESS_SECRET_KEY=test \ + AMAZON_S3_DOMAIN_BUCKET_NAMES=1 make test ``` diff -Nru libamazon-s3-perl-0.58/t/01-api.t libamazon-s3-perl-0.60/t/01-api.t --- libamazon-s3-perl-0.58/t/01-api.t 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/t/01-api.t 2023-02-10 17:46:16.000000000 +0000 @@ -1,4 +1,4 @@ -#!/usr/bin/perl -w +#!/usr/bin/env perl -w ## no critic @@ -19,7 +19,7 @@ if ( $ENV{AMAZON_S3_REGIONS} ) { push @REGIONS, split /\s*,\s*/xsm, $ENV{AMAZON_S3_REGIONS}; -} ## end if ( $ENV{AMAZON_S3_REGIONS...}) +} my $host; @@ -38,17 +38,17 @@ $skip_owner_id = 1; $skip_permissions = 1; $skip_acls = 1; -} ## end if ( exists $ENV{AMAZON_S3_LOCALSTACK...}) +} else { $host = $ENV{AMAZON_S3_HOST}; -} ## end else [ if ( exists $ENV{AMAZON_S3_LOCALSTACK...})] +} my $secure = $host ? 0 : 1; -# do not use DNS bucket names for testing if a mocking service is used -# override this by setting AMAZON_S3_DNS_BUCKET_NAMES to any value -# your tests may fail unless you have DNS entry for the bucket name -# e.g 127.0.0.1 net-amazon-s3-test-test.localhost +# - do not use DNS bucket names for testing if a mocking service is used +# - override this by setting AMAZON_S3_DNS_BUCKET_NAMES to any value +# - your tests may fail unless you have DNS entry for the bucket name +# e.g 127.0.0.1 net-amazon-s3-test-test.localhost my $dns_bucket_names = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1; @@ -65,10 +65,10 @@ if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) { plan skip_all => 'Testing this module for real costs money.'; -} ## end if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'...}) +} else { - plan tests => 74 * scalar(@REGIONS) + 2; -} ## end else [ if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'...})] + plan tests => 76 * scalar(@REGIONS) + 2; +} ######################################################################## # BEGIN TESTS @@ -92,7 +92,7 @@ ); ( $aws_access_key_id, $aws_secret_access_key, $token ) = $s3->get_credentials; -} ## end if ( $ENV{AMAZON_S3_CREDENTIALS...}) +} else { $s3 = Amazon::S3->new( { aws_access_key_id => $aws_access_key_id, @@ -104,14 +104,14 @@ level => $ENV{DEBUG} ? 
'trace' : 'error', } ); -} ## end else [ if ( $ENV{AMAZON_S3_CREDENTIALS...})] +} # list all buckets that i own my $response = eval { return $s3->buckets; }; if ( $EVAL_ERROR || !$response ) { BAIL_OUT($EVAL_ERROR); -} ## end if ( $EVAL_ERROR || !$response) +} $OWNER_ID = $response->{owner_id}; $OWNER_DISPLAYNAME = $response->{owner_displayname}; @@ -154,17 +154,17 @@ if ( $EVAL_ERROR || !$bucket_obj ) { diag( Dumper( [ $EVAL_ERROR, $s3->err, $s3->errstr, $s3->error ] ) ); - } ## end if ( $EVAL_ERROR || !$bucket_obj) + } last if $bucket_obj; # 409 indicates bucket name not yet available... if ( $s3->last_response->code ne '409' ) { BAIL_OUT("could not create $bucketname"); - } ## end if ( $s3->last_response...) + } $bucket_suffix = '-2'; - } ## end while (1) + } is( ref $bucket_obj, 'Amazon::S3::Bucket', @@ -174,23 +174,23 @@ SKIP: { if ($no_region_constraint) { skip "No region constraints", 1; - } ## end if ($no_region_constraint) + } is( $bucket_obj->get_location_constraint, $location ); - } ## end SKIP: + } SKIP: { if ( $skip_acls || !$bucket_obj ) { skip "ACLs only for Amazon S3", 3; - } ## end if ( $skip_acls || !$bucket_obj) + } like_acl_allusers_read($bucket_obj); ok( $bucket_obj->set_acl( { acl_short => 'private' } ) ); unlike_acl_allusers_read($bucket_obj); - } ## end SKIP: + } # another way to get a bucket object (does no network I/O, # assumes it already exists). Read Amazon::S3::Bucket. @@ -222,7 +222,7 @@ or diag( Dumper( [$response] ) ); is( undef, $bucket_obj->get_key("non-existing-key") ); - } ## end SKIP: + } my $keyname = 'testing.txt'; @@ -247,7 +247,7 @@ SKIP: { if ($skip_acls) { skip "ACLs only for Amazon S3", 3; - } ## end if ($skip_acls) + } is_request_response_code( $url, 200, "can access the publicly readable key" ); @@ -256,20 +256,20 @@ ok( $bucket_obj->set_acl( { key => $keyname, acl_short => 'private' } ) ); - } ## end SKIP: + } SKIP: { if ($skip_acls) { skip 'ACLs only for Amazon S3', 1; - } ## end if ($skip_acls) + } is_request_response_code( $url, 403, "cannot access the private key" ); - } ## end SKIP: + } SKIP: { if ($skip_acls) { skip 'ACLs only for Amazon S3', 5; - } ## end if ($skip_acls) + } unlike_acl_allusers_read( $bucket_obj, $keyname ); @@ -293,18 +293,18 @@ } ) ); - } ## end SKIP: + } SKIP: { if ( $skip_acls || $ENV{LOCALSTACK} ) { skip 'LocalStack does not enforce ACLs', 2; - } ## end if ( $skip_acls || $ENV...) + } is_request_response_code( $url, 403, 'cannot access the private key after acl_xml set' ); unlike_acl_allusers_read( $bucket_obj, $keyname ); - } ## end SKIP: + } } { @@ -334,7 +334,7 @@ if $skip_permissions || $skip_acls; is_request_response_code( $url, 403, "cannot access the private key" ); - } ## end SKIP: + } SKIP: { skip 'ACLs only for Amazon S3', 4 if $skip_acls; @@ -354,7 +354,7 @@ like_acl_allusers_read( $bucket_obj, $keyname2 ); - } ## end SKIP: + } $bucket_obj->delete_key($keyname2); } @@ -364,14 +364,14 @@ if ( $v eq '2' ) { $response = $bucket_obj->list_v2( { 'fetch-owner' => 'true' } ); - } ## end if ( $v eq '2' ) + } else { $response = $bucket_obj->list; - } ## end else [ if ( $v eq '2' ) ] + } if ( !$response ) { BAIL_OUT( $s3->err . ": " . 
$s3->errstr ); - } ## end if ( !$response ) + } is( $response->{bucket}, $bucketname_raw, "list($v) - bucketname " ); @@ -400,11 +400,11 @@ skip 'LocalStack has different owner for bucket', 1 if $skip_owner_id; is( $key->{owner_id}, $OWNER_ID, "list($v) - owner id " ) or diag( Dumper [$key] ); - } ## end SKIP: + } is( $key->{owner_displayname}, $OWNER_DISPLAYNAME, "list($v) - owner display name" ); - } ## end foreach my $v ( 1 .. 2 ) + } # You can't delete a bucket with things in it ok( !$bucket_obj->delete_bucket(), 'delete bucket' ); @@ -470,7 +470,22 @@ is( $response->{content_length}, $lorem_ipsum_size, 'get_key_filename - content_length' ); + # before we delete this key... + + my $copy_result = $bucket_obj->copy_object( + key => "$keyname.bak", + source => "$keyname", + ); + + isa_ok( $copy_result, 'HASH', 'copy_object returns a hash reference' ); + $bucket_obj->delete_key($keyname); + $response = $bucket_obj->list; + + ok( ( grep {"$keyname.bak"} @{ $response->{keys} } ), 'found the copy' ); + + $bucket_obj->delete_key($keyname); + $bucket_obj->delete_key("$keyname.bak"); # try empty files $keyname .= '3'; @@ -516,7 +531,7 @@ 'delete key from bucket - empty list of keys' ); ok( $bucket_obj->delete_bucket(), 'delete bucket' ); -} ## end for my $location (@REGIONS) +} # see more docs in Amazon::S3::Bucket @@ -530,7 +545,7 @@ is( $response->code, $code, $message ) or diag( Dumper($response) ); -} ## end sub is_request_response_code +} sub like_acl_allusers_read { my ( $bucketobj, $keyname ) = @_; @@ -542,20 +557,20 @@ like( $acl, qr(AllUsers.+READ), $message ) or diag( Dumper [$acl] ); -} ## end sub like_acl_allusers_read +} sub unlike_acl_allusers_read { my ( $bucketobj, $keyname ) = @_; my $message = acl_allusers_read_message( 'unlike', @_ ); unlike( $bucketobj->get_acl($keyname), qr(AllUsers.+READ), $message ); -} ## end sub unlike_acl_allusers_read +} sub acl_allusers_read_message { my ( $like_or_unlike, $bucketobj, $keyname ) = @_; my $message = $like_or_unlike . "_acl_allusers_read: " . $bucketobj->bucket; $message .= " - $keyname" if $keyname; return $message; -} ## end sub acl_allusers_read_message +} sub acl_xml_from_acl_short { my $acl_short = shift || 'private'; @@ -571,7 +586,7 @@ READ ~; - } ## end if ( $acl_short eq 'public-read') + } return qq~ @@ -591,5 +606,5 @@ $public_read ~; -} ## end sub acl_xml_from_acl_short +} diff -Nru libamazon-s3-perl-0.58/t/04-list-buckets.t libamazon-s3-perl-0.60/t/04-list-buckets.t --- libamazon-s3-perl-0.58/t/04-list-buckets.t 2022-12-19 14:28:28.000000000 +0000 +++ libamazon-s3-perl-0.60/t/04-list-buckets.t 2023-02-10 17:46:16.000000000 +0000 @@ -16,7 +16,7 @@ my $aws_secret_access_key = $ENV{'AWS_ACCESS_KEY_SECRET'} // 'foo'; my $token = $ENV{'AWS_SESSION_TOKEN'}; -my $host = $ENV{S3_HOST}; +my $host = $ENV{AMAZON_S3_HOST}; if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) { plan skip_all => 'Testing this module for real costs money.';
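The headline addition across these two upstream releases is `copy_object`, exercised by the new unit test above. A minimal end-to-end sketch assembled from the POD and that test follows; the bucket and key names are placeholders, and the credentials come from the same environment variables the test suite reads.

```
use strict;
use warnings;

use Amazon::S3;

my $s3 = Amazon::S3->new(
  { aws_access_key_id     => $ENV{AWS_ACCESS_KEY_ID},
    aws_secret_access_key => $ENV{AWS_ACCESS_KEY_SECRET},
  }
);

# bucket object for the destination; the source bucket defaults to the same one
my $bucket = $s3->bucket('my-bucket');

# server-side copy; copy_object() croaks on a non-2xx response
my $result = eval {
  return $bucket->copy_object( key => 'foo.jpg.bak', source => 'foo.jpg' );
};

if ($@) {
  die $s3->errstr;   # extended error text remembered from the failed response
}

# on success, a hash reference parsed from the CopyObject XML response
print ref $result, "\n";
```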