
There are more AWS SDK examples available in the AWS Doc SDK Examples GitHub repo.

Learn the basics of Amazon S3 with an AWS SDK

The following code examples show how to do the following; a minimal AWS CLI sketch of the same five steps appears after the list:

  • Create a bucket and upload a file to it.

  • Download an object from a bucket.

  • Copy an object to a subfolder in a bucket.

  • List the objects in a bucket.

  • Delete the bucket objects and the bucket.
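For orientation, here is a minimal sketch of those five steps using the AWS CLI (aws s3api). The bucket name, keys, file paths, and Region below are placeholder values, not values taken from the examples that follow.

# 1. Create a bucket and upload a file to it.
aws s3api create-bucket --bucket amzn-s3-demo-bucket --region us-west-2 \
    --create-bucket-configuration LocationConstraint=us-west-2
aws s3api put-object --bucket amzn-s3-demo-bucket --key sample.txt --body ./sample.txt

# 2. Download an object from the bucket.
aws s3api get-object --bucket amzn-s3-demo-bucket --key sample.txt ./sample-download.txt

# 3. Copy the object to a subfolder in the bucket.
aws s3api copy-object --bucket amzn-s3-demo-bucket \
    --copy-source amzn-s3-demo-bucket/sample.txt --key demo/sample.txt

# 4. List the objects in the bucket.
aws s3api list-objects-v2 --bucket amzn-s3-demo-bucket --query 'Contents[].Key'

# 5. Delete the objects and then the bucket.
aws s3api delete-object --bucket amzn-s3-demo-bucket --key sample.txt
aws s3api delete-object --bucket amzn-s3-demo-bucket --key demo/sample.txt
aws s3api delete-bucket --bucket amzn-s3-demo-bucket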

.NET
AWS SDK for .NET
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

public class S3_Basics
{
    public static async Task Main()
    {
        // Create an Amazon S3 client object. The constructor uses the
        // default user installed on the system. To work with Amazon S3
        // features in a different AWS Region, pass the AWS Region as a
        // parameter to the client constructor.
        IAmazonS3 client = new AmazonS3Client();
        string bucketName = string.Empty;
        string filePath = string.Empty;
        string keyName = string.Empty;

        var sepBar = new string('-', Console.WindowWidth);

        Console.WriteLine(sepBar);
        Console.WriteLine("Amazon Simple Storage Service (Amazon S3) basic");
        Console.WriteLine("procedures. This application will:");
        Console.WriteLine("\n\t1. Create a bucket");
        Console.WriteLine("\n\t2. Upload an object to the new bucket");
        Console.WriteLine("\n\t3. Copy the uploaded object to a folder in the bucket");
        Console.WriteLine("\n\t4. List the items in the new bucket");
        Console.WriteLine("\n\t5. Delete all the items in the bucket");
        Console.WriteLine("\n\t6. Delete the bucket");
        Console.WriteLine(sepBar);

        // Create a bucket.
        Console.WriteLine($"\n{sepBar}");
        Console.WriteLine("\nCreate a new Amazon S3 bucket.\n");
        Console.WriteLine(sepBar);

        Console.Write("Please enter a name for the new bucket: ");
        bucketName = Console.ReadLine();

        var success = await S3Bucket.CreateBucketAsync(client, bucketName);
        if (success)
        {
            Console.WriteLine($"Successfully created bucket: {bucketName}.\n");
        }
        else
        {
            Console.WriteLine($"Could not create bucket: {bucketName}.\n");
        }

        Console.WriteLine(sepBar);
        Console.WriteLine("Upload a file to the new bucket.");
        Console.WriteLine(sepBar);

        // Get the local path and filename for the file to upload.
        while (string.IsNullOrEmpty(filePath))
        {
            Console.Write("Please enter the path and filename of the file to upload: ");
            filePath = Console.ReadLine();

            // Confirm that the file exists on the local computer.
            if (!File.Exists(filePath))
            {
                Console.WriteLine($"Couldn't find {filePath}. Try again.\n");
                filePath = string.Empty;
            }
        }

        // Get the file name from the full path.
        keyName = Path.GetFileName(filePath);

        success = await S3Bucket.UploadFileAsync(client, bucketName, keyName, filePath);

        if (success)
        {
            Console.WriteLine($"Successfully uploaded {keyName} from {filePath} to {bucketName}.\n");
        }
        else
        {
            Console.WriteLine($"Could not upload {keyName}.\n");
        }

        // Set the file path to an empty string to avoid overwriting the
        // file we just uploaded to the bucket.
        filePath = string.Empty;

        // Now get a new location where we can save the file.
        while (string.IsNullOrEmpty(filePath))
        {
            // First get the path to which the file will be downloaded.
            Console.Write("Please enter the path where the file will be downloaded: ");
            filePath = Console.ReadLine();

            // Confirm that the file exists on the local computer.
            if (File.Exists($"{filePath}\\{keyName}"))
            {
                Console.WriteLine($"Sorry, the file already exists in that location.\n");
                filePath = string.Empty;
            }
        }

        // Download an object from a bucket.
        success = await S3Bucket.DownloadObjectFromBucketAsync(client, bucketName, keyName, filePath);

        if (success)
        {
            Console.WriteLine($"Successfully downloaded {keyName}.\n");
        }
        else
        {
            Console.WriteLine($"Sorry, could not download {keyName}.\n");
        }

        // Copy the object to a different folder in the bucket.
        string folderName = string.Empty;

        while (string.IsNullOrEmpty(folderName))
        {
            Console.Write("Please enter the name of the folder to copy your object to: ");
            folderName = Console.ReadLine();
        }

        while (string.IsNullOrEmpty(keyName))
        {
            // Get the name to give to the object once uploaded.
            Console.Write("Enter the name of the object to copy: ");
            keyName = Console.ReadLine();
        }

        await S3Bucket.CopyObjectInBucketAsync(client, bucketName, keyName, folderName);

        // List the objects in the bucket.
        await S3Bucket.ListBucketContentsAsync(client, bucketName);

        // Delete the contents of the bucket.
        await S3Bucket.DeleteBucketContentsAsync(client, bucketName);

        // Deleting the bucket too quickly after deleting its contents will
        // cause an error that the bucket isn't empty. So...
        Console.WriteLine("Press <Enter> when you are ready to delete the bucket.");
        _ = Console.ReadLine();

        // Delete the bucket.
        await S3Bucket.DeleteBucketAsync(client, bucketName);
    }
}
Bash
AWS CLI with Bash script
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

###############################################################################
# function s3_getting_started
#
# This function creates, copies, and deletes S3 buckets and objects.
#
# Returns:
#       0 - If successful.
#       1 - If an error occurred.
###############################################################################
function s3_getting_started() {
  {
    if [ "$BUCKET_OPERATIONS_SOURCED" != "True" ]; then
      cd bucket-lifecycle-operations || exit
      source ./bucket_operations.sh
      cd ..
    fi
  }

  echo_repeat "*" 88
  echo "Welcome to the Amazon S3 getting started demo."
  echo_repeat "*" 88

  echo "A unique bucket will be created by appending a Universally Unique Identifier to a bucket name prefix."
  echo -n "Enter a prefix for the S3 bucket that will be used in this demo: "
  get_input
  bucket_name_prefix=$get_input_result

  local bucket_name
  bucket_name=$(generate_random_name "$bucket_name_prefix")

  local region_code
  region_code=$(aws configure get region)

  if create_bucket -b "$bucket_name" -r "$region_code"; then
    echo "Created demo bucket named $bucket_name"
  else
    errecho "Failed to create the bucket. This demo will exit."
    return 1
  fi

  local file_name
  while [ -z "$file_name" ]; do
    echo -n "Enter a file you want to upload to your bucket: "
    get_input
    file_name=$get_input_result

    if [ ! -f "$file_name" ]; then
      echo "Could not find file $file_name. Are you sure it exists?"
      file_name=""
    fi
  done

  local key
  key="$(basename "$file_name")"

  local result=0
  if copy_file_to_bucket "$bucket_name" "$file_name" "$key"; then
    echo "Uploaded file $file_name into bucket $bucket_name with key $key."
  else
    result=1
  fi

  local destination_file
  destination_file="$file_name.download"
  if yes_no_input "Would you like to download $key to the file $destination_file? (y/n) "; then
    if download_object_from_bucket "$bucket_name" "$destination_file" "$key"; then
      echo "Downloaded $key in the bucket $bucket_name to the file $destination_file."
    else
      result=1
    fi
  fi

  if yes_no_input "Would you like to copy $key to a new object key in your bucket? (y/n) "; then
    local to_key
    to_key="demo/$key"
    if copy_item_in_bucket "$bucket_name" "$key" "$to_key"; then
      echo "Copied $key in the bucket $bucket_name to the key $to_key."
    else
      result=1
    fi
  fi

  local bucket_items
  bucket_items=$(list_items_in_bucket "$bucket_name")
  # shellcheck disable=SC2181
  if [[ $? -ne 0 ]]; then
    result=1
  fi

  echo "Your bucket contains the following items."
  echo -e "Name\t\tSize"
  echo "$bucket_items"

  if yes_no_input "Delete the bucket, $bucket_name, as well as the objects in it? (y/n) "; then
    bucket_items=$(echo "$bucket_items" | cut -f 1)

    if delete_items_in_bucket "$bucket_name" "$bucket_items"; then
      echo "The following items were deleted from the bucket $bucket_name"
      echo "$bucket_items"
    else
      result=1
    fi

    if delete_bucket "$bucket_name"; then
      echo "Deleted the bucket $bucket_name"
    else
      result=1
    fi
  fi

  return $result
}

The Amazon S3 functions used in this scenario.

###############################################################################
# function create_bucket
#
# This function creates the specified bucket in the specified AWS Region, unless
# it already exists.
#
# Parameters:
#       -b bucket_name  -- The name of the bucket to create.
#       -r region_code  -- The code for an AWS Region in which to
#                          create the bucket.
#
# Returns:
#       The URL of the bucket that was created.
#     And:
#       0 - If successful.
#       1 - If it fails.
###############################################################################
function create_bucket() {
  local bucket_name region_code response
  local option OPTARG # Required to use getopts command in a function.

  # bashsupport disable=BP5008
  function usage() {
    echo "function create_bucket"
    echo "Creates an Amazon S3 bucket. You must supply a bucket name:"
    echo "  -b bucket_name    The name of the bucket. It must be globally unique."
    echo "  [-r region_code]  The code for an AWS Region in which the bucket is created."
    echo ""
  }

  # Retrieve the calling parameters.
  while getopts "b:r:h" option; do
    case "${option}" in
      b) bucket_name="${OPTARG}" ;;
      r) region_code="${OPTARG}" ;;
      h)
        usage
        return 0
        ;;
      \?)
        echo "Invalid parameter"
        usage
        return 1
        ;;
    esac
  done

  if [[ -z "$bucket_name" ]]; then
    errecho "ERROR: You must provide a bucket name with the -b parameter."
    usage
    return 1
  fi

  local bucket_config_arg
  # A location constraint for "us-east-1" returns an error.
  if [[ -n "$region_code" ]] && [[ "$region_code" != "us-east-1" ]]; then
    bucket_config_arg="--create-bucket-configuration LocationConstraint=$region_code"
  fi

  iecho "Parameters:\n"
  iecho "    Bucket name:   $bucket_name"
  iecho "    Region code:   $region_code"
  iecho ""

  # If the bucket already exists, we don't want to try to create it.
  if (bucket_exists "$bucket_name"); then
    errecho "ERROR: A bucket with that name already exists. Try again."
    return 1
  fi

  # shellcheck disable=SC2086
  response=$(aws s3api create-bucket \
    --bucket "$bucket_name" \
    $bucket_config_arg)

  # shellcheck disable=SC2181
  if [[ ${?} -ne 0 ]]; then
    errecho "ERROR: AWS reports create-bucket operation failed.\n$response"
    return 1
  fi
}

###############################################################################
# function copy_file_to_bucket
#
# This function copies a local file to the specified bucket.
#
# Parameters:
#       $1 - The name of the bucket to copy the file to.
#       $2 - The path and file name of the local file to copy to the bucket.
#       $3 - The key (name) to call the copy of the file in the bucket.
#
# Returns:
#       0 - If successful.
#       1 - If it fails.
###############################################################################
function copy_file_to_bucket() {
  local response bucket_name source_file destination_file_name
  bucket_name=$1
  source_file=$2
  destination_file_name=$3

  response=$(aws s3api put-object \
    --bucket "$bucket_name" \
    --body "$source_file" \
    --key "$destination_file_name")

  # shellcheck disable=SC2181
  if [[ ${?} -ne 0 ]]; then
    errecho "ERROR: AWS reports put-object operation failed.\n$response"
    return 1
  fi
}

###############################################################################
# function download_object_from_bucket
#
# This function downloads an object in a bucket to a file.
#
# Parameters:
#       $1 - The name of the bucket to download the object from.
#       $2 - The path and file name to store the downloaded object.
#       $3 - The key (name) of the object in the bucket.
#
# Returns:
#       0 - If successful.
#       1 - If it fails.
###############################################################################
function download_object_from_bucket() {
  local bucket_name=$1
  local destination_file_name=$2
  local object_name=$3
  local response

  response=$(aws s3api get-object \
    --bucket "$bucket_name" \
    --key "$object_name" \
    "$destination_file_name")

  # shellcheck disable=SC2181
  if [[ ${?} -ne 0 ]]; then
    errecho "ERROR: AWS reports get-object operation failed.\n$response"
    return 1
  fi
}

###############################################################################
# function copy_item_in_bucket
#
# This function creates a copy of the specified file in the same bucket.
#
# Parameters:
#       $1 - The name of the bucket to copy the file from and to.
#       $2 - The key of the source file to copy.
#       $3 - The key of the destination file.
#
# Returns:
#       0 - If successful.
#       1 - If it fails.
###############################################################################
function copy_item_in_bucket() {
  local bucket_name=$1
  local source_key=$2
  local destination_key=$3
  local response

  response=$(aws s3api copy-object \
    --bucket "$bucket_name" \
    --copy-source "$bucket_name/$source_key" \
    --key "$destination_key")

  # shellcheck disable=SC2181
  if [[ $? -ne 0 ]]; then
    errecho "ERROR: AWS reports s3api copy-object operation failed.\n$response"
    return 1
  fi
}

###############################################################################
# function list_items_in_bucket
#
# This function displays a list of the files in the bucket with each file's
# size. The function uses the --query parameter to retrieve only the key and
# size fields from the Contents collection.
#
# Parameters:
#       $1 - The name of the bucket.
#
# Returns:
#       The list of files in text format.
#     And:
#       0 - If successful.
#       1 - If it fails.
###############################################################################
function list_items_in_bucket() {
  local bucket_name=$1
  local response

  response=$(aws s3api list-objects \
    --bucket "$bucket_name" \
    --output text \
    --query 'Contents[].{Key: Key, Size: Size}')

  # shellcheck disable=SC2181
  if [[ ${?} -eq 0 ]]; then
    echo "$response"
  else
    errecho "ERROR: AWS reports s3api list-objects operation failed.\n$response"
    return 1
  fi
}

###############################################################################
# function delete_items_in_bucket
#
# This function deletes the specified list of keys from the specified bucket.
#
# Parameters:
#       $1 - The name of the bucket.
#       $2 - A list of keys in the bucket to delete.
#
# Returns:
#       0 - If successful.
#       1 - If it fails.
###############################################################################
function delete_items_in_bucket() {
  local bucket_name=$1
  local keys=$2
  local response

  # Create the JSON for the items to delete.
  local delete_items
  delete_items="{\"Objects\":["
  for key in $keys; do
    delete_items="$delete_items{\"Key\": \"$key\"},"
  done
  delete_items=${delete_items%?} # Remove the final comma.
  delete_items="$delete_items]}"

  response=$(aws s3api delete-objects \
    --bucket "$bucket_name" \
    --delete "$delete_items")

  # shellcheck disable=SC2181
  if [[ $? -ne 0 ]]; then
    errecho "ERROR: AWS reports s3api delete-objects operation failed.\n$response"
    return 1
  fi
}

###############################################################################
# function delete_bucket
#
# This function deletes the specified bucket.
#
# Parameters:
#       $1 - The name of the bucket.
#
# Returns:
#       0 - If successful.
#       1 - If it fails.
###############################################################################
function delete_bucket() {
  local bucket_name=$1
  local response

  response=$(aws s3api delete-bucket \
    --bucket "$bucket_name")

  # shellcheck disable=SC2181
  if [[ $? -ne 0 ]]; then
    errecho "ERROR: AWS reports s3api delete-bucket failed.\n$response"
    return 1
  fi
}
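Taken together, these helpers can also be called directly once bucket_operations.sh has been sourced. A hypothetical non-interactive run, with placeholder bucket and file names:

source ./bucket_operations.sh

create_bucket -b my-demo-bucket-1234 -r us-west-2
copy_file_to_bucket my-demo-bucket-1234 ./report.txt report.txt
download_object_from_bucket my-demo-bucket-1234 ./report-copy.txt report.txt
copy_item_in_bucket my-demo-bucket-1234 report.txt demo/report.txt
list_items_in_bucket my-demo-bucket-1234
delete_items_in_bucket my-demo-bucket-1234 "report.txt demo/report.txt"
delete_bucket my-demo-bucket-1234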
C++
SDK for C++
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

#include <iostream>
#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CopyObjectRequest.h>
#include <aws/s3/model/CreateBucketRequest.h>
#include <aws/s3/model/DeleteBucketRequest.h>
#include <aws/s3/model/DeleteObjectRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/ListObjectsV2Request.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/BucketLocationConstraint.h>
#include <aws/s3/model/CreateBucketConfiguration.h>
#include <aws/core/utils/UUID.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSAllocator.h>
#include <fstream>
#include "s3_examples.h"

namespace AwsDoc {
    namespace S3 {
        //! Delete an S3 bucket.
        /*!
          \param bucketName: The S3 bucket's name.
          \param client: An S3 client.
          \return bool: Function succeeded.
        */
        static bool deleteBucket(const Aws::String &bucketName, Aws::S3::S3Client &client);

        //! Delete an object in an S3 bucket.
        /*!
          \param bucketName: The S3 bucket's name.
          \param key: The key for the object in the S3 bucket.
          \param client: An S3 client.
          \return bool: Function succeeded.
        */
        static bool deleteObjectFromBucket(const Aws::String &bucketName,
                                           const Aws::String &key,
                                           Aws::S3::S3Client &client);
    }
}

//! Scenario to create, copy, and delete S3 buckets and objects.
/*!
  \param bucketNamePrefix: A prefix for a bucket name.
  \param uploadFilePath: Path to file to upload to an Amazon S3 bucket.
  \param saveFilePath: Path for saving a downloaded S3 object.
  \param clientConfig: Aws client configuration.
  \return bool: Function succeeded.
*/
bool AwsDoc::S3::S3_GettingStartedScenario(const Aws::String &bucketNamePrefix,
                                           const Aws::String &uploadFilePath,
                                           const Aws::String &saveFilePath,
                                           const Aws::Client::ClientConfiguration &clientConfig) {
    Aws::S3::S3Client client(clientConfig);

    // Create a unique bucket name which is only temporary and will be deleted.
    // Format: <bucketNamePrefix> + lowercase UUID.
    Aws::String uuid = Aws::Utils::UUID::RandomUUID();
    Aws::String bucketName = bucketNamePrefix + Aws::Utils::StringUtils::ToLower(uuid.c_str());

    // 1. Create a bucket.
    {
        Aws::S3::Model::CreateBucketRequest request;
        request.SetBucket(bucketName);

        if (clientConfig.region != Aws::Region::US_EAST_1) {
            Aws::S3::Model::CreateBucketConfiguration createBucketConfiguration;
            createBucketConfiguration.WithLocationConstraint(
                    Aws::S3::Model::BucketLocationConstraintMapper::GetBucketLocationConstraintForName(
                            clientConfig.region));
            request.WithCreateBucketConfiguration(createBucketConfiguration);
        }

        Aws::S3::Model::CreateBucketOutcome outcome = client.CreateBucket(request);
        if (!outcome.IsSuccess()) {
            const Aws::S3::S3Error &err = outcome.GetError();
            std::cerr << "Error: createBucket: " << err.GetExceptionName() << ": "
                      << err.GetMessage() << std::endl;
            return false;
        }
        else {
            std::cout << "Created the bucket, '" << bucketName << "', in the region, '"
                      << clientConfig.region << "'." << std::endl;
        }
    }

    // 2. Upload a local file to the bucket.
    Aws::String key = "key-for-test";
    {
        Aws::S3::Model::PutObjectRequest request;
        request.SetBucket(bucketName);
        request.SetKey(key);

        std::shared_ptr<Aws::FStream> input_data =
                Aws::MakeShared<Aws::FStream>("SampleAllocationTag",
                                              uploadFilePath,
                                              std::ios_base::in | std::ios_base::binary);
        if (!input_data->is_open()) {
            std::cerr << "Error: unable to open file, '" << uploadFilePath << "'." << std::endl;
            AwsDoc::S3::deleteBucket(bucketName, client);
            return false;
        }

        request.SetBody(input_data);

        Aws::S3::Model::PutObjectOutcome outcome = client.PutObject(request);
        if (!outcome.IsSuccess()) {
            std::cerr << "Error: putObject: " << outcome.GetError().GetMessage() << std::endl;
            AwsDoc::S3::deleteObjectFromBucket(bucketName, key, client);
            AwsDoc::S3::deleteBucket(bucketName, client);
            return false;
        }
        else {
            std::cout << "Added the object with the key, '" << key << "', to the bucket, '"
                      << bucketName << "'." << std::endl;
        }
    }

    // 3. Download the object to a local file.
    {
        Aws::S3::Model::GetObjectRequest request;
        request.SetBucket(bucketName);
        request.SetKey(key);

        Aws::S3::Model::GetObjectOutcome outcome = client.GetObject(request);
        if (!outcome.IsSuccess()) {
            const Aws::S3::S3Error &err = outcome.GetError();
            std::cerr << "Error: getObject: " << err.GetExceptionName() << ": "
                      << err.GetMessage() << std::endl;
        }
        else {
            std::cout << "Downloaded the object with the key, '" << key << "', in the bucket, '"
                      << bucketName << "'." << std::endl;

            Aws::IOStream &ioStream = outcome.GetResultWithOwnership().GetBody();
            Aws::OFStream outStream(saveFilePath, std::ios_base::out | std::ios_base::binary);
            if (!outStream.is_open()) {
                std::cout << "Error: unable to open file, '" << saveFilePath << "'." << std::endl;
            }
            else {
                outStream << ioStream.rdbuf();
                std::cout << "Wrote the downloaded object to the file '" << saveFilePath << "'."
                          << std::endl;
            }
        }
    }

    // 4. Copy the object to a different "folder" in the bucket.
    Aws::String copiedToKey = "test-folder/" + key;
    {
        Aws::S3::Model::CopyObjectRequest request;
        request.WithBucket(bucketName)
                .WithKey(copiedToKey)
                .WithCopySource(bucketName + "/" + key);

        Aws::S3::Model::CopyObjectOutcome outcome = client.CopyObject(request);
        if (!outcome.IsSuccess()) {
            std::cerr << "Error: copyObject: " << outcome.GetError().GetMessage() << std::endl;
        }
        else {
            std::cout << "Copied the object with the key, '" << key << "', to the key, '"
                      << copiedToKey << "', in the bucket, '" << bucketName << "'." << std::endl;
        }
    }

    // 5. List objects in the bucket.
    {
        Aws::S3::Model::ListObjectsV2Request request;
        request.WithBucket(bucketName);

        Aws::String continuationToken;
        Aws::Vector<Aws::S3::Model::Object> allObjects;

        do {
            if (!continuationToken.empty()) {
                request.SetContinuationToken(continuationToken);
            }

            Aws::S3::Model::ListObjectsV2Outcome outcome = client.ListObjectsV2(request);
            if (!outcome.IsSuccess()) {
                std::cerr << "Error: ListObjects: " << outcome.GetError().GetMessage() << std::endl;
                break;
            }
            else {
                Aws::Vector<Aws::S3::Model::Object> objects = outcome.GetResult().GetContents();
                allObjects.insert(allObjects.end(), objects.begin(), objects.end());
                // Use the token for the next page, not the echoed request token.
                continuationToken = outcome.GetResult().GetNextContinuationToken();
            }
        } while (!continuationToken.empty());

        std::cout << allObjects.size() << " objects in the bucket, '" << bucketName << "':"
                  << std::endl;
        for (Aws::S3::Model::Object &object: allObjects) {
            std::cout << "  '" << object.GetKey() << "'" << std::endl;
        }
    }

    // 6. Delete all objects in the bucket.
    // All objects in the bucket must be deleted before deleting the bucket.
    AwsDoc::S3::deleteObjectFromBucket(bucketName, copiedToKey, client);
    AwsDoc::S3::deleteObjectFromBucket(bucketName, key, client);

    // 7. Delete the bucket.
    return AwsDoc::S3::deleteBucket(bucketName, client);
}

bool AwsDoc::S3::deleteObjectFromBucket(const Aws::String &bucketName,
                                        const Aws::String &key,
                                        Aws::S3::S3Client &client) {
    Aws::S3::Model::DeleteObjectRequest request;
    request.SetBucket(bucketName);
    request.SetKey(key);

    Aws::S3::Model::DeleteObjectOutcome outcome = client.DeleteObject(request);
    if (!outcome.IsSuccess()) {
        std::cerr << "Error: deleteObject: " << outcome.GetError().GetMessage() << std::endl;
    }
    else {
        std::cout << "Deleted the object with the key, '" << key << "', from the bucket, '"
                  << bucketName << "'." << std::endl;
    }

    return outcome.IsSuccess();
}

bool AwsDoc::S3::deleteBucket(const Aws::String &bucketName, Aws::S3::S3Client &client) {
    Aws::S3::Model::DeleteBucketRequest request;
    request.SetBucket(bucketName);

    Aws::S3::Model::DeleteBucketOutcome outcome = client.DeleteBucket(request);
    if (!outcome.IsSuccess()) {
        const Aws::S3::S3Error &err = outcome.GetError();
        std::cerr << "Error: deleteBucket: " << err.GetExceptionName() << ": "
                  << err.GetMessage() << std::endl;
    }
    else {
        std::cout << "Deleted the bucket, '" << bucketName << "'." << std::endl;
    }

    return outcome.IsSuccess();
}
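Step 5 of the C++ scenario pages through ListObjectsV2 results manually, feeding each response's next continuation token back into the following request. The AWS CLI runs the same loop automatically, but you can drive it by hand to see the tokens; a sketch with a placeholder bucket name:

# First page: returns up to 2 keys plus a NextToken when the listing is truncated.
aws s3api list-objects-v2 --bucket amzn-s3-demo-bucket --max-items 2

# Next page: pass the NextToken from the previous response.
aws s3api list-objects-v2 --bucket amzn-s3-demo-bucket --max-items 2 \
    --starting-token <NextToken-from-previous-response>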
Go
SDK for Go V2
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

Define a struct that wraps bucket and object actions used by the scenario.

import ( "bytes" "context" "errors" "fmt" "io" "log" "os" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go" ) // BucketBasics encapsulates the Amazon Simple Storage Service (Amazon S3) actions // used in the examples. // It contains S3Client, an Amazon S3 service client that is used to perform bucket // and object actions. type BucketBasics struct { S3Client *s3.Client } // ListBuckets lists the buckets in the current account. func (basics BucketBasics) ListBuckets(ctx context.Context) ([]types.Bucket, error) { var err error var output *s3.ListBucketsOutput var buckets []types.Bucket bucketPaginator := s3.NewListBucketsPaginator(basics.S3Client, &s3.ListBucketsInput{}) for bucketPaginator.HasMorePages() { output, err = bucketPaginator.NextPage(ctx) if err != nil { var apiErr smithy.APIError if errors.As(err, &apiErr) && apiErr.ErrorCode() == "AccessDenied" { fmt.Println("You don't have permission to list buckets for this account.") err = apiErr } else { log.Printf("Couldn't list buckets for your account. Here's why: %v\n", err) } break } else { buckets = append(buckets, output.Buckets...) } } return buckets, err } // BucketExists checks whether a bucket exists in the current account. func (basics BucketBasics) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := basics.S3Client.HeadBucket(ctx, &s3.HeadBucketInput{ Bucket: aws.String(bucketName), }) exists := true if err != nil { var apiError smithy.APIError if errors.As(err, &apiError) { switch apiError.(type) { case *types.NotFound: log.Printf("Bucket %v is available.\n", bucketName) exists = false err = nil default: log.Printf("Either you don't have access to bucket %v or another error occurred. "+ "Here's what happened: %v\n", bucketName, err) } } } else { log.Printf("Bucket %v exists and you already own it.", bucketName) } return exists, err } // CreateBucket creates a bucket with the specified name in the specified Region. func (basics BucketBasics) CreateBucket(ctx context.Context, name string, region string) error { _, err := basics.S3Client.CreateBucket(ctx, &s3.CreateBucketInput{ Bucket: aws.String(name), CreateBucketConfiguration: &types.CreateBucketConfiguration{ LocationConstraint: types.BucketLocationConstraint(region), }, }) if err != nil { var owned *types.BucketAlreadyOwnedByYou var exists *types.BucketAlreadyExists if errors.As(err, &owned) { log.Printf("You already own bucket %s.\n", name) err = owned } else if errors.As(err, &exists) { log.Printf("Bucket %s already exists.\n", name) err = exists } } else { err = s3.NewBucketExistsWaiter(basics.S3Client).Wait( ctx, &s3.HeadBucketInput{Bucket: aws.String(name)}, time.Minute) if err != nil { log.Printf("Failed attempt to wait for bucket %s to exist.\n", name) } } return err } // UploadFile reads from a file and puts the data into an object in a bucket. func (basics BucketBasics) UploadFile(ctx context.Context, bucketName string, objectKey string, fileName string) error { file, err := os.Open(fileName) if err != nil { log.Printf("Couldn't open file %v to upload. 
Here's why: %v\n", fileName, err) } else { defer file.Close() _, err = basics.S3Client.PutObject(ctx, &s3.PutObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(objectKey), Body: file, }) if err != nil { var apiErr smithy.APIError if errors.As(err, &apiErr) && apiErr.ErrorCode() == "EntityTooLarge" { log.Printf("Error while uploading object to %s. The object is too large.\n"+ "To upload objects larger than 5GB, use the S3 console (160GB max)\n"+ "or the multipart upload API (5TB max).", bucketName) } else { log.Printf("Couldn't upload file %v to %v:%v. Here's why: %v\n", fileName, bucketName, objectKey, err) } } else { err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: aws.String(objectKey)}, time.Minute) if err != nil { log.Printf("Failed attempt to wait for object %s to exist.\n", objectKey) } } } return err } // UploadLargeObject uses an upload manager to upload data to an object in a bucket. // The upload manager breaks large data into parts and uploads the parts concurrently. func (basics BucketBasics) UploadLargeObject(ctx context.Context, bucketName string, objectKey string, largeObject []byte) error { largeBuffer := bytes.NewReader(largeObject) var partMiBs int64 = 10 uploader := manager.NewUploader(basics.S3Client, func(u *manager.Uploader) { u.PartSize = partMiBs * 1024 * 1024 }) _, err := uploader.Upload(ctx, &s3.PutObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(objectKey), Body: largeBuffer, }) if err != nil { var apiErr smithy.APIError if errors.As(err, &apiErr) && apiErr.ErrorCode() == "EntityTooLarge" { log.Printf("Error while uploading object to %s. The object is too large.\n"+ "The maximum size for a multipart upload is 5TB.", bucketName) } else { log.Printf("Couldn't upload large object to %v:%v. Here's why: %v\n", bucketName, objectKey, err) } } else { err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: aws.String(objectKey)}, time.Minute) if err != nil { log.Printf("Failed attempt to wait for object %s to exist.\n", objectKey) } } return err } // DownloadFile gets an object from a bucket and stores it in a local file. func (basics BucketBasics) DownloadFile(ctx context.Context, bucketName string, objectKey string, fileName string) error { result, err := basics.S3Client.GetObject(ctx, &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(objectKey), }) if err != nil { var noKey *types.NoSuchKey if errors.As(err, &noKey) { log.Printf("Can't get object %s from bucket %s. No such key exists.\n", objectKey, bucketName) err = noKey } else { log.Printf("Couldn't get object %v:%v. Here's why: %v\n", bucketName, objectKey, err) } return err } defer result.Body.Close() file, err := os.Create(fileName) if err != nil { log.Printf("Couldn't create file %v. Here's why: %v\n", fileName, err) return err } defer file.Close() body, err := io.ReadAll(result.Body) if err != nil { log.Printf("Couldn't read object body from %v. Here's why: %v\n", objectKey, err) } _, err = file.Write(body) return err } // DownloadLargeObject uses a download manager to download an object from a bucket. // The download manager gets the data in parts and writes them to a buffer until all of // the data has been downloaded. 
func (basics BucketBasics) DownloadLargeObject(ctx context.Context, bucketName string, objectKey string) ([]byte, error) { var partMiBs int64 = 10 downloader := manager.NewDownloader(basics.S3Client, func(d *manager.Downloader) { d.PartSize = partMiBs * 1024 * 1024 }) buffer := manager.NewWriteAtBuffer([]byte{}) _, err := downloader.Download(ctx, buffer, &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(objectKey), }) if err != nil { log.Printf("Couldn't download large object from %v:%v. Here's why: %v\n", bucketName, objectKey, err) } return buffer.Bytes(), err } // CopyToFolder copies an object in a bucket to a subfolder in the same bucket. func (basics BucketBasics) CopyToFolder(ctx context.Context, bucketName string, objectKey string, folderName string) error { objectDest := fmt.Sprintf("%v/%v", folderName, objectKey) _, err := basics.S3Client.CopyObject(ctx, &s3.CopyObjectInput{ Bucket: aws.String(bucketName), CopySource: aws.String(fmt.Sprintf("%v/%v", bucketName, objectKey)), Key: aws.String(objectDest), }) if err != nil { var notActive *types.ObjectNotInActiveTierError if errors.As(err, &notActive) { log.Printf("Couldn't copy object %s from %s because the object isn't in the active tier.\n", objectKey, bucketName) err = notActive } } else { err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: aws.String(objectDest)}, time.Minute) if err != nil { log.Printf("Failed attempt to wait for object %s to exist.\n", objectDest) } } return err } // CopyToBucket copies an object in a bucket to another bucket. func (basics BucketBasics) CopyToBucket(ctx context.Context, sourceBucket string, destinationBucket string, objectKey string) error { _, err := basics.S3Client.CopyObject(ctx, &s3.CopyObjectInput{ Bucket: aws.String(destinationBucket), CopySource: aws.String(fmt.Sprintf("%v/%v", sourceBucket, objectKey)), Key: aws.String(objectKey), }) if err != nil { var notActive *types.ObjectNotInActiveTierError if errors.As(err, &notActive) { log.Printf("Couldn't copy object %s from %s because the object isn't in the active tier.\n", objectKey, sourceBucket) err = notActive } } else { err = s3.NewObjectExistsWaiter(basics.S3Client).Wait( ctx, &s3.HeadObjectInput{Bucket: aws.String(destinationBucket), Key: aws.String(objectKey)}, time.Minute) if err != nil { log.Printf("Failed attempt to wait for object %s to exist.\n", objectKey) } } return err } // ListObjects lists the objects in a bucket. func (basics BucketBasics) ListObjects(ctx context.Context, bucketName string) ([]types.Object, error) { var err error var output *s3.ListObjectsV2Output input := &s3.ListObjectsV2Input{ Bucket: aws.String(bucketName), } var objects []types.Object objectPaginator := s3.NewListObjectsV2Paginator(basics.S3Client, input) for objectPaginator.HasMorePages() { output, err = objectPaginator.NextPage(ctx) if err != nil { var noBucket *types.NoSuchBucket if errors.As(err, &noBucket) { log.Printf("Bucket %s does not exist.\n", bucketName) err = noBucket } break } else { objects = append(objects, output.Contents...) } } return objects, err } // DeleteObjects deletes a list of objects from a bucket. 
func (basics BucketBasics) DeleteObjects(ctx context.Context, bucketName string, objectKeys []string) error { var objectIds []types.ObjectIdentifier for _, key := range objectKeys { objectIds = append(objectIds, types.ObjectIdentifier{Key: aws.String(key)}) } output, err := basics.S3Client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ Bucket: aws.String(bucketName), Delete: &types.Delete{Objects: objectIds, Quiet: aws.Bool(true)}, }) if err != nil || len(output.Errors) > 0 { log.Printf("Error deleting objects from bucket %s.\n", bucketName) if err != nil { var noBucket *types.NoSuchBucket if errors.As(err, &noBucket) { log.Printf("Bucket %s does not exist.\n", bucketName) err = noBucket } } else if len(output.Errors) > 0 { for _, outErr := range output.Errors { log.Printf("%s: %s\n", *outErr.Key, *outErr.Message) } err = fmt.Errorf("%s", *output.Errors[0].Message) } } else { for _, delObjs := range output.Deleted { err = s3.NewObjectNotExistsWaiter(basics.S3Client).Wait( ctx, &s3.HeadObjectInput{Bucket: aws.String(bucketName), Key: delObjs.Key}, time.Minute) if err != nil { log.Printf("Failed attempt to wait for object %s to be deleted.\n", *delObjs.Key) } else { log.Printf("Deleted %s.\n", *delObjs.Key) } } } return err } // DeleteBucket deletes a bucket. The bucket must be empty or an error is returned. func (basics BucketBasics) DeleteBucket(ctx context.Context, bucketName string) error { _, err := basics.S3Client.DeleteBucket(ctx, &s3.DeleteBucketInput{ Bucket: aws.String(bucketName)}) if err != nil { var noBucket *types.NoSuchBucket if errors.As(err, &noBucket) { log.Printf("Bucket %s does not exist.\n", bucketName) err = noBucket } else { log.Printf("Couldn't delete bucket %v. Here's why: %v\n", bucketName, err) } } else { err = s3.NewBucketNotExistsWaiter(basics.S3Client).Wait( ctx, &s3.HeadBucketInput{Bucket: aws.String(bucketName)}, time.Minute) if err != nil { log.Printf("Failed attempt to wait for bucket %s to be deleted.\n", bucketName) } else { log.Printf("Deleted %s.\n", bucketName) } } return err }
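The Go actions pair each mutating call with a waiter (BucketExists, ObjectExists, ObjectNotExists, BucketNotExists) so the scenario doesn't race ahead of the service. The AWS CLI exposes the same four waiters, which is a convenient way to verify the SDK's behavior from a shell; bucket and key names below are placeholders:

aws s3api wait bucket-exists --bucket amzn-s3-demo-bucket
aws s3api wait object-exists --bucket amzn-s3-demo-bucket --key doc-example-key
aws s3api wait object-not-exists --bucket amzn-s3-demo-bucket --key doc-example-key
aws s3api wait bucket-not-exists --bucket amzn-s3-demo-bucket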

Run an interactive scenario that shows you how to work with S3 buckets and objects.

import ( "context" "fmt" "log" "os" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools" "github.com/awsdocs/aws-doc-sdk-examples/gov2/s3/actions" ) // RunGetStartedScenario is an interactive example that shows you how to use Amazon // Simple Storage Service (Amazon S3) to create an S3 bucket and use it to store objects. // // 1. Create a bucket. // 2. Upload a local file to the bucket. // 3. Download an object to a local file. // 4. Copy an object to a different folder in the bucket. // 5. List objects in the bucket. // 6. Delete all objects in the bucket. // 7. Delete the bucket. // // This example creates an Amazon S3 service client from the specified sdkConfig so that // you can replace it with a mocked or stubbed config for unit testing. // // It uses a questioner from the `demotools` package to get input during the example. // This package can be found in the ..\..\demotools folder of this repo. func RunGetStartedScenario(ctx context.Context, sdkConfig aws.Config, questioner demotools.IQuestioner) { defer func() { if r := recover(); r != nil { log.Println("Something went wrong with the demo.") _, isMock := questioner.(*demotools.MockQuestioner) if isMock || questioner.AskBool("Do you want to see the full error message (y/n)?", "y") { log.Println(r) } } }() log.Println(strings.Repeat("-", 88)) log.Println("Welcome to the Amazon S3 getting started demo.") log.Println(strings.Repeat("-", 88)) s3Client := s3.NewFromConfig(sdkConfig) bucketBasics := actions.BucketBasics{S3Client: s3Client} count := 10 log.Printf("Let's list up to %v buckets for your account:", count) buckets, err := bucketBasics.ListBuckets(ctx) if err != nil { panic(err) } if len(buckets) == 0 { log.Println("You don't have any buckets!") } else { if count > len(buckets) { count = len(buckets) } for _, bucket := range buckets[:count] { log.Printf("\t%v\n", *bucket.Name) } } bucketName := questioner.Ask("Let's create a bucket. 
Enter a name for your bucket:", demotools.NotEmpty{}) bucketExists, err := bucketBasics.BucketExists(ctx, bucketName) if err != nil { panic(err) } if !bucketExists { err = bucketBasics.CreateBucket(ctx, bucketName, sdkConfig.Region) if err != nil { panic(err) } else { log.Println("Bucket created.") } } log.Println(strings.Repeat("-", 88)) fmt.Println("Let's upload a file to your bucket.") smallFile := questioner.Ask("Enter the path to a file you want to upload:", demotools.NotEmpty{}) const smallKey = "doc-example-key" err = bucketBasics.UploadFile(ctx, bucketName, smallKey, smallFile) if err != nil { panic(err) } log.Printf("Uploaded %v as %v.\n", smallFile, smallKey) log.Println(strings.Repeat("-", 88)) log.Printf("Let's download %v to a file.", smallKey) downloadFileName := questioner.Ask("Enter a name for the downloaded file:", demotools.NotEmpty{}) err = bucketBasics.DownloadFile(ctx, bucketName, smallKey, downloadFileName) if err != nil { panic(err) } log.Printf("File %v downloaded.", downloadFileName) log.Println(strings.Repeat("-", 88)) log.Printf("Let's copy %v to a folder in the same bucket.", smallKey) folderName := questioner.Ask("Enter a folder name: ", demotools.NotEmpty{}) err = bucketBasics.CopyToFolder(ctx, bucketName, smallKey, folderName) if err != nil { panic(err) } log.Printf("Copied %v to %v/%v.\n", smallKey, folderName, smallKey) log.Println(strings.Repeat("-", 88)) log.Println("Let's list the objects in your bucket.") questioner.Ask("Press Enter when you're ready.") objects, err := bucketBasics.ListObjects(ctx, bucketName) if err != nil { panic(err) } log.Printf("Found %v objects.\n", len(objects)) var objKeys []string for _, object := range objects { objKeys = append(objKeys, *object.Key) log.Printf("\t%v\n", *object.Key) } log.Println(strings.Repeat("-", 88)) if questioner.AskBool("Do you want to delete your bucket and all of its "+ "contents? (y/n)", "y") { log.Println("Deleting objects.") err = bucketBasics.DeleteObjects(ctx, bucketName, objKeys) if err != nil { panic(err) } log.Println("Deleting bucket.") err = bucketBasics.DeleteBucket(ctx, bucketName) if err != nil { panic(err) } log.Printf("Deleting downloaded file %v.\n", downloadFileName) err = os.Remove(downloadFileName) if err != nil { panic(err) } } else { log.Println("Okay. Don't forget to delete objects from your bucket to avoid charges.") } log.Println(strings.Repeat("-", 88)) log.Println("Thanks for watching!") log.Println(strings.Repeat("-", 88)) }
Java
SDK for Java 2.x
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

A scenario example.

import java.io.IOException;
import java.util.Scanner;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;
import software.amazon.awssdk.services.s3.model.S3Exception;

/**
 * Before running this Java V2 code example, set up your development
 * environment, including your credentials.
 *
 * For more information, see the following documentation topic:
 *
 * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html
 *
 * This Java code example performs the following tasks:
 *
 * 1. Creates an Amazon S3 bucket.
 * 2. Uploads an object to the bucket.
 * 3. Downloads the object to another local file.
 * 4. Uploads an object using multipart upload.
 * 5. Lists all objects located in the Amazon S3 bucket.
 * 6. Copies the object to another Amazon S3 bucket.
 * 7. Copies the object to another Amazon S3 bucket using multi copy.
 * 8. Deletes the object from the Amazon S3 bucket.
 * 9. Deletes the Amazon S3 bucket.
 */
public class S3Scenario {
    public static Scanner scanner = new Scanner(System.in);
    static S3Actions s3Actions = new S3Actions();
    public static final String DASHES = new String(new char[80]).replace("\0", "-");
    private static final Logger logger = LoggerFactory.getLogger(S3Scenario.class);

    public static void main(String[] args) throws IOException {
        final String usage = """
            Usage:
                <bucketName> <key> <objectPath> <savePath> <toBucket>

            Where:
                bucketName - The name of the S3 bucket.
                key - The unique identifier for the object stored in the S3 bucket.
                objectPath - The full file path of the object within the S3 bucket (e.g., "documents/reports/annual_report.pdf").
                savePath - The local file path where the object will be downloaded and saved (e.g., "C:/Users/username/Downloads/annual_report.pdf").
                toBucket - The name of the S3 bucket to which the object will be copied.
            """;

        if (args.length != 5) {
            logger.info(usage);
            return;
        }

        String bucketName = args[0];
        String key = args[1];
        String objectPath = args[2];
        String savePath = args[3];
        String toBucket = args[4];

        logger.info(DASHES);
        logger.info("Welcome to the Amazon Simple Storage Service (S3) example scenario.");
        logger.info("""
            Amazon S3 is a highly scalable and durable object storage
            service provided by Amazon Web Services (AWS). It is designed to store and retrieve
            any amount of data, from anywhere on the web, at any time.

            The `S3AsyncClient` interface in the AWS SDK for Java 2.x provides a set of methods to
            programmatically interact with the Amazon S3 (Simple Storage Service) service. This allows
            developers to automate the management and manipulation of S3 buckets and objects as part
            of their application deployment pipelines. With S3, teams can focus on building and deploying
            their applications without having to worry about the underlying storage infrastructure required
            to host and manage large amounts of data.

            This scenario walks you through how to perform key operations for this service.
            Let's get started...
            """);
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        try {
            // Run the methods that belong to this scenario.
            runScenario(bucketName, key, objectPath, savePath, toBucket);
        } catch (Throwable rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
        }
    }

    private static void runScenario(String bucketName, String key, String objectPath, String savePath, String toBucket) throws Throwable {
        logger.info(DASHES);
        logger.info("1. Create an Amazon S3 bucket.");
        try {
            CompletableFuture<Void> future = s3Actions.createBucketAsync(bucketName);
            future.join();
            waitForInputToContinue(scanner);
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("2. Upload a local file to the Amazon S3 bucket.");
        waitForInputToContinue(scanner);
        try {
            CompletableFuture<PutObjectResponse> future = s3Actions.uploadLocalFileAsync(bucketName, key, objectPath);
            future.join();
            logger.info("File uploaded successfully to {}/{}", bucketName, key);
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("3. Download the object to another local file.");
        waitForInputToContinue(scanner);
        try {
            CompletableFuture<Void> future = s3Actions.getObjectBytesAsync(bucketName, key, savePath);
            future.join();
            logger.info("Successfully obtained bytes from S3 object and wrote to file {}", savePath);
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("4. Perform a multipart upload.");
        waitForInputToContinue(scanner);
        String multipartKey = "multiPartKey";
        try {
            // Call the multipartUpload method.
            CompletableFuture<Void> future = s3Actions.multipartUpload(bucketName, multipartKey);
            future.join();
            logger.info("Multipart upload completed successfully for bucket '{}' and key '{}'", bucketName, multipartKey);
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("5. List all objects located in the Amazon S3 bucket.");
        waitForInputToContinue(scanner);
        try {
            CompletableFuture<Void> future = s3Actions.listAllObjectsAsync(bucketName);
            future.join();
            logger.info("Object listing completed successfully.");
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("6. Copy the object to another Amazon S3 bucket.");
        waitForInputToContinue(scanner);
        try {
            CompletableFuture<String> future = s3Actions.copyBucketObjectAsync(bucketName, key, toBucket);
            String result = future.join();
            logger.info("Copy operation result: {}", result);
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("7. Copy the object to another Amazon S3 bucket using multi copy.");
        waitForInputToContinue(scanner);
        try {
            CompletableFuture<String> future = s3Actions.performMultiCopy(toBucket, bucketName, key);
            String result = future.join();
            logger.info("Copy operation result: {}", result);
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("8. Delete objects from the Amazon S3 bucket.");
        waitForInputToContinue(scanner);
        try {
            CompletableFuture<Void> future = s3Actions.deleteObjectFromBucketAsync(bucketName, key);
            future.join();
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }

        try {
            CompletableFuture<Void> future = s3Actions.deleteObjectFromBucketAsync(bucketName, multipartKey);
            future.join();
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("9. Delete the Amazon S3 bucket.");
        waitForInputToContinue(scanner);
        try {
            CompletableFuture<Void> future = s3Actions.deleteBucketAsync(bucketName);
            future.join();
        } catch (RuntimeException rt) {
            Throwable cause = rt.getCause();
            if (cause instanceof S3Exception s3Ex) {
                logger.info("S3 error occurred: Error message: {}, Error code {}",
                        s3Ex.getMessage(), s3Ex.awsErrorDetails().errorCode());
            } else {
                logger.info("An unexpected error occurred: " + rt.getMessage());
            }
            throw cause;
        }
        waitForInputToContinue(scanner);
        logger.info(DASHES);

        logger.info(DASHES);
        logger.info("You successfully completed the Amazon S3 scenario.");
        logger.info(DASHES);
    }

    private static void waitForInputToContinue(Scanner scanner) {
        while (true) {
            logger.info("");
            logger.info("Enter 'c' followed by <ENTER> to continue:");
            String input = scanner.nextLine();
            if (input.trim().equalsIgnoreCase("c")) {
                logger.info("Continuing with the program...");
                logger.info("");
                break;
            } else {
                // Handle invalid input.
                logger.info("Invalid input. Please try again.");
            }
        }
    }
}

A wrapper class that contains the operations.

public class S3Actions { private static final Logger logger = LoggerFactory.getLogger(S3Actions.class); private static S3AsyncClient s3AsyncClient; public static S3AsyncClient getAsyncClient() { if (s3AsyncClient == null) { /* The `NettyNioAsyncHttpClient` class is part of the AWS SDK for Java, version 2, and it is designed to provide a high-performance, asynchronous HTTP client for interacting with AWS services. It uses the Netty framework to handle the underlying network communication and the Java NIO API to provide a non-blocking, event-driven approach to HTTP requests and responses. */ SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder() .maxConcurrency(50) // Adjust as needed. .connectionTimeout(Duration.ofSeconds(60)) // Set the connection timeout. .readTimeout(Duration.ofSeconds(60)) // Set the read timeout. .writeTimeout(Duration.ofSeconds(60)) // Set the write timeout. .build(); ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder() .apiCallTimeout(Duration.ofMinutes(2)) // Set the overall API call timeout. .apiCallAttemptTimeout(Duration.ofSeconds(90)) // Set the individual call attempt timeout. .retryStrategy(RetryMode.STANDARD) .build(); s3AsyncClient = S3AsyncClient.builder() .region(Region.US_EAST_1) .httpClient(httpClient) .overrideConfiguration(overrideConfig) .build(); } return s3AsyncClient; } /** * Creates an S3 bucket asynchronously. * * @param bucketName the name of the S3 bucket to create * @return a {@link CompletableFuture} that completes when the bucket is created and ready * @throws RuntimeException if there is a failure while creating the bucket */ public CompletableFuture<Void> createBucketAsync(String bucketName) { CreateBucketRequest bucketRequest = CreateBucketRequest.builder() .bucket(bucketName) .build(); CompletableFuture<CreateBucketResponse> response = getAsyncClient().createBucket(bucketRequest); return response.thenCompose(resp -> { S3AsyncWaiter s3Waiter = getAsyncClient().waiter(); HeadBucketRequest bucketRequestWait = HeadBucketRequest.builder() .bucket(bucketName) .build(); CompletableFuture<WaiterResponse<HeadBucketResponse>> waiterResponseFuture = s3Waiter.waitUntilBucketExists(bucketRequestWait); return waiterResponseFuture.thenAccept(waiterResponse -> { waiterResponse.matched().response().ifPresent(headBucketResponse -> { logger.info(bucketName + " is ready"); }); }); }).whenComplete((resp, ex) -> { if (ex != null) { throw new RuntimeException("Failed to create bucket", ex); } }); } /** * Uploads a local file to an AWS S3 bucket asynchronously. * * @param bucketName the name of the S3 bucket to upload the file to * @param key the key (object name) to use for the uploaded file * @param objectPath the local file path of the file to be uploaded * @return a {@link CompletableFuture} that completes with the {@link PutObjectResponse} when the upload is successful, or throws a {@link RuntimeException} if the upload fails */ public CompletableFuture<PutObjectResponse> uploadLocalFileAsync(String bucketName, String key, String objectPath) { PutObjectRequest objectRequest = PutObjectRequest.builder() .bucket(bucketName) .key(key) .build(); CompletableFuture<PutObjectResponse> response = getAsyncClient().putObject(objectRequest, AsyncRequestBody.fromFile(Paths.get(objectPath))); return response.whenComplete((resp, ex) -> { if (ex != null) { throw new RuntimeException("Failed to upload file", ex); } }); } /** * Asynchronously retrieves the bytes of an object from an Amazon S3 bucket and writes them to a local file. 
* * @param bucketName the name of the S3 bucket containing the object * @param keyName the key (or name) of the S3 object to retrieve * @param path the local file path where the object's bytes will be written * @return a {@link CompletableFuture} that completes when the object bytes have been written to the local file */ public CompletableFuture<Void> getObjectBytesAsync(String bucketName, String keyName, String path) { GetObjectRequest objectRequest = GetObjectRequest.builder() .key(keyName) .bucket(bucketName) .build(); CompletableFuture<ResponseBytes<GetObjectResponse>> response = getAsyncClient().getObject(objectRequest, AsyncResponseTransformer.toBytes()); return response.thenAccept(objectBytes -> { try { byte[] data = objectBytes.asByteArray(); Path filePath = Paths.get(path); Files.write(filePath, data); logger.info("Successfully obtained bytes from an S3 object"); } catch (IOException ex) { throw new RuntimeException("Failed to write data to file", ex); } }).whenComplete((resp, ex) -> { if (ex != null) { throw new RuntimeException("Failed to get object bytes from S3", ex); } }); } /** * Asynchronously lists all objects in the specified S3 bucket. * * @param bucketName the name of the S3 bucket to list objects for * @return a {@link CompletableFuture} that completes when all objects have been listed */ public CompletableFuture<Void> listAllObjectsAsync(String bucketName) { ListObjectsV2Request initialRequest = ListObjectsV2Request.builder() .bucket(bucketName) .maxKeys(1) .build(); ListObjectsV2Publisher paginator = getAsyncClient().listObjectsV2Paginator(initialRequest); return paginator.subscribe(response -> { response.contents().forEach(s3Object -> { logger.info("Object key: " + s3Object.key()); }); }).thenRun(() -> { logger.info("Successfully listed all objects in the bucket: " + bucketName); }).exceptionally(ex -> { throw new RuntimeException("Failed to list objects", ex); }); } /** * Asynchronously copies an object from one S3 bucket to another. * * @param fromBucket the name of the source S3 bucket * @param objectKey the key (name) of the object to be copied * @param toBucket the name of the destination S3 bucket * @return a {@link CompletableFuture} that completes with the copy result as a {@link String} * @throws RuntimeException if the URL could not be encoded or an S3 exception occurred during the copy */ public CompletableFuture<String> copyBucketObjectAsync(String fromBucket, String objectKey, String toBucket) { CopyObjectRequest copyReq = CopyObjectRequest.builder() .sourceBucket(fromBucket) .sourceKey(objectKey) .destinationBucket(toBucket) .destinationKey(objectKey) .build(); CompletableFuture<CopyObjectResponse> response = getAsyncClient().copyObject(copyReq); response.whenComplete((copyRes, ex) -> { if (copyRes != null) { logger.info("The " + objectKey + " was copied to " + toBucket); } else { throw new RuntimeException("An S3 exception occurred during copy", ex); } }); return response.thenApply(CopyObjectResponse::copyObjectResult) .thenApply(Object::toString); } /** * Performs a multipart upload to an Amazon S3 bucket. 
* * @param bucketName the name of the S3 bucket to upload the file to * @param key the key (name) of the file to be uploaded * @return a {@link CompletableFuture} that completes when the multipart upload is successful */ public CompletableFuture<Void> multipartUpload(String bucketName, String key) { int mB = 1024 * 1024; CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder() .bucket(bucketName) .key(key) .build(); return getAsyncClient().createMultipartUpload(createMultipartUploadRequest) .thenCompose(createResponse -> { String uploadId = createResponse.uploadId(); System.out.println("Upload ID: " + uploadId); // Upload part 1. UploadPartRequest uploadPartRequest1 = UploadPartRequest.builder() .bucket(bucketName) .key(key) .uploadId(uploadId) .partNumber(1) .contentLength((long) (5 * mB)) // Specify the content length .build(); CompletableFuture<CompletedPart> part1Future = getAsyncClient().uploadPart(uploadPartRequest1, AsyncRequestBody.fromByteBuffer(getRandomByteBuffer(5 * mB))) .thenApply(uploadPartResponse -> CompletedPart.builder() .partNumber(1) .eTag(uploadPartResponse.eTag()) .build()); // Upload part 2. UploadPartRequest uploadPartRequest2 = UploadPartRequest.builder() .bucket(bucketName) .key(key) .uploadId(uploadId) .partNumber(2) .contentLength((long) (3 * mB)) .build(); CompletableFuture<CompletedPart> part2Future = getAsyncClient().uploadPart(uploadPartRequest2, AsyncRequestBody.fromByteBuffer(getRandomByteBuffer(3 * mB))) .thenApply(uploadPartResponse -> CompletedPart.builder() .partNumber(2) .eTag(uploadPartResponse.eTag()) .build()); // Combine the results of both parts. return CompletableFuture.allOf(part1Future, part2Future) .thenCompose(v -> { CompletedPart part1 = part1Future.join(); CompletedPart part2 = part2Future.join(); CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder() .parts(part1, part2) .build(); CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder() .bucket(bucketName) .key(key) .uploadId(uploadId) .multipartUpload(completedMultipartUpload) .build(); // Complete the multipart upload return getAsyncClient().completeMultipartUpload(completeMultipartUploadRequest); }); }) .thenAccept(response -> System.out.println("Multipart upload completed successfully")) .exceptionally(ex -> { System.err.println("Failed to complete multipart upload: " + ex.getMessage()); throw new RuntimeException(ex); }); } /** * Deletes an object from an S3 bucket asynchronously. * * @param bucketName the name of the S3 bucket * @param key the key (file name) of the object to be deleted * @return a {@link CompletableFuture} that completes when the object has been deleted */ public CompletableFuture<Void> deleteObjectFromBucketAsync(String bucketName, String key) { DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder() .bucket(bucketName) .key(key) .build(); CompletableFuture<DeleteObjectResponse> response = getAsyncClient().deleteObject(deleteObjectRequest); response.whenComplete((deleteRes, ex) -> { if (deleteRes != null) { logger.info(key + " was deleted"); } else { throw new RuntimeException("An S3 exception occurred during delete", ex); } }); return response.thenApply(r -> null); } /** * Deletes an S3 bucket asynchronously. 
* * @param bucket the name of the bucket to be deleted * @return a {@link CompletableFuture} that completes when the bucket deletion is successful, or throws a {@link RuntimeException} * if an error occurs during the deletion process */ public CompletableFuture<Void> deleteBucketAsync(String bucket) { DeleteBucketRequest deleteBucketRequest = DeleteBucketRequest.builder() .bucket(bucket) .build(); CompletableFuture<DeleteBucketResponse> response = getAsyncClient().deleteBucket(deleteBucketRequest); response.whenComplete((deleteRes, ex) -> { if (deleteRes != null) { logger.info(bucket + " was deleted."); } else { throw new RuntimeException("An S3 exception occurred during bucket deletion", ex); } }); return response.thenApply(r -> null); } public CompletableFuture<String> performMultiCopy(String toBucket, String bucketName, String key) { CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder() .bucket(toBucket) .key(key) .build(); getAsyncClient().createMultipartUpload(createMultipartUploadRequest) .thenApply(createMultipartUploadResponse -> { String uploadId = createMultipartUploadResponse.uploadId(); System.out.println("Upload ID: " + uploadId); UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder() .sourceBucket(bucketName) .destinationBucket(toBucket) .sourceKey(key) .destinationKey(key) .uploadId(uploadId) // Use the valid uploadId. .partNumber(1) // Ensure the part number is correct. .copySourceRange("bytes=0-1023") // Adjust range as needed .build(); return getAsyncClient().uploadPartCopy(uploadPartCopyRequest); }) .thenCompose(uploadPartCopyFuture -> uploadPartCopyFuture) .whenComplete((uploadPartCopyResponse, exception) -> { if (exception != null) { // Handle any exceptions. logger.error("Error during upload part copy: " + exception.getMessage()); } else { // Successfully completed the upload part copy. System.out.println("Upload Part Copy completed successfully. ETag: " + uploadPartCopyResponse.copyPartResult().eTag()); } }); return null; } private static ByteBuffer getRandomByteBuffer(int size) { ByteBuffer buffer = ByteBuffer.allocate(size); for (int i = 0; i < size; i++) { buffer.put((byte) (Math.random() * 256)); } buffer.flip(); return buffer; } }
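Note that performMultiCopy, as written, starts a multipart copy and copies a single part, but it never calls CompleteMultipartUpload and returns null instead of a result. The following is a minimal sketch of the missing completion step, written as a helper you could chain onto the UploadPartCopy response; the method name completeMultiCopy is illustrative, not part of the sample.

// Illustrative sketch (not part of the original sample): completes the
// multipart copy begun in performMultiCopy. Assumes the uploadId returned by
// CreateMultipartUpload and the ETag from the UploadPartCopy response.
private CompletableFuture<String> completeMultiCopy(String toBucket, String key,
                                                    String uploadId, String partETag) {
    CompletedPart copiedPart = CompletedPart.builder()
        .partNumber(1)
        .eTag(partETag)
        .build();

    CompleteMultipartUploadRequest completeRequest = CompleteMultipartUploadRequest.builder()
        .bucket(toBucket)
        .key(key)
        .uploadId(uploadId)
        .multipartUpload(CompletedMultipartUpload.builder().parts(copiedPart).build())
        .build();

    return getAsyncClient().completeMultipartUpload(completeRequest)
        .thenApply(CompleteMultipartUploadResponse::location);
}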
JavaScript
SDK for JavaScript (v3)
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

First, import all the necessary modules.

// Used to check if currently running file is this file. import { fileURLToPath } from "node:url"; import { readdirSync, readFileSync, writeFileSync } from "node:fs"; // Local helper utils. import { dirnameFromMetaUrl } from "@aws-doc-sdk-examples/lib/utils/util-fs.js"; import { Prompter } from "@aws-doc-sdk-examples/lib/prompter.js"; import { wrapText } from "@aws-doc-sdk-examples/lib/utils/util-string.js"; import { S3Client, CreateBucketCommand, PutObjectCommand, ListObjectsCommand, CopyObjectCommand, GetObjectCommand, DeleteObjectsCommand, DeleteBucketCommand, } from "@aws-sdk/client-s3";

The preceding imports reference some helper utilities. These utilities are local to the GitHub repository linked at the start of this section. For your reference, see the following implementations of those utilities.

export const dirnameFromMetaUrl = (metaUrl) => fileURLToPath(new URL(".", metaUrl)); import { select, input, confirm, checkbox, password } from "@inquirer/prompts"; export class Prompter { /** * @param {{ message: string, choices: { name: string, value: string }[]}} options */ select(options) { return select(options); } /** * @param {{ message: string }} options */ input(options) { return input(options); } /** * @param {{ message: string }} options */ password(options) { return password({ ...options, mask: true }); } /** * @param {string} prompt */ checkContinue = async (prompt = "") => { const prefix = prompt && `${prompt} `; const ok = await this.confirm({ message: `${prefix}Continue?`, }); if (!ok) throw new Error("Exiting..."); }; /** * @param {{ message: string }} options */ confirm(options) { return confirm(options); } /** * @param {{ message: string, choices: { name: string, value: string }[]}} options */ checkbox(options) { return checkbox(options); } } export const wrapText = (text, char = "=") => { const rule = char.repeat(80); return `${rule}\n ${text}\n${rule}\n`; };

Objects in Amazon S3 are stored in 'buckets'. Let's define a function for creating a new bucket.

export const createBucket = async () => { const bucketName = await prompter.input({ message: "Enter a bucket name. Bucket names must be globally unique:", }); const command = new CreateBucketCommand({ Bucket: bucketName }); await s3Client.send(command); console.log("Bucket created successfully.\n"); return bucketName; };

Buckets contain 'objects'. This function uploads the contents of a directory to your bucket as objects.

export const uploadFilesToBucket = async ({ bucketName, folderPath }) => { console.log(`Uploading files from ${folderPath}\n`); const keys = readdirSync(folderPath); const files = keys.map((key) => { const filePath = `${folderPath}/${key}`; const fileContent = readFileSync(filePath); return { Key: key, Body: fileContent, }; }); for (const file of files) { await s3Client.send( new PutObjectCommand({ Bucket: bucketName, Body: file.Body, Key: file.Key, }), ); console.log(`${file.Key} uploaded successfully.`); } };
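One caveat: readdirSync returns every entry in the folder, including any subdirectories, and readFileSync fails on those. If your folder can contain nested directories, the following sketch is a drop-in replacement for the keys line above, using Node's withFileTypes option.

const keys = readdirSync(folderPath, { withFileTypes: true })
  .filter((entry) => entry.isFile()) // Skip subdirectories and other non-file entries.
  .map((entry) => entry.name);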

After uploading objects, check to confirm that they were uploaded correctly. You can use ListObjects for that. You'll be using the 'Key' property here, but the response contains other useful properties as well; see the sketch after the following function.

export const listFilesInBucket = async ({ bucketName }) => { const command = new ListObjectsCommand({ Bucket: bucketName }); const { Contents } = await s3Client.send(command); const contentsList = Contents.map((c) => ` • ${c.Key}`).join("\n"); console.log("\nHere's a list of files in the bucket:"); console.log(`${contentsList}\n`); };
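For example, each entry in Contents also carries properties such as Size and LastModified. The following sketch prints them alongside the key; the function name listFilesWithDetails is illustrative, and s3Client is the same client the other functions use.

export const listFilesWithDetails = async ({ bucketName }) => {
  const { Contents } = await s3Client.send(
    new ListObjectsCommand({ Bucket: bucketName }),
  );
  for (const { Key, Size, LastModified } of Contents ?? []) {
    // Size is the object length in bytes; LastModified is a Date.
    console.log(` • ${Key} (${Size} bytes, modified ${LastModified.toISOString()})`);
  }
};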

Sometimes you might want to copy an object from one bucket to another. Use the CopyObject command for that.

export const copyFileFromBucket = async ({ destinationBucket }) => { const proceed = await prompter.confirm({ message: "Would you like to copy an object from another bucket?", }); if (!proceed) { return; } const copy = async () => { try { const sourceBucket = await prompter.input({ message: "Enter source bucket name:", }); const sourceKey = await prompter.input({ message: "Enter source key:", }); const destinationKey = await prompter.input({ message: "Enter destination key:", }); const command = new CopyObjectCommand({ Bucket: destinationBucket, CopySource: `${sourceBucket}/${sourceKey}`, Key: destinationKey, }); await s3Client.send(command); await copyFileFromBucket({ destinationBucket }); } catch (err) { console.error("Copy error."); console.error(err); const retryAnswer = await prompter.confirm({ message: "Try again?" }); if (retryAnswer) { await copy(); } } }; await copy(); };

There's no SDK method for getting multiple objects from a bucket. Instead, you'll create a list of objects to download and iterate over them.

export const downloadFilesFromBucket = async ({ bucketName }) => { const { Contents } = await s3Client.send( new ListObjectsCommand({ Bucket: bucketName }), ); const path = await prompter.input({ message: "Enter destination path for files:", }); for (const content of Contents) { const obj = await s3Client.send( new GetObjectCommand({ Bucket: bucketName, Key: content.Key }), ); writeFileSync( `${path}/${content.Key}`, await obj.Body.transformToByteArray(), ); } console.log("Files downloaded successfully.\n"); };
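Also note that this download buffers each object fully in memory through transformToByteArray. For large objects you might stream the body straight to disk instead; in Node.js the response Body is a Readable stream. A sketch of that approach (the function name downloadToFile is illustrative):

import { createWriteStream } from "node:fs";
import { pipeline } from "node:stream/promises";

export const downloadToFile = async ({ bucketName, key, path }) => {
  const { Body } = await s3Client.send(
    new GetObjectCommand({ Bucket: bucketName, Key: key }),
  );
  // Stream the object body to disk without buffering it in memory.
  await pipeline(Body, createWriteStream(`${path}/${key}`));
};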

It's time to clean up your resources. A bucket must be empty before it can be deleted. These two functions empty and delete the bucket.

export const emptyBucket = async ({ bucketName }) => { const listObjectsCommand = new ListObjectsCommand({ Bucket: bucketName }); const { Contents } = await s3Client.send(listObjectsCommand); const keys = Contents.map((c) => c.Key); const deleteObjectsCommand = new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: keys.map((key) => ({ Key: key })) }, }); await s3Client.send(deleteObjectsCommand); console.log(`${bucketName} emptied successfully.\n`); }; export const deleteBucket = async ({ bucketName }) => { const command = new DeleteBucketCommand({ Bucket: bucketName }); await s3Client.send(command); console.log(`${bucketName} deleted successfully.\n`); };
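Keep in mind that ListObjects returns at most 1,000 keys per call, and Contents is undefined when the bucket is already empty, so emptyBucket as written suits small demo buckets. The following sketch drains larger buckets with the SDK's paginateListObjectsV2 helper; the function name emptyLargeBucket is illustrative.

import { paginateListObjectsV2 } from "@aws-sdk/client-s3";

export const emptyLargeBucket = async ({ bucketName }) => {
  const pages = paginateListObjectsV2({ client: s3Client }, { Bucket: bucketName });
  for await (const page of pages) {
    const objects = (page.Contents ?? []).map(({ Key }) => ({ Key }));
    if (objects.length === 0) continue;
    // DeleteObjects accepts up to 1,000 keys, which matches the page size.
    await s3Client.send(
      new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects } }),
    );
  }
};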

The 'main' function pulls everything together. If you run this file directly, the main function is called.

const main = async () => { const OBJECT_DIRECTORY = `${dirnameFromMetaUrl( import.meta.url, )}../../../../resources/sample_files/.sample_media`; try { console.log(wrapText("Welcome to the Amazon S3 getting started example.")); console.log("Let's create a bucket."); const bucketName = await createBucket(); await prompter.confirm({ message: continueMessage }); console.log(wrapText("File upload.")); console.log( "I have some default files ready to go. You can edit the source code to provide your own.", ); await uploadFilesToBucket({ bucketName, folderPath: OBJECT_DIRECTORY, }); await listFilesInBucket({ bucketName }); await prompter.confirm({ message: continueMessage }); console.log(wrapText("Copy files.")); await copyFileFromBucket({ destinationBucket: bucketName }); await listFilesInBucket({ bucketName }); await prompter.confirm({ message: continueMessage }); console.log(wrapText("Download files.")); await downloadFilesFromBucket({ bucketName }); console.log(wrapText("Clean up.")); await emptyBucket({ bucketName }); await deleteBucket({ bucketName }); } catch (err) { console.error(err); } };
Kotlin
SDK for Kotlin
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

suspend fun main(args: Array<String>) {
    val usage = """
        Usage:
            <bucketName> <key> <objectPath> <savePath> <toBucket>

        Where:
            bucketName - The Amazon S3 bucket to create.
            key - The key to use.
            objectPath - The path where the file is located (for example, C:/AWS/book2.pdf).
            savePath - The path where the file is saved after it's downloaded (for example, C:/AWS/book2.pdf).
            toBucket - The Amazon S3 bucket where the object is copied to (for example, amzn-s3-demo-destination-bucket).
    """

    if (args.size != 5) {
        println(usage)
        exitProcess(1)
    }

    val bucketName = args[0]
    val key = args[1]
    val objectPath = args[2]
    val savePath = args[3]
    val toBucket = args[4]

    // Create an Amazon S3 bucket.
    createBucket(bucketName)

    // Upload a local file to the Amazon S3 bucket.
    putObject(bucketName, key, objectPath)

    // Download the object to another local file.
    getObjectFromMrap(bucketName, key, savePath)

    // List all objects located in the Amazon S3 bucket.
    listBucketObs(bucketName)

    // Copy the object to another Amazon S3 bucket.
    copyBucketOb(bucketName, key, toBucket)

    // Delete the object from the Amazon S3 bucket.
    deleteBucketObs(bucketName, key)

    // Delete the Amazon S3 bucket.
    deleteBucket(bucketName)
    println("All Amazon S3 operations were successfully performed")
}

suspend fun createBucket(bucketName: String) {
    val request = CreateBucketRequest {
        bucket = bucketName
    }
    S3Client { region = "us-east-1" }.use { s3 ->
        s3.createBucket(request)
        println("$bucketName is ready")
    }
}

suspend fun putObject(
    bucketName: String,
    objectKey: String,
    objectPath: String,
) {
    val metadataVal = mutableMapOf<String, String>()
    metadataVal["myVal"] = "test"

    val request = PutObjectRequest {
        bucket = bucketName
        key = objectKey
        metadata = metadataVal
        this.body = Paths.get(objectPath).asByteStream()
    }
    S3Client { region = "us-east-1" }.use { s3 ->
        val response = s3.putObject(request)
        println("Tag information is ${response.eTag}")
    }
}

suspend fun getObjectFromMrap(
    bucketName: String,
    keyName: String,
    path: String,
) {
    val request = GetObjectRequest {
        key = keyName
        bucket = bucketName
    }
    S3Client { region = "us-east-1" }.use { s3 ->
        s3.getObject(request) { resp ->
            val myFile = File(path)
            resp.body?.writeToFile(myFile)
            println("Successfully read $keyName from $bucketName")
        }
    }
}

suspend fun listBucketObs(bucketName: String) {
    val request = ListObjectsRequest {
        bucket = bucketName
    }
    S3Client { region = "us-east-1" }.use { s3 ->
        val response = s3.listObjects(request)
        response.contents?.forEach { myObject ->
            println("The name of the key is ${myObject.key}")
            println("The owner is ${myObject.owner}")
        }
    }
}

suspend fun copyBucketOb(
    fromBucket: String,
    objectKey: String,
    toBucket: String,
) {
    var encodedUrl = ""
    try {
        encodedUrl = URLEncoder.encode("$fromBucket/$objectKey", StandardCharsets.UTF_8.toString())
    } catch (e: UnsupportedEncodingException) {
        println("URL could not be encoded: " + e.message)
    }

    val request = CopyObjectRequest {
        copySource = encodedUrl
        bucket = toBucket
        key = objectKey
    }
    S3Client { region = "us-east-1" }.use { s3 ->
        s3.copyObject(request)
    }
}

suspend fun deleteBucketObs(
    bucketName: String,
    objectName: String,
) {
    val objectId = ObjectIdentifier {
        key = objectName
    }
    val delOb = Delete {
        objects = listOf(objectId)
    }
    val request = DeleteObjectsRequest {
        bucket = bucketName
        delete = delOb
    }
    S3Client { region = "us-east-1" }.use { s3 ->
        s3.deleteObjects(request)
        println("$objectName was deleted from $bucketName")
    }
}

suspend fun deleteBucket(bucketName: String?) {
    val request = DeleteBucketRequest {
        bucket = bucketName
    }
    S3Client { region = "us-east-1" }.use { s3 ->
        s3.deleteBucket(request)
        println("The $bucketName bucket was successfully deleted!")
    }
}
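Each function above creates and closes its own S3Client through use, which keeps the example self-contained but reconnects on every call. If you'd rather share one client across several operations, the following is a minimal sketch of that pattern, assuming the same us-east-1 region; the function name runWithSharedClient is illustrative.

// Illustrative sketch: reuse one S3Client for several operations.
// The client is closeable, so `use` still releases its resources at the end.
suspend fun runWithSharedClient(bucketName: String) {
    S3Client { region = "us-east-1" }.use { s3 ->
        s3.createBucket(CreateBucketRequest { bucket = bucketName })
        s3.listObjects(ListObjectsRequest { bucket = bucketName })
        s3.deleteBucket(DeleteBucketRequest { bucket = bucketName })
    }
}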
PHP
SDK for PHP
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

echo("\n"); echo("--------------------------------------\n"); print("Welcome to the Amazon S3 getting started demo using PHP!\n"); echo("--------------------------------------\n"); $region = 'us-west-2'; $this->s3client = new S3Client([ 'region' => $region, ]); /* Inline declaration example $s3client = new Aws\S3\S3Client(['region' => 'us-west-2']); */ $this->bucketName = "amzn-s3-demo-bucket-" . uniqid(); try { $this->s3client->createBucket([ 'Bucket' => $this->bucketName, 'CreateBucketConfiguration' => ['LocationConstraint' => $region], ]); echo "Created bucket named: $this->bucketName \n"; } catch (Exception $exception) { echo "Failed to create bucket $this->bucketName with error: " . $exception->getMessage(); exit("Please fix error with bucket creation before continuing."); } $fileName = __DIR__ . "/local-file-" . uniqid(); try { $this->s3client->putObject([ 'Bucket' => $this->bucketName, 'Key' => $fileName, 'SourceFile' => __DIR__ . '/testfile.txt' ]); echo "Uploaded $fileName to $this->bucketName.\n"; } catch (Exception $exception) { echo "Failed to upload $fileName with error: " . $exception->getMessage(); exit("Please fix error with file upload before continuing."); } try { $file = $this->s3client->getObject([ 'Bucket' => $this->bucketName, 'Key' => $fileName, ]); $body = $file->get('Body'); $body->rewind(); echo "Downloaded the file and it begins with: {$body->read(26)}.\n"; } catch (Exception $exception) { echo "Failed to download $fileName from $this->bucketName with error: " . $exception->getMessage(); exit("Please fix error with file downloading before continuing."); } try { $folder = "copied-folder"; $this->s3client->copyObject([ 'Bucket' => $this->bucketName, 'CopySource' => "$this->bucketName/$fileName", 'Key' => "$folder/$fileName-copy", ]); echo "Copied $fileName to $folder/$fileName-copy.\n"; } catch (Exception $exception) { echo "Failed to copy $fileName with error: " . $exception->getMessage(); exit("Please fix error with object copying before continuing."); } try { $contents = $this->s3client->listObjectsV2([ 'Bucket' => $this->bucketName, ]); echo "The contents of your bucket are: \n"; foreach ($contents['Contents'] as $content) { echo $content['Key'] . "\n"; } } catch (Exception $exception) { echo "Failed to list objects in $this->bucketName with error: " . $exception->getMessage(); exit("Please fix error with listing objects before continuing."); } try { $objects = []; foreach ($contents['Contents'] as $content) { $objects[] = [ 'Key' => $content['Key'], ]; } $this->s3client->deleteObjects([ 'Bucket' => $this->bucketName, 'Delete' => [ 'Objects' => $objects, ], ]); $check = $this->s3client->listObjectsV2([ 'Bucket' => $this->bucketName, ]); if (count($check) <= 0) { throw new Exception("Bucket wasn't empty."); } echo "Deleted all objects and folders from $this->bucketName.\n"; } catch (Exception $exception) { echo "Failed to delete $fileName from $this->bucketName with error: " . $exception->getMessage(); exit("Please fix error with object deletion before continuing."); } try { $this->s3client->deleteBucket([ 'Bucket' => $this->bucketName, ]); echo "Deleted bucket $this->bucketName.\n"; } catch (Exception $exception) { echo "Failed to delete $this->bucketName with error: " . $exception->getMessage(); exit("Please fix error with bucket deletion before continuing."); } echo "Successfully ran the Amazon S3 with PHP demo.\n";
Python
SDK for Python (Boto3)
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

import io import os import uuid import boto3 from boto3.s3.transfer import S3UploadFailedError from botocore.exceptions import ClientError def do_scenario(s3_resource): print("-" * 88) print("Welcome to the Amazon S3 getting started demo!") print("-" * 88) bucket_name = f"amzn-s3-demo-bucket-{uuid.uuid4()}" bucket = s3_resource.Bucket(bucket_name) try: bucket.create( CreateBucketConfiguration={ "LocationConstraint": s3_resource.meta.client.meta.region_name } ) print(f"Created demo bucket named {bucket.name}.") except ClientError as err: print(f"Tried and failed to create demo bucket {bucket_name}.") print(f"\t{err.response['Error']['Code']}:{err.response['Error']['Message']}") print(f"\nCan't continue the demo without a bucket!") return file_name = None while file_name is None: file_name = input("\nEnter a file you want to upload to your bucket: ") if not os.path.exists(file_name): print(f"Couldn't find file {file_name}. Are you sure it exists?") file_name = None obj = bucket.Object(os.path.basename(file_name)) try: obj.upload_file(file_name) print( f"Uploaded file {file_name} into bucket {bucket.name} with key {obj.key}." ) except S3UploadFailedError as err: print(f"Couldn't upload file {file_name} to {bucket.name}.") print(f"\t{err}") answer = input(f"\nDo you want to download {obj.key} into memory (y/n)? ") if answer.lower() == "y": data = io.BytesIO() try: obj.download_fileobj(data) data.seek(0) print(f"Got your object. Here are the first 20 bytes:\n") print(f"\t{data.read(20)}") except ClientError as err: print(f"Couldn't download {obj.key}.") print( f"\t{err.response['Error']['Code']}:{err.response['Error']['Message']}" ) answer = input( f"\nDo you want to copy {obj.key} to a subfolder in your bucket (y/n)? " ) if answer.lower() == "y": dest_obj = bucket.Object(f"demo-folder/{obj.key}") try: dest_obj.copy({"Bucket": bucket.name, "Key": obj.key}) print(f"Copied {obj.key} to {dest_obj.key}.") except ClientError as err: print(f"Couldn't copy {obj.key} to {dest_obj.key}.") print( f"\t{err.response['Error']['Code']}:{err.response['Error']['Message']}" ) print("\nYour bucket contains the following objects:") try: for o in bucket.objects.all(): print(f"\t{o.key}") except ClientError as err: print(f"Couldn't list the objects in bucket {bucket.name}.") print(f"\t{err.response['Error']['Code']}:{err.response['Error']['Message']}") answer = input( "\nDo you want to delete all of the objects as well as the bucket (y/n)? " ) if answer.lower() == "y": try: bucket.objects.delete() bucket.delete() print(f"Emptied and deleted bucket {bucket.name}.\n") except ClientError as err: print(f"Couldn't empty and delete bucket {bucket.name}.") print( f"\t{err.response['Error']['Code']}:{err.response['Error']['Message']}" ) print("Thanks for watching!") print("-" * 88) if __name__ == "__main__": do_scenario(boto3.resource("s3"))
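One caveat worth knowing: CreateBucket rejects a LocationConstraint of us-east-1, so the bucket.create call above fails when the configured default Region is us-east-1. The following sketch guards against that case; the helper name create_demo_bucket is illustrative.

def create_demo_bucket(s3_resource, bucket_name):
    """Create a bucket, omitting LocationConstraint when the Region is us-east-1."""
    region = s3_resource.meta.client.meta.region_name
    if region == "us-east-1":
        # us-east-1 is the default and must not be passed as a LocationConstraint.
        return s3_resource.create_bucket(Bucket=bucket_name)
    return s3_resource.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={"LocationConstraint": region},
    )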
Ruby
SDK for Ruby
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

require 'aws-sdk-s3'

# Wraps the getting started scenario actions.
class ScenarioGettingStarted
  attr_reader :s3_resource

  # @param s3_resource [Aws::S3::Resource] An Amazon S3 resource.
  def initialize(s3_resource)
    @s3_resource = s3_resource
  end

  # Creates a bucket with a random name in the currently configured account and
  # AWS Region.
  #
  # @return [Aws::S3::Bucket] The newly created bucket.
  def create_bucket
    bucket = @s3_resource.create_bucket(
      bucket: "amzn-s3-demo-bucket-#{Random.uuid}",
      create_bucket_configuration: {
        location_constraint: 'us-east-1' # NOTE: only certain regions permitted
      }
    )
    puts("Created demo bucket named #{bucket.name}.")
  rescue Aws::Errors::ServiceError => e
    puts('Tried and failed to create demo bucket.')
    puts("\t#{e.code}: #{e.message}")
    puts("\nCan't continue the demo without a bucket!")
    raise
  else
    bucket
  end

  # Creates a local demo file to upload.
  def create_file
    File.open('demo.txt', 'w') { |f| f.write('This is a demo file.') }
  end

  # Uploads a file to an Amazon S3 bucket.
  #
  # @param bucket [Aws::S3::Bucket] The bucket object representing the upload destination.
  # @return [Aws::S3::Object] The Amazon S3 object that contains the uploaded file.
  def upload_file(bucket)
    File.open('demo.txt', 'w+') { |f| f.write('This is a demo file.') }
    s3_object = bucket.object(File.basename('demo.txt'))
    s3_object.upload_file('demo.txt')
    puts("Uploaded file demo.txt into bucket #{bucket.name} with key #{s3_object.key}.")
  rescue Aws::Errors::ServiceError => e
    puts("Couldn't upload file demo.txt to #{bucket.name}.")
    puts("\t#{e.code}: #{e.message}")
    raise
  else
    s3_object
  end

  # Downloads an Amazon S3 object to a file.
  #
  # @param s3_object [Aws::S3::Object] The object to download.
  def download_file(s3_object)
    puts("\nDo you want to download #{s3_object.key} to a local file (y/n)? ")
    answer = gets.chomp.downcase
    if answer == 'y'
      puts('Enter a name for the downloaded file: ')
      file_name = gets.chomp
      s3_object.download_file(file_name)
      puts("Object #{s3_object.key} successfully downloaded to #{file_name}.")
    end
  rescue Aws::Errors::ServiceError => e
    puts("Couldn't download #{s3_object.key}.")
    puts("\t#{e.code}: #{e.message}")
    raise
  end

  # Copies an Amazon S3 object to a subfolder within the same bucket.
  #
  # @param source_object [Aws::S3::Object] The source object to copy.
  # @return [Aws::S3::Object, nil] The destination object.
  def copy_object(source_object)
    dest_object = nil
    puts("\nDo you want to copy #{source_object.key} to a subfolder in your bucket (y/n)? ")
    answer = gets.chomp.downcase
    if answer == 'y'
      dest_object = source_object.bucket.object("demo-folder/#{source_object.key}")
      dest_object.copy_from(source_object)
      puts("Copied #{source_object.key} to #{dest_object.key}.")
    end
  rescue Aws::Errors::ServiceError => e
    puts("Couldn't copy #{source_object.key}.")
    puts("\t#{e.code}: #{e.message}")
    raise
  else
    dest_object
  end

  # Lists the objects in an Amazon S3 bucket.
  #
  # @param bucket [Aws::S3::Bucket] The bucket to query.
  def list_objects(bucket)
    puts("\nYour bucket contains the following objects:")
    bucket.objects.each do |obj|
      puts("\t#{obj.key}")
    end
  rescue Aws::Errors::ServiceError => e
    puts("Couldn't list the objects in bucket #{bucket.name}.")
    puts("\t#{e.code}: #{e.message}")
    raise
  end

  # Deletes the objects in an Amazon S3 bucket and deletes the bucket.
  #
  # @param bucket [Aws::S3::Bucket] The bucket to empty and delete.
  def delete_bucket(bucket)
    puts("\nDo you want to delete all of the objects as well as the bucket (y/n)? ")
    answer = gets.chomp.downcase
    if answer == 'y'
      bucket.objects.batch_delete!
      bucket.delete
      puts("Emptied and deleted bucket #{bucket.name}.\n")
    end
  rescue Aws::Errors::ServiceError => e
    puts("Couldn't empty and delete bucket #{bucket.name}.")
    puts("\t#{e.code}: #{e.message}")
    raise
  end
end

# Runs the Amazon S3 getting started scenario.
def run_scenario(scenario)
  puts('-' * 88)
  puts('Welcome to the Amazon S3 getting started demo!')
  puts('-' * 88)
  bucket = scenario.create_bucket
  s3_object = scenario.upload_file(bucket)
  scenario.download_file(s3_object)
  scenario.copy_object(s3_object)
  scenario.list_objects(bucket)
  scenario.delete_bucket(bucket)
  puts('Thanks for watching!')
  puts('-' * 88)
rescue Aws::Errors::ServiceError
  puts('Something went wrong with the demo!')
end

run_scenario(ScenarioGettingStarted.new(Aws::S3::Resource.new)) if $PROGRAM_NAME == __FILE__
Rust
SDK for Rust
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

Code for the binary crate, which runs the scenario.

#![allow(clippy::result_large_err)] //! Purpose //! Shows how to use the AWS SDK for Rust to get started using //! Amazon Simple Storage Service (Amazon S3). Create a bucket, move objects into and out of it, //! and delete all resources at the end of the demo. //! //! This example follows the steps in "Getting started with Amazon S3" in the Amazon S3 //! user guide. //! - https://docs.aws.amazon.com/AmazonS3/latest/userguide/GetStartedWithS3.html use aws_config::meta::region::RegionProviderChain; use aws_sdk_s3::{config::Region, Client}; use s3_code_examples::error::S3ExampleError; use uuid::Uuid; #[tokio::main] async fn main() -> Result<(), S3ExampleError> { let region_provider = RegionProviderChain::first_try(Region::new("us-west-2")); let region = region_provider.region().await.unwrap(); let shared_config = aws_config::from_env().region(region_provider).load().await; let client = Client::new(&shared_config); let bucket_name = format!("amzn-s3-demo-bucket-{}", Uuid::new_v4()); let file_name = "s3/testfile.txt".to_string(); let key = "test file key name".to_string(); let target_key = "target_key".to_string(); if let Err(e) = run_s3_operations(region, client, bucket_name, file_name, key, target_key).await { eprintln!("{:?}", e); }; Ok(()) } async fn run_s3_operations( region: Region, client: Client, bucket_name: String, file_name: String, key: String, target_key: String, ) -> Result<(), S3ExampleError> { s3_code_examples::create_bucket(&client, &bucket_name, &region).await?; let run_example: Result<(), S3ExampleError> = (async { s3_code_examples::upload_object(&client, &bucket_name, &file_name, &key).await?; let _object = s3_code_examples::download_object(&client, &bucket_name, &key).await; s3_code_examples::copy_object(&client, &bucket_name, &bucket_name, &key, &target_key) .await?; s3_code_examples::list_objects(&client, &bucket_name).await?; s3_code_examples::clear_bucket(&client, &bucket_name).await?; Ok(()) }) .await; if let Err(err) = run_example { eprintln!("Failed to complete getting-started example: {err:?}"); } s3_code_examples::delete_bucket(&client, &bucket_name).await?; Ok(()) }

Common actions used by the scenario.

pub async fn create_bucket( client: &aws_sdk_s3::Client, bucket_name: &str, region: &aws_config::Region, ) -> Result<Option<aws_sdk_s3::operation::create_bucket::CreateBucketOutput>, S3ExampleError> { let constraint = aws_sdk_s3::types::BucketLocationConstraint::from(region.to_string().as_str()); let cfg = aws_sdk_s3::types::CreateBucketConfiguration::builder() .location_constraint(constraint) .build(); let create = client .create_bucket() .create_bucket_configuration(cfg) .bucket(bucket_name) .send() .await; // BucketAlreadyExists and BucketAlreadyOwnedByYou are not problems for this task. create.map(Some).or_else(|err| { if err .as_service_error() .map(|se| se.is_bucket_already_exists() || se.is_bucket_already_owned_by_you()) == Some(true) { Ok(None) } else { Err(S3ExampleError::from(err)) } }) } pub async fn upload_object( client: &aws_sdk_s3::Client, bucket_name: &str, file_name: &str, key: &str, ) -> Result<aws_sdk_s3::operation::put_object::PutObjectOutput, S3ExampleError> { let body = aws_sdk_s3::primitives::ByteStream::from_path(std::path::Path::new(file_name)).await; client .put_object() .bucket(bucket_name) .key(key) .body(body.unwrap()) .send() .await .map_err(S3ExampleError::from) } pub async fn download_object( client: &aws_sdk_s3::Client, bucket_name: &str, key: &str, ) -> Result<aws_sdk_s3::operation::get_object::GetObjectOutput, S3ExampleError> { client .get_object() .bucket(bucket_name) .key(key) .send() .await .map_err(S3ExampleError::from) } /// Copy an object from one bucket to another. pub async fn copy_object( client: &aws_sdk_s3::Client, source_bucket: &str, destination_bucket: &str, source_object: &str, destination_object: &str, ) -> Result<(), S3ExampleError> { let source_key = format!("{source_bucket}/{source_object}"); let response = client .copy_object() .copy_source(&source_key) .bucket(destination_bucket) .key(destination_object) .send() .await?; println!( "Copied from {source_key} to {destination_bucket}/{destination_object} with etag {}", response .copy_object_result .unwrap_or_else(|| aws_sdk_s3::types::CopyObjectResult::builder().build()) .e_tag() .unwrap_or("missing") ); Ok(()) } pub async fn list_objects(client: &aws_sdk_s3::Client, bucket: &str) -> Result<(), S3ExampleError> { let mut response = client .list_objects_v2() .bucket(bucket.to_owned()) .max_keys(10) // In this example, go 10 at a time. .into_paginator() .send(); while let Some(result) = response.next().await { match result { Ok(output) => { for object in output.contents() { println!(" - {}", object.key().unwrap_or("Unknown")); } } Err(err) => { eprintln!("{err:?}") } } } Ok(()) } /// Given a bucket, remove all objects in the bucket, and then ensure no objects /// remain in the bucket. pub async fn clear_bucket( client: &aws_sdk_s3::Client, bucket_name: &str, ) -> Result<Vec<String>, S3ExampleError> { let objects = client.list_objects_v2().bucket(bucket_name).send().await?; // delete_objects no longer needs to be mutable. 
let objects_to_delete: Vec<String> = objects .contents() .iter() .filter_map(|obj| obj.key()) .map(String::from) .collect(); if objects_to_delete.is_empty() { return Ok(vec![]); } let return_keys = objects_to_delete.clone(); delete_objects(client, bucket_name, objects_to_delete).await?; let objects = client.list_objects_v2().bucket(bucket_name).send().await?; eprintln!("{objects:?}"); match objects.key_count { Some(0) => Ok(return_keys), _ => Err(S3ExampleError::new( "There were still objects left in the bucket.", )), } } pub async fn delete_bucket( client: &aws_sdk_s3::Client, bucket_name: &str, ) -> Result<(), S3ExampleError> { let resp = client.delete_bucket().bucket(bucket_name).send().await; match resp { Ok(_) => Ok(()), Err(err) => { if err .as_service_error() .and_then(aws_sdk_s3::error::ProvideErrorMetadata::code) == Some("NoSuchBucket") { Ok(()) } else { Err(S3ExampleError::from(err)) } } } }
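As with the JavaScript cleanup code, clear_bucket inspects only the first page of list_objects_v2 results, so buckets holding more than 1,000 objects wouldn't be fully drained. The following sketch collects every key with the same paginator used in list_objects, assuming S3ExampleError converts from the SDK error as it does elsewhere in this module; the helper name all_keys is illustrative.

/// Illustrative sketch: collect all keys in a bucket with the ListObjectsV2 paginator.
pub async fn all_keys(
    client: &aws_sdk_s3::Client,
    bucket_name: &str,
) -> Result<Vec<String>, S3ExampleError> {
    let mut keys: Vec<String> = Vec::new();
    let mut pages = client
        .list_objects_v2()
        .bucket(bucket_name)
        .into_paginator()
        .send();
    while let Some(page) = pages.next().await {
        // Each page holds up to 1,000 objects; `?` surfaces any SDK error.
        for object in page?.contents() {
            if let Some(key) = object.key() {
                keys.push(key.to_string());
            }
        }
    }
    Ok(keys)
}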
SAP ABAP
SDK for SAP ABAP
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

DATA(lo_session) = /aws1/cl_rt_session_aws=>create( cv_pfl ).
DATA(lo_s3) = /aws1/cl_s3_factory=>create( lo_session ).

" Create an Amazon Simple Storage Service (Amazon S3) bucket. "
TRY.
    lo_s3->createbucket( iv_bucket = iv_bucket_name ).
    MESSAGE 'S3 bucket created.' TYPE 'I'.
  CATCH /aws1/cx_s3_bucketalrdyexists.
    MESSAGE 'Bucket name already exists.' TYPE 'E'.
  CATCH /aws1/cx_s3_bktalrdyownedbyyou.
    MESSAGE 'Bucket already exists and is owned by you.' TYPE 'E'.
ENDTRY.

"Upload an object to an S3 bucket."
TRY.
    "Get contents of file from application server."
    DATA lv_file_content TYPE xstring.
    OPEN DATASET iv_key FOR INPUT IN BINARY MODE.
    READ DATASET iv_key INTO lv_file_content.
    CLOSE DATASET iv_key.

    lo_s3->putobject(
      iv_bucket = iv_bucket_name
      iv_key = iv_key
      iv_body = lv_file_content ).
    MESSAGE 'Object uploaded to S3 bucket.' TYPE 'I'.
  CATCH /aws1/cx_s3_nosuchbucket.
    MESSAGE 'Bucket does not exist.' TYPE 'E'.
ENDTRY.

" Get an object from a bucket. "
TRY.
    DATA(lo_result) = lo_s3->getobject(
      iv_bucket = iv_bucket_name
      iv_key = iv_key ).
    DATA(lv_object_data) = lo_result->get_body( ).
    MESSAGE 'Object retrieved from S3 bucket.' TYPE 'I'.
  CATCH /aws1/cx_s3_nosuchbucket.
    MESSAGE 'Bucket does not exist.' TYPE 'E'.
  CATCH /aws1/cx_s3_nosuchkey.
    MESSAGE 'Object key does not exist.' TYPE 'E'.
ENDTRY.

" Copy an object to a subfolder in a bucket. "
TRY.
    lo_s3->copyobject(
      iv_bucket = iv_bucket_name
      iv_key = |{ iv_copy_to_folder }/{ iv_key }|
      iv_copysource = |{ iv_bucket_name }/{ iv_key }| ).
    MESSAGE 'Object copied to a subfolder.' TYPE 'I'.
  CATCH /aws1/cx_s3_nosuchbucket.
    MESSAGE 'Bucket does not exist.' TYPE 'E'.
  CATCH /aws1/cx_s3_nosuchkey.
    MESSAGE 'Object key does not exist.' TYPE 'E'.
ENDTRY.

" List objects in the bucket. "
TRY.
    DATA(lo_list) = lo_s3->listobjects( iv_bucket = iv_bucket_name ).
    MESSAGE 'Retrieved list of objects in S3 bucket.' TYPE 'I'.
  CATCH /aws1/cx_s3_nosuchbucket.
    MESSAGE 'Bucket does not exist.' TYPE 'E'.
ENDTRY.
DATA text TYPE string VALUE 'Object List - '.
DATA lv_object_key TYPE /aws1/s3_objectkey.
LOOP AT lo_list->get_contents( ) INTO DATA(lo_object).
  lv_object_key = lo_object->get_key( ).
  " Append each key to the running list instead of overwriting it.
  CONCATENATE text lv_object_key ', ' INTO text.
ENDLOOP.
MESSAGE text TYPE 'I'.

" Delete the objects in a bucket. "
TRY.
    lo_s3->deleteobject(
      iv_bucket = iv_bucket_name
      iv_key = iv_key ).
    lo_s3->deleteobject(
      iv_bucket = iv_bucket_name
      iv_key = |{ iv_copy_to_folder }/{ iv_key }| ).
    MESSAGE 'Objects deleted from S3 bucket.' TYPE 'I'.
  CATCH /aws1/cx_s3_nosuchbucket.
    MESSAGE 'Bucket does not exist.' TYPE 'E'.
ENDTRY.

" Delete the bucket. "
TRY.
    lo_s3->deletebucket( iv_bucket = iv_bucket_name ).
    MESSAGE 'Deleted S3 bucket.' TYPE 'I'.
  CATCH /aws1/cx_s3_nosuchbucket.
    MESSAGE 'Bucket does not exist.' TYPE 'E'.
ENDTRY.
Swift
SDK for Swift
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

import AWSS3
import Foundation
import Smithy
import ClientRuntime

/// A class containing all the code that interacts with the AWS SDK for Swift.
public class ServiceHandler {
    let configuration: S3Client.S3ClientConfiguration
    let client: S3Client

    enum HandlerError: Error {
        case getObjectBody(String)
        case readGetObjectBody(String)
        case missingContents(String)
    }

    /// Initialize and return a new ``ServiceHandler`` object, which is used to drive the AWS calls
    /// used for the example.
    ///
    /// - Returns: A new ``ServiceHandler`` object, ready to be called to
    ///   execute AWS operations.
    public init() async throws {
        do {
            configuration = try await S3Client.S3ClientConfiguration()
            // configuration.region = "us-east-2" // Uncomment this to set the region programmatically.
            client = S3Client(config: configuration)
        } catch {
            print("ERROR: ", dump(error, name: "Initializing S3 client"))
            throw error
        }
    }

    /// Create a new bucket with the specified name.
    ///
    /// - Parameters:
    ///   - name: Name of the bucket to create.
    ///
    /// Throws an exception if an error occurs.
    public func createBucket(name: String) async throws {
        var input = CreateBucketInput(
            bucket: name
        )

        // For regions other than "us-east-1", you must set the locationConstraint in the createBucketConfiguration.
        // For more information, see LocationConstraint in the S3 API guide.
        // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#API_CreateBucket_RequestBody
        if let region = configuration.region {
            if region != "us-east-1" {
                input.createBucketConfiguration = S3ClientTypes.CreateBucketConfiguration(locationConstraint: S3ClientTypes.BucketLocationConstraint(rawValue: region))
            }
        }

        do {
            _ = try await client.createBucket(input: input)
        } catch let error as BucketAlreadyOwnedByYou {
            print("The bucket '\(name)' already exists and is owned by you. You may wish to ignore this exception.")
            throw error
        } catch {
            print("ERROR: ", dump(error, name: "Creating a bucket"))
            throw error
        }
    }

    /// Delete a bucket.
    /// - Parameter name: Name of the bucket to delete.
    public func deleteBucket(name: String) async throws {
        let input = DeleteBucketInput(
            bucket: name
        )
        do {
            _ = try await client.deleteBucket(input: input)
        } catch {
            print("ERROR: ", dump(error, name: "Deleting a bucket"))
            throw error
        }
    }

    /// Upload a file from local storage to the bucket.
    /// - Parameters:
    ///   - bucket: Name of the bucket to upload the file to.
    ///   - key: Name of the file to create.
    ///   - file: Path name of the file to upload.
    public func uploadFile(bucket: String, key: String, file: String) async throws {
        let fileUrl = URL(fileURLWithPath: file)
        do {
            let fileData = try Data(contentsOf: fileUrl)
            let dataStream = ByteStream.data(fileData)

            let input = PutObjectInput(
                body: dataStream,
                bucket: bucket,
                key: key
            )

            _ = try await client.putObject(input: input)
        } catch {
            print("ERROR: ", dump(error, name: "Putting an object."))
            throw error
        }
    }

    /// Create a file in the specified bucket with the given name. The new
    /// file's contents are uploaded from a `Data` object.
    ///
    /// - Parameters:
    ///   - bucket: Name of the bucket to create a file in.
    ///   - key: Name of the file to create.
    ///   - data: A `Data` object to write into the new file.
public func createFile(bucket: String, key: String, withData data: Data) async throws { let dataStream = ByteStream.data(data) let input = PutObjectInput( body: dataStream, bucket: bucket, key: key ) do { _ = try await client.putObject(input: input) } catch { print("ERROR: ", dump(error, name: "Putting an object.")) throw error } } /// Download the named file to the given directory on the local device. /// /// - Parameters: /// - bucket: Name of the bucket that contains the file to be copied. /// - key: The name of the file to copy from the bucket. /// - to: The path of the directory on the local device where you want to /// download the file. public func downloadFile(bucket: String, key: String, to: String) async throws { let fileUrl = URL(fileURLWithPath: to).appendingPathComponent(key) let input = GetObjectInput( bucket: bucket, key: key ) do { let output = try await client.getObject(input: input) guard let body = output.body else { throw HandlerError.getObjectBody("GetObjectInput missing body.") } guard let data = try await body.readData() else { throw HandlerError.readGetObjectBody("GetObjectInput unable to read data.") } try data.write(to: fileUrl) } catch { print("ERROR: ", dump(error, name: "Downloading a file.")) throw error } } /// Read the specified file from the given S3 bucket into a Swift /// `Data` object. /// /// - Parameters: /// - bucket: Name of the bucket containing the file to read. /// - key: Name of the file within the bucket to read. /// /// - Returns: A `Data` object containing the complete file data. public func readFile(bucket: String, key: String) async throws -> Data { let input = GetObjectInput( bucket: bucket, key: key ) do { let output = try await client.getObject(input: input) guard let body = output.body else { throw HandlerError.getObjectBody("GetObjectInput missing body.") } guard let data = try await body.readData() else { throw HandlerError.readGetObjectBody("GetObjectInput unable to read data.") } return data } catch { print("ERROR: ", dump(error, name: "Reading a file.")) throw error } } /// Copy a file from one bucket to another. /// /// - Parameters: /// - sourceBucket: Name of the bucket containing the source file. /// - name: Name of the source file. /// - destBucket: Name of the bucket to copy the file into. public func copyFile(from sourceBucket: String, name: String, to destBucket: String) async throws { let srcUrl = ("\(sourceBucket)/\(name)").addingPercentEncoding(withAllowedCharacters: .urlPathAllowed) let input = CopyObjectInput( bucket: destBucket, copySource: srcUrl, key: name ) do { _ = try await client.copyObject(input: input) } catch { print("ERROR: ", dump(error, name: "Copying an object.")) throw error } } /// Deletes the specified file from Amazon S3. /// /// - Parameters: /// - bucket: Name of the bucket containing the file to delete. /// - key: Name of the file to delete. /// public func deleteFile(bucket: String, key: String) async throws { let input = DeleteObjectInput( bucket: bucket, key: key ) do { _ = try await client.deleteObject(input: input) } catch { print("ERROR: ", dump(error, name: "Deleting a file.")) throw error } } /// Returns an array of strings, each naming one file in the /// specified bucket. /// /// - Parameter bucket: Name of the bucket to get a file listing for. /// - Returns: An array of `String` objects, each giving the name of /// one file contained in the bucket. 
public func listBucketFiles(bucket: String) async throws -> [String] { do { let input = ListObjectsV2Input( bucket: bucket ) // Use "Paginated" to get all the objects. // This lets the SDK handle the 'continuationToken' in "ListObjectsV2Output". let output = client.listObjectsV2Paginated(input: input) var names: [String] = [] for try await page in output { guard let objList = page.contents else { print("ERROR: listObjectsV2Paginated returned nil contents.") continue } for obj in objList { if let objName = obj.key { names.append(objName) } } } return names } catch { print("ERROR: ", dump(error, name: "Listing objects.")) throw error } } }
import AWSS3 import Foundation import ServiceHandler import ArgumentParser /// The command-line arguments and options available for this /// example command. struct ExampleCommand: ParsableCommand { @Argument(help: "Name of the S3 bucket to create") var bucketName: String @Argument(help: "Pathname of the file to upload to the S3 bucket") var uploadSource: String @Argument(help: "The name (key) to give the file in the S3 bucket") var objName: String @Argument(help: "S3 bucket to copy the object to") var destBucket: String @Argument(help: "Directory where you want to download the file from the S3 bucket") var downloadDir: String static var configuration = CommandConfiguration( commandName: "s3-basics", abstract: "Demonstrates a series of basic AWS S3 functions.", discussion: """ Performs the following Amazon S3 commands: * `CreateBucket` * `PutObject` * `GetObject` * `CopyObject` * `ListObjects` * `DeleteObjects` * `DeleteBucket` """ ) /// Called by ``main()`` to do the actual running of the AWS /// example. func runAsync() async throws { let serviceHandler = try await ServiceHandler() // 1. Create the bucket. print("Creating the bucket \(bucketName)...") try await serviceHandler.createBucket(name: bucketName) // 2. Upload a file to the bucket. print("Uploading the file \(uploadSource)...") try await serviceHandler.uploadFile(bucket: bucketName, key: objName, file: uploadSource) // 3. Download the file. print("Downloading the file \(objName) to \(downloadDir)...") try await serviceHandler.downloadFile(bucket: bucketName, key: objName, to: downloadDir) // 4. Copy the file to another bucket. print("Copying the file to the bucket \(destBucket)...") try await serviceHandler.copyFile(from: bucketName, name: objName, to: destBucket) // 5. List the contents of the bucket. print("Getting a list of the files in the bucket \(bucketName)") let fileList = try await serviceHandler.listBucketFiles(bucket: bucketName) let numFiles = fileList.count if numFiles != 0 { print("\(numFiles) file\((numFiles > 1) ? "s" : "") in bucket \(bucketName):") for name in fileList { print(" \(name)") } } else { print("No files found in bucket \(bucketName)") } // 6. Delete the objects from the bucket. print("Deleting the file \(objName) from the bucket \(bucketName)...") try await serviceHandler.deleteFile(bucket: bucketName, key: objName) print("Deleting the file \(objName) from the bucket \(destBucket)...") try await serviceHandler.deleteFile(bucket: destBucket, key: objName) // 7. Delete the bucket. print("Deleting the bucket \(bucketName)...") try await serviceHandler.deleteBucket(name: bucketName) print("Done.") } } // // Main program entry point. // @main struct Main { static func main() async { let args = Array(CommandLine.arguments.dropFirst()) do { let command = try ExampleCommand.parse(args) try await command.runAsync() } catch { ExampleCommand.exit(withError: error) } } }
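To run the example end to end, pass the five positional arguments defined above. A hypothetical invocation, assuming the executable target is named s3-basics and using placeholder bucket names and paths:

swift run s3-basics amzn-s3-demo-bucket ./testfile.txt testfile.txt amzn-s3-demo-bucket-dest ~/Downloads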