More AWS SDK examples are available in the AWS Doc SDK Examples repo.
Amazon Bedrock Runtime examples using the SDK for Go V2
The following code examples show you how to perform actions and implement common scenarios by using the AWS SDK for Go V2 with Amazon Bedrock Runtime.
Scenarios are code examples that show you how to accomplish a specific task by calling multiple functions within the same service, or combined with other AWS services.
Each example includes a link to the complete source code, where you can find instructions on how to set up and run the code in context.
Get started
The following code example shows how to get started using Amazon Bedrock.
- SDK for Go V2
Note
There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.
package main

import (
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
)

// Each model provider defines their own individual request and response formats.
// For the format, ranges, and default values for the different models, refer to:
// https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html

type ClaudeRequest struct {
	Prompt            string `json:"prompt"`
	MaxTokensToSample int    `json:"max_tokens_to_sample"`
	// Omitting optional request parameters
}

type ClaudeResponse struct {
	Completion string `json:"completion"`
}

// main uses the AWS SDK for Go (v2) to create an Amazon Bedrock Runtime client
// and invokes Anthropic Claude 2 inside your account and the chosen region.
// This example uses the default settings specified in your shared credentials
// and config files.
func main() {
	region := flag.String("region", "us-east-1", "The AWS region")
	flag.Parse()

	fmt.Printf("Using AWS region: %s\n", *region)

	ctx := context.Background()
	sdkConfig, err := config.LoadDefaultConfig(ctx, config.WithRegion(*region))
	if err != nil {
		fmt.Println("Couldn't load default configuration. Have you set up your AWS account?")
		fmt.Println(err)
		return
	}

	client := bedrockruntime.NewFromConfig(sdkConfig)

	modelId := "anthropic.claude-v2"

	prompt := "Hello, how are you today?"

	// Anthropic Claude requires you to enclose the prompt as follows:
	prefix := "Human: "
	postfix := "\n\nAssistant:"
	wrappedPrompt := prefix + prompt + postfix

	request := ClaudeRequest{
		Prompt:            wrappedPrompt,
		MaxTokensToSample: 200,
	}

	body, err := json.Marshal(request)
	if err != nil {
		log.Panicln("Couldn't marshal the request: ", err)
	}

	result, err := client.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{
		ModelId:     aws.String(modelId),
		ContentType: aws.String("application/json"),
		Body:        body,
	})
	if err != nil {
		errMsg := err.Error()
		if strings.Contains(errMsg, "no such host") {
			fmt.Printf("Error: The Bedrock service is not available in the selected region. Please double-check the service availability for your region at https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/.\n")
		} else if strings.Contains(errMsg, "Could not resolve the foundation model") {
			fmt.Printf("Error: Could not resolve the foundation model from model identifier: \"%v\". Please verify that the requested model exists and is accessible within the specified region.\n", modelId)
		} else {
			fmt.Printf("Error: Couldn't invoke Anthropic Claude. Here's why: %v\n", err)
		}
		os.Exit(1)
	}

	var response ClaudeResponse
	err = json.Unmarshal(result.Body, &response)
	if err != nil {
		log.Fatal("failed to unmarshal", err)
	}

	fmt.Println("Prompt:\n", prompt)
	fmt.Println("Response from Anthropic Claude:\n", response.Completion)
}
For API details, see InvokeModel in the AWS SDK for Go API Reference.
Scenarios
The following code example shows how to prepare and send prompts to a variety of large language models (LLMs) on Amazon Bedrock.
- SDK for Go V2
Note
There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.
Use several foundation models on Amazon Bedrock.
import ( "context" "encoding/base64" "fmt" "log" "math/rand" "os" "path/filepath" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" "github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/actions" "github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools" ) // InvokeModelsScenario demonstrates how to use the Amazon Bedrock Runtime client // to invoke various foundation models for text and image generation // // 1. Generate text with Anthropic Claude 2 // 2. Generate text with AI21 Labs Jurassic-2 // 3. Generate text with Meta Llama 2 Chat // 4. Generate text and asynchronously process the response stream with Anthropic Claude 2 // 5. Generate an image with the Amazon Titan image generation model // 6. Generate text with Amazon Titan Text G1 Express model type InvokeModelsScenario struct { sdkConfig aws.Config invokeModelWrapper actions.InvokeModelWrapper responseStreamWrapper actions.InvokeModelWithResponseStreamWrapper questioner demotools.IQuestioner } // NewInvokeModelsScenario constructs an InvokeModelsScenario instance from a configuration. // It uses the specified config to get a Bedrock Runtime client and create wrappers for the // actions used in the scenario. func NewInvokeModelsScenario(sdkConfig aws.Config, questioner demotools.IQuestioner) InvokeModelsScenario { client := bedrockruntime.NewFromConfig(sdkConfig) return InvokeModelsScenario{ sdkConfig: sdkConfig, invokeModelWrapper: actions.InvokeModelWrapper{BedrockRuntimeClient: client}, responseStreamWrapper: actions.InvokeModelWithResponseStreamWrapper{BedrockRuntimeClient: client}, questioner: questioner, } } // Runs the interactive scenario. func (scenario InvokeModelsScenario) Run(ctx context.Context) { defer func() { if r := recover(); r != nil { log.Printf("Something went wrong with the demo: %v\n", r) } }() log.Println(strings.Repeat("=", 77)) log.Println("Welcome to the Amazon Bedrock Runtime model invocation demo.") log.Println(strings.Repeat("=", 77)) log.Printf("First, let's invoke a few large-language models using the synchronous client:\n\n") text2textPrompt := "In one paragraph, who are you?" 
log.Println(strings.Repeat("-", 77)) log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt) scenario.InvokeClaude(ctx, text2textPrompt) log.Println(strings.Repeat("-", 77)) log.Printf("Invoking Jurassic-2 with prompt: %v\n", text2textPrompt) scenario.InvokeJurassic2(ctx, text2textPrompt) log.Println(strings.Repeat("=", 77)) log.Printf("Now, let's invoke Claude with the asynchronous client and process the response stream:\n\n") log.Println(strings.Repeat("-", 77)) log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt) scenario.InvokeWithResponseStream(ctx, text2textPrompt) log.Println(strings.Repeat("=", 77)) log.Printf("Now, let's create an image with the Amazon Titan image generation model:\n\n") text2ImagePrompt := "stylized picture of a cute old steampunk robot" seed := rand.Int63n(2147483648) log.Println(strings.Repeat("-", 77)) log.Printf("Invoking Amazon Titan with prompt: %v\n", text2ImagePrompt) scenario.InvokeTitanImage(ctx, text2ImagePrompt, seed) log.Println(strings.Repeat("-", 77)) log.Printf("Invoking Titan Text Express with prompt: %v\n", text2textPrompt) scenario.InvokeTitanText(ctx, text2textPrompt) log.Println(strings.Repeat("=", 77)) log.Println("Thanks for watching!") log.Println(strings.Repeat("=", 77)) } func (scenario InvokeModelsScenario) InvokeClaude(ctx context.Context, prompt string) { completion, err := scenario.invokeModelWrapper.InvokeClaude(ctx, prompt) if err != nil { panic(err) } log.Printf("\nClaude : %v\n", strings.TrimSpace(completion)) } func (scenario InvokeModelsScenario) InvokeJurassic2(ctx context.Context, prompt string) { completion, err := scenario.invokeModelWrapper.InvokeJurassic2(ctx, prompt) if err != nil { panic(err) } log.Printf("\nJurassic-2 : %v\n", strings.TrimSpace(completion)) } func (scenario InvokeModelsScenario) InvokeWithResponseStream(ctx context.Context, prompt string) { log.Println("\nClaude with response stream:") _, err := scenario.responseStreamWrapper.InvokeModelWithResponseStream(ctx, prompt) if err != nil { panic(err) } log.Println() } func (scenario InvokeModelsScenario) InvokeTitanImage(ctx context.Context, prompt string, seed int64) { base64ImageData, err := scenario.invokeModelWrapper.InvokeTitanImage(ctx, prompt, seed) if err != nil { panic(err) } imagePath := saveImage(base64ImageData, "amazon.titan-image-generator-v1") fmt.Printf("The generated image has been saved to %s\n", imagePath) } func (scenario InvokeModelsScenario) InvokeTitanText(ctx context.Context, prompt string) { completion, err := scenario.invokeModelWrapper.InvokeTitanText(ctx, prompt) if err != nil { panic(err) } log.Printf("\nTitan Text Express : %v\n\n", strings.TrimSpace(completion)) }
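The scenario calls a saveImage helper that is part of the complete example on GitHub but not shown in this excerpt. A minimal sketch of such a helper, relying only on packages already imported above (encoding/base64, os, path/filepath), might look like the following; the file-naming scheme is an illustration, not the repository's actual implementation.

// saveImage decodes base64-encoded image data and writes it to a PNG file in
// the current working directory, returning the file path. This is only a
// sketch of the helper used in the scenario above.
func saveImage(base64ImageData string, modelId string) string {
	imageBytes, err := base64.StdEncoding.DecodeString(base64ImageData)
	if err != nil {
		panic(err)
	}
	// Hypothetical naming scheme: <modelId>.png in the working directory.
	imagePath := filepath.Join(".", modelId+".png")
	if err := os.WriteFile(imagePath, imageBytes, 0644); err != nil {
		panic(err)
	}
	return imagePath
}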
For API details, see the following topics in the AWS SDK for Go API Reference:
- InvokeModel
- InvokeModelWithResponseStream
AI21 Labs Jurassic-2
The following code example shows how to send a text message to AI21 Labs Jurassic-2, using the Invoke Model API.
- SDK for Go V2
Note
There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.
Use the Invoke Model API to send a text message.
import ( "context" "encoding/json" "log" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" ) // InvokeModelWrapper encapsulates Amazon Bedrock actions used in the examples. // It contains a Bedrock Runtime client that is used to invoke foundation models. type InvokeModelWrapper struct { BedrockRuntimeClient *bedrockruntime.Client } // Each model provider has their own individual request and response formats. // For the format, ranges, and default values for AI21 Labs Jurassic-2, refer to: // https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-jurassic2.html type Jurassic2Request struct { Prompt string `json:"prompt"` MaxTokens int `json:"maxTokens,omitempty"` Temperature float64 `json:"temperature,omitempty"` } type Jurassic2Response struct { Completions []Completion `json:"completions"` } type Completion struct { Data Data `json:"data"` } type Data struct { Text string `json:"text"` } // Invokes AI21 Labs Jurassic-2 on Amazon Bedrock to run an inference using the input // provided in the request body. func (wrapper InvokeModelWrapper) InvokeJurassic2(ctx context.Context, prompt string) (string, error) { modelId := "ai21.j2-mid-v1" body, err := json.Marshal(Jurassic2Request{ Prompt: prompt, MaxTokens: 200, Temperature: 0.5, }) if err != nil { log.Fatal("failed to marshal", err) } output, err := wrapper.BedrockRuntimeClient.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{ ModelId: aws.String(modelId), ContentType: aws.String("application/json"), Body: body, }) if err != nil { ProcessError(err, modelId) } var response Jurassic2Response if err := json.Unmarshal(output.Body, &response); err != nil { log.Fatal("failed to unmarshal", err) } return response.Completions[0].Data.Text, nil }
For API details, see InvokeModel in the AWS SDK for Go API Reference.
Amazon Titan Image Generator
The following code example shows how to invoke Amazon Titan Image on Amazon Bedrock to generate an image.
- SDK for Go V2
Note
There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.
Create an image with the Amazon Titan Image Generator.
import ( "context" "encoding/json" "log" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" ) // InvokeModelWrapper encapsulates Amazon Bedrock actions used in the examples. // It contains a Bedrock Runtime client that is used to invoke foundation models. type InvokeModelWrapper struct { BedrockRuntimeClient *bedrockruntime.Client } type TitanImageRequest struct { TaskType string `json:"taskType"` TextToImageParams TextToImageParams `json:"textToImageParams"` ImageGenerationConfig ImageGenerationConfig `json:"imageGenerationConfig"` } type TextToImageParams struct { Text string `json:"text"` } type ImageGenerationConfig struct { NumberOfImages int `json:"numberOfImages"` Quality string `json:"quality"` CfgScale float64 `json:"cfgScale"` Height int `json:"height"` Width int `json:"width"` Seed int64 `json:"seed"` } type TitanImageResponse struct { Images []string `json:"images"` } // Invokes the Titan Image model to create an image using the input provided // in the request body. func (wrapper InvokeModelWrapper) InvokeTitanImage(ctx context.Context, prompt string, seed int64) (string, error) { modelId := "amazon.titan-image-generator-v1" body, err := json.Marshal(TitanImageRequest{ TaskType: "TEXT_IMAGE", TextToImageParams: TextToImageParams{ Text: prompt, }, ImageGenerationConfig: ImageGenerationConfig{ NumberOfImages: 1, Quality: "standard", CfgScale: 8.0, Height: 512, Width: 512, Seed: seed, }, }) if err != nil { log.Fatal("failed to marshal", err) } output, err := wrapper.BedrockRuntimeClient.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{ ModelId: aws.String(modelId), ContentType: aws.String("application/json"), Body: body, }) if err != nil { ProcessError(err, modelId) } var response TitanImageResponse if err := json.Unmarshal(output.Body, &response); err != nil { log.Fatal("failed to unmarshal", err) } base64ImageData := response.Images[0] return base64ImageData, nil }
For API details, see InvokeModel in the AWS SDK for Go API Reference.
Amazon Titan Text
The following code example shows how to send a text message to Amazon Titan Text, using the Invoke Model API.
- SDK for Go V2
Note
There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.
Use the Invoke Model API to send a text message.
import ( "context" "encoding/json" "log" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" ) // InvokeModelWrapper encapsulates Amazon Bedrock actions used in the examples. // It contains a Bedrock Runtime client that is used to invoke foundation models. type InvokeModelWrapper struct { BedrockRuntimeClient *bedrockruntime.Client } // Each model provider has their own individual request and response formats. // For the format, ranges, and default values for Amazon Titan Text, refer to: // https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-text.html type TitanTextRequest struct { InputText string `json:"inputText"` TextGenerationConfig TextGenerationConfig `json:"textGenerationConfig"` } type TextGenerationConfig struct { Temperature float64 `json:"temperature"` TopP float64 `json:"topP"` MaxTokenCount int `json:"maxTokenCount"` StopSequences []string `json:"stopSequences,omitempty"` } type TitanTextResponse struct { InputTextTokenCount int `json:"inputTextTokenCount"` Results []Result `json:"results"` } type Result struct { TokenCount int `json:"tokenCount"` OutputText string `json:"outputText"` CompletionReason string `json:"completionReason"` } func (wrapper InvokeModelWrapper) InvokeTitanText(ctx context.Context, prompt string) (string, error) { modelId := "amazon.titan-text-express-v1" body, err := json.Marshal(TitanTextRequest{ InputText: prompt, TextGenerationConfig: TextGenerationConfig{ Temperature: 0, TopP: 1, MaxTokenCount: 4096, }, }) if err != nil { log.Fatal("failed to marshal", err) } output, err := wrapper.BedrockRuntimeClient.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{ ModelId: aws.String(modelId), ContentType: aws.String("application/json"), Body: body, }) if err != nil { ProcessError(err, modelId) } var response TitanTextResponse if err := json.Unmarshal(output.Body, &response); err != nil { log.Fatal("failed to unmarshal", err) } return response.Results[0].OutputText, nil }
For API details, see InvokeModel in the AWS SDK for Go API Reference.
Anthropic Claude
The following code example shows how to send a text message to Anthropic Claude, using the Invoke Model API.
- SDK for Go V2
Note
There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.
Use the Anthropic Claude 2 foundation model to generate text.
import ( "context" "encoding/json" "log" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" ) // InvokeModelWrapper encapsulates Amazon Bedrock actions used in the examples. // It contains a Bedrock Runtime client that is used to invoke foundation models. type InvokeModelWrapper struct { BedrockRuntimeClient *bedrockruntime.Client } // Each model provider has their own individual request and response formats. // For the format, ranges, and default values for Anthropic Claude, refer to: // https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-claude.html type ClaudeRequest struct { Prompt string `json:"prompt"` MaxTokensToSample int `json:"max_tokens_to_sample"` Temperature float64 `json:"temperature,omitempty"` StopSequences []string `json:"stop_sequences,omitempty"` } type ClaudeResponse struct { Completion string `json:"completion"` } // Invokes Anthropic Claude on Amazon Bedrock to run an inference using the input // provided in the request body. func (wrapper InvokeModelWrapper) InvokeClaude(ctx context.Context, prompt string) (string, error) { modelId := "anthropic.claude-v2" // Anthropic Claude requires enclosing the prompt as follows: enclosedPrompt := "Human: " + prompt + "\n\nAssistant:" body, err := json.Marshal(ClaudeRequest{ Prompt: enclosedPrompt, MaxTokensToSample: 200, Temperature: 0.5, StopSequences: []string{"\n\nHuman:"}, }) if err != nil { log.Fatal("failed to marshal", err) } output, err := wrapper.BedrockRuntimeClient.InvokeModel(ctx, &bedrockruntime.InvokeModelInput{ ModelId: aws.String(modelId), ContentType: aws.String("application/json"), Body: body, }) if err != nil { ProcessError(err, modelId) } var response ClaudeResponse if err := json.Unmarshal(output.Body, &response); err != nil { log.Fatal("failed to unmarshal", err) } return response.Completion, nil }
For API details, see InvokeModel in the AWS SDK for Go API Reference.
The following code example shows how to send a text message to Anthropic Claude models, using the Invoke Model API, and print the response stream.
- SDK for Go V2
Note
There's more on GitHub. Find the complete example and learn how to set up and run it in the AWS Code Examples Repository.
Use the Invoke Model API to send a text message and process the response stream in real time.
import ( "context" "encoding/json" "log" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockruntime" ) // InvokeModelWrapper encapsulates Amazon Bedrock actions used in the examples. // It contains a Bedrock Runtime client that is used to invoke foundation models. type InvokeModelWrapper struct { BedrockRuntimeClient *bedrockruntime.Client } // Each model provider defines their own individual request and response formats. // For the format, ranges, and default values for the different models, refer to: // https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html type Request struct { Prompt string `json:"prompt"` MaxTokensToSample int `json:"max_tokens_to_sample"` Temperature float64 `json:"temperature,omitempty"` } type Response struct { Completion string `json:"completion"` } // Invokes Anthropic Claude on Amazon Bedrock to run an inference and asynchronously // process the response stream. func (wrapper InvokeModelWithResponseStreamWrapper) InvokeModelWithResponseStream(ctx context.Context, prompt string) (string, error) { modelId := "anthropic.claude-v2" // Anthropic Claude requires you to enclose the prompt as follows: prefix := "Human: " postfix := "\n\nAssistant:" prompt = prefix + prompt + postfix request := ClaudeRequest{ Prompt: prompt, MaxTokensToSample: 200, Temperature: 0.5, StopSequences: []string{"\n\nHuman:"}, } body, err := json.Marshal(request) if err != nil { log.Panicln("Couldn't marshal the request: ", err) } output, err := wrapper.BedrockRuntimeClient.InvokeModelWithResponseStream(ctx, &bedrockruntime.InvokeModelWithResponseStreamInput{ Body: body, ModelId: aws.String(modelId), ContentType: aws.String("application/json"), }) if err != nil { errMsg := err.Error() if strings.Contains(errMsg, "no such host") { log.Printf("The Bedrock service is not available in the selected region. Please double-check the service availability for your region at https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/.\n") } else if strings.Contains(errMsg, "Could not resolve the foundation model") { log.Printf("Could not resolve the foundation model from model identifier: \"%v\". Please verify that the requested model exists and is accessible within the specified region.\n", modelId) } else { log.Printf("Couldn't invoke Anthropic Claude. Here's why: %v\n", err) } } resp, err := processStreamingOutput(ctx, output, func(ctx context.Context, part []byte) error { fmt.Print(string(part)) return nil }) if err != nil { log.Fatal("streaming output processing error: ", err) } return resp.Completion, nil } type StreamingOutputHandler func(ctx context.Context, part []byte) error func processStreamingOutput(ctx context.Context, output *bedrockruntime.InvokeModelWithResponseStreamOutput, handler StreamingOutputHandler) (Response, error) { var combinedResult string resp := Response{} for event := range output.GetStream().Events() { switch v := event.(type) { case *types.ResponseStreamMemberChunk: //fmt.Println("payload", string(v.Value.Bytes)) var resp Response err := json.NewDecoder(bytes.NewReader(v.Value.Bytes)).Decode(&resp) if err != nil { return resp, err } err = handler(ctx, []byte(resp.Completion)) if err != nil { return resp, err } combinedResult += resp.Completion case *types.UnknownUnionMember: fmt.Println("unknown tag:", v.Tag) default: fmt.Println("union is nil or unknown type") } } resp.Completion = combinedResult return resp, nil }
For API details, see InvokeModelWithResponseStream in the AWS SDK for Go API Reference.