
There are more AWS SDK examples available in the AWS Doc SDK Examples GitHub repository.


Use StartStreamTranscription with an AWS SDK

The following code examples show how to use StartStreamTranscription.

C++
SDK for C++
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        //TODO(User): Set to the region of your AWS account.
        const Aws::String region = Aws::Region::US_WEST_2;

        //Load a profile that has been granted AmazonTranscribeFullAccess AWS managed permission policy.
        Aws::Client::ClientConfiguration config;
#ifdef _WIN32
        // ATTENTION: On Windows with the AWS C++ SDK, this example only runs if the SDK is built
        // with the curl library.
        // For more information, see the accompanying ReadMe.
        // For more information, see "Building the SDK for Windows with curl".
        // https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/setup-windows.html
        //TODO(User): Update to the location of your .crt file.
        config.caFile = "C:/curl/bin/curl-ca-bundle.crt";
#endif
        config.region = region;

        TranscribeStreamingServiceClient client(config);
        StartStreamTranscriptionHandler handler;
        handler.SetOnErrorCallback(
                [](const Aws::Client::AWSError<TranscribeStreamingServiceErrors> &error) {
                    std::cerr << "ERROR: " + error.GetMessage() << std::endl;
                });
        //SetTranscriptEventCallback called for every 'chunk' of file transcripted.
        // Partial results are returned in real time.
        handler.SetTranscriptEventCallback([](const TranscriptEvent &ev) {
            for (auto &&r: ev.GetTranscript().GetResults()) {
                if (r.GetIsPartial()) {
                    std::cout << "[partial] ";
                }
                else {
                    std::cout << "[Final] ";
                }
                for (auto &&alt: r.GetAlternatives()) {
                    std::cout << alt.GetTranscript() << std::endl;
                }
            }
        });

        StartStreamTranscriptionRequest request;
        request.SetMediaSampleRateHertz(SAMPLE_RATE);
        request.SetLanguageCode(LanguageCode::en_US);
        request.SetMediaEncoding(MediaEncoding::pcm); // wav and aiff files are PCM formats.
        request.SetEventStreamHandler(handler);

        auto OnStreamReady = [](AudioStream &stream) {
            Aws::FStream file(FILE_NAME, std::ios_base::in | std::ios_base::binary);
            if (!file.is_open()) {
                std::cerr << "Failed to open " << FILE_NAME << '\n';
            }
            std::array<char, BUFFER_SIZE> buf;
            int i = 0;
            while (file) {
                file.read(&buf[0], buf.size());

                if (!file)
                    std::cout << "File: only " << file.gcount() << " could be read"
                              << std::endl;

                Aws::Vector<unsigned char> bits{buf.begin(), buf.end()};
                AudioEvent event(std::move(bits));
                if (!stream) {
                    std::cerr << "Failed to create a stream" << std::endl;
                    break;
                }
                //The std::basic_istream::gcount() is used to count the characters in the given string. It returns
                //the number of characters extracted by the last read() operation.
                if (file.gcount() > 0) {
                    if (!stream.WriteAudioEvent(event)) {
                        std::cerr << "Failed to write an audio event" << std::endl;
                        break;
                    }
                }
                else {
                    break;
                }
                std::this_thread::sleep_for(std::chrono::milliseconds(25)); // Slow down because we are streaming from a file.
            }
            if (!stream.WriteAudioEvent(AudioEvent())) {
                // Per the spec, we have to send an empty event (an event without a payload) at the end.
                std::cerr << "Failed to send an empty frame" << std::endl;
            }
            else {
                std::cout << "Successfully sent the empty frame" << std::endl;
            }
            stream.flush();
            stream.Close();
        };

        Aws::Utils::Threading::Semaphore signaling(0 /*initialCount*/, 1 /*maxCount*/);
        auto OnResponseCallback = [&signaling](
                const TranscribeStreamingServiceClient * /*unused*/,
                const Model::StartStreamTranscriptionRequest & /*unused*/,
                const Model::StartStreamTranscriptionOutcome &outcome,
                const std::shared_ptr<const Aws::Client::AsyncCallerContext> & /*unused*/) {
            if (!outcome.IsSuccess()) {
                std::cerr << "Transcribe streaming error "
                          << outcome.GetError().GetMessage() << std::endl;
            }
            signaling.Release();
        };

        std::cout << "Starting..." << std::endl;
        client.StartStreamTranscriptionAsync(request, OnStreamReady, OnResponseCallback,
                                             nullptr /*context*/);
        signaling.WaitOne(); // Prevent the application from exiting until we're done.
        std::cout << "Done" << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}
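The excerpt above references SAMPLE_RATE, FILE_NAME, and BUFFER_SIZE and assumes the SDK headers and using-declarations are already in scope; the complete file on GitHub supplies them. The following is a minimal sketch of what those definitions might look like. The header paths follow the SDK's usual layout, and the constant values and file path are placeholders, not taken from the excerpt; adjust them to your own audio file.

// Headers and constants assumed by the example above (illustrative sketch only).
#include <aws/core/Aws.h>
#include <aws/core/utils/threading/Semaphore.h>
#include <aws/transcribestreaming/TranscribeStreamingServiceClient.h>
#include <aws/transcribestreaming/model/StartStreamTranscriptionHandler.h>
#include <aws/transcribestreaming/model/StartStreamTranscriptionRequest.h>
#include <array>
#include <fstream>
#include <thread>

using namespace Aws;
using namespace Aws::TranscribeStreamingService;
using namespace Aws::TranscribeStreamingService::Model;

// Placeholder values: set the sample rate to match the PCM encoding of your audio file.
static const int SAMPLE_RATE = 16000;        // Hz
static const int BUFFER_SIZE = 1024 * 4;     // bytes per audio chunk written to the stream
static const char FILE_NAME[] = "./your-audio-file.wav"; // hypothetical path to the local audio file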
Java
SDK for Java 2.x
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

public class TranscribeStreamingDemoApp {
    private static final Region REGION = Region.US_EAST_1;
    private static TranscribeStreamingAsyncClient client;

    public static void main(String args[])
            throws URISyntaxException, ExecutionException, InterruptedException, LineUnavailableException {

        client = TranscribeStreamingAsyncClient.builder()
                .credentialsProvider(getCredentials())
                .region(REGION)
                .build();

        CompletableFuture<Void> result = client.startStreamTranscription(getRequest(16_000),
                new AudioStreamPublisher(getStreamFromMic()),
                getResponseHandler());

        result.get();
        client.close();
    }

    private static InputStream getStreamFromMic() throws LineUnavailableException {

        // Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
        int sampleRate = 16000;
        AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

        if (!AudioSystem.isLineSupported(info)) {
            System.out.println("Line not supported");
            System.exit(0);
        }

        TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
        line.open(format);
        line.start();

        InputStream audioStream = new AudioInputStream(line);
        return audioStream;
    }

    private static AwsCredentialsProvider getCredentials() {
        return DefaultCredentialsProvider.create();
    }

    private static StartStreamTranscriptionRequest getRequest(Integer mediaSampleRateHertz) {
        return StartStreamTranscriptionRequest.builder()
                .languageCode(LanguageCode.EN_US.toString())
                .mediaEncoding(MediaEncoding.PCM)
                .mediaSampleRateHertz(mediaSampleRateHertz)
                .build();
    }

    private static StartStreamTranscriptionResponseHandler getResponseHandler() {
        return StartStreamTranscriptionResponseHandler.builder()
                .onResponse(r -> {
                    System.out.println("Received Initial response");
                })
                .onError(e -> {
                    System.out.println(e.getMessage());
                    StringWriter sw = new StringWriter();
                    e.printStackTrace(new PrintWriter(sw));
                    System.out.println("Error Occurred: " + sw.toString());
                })
                .onComplete(() -> {
                    System.out.println("=== All records stream successfully ===");
                })
                .subscriber(event -> {
                    List<Result> results = ((TranscriptEvent) event).transcript().results();
                    if (results.size() > 0) {
                        if (!results.get(0).alternatives().get(0).transcript().isEmpty()) {
                            System.out.println(results.get(0).alternatives().get(0).transcript());
                        }
                    }
                })
                .build();
    }

    private InputStream getStreamFromFile(String audioFileName) {
        try {
            File inputFile = new File(getClass().getClassLoader().getResource(audioFileName).getFile());
            InputStream audioStream = new FileInputStream(inputFile);
            return audioStream;
        } catch (FileNotFoundException e) {
            throw new RuntimeException(e);
        }
    }

    private static class AudioStreamPublisher implements Publisher<AudioStream> {
        private final InputStream inputStream;
        private static Subscription currentSubscription;

        private AudioStreamPublisher(InputStream inputStream) {
            this.inputStream = inputStream;
        }

        @Override
        public void subscribe(Subscriber<? super AudioStream> s) {

            if (this.currentSubscription == null) {
                this.currentSubscription = new SubscriptionImpl(s, inputStream);
            } else {
                this.currentSubscription.cancel();
                this.currentSubscription = new SubscriptionImpl(s, inputStream);
            }

            s.onSubscribe(currentSubscription);
        }
    }

    public static class SubscriptionImpl implements Subscription {
        private static final int CHUNK_SIZE_IN_BYTES = 1024 * 1;
        private final Subscriber<? super AudioStream> subscriber;
        private final InputStream inputStream;
        private ExecutorService executor = Executors.newFixedThreadPool(1);
        private AtomicLong demand = new AtomicLong(0);

        SubscriptionImpl(Subscriber<? super AudioStream> s, InputStream inputStream) {
            this.subscriber = s;
            this.inputStream = inputStream;
        }

        @Override
        public void request(long n) {
            if (n <= 0) {
                subscriber.onError(new IllegalArgumentException("Demand must be positive"));
            }

            demand.getAndAdd(n);

            executor.submit(() -> {
                try {
                    do {
                        ByteBuffer audioBuffer = getNextEvent();
                        if (audioBuffer.remaining() > 0) {
                            AudioEvent audioEvent = audioEventFromBuffer(audioBuffer);
                            subscriber.onNext(audioEvent);
                        } else {
                            subscriber.onComplete();
                            break;
                        }
                    } while (demand.decrementAndGet() > 0);
                } catch (Exception e) {
                    subscriber.onError(e);
                }
            });
        }

        @Override
        public void cancel() {
            executor.shutdown();
        }

        private ByteBuffer getNextEvent() {
            ByteBuffer audioBuffer = null;
            byte[] audioBytes = new byte[CHUNK_SIZE_IN_BYTES];

            int len = 0;
            try {
                len = inputStream.read(audioBytes);

                if (len <= 0) {
                    audioBuffer = ByteBuffer.allocate(0);
                } else {
                    audioBuffer = ByteBuffer.wrap(audioBytes, 0, len);
                }
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }

            return audioBuffer;
        }

        private AudioEvent audioEventFromBuffer(ByteBuffer bb) {
            return AudioEvent.builder()
                    .audioChunk(SdkBytes.fromByteBuffer(bb))
                    .build();
        }
    }
}
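The class above also defines a getStreamFromFile helper that main() never calls. The following is a rough sketch of how the file path could be exercised inside main() instead of the microphone, reusing the same publisher and handler; the resource name "your-audio.wav" and the 16 kHz sample rate are placeholders, not values from the example, and must match a PCM-encoded file on your classpath.

// Hypothetical alternative to the microphone path in main(): stream a bundled audio file.
// getStreamFromFile is an instance method, so a throwaway instance is created to call it.
CompletableFuture<Void> result = client.startStreamTranscription(
        getRequest(16_000), // placeholder: sample rate of the file, in Hz
        new AudioStreamPublisher(
                new TranscribeStreamingDemoApp().getStreamFromFile("your-audio.wav")),
        getResponseHandler());
result.get(); // block until the stream completes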