int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        // TODO(User): Set to the region of your AWS account.
        const Aws::String region = Aws::Region::US_WEST_2;

        // Load a profile that has been granted the AmazonTranscribeFullAccess AWS managed permission policy.
        Aws::Client::ClientConfiguration config;
#ifdef _WIN32
        // ATTENTION: On Windows with the AWS C++ SDK, this example only runs if the SDK is built
        // with the curl library.
        // For more information, see the accompanying ReadMe.
        // For more information, see "Building the SDK for Windows with curl".
        // https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/setup-windows.html
        // TODO(User): Update to the location of your .crt file.
        config.caFile = "C:/curl/bin/curl-ca-bundle.crt";
#endif
        config.region = region;

        TranscribeStreamingServiceClient client(config);

        StartStreamTranscriptionHandler handler;
        handler.SetOnErrorCallback(
                [](const Aws::Client::AWSError<TranscribeStreamingServiceErrors> &error) {
                    std::cerr << "ERROR: " + error.GetMessage() << std::endl;
                });

        // SetTranscriptEventCallback is called for every 'chunk' of the file that is transcribed.
        // Partial results are returned in real time.
        handler.SetTranscriptEventCallback([](const TranscriptEvent &ev) {
            for (auto &&r: ev.GetTranscript().GetResults()) {
                if (r.GetIsPartial()) {
                    std::cout << "[partial] ";
                }
                else {
                    std::cout << "[Final] ";
                }
                for (auto &&alt: r.GetAlternatives()) {
                    std::cout << alt.GetTranscript() << std::endl;
                }
            }
        });

        StartStreamTranscriptionRequest request;
        request.SetMediaSampleRateHertz(SAMPLE_RATE);
        request.SetLanguageCode(LanguageCode::en_US);
        request.SetMediaEncoding(
                MediaEncoding::pcm); // WAV and AIFF files are PCM formats.
        request.SetEventStreamHandler(handler);

        auto OnStreamReady = [](AudioStream &stream) {
            Aws::FStream file(FILE_NAME, std::ios_base::in | std::ios_base::binary);
            if (!file.is_open()) {
                std::cerr << "Failed to open " << FILE_NAME << '\n';
            }

            std::array<char, BUFFER_SIZE> buf;
            int i = 0;
            while (file) {
                file.read(&buf[0], buf.size());

                if (!file)
                    std::cout << "File: only " << file.gcount() << " could be read"
                              << std::endl;

                Aws::Vector<unsigned char> bits{buf.begin(), buf.end()};
                AudioEvent event(std::move(bits));
                if (!stream) {
                    std::cerr << "Failed to create a stream" << std::endl;
                    break;
                }

                // std::basic_istream::gcount() returns the number of characters
                // extracted by the last unformatted read() operation.
                if (file.gcount() > 0) {
                    if (!stream.WriteAudioEvent(event)) {
                        std::cerr << "Failed to write an audio event" << std::endl;
                        break;
                    }
                }
                else {
                    break;
                }

                std::this_thread::sleep_for(std::chrono::milliseconds(
                        25)); // Slow down because we are streaming from a file.
            }

            // Per the spec, we have to send an empty event (an event without a payload) at the end.
            if (!stream.WriteAudioEvent(AudioEvent())) {
                std::cerr << "Failed to send an empty frame" << std::endl;
            }
            else {
                std::cout << "Successfully sent the empty frame" << std::endl;
            }

            stream.flush();
            stream.Close();
        };

        Aws::Utils::Threading::Semaphore signaling(0 /*initialCount*/, 1 /*maxCount*/);
        auto OnResponseCallback = [&signaling](
                const TranscribeStreamingServiceClient * /*unused*/,
                const Model::StartStreamTranscriptionRequest & /*unused*/,
                const Model::StartStreamTranscriptionOutcome &outcome,
                const std::shared_ptr<const Aws::Client::AsyncCallerContext> & /*unused*/) {
            if (!outcome.IsSuccess()) {
                std::cerr << "Transcribe streaming error "
                          << outcome.GetError().GetMessage() << std::endl;
            }
            signaling.Release();
        };

        std::cout << "Starting..." << std::endl;
        client.StartStreamTranscriptionAsync(request, OnStreamReady, OnResponseCallback,
                                             nullptr /*context*/);
        signaling.WaitOne(); // Prevent the application from exiting until we're done.
        std::cout << "Done" << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}
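
The C++ listing above is an excerpt: it refers to SAMPLE_RATE, BUFFER_SIZE, and FILE_NAME and uses the Transcribe Streaming types unqualified, but the declarations that provide them are not shown. The sketch below illustrates what could sit above main(); the header paths follow the usual AWS SDK for C++ layout, while the constant values and file name are illustrative placeholders rather than values taken from the original sample.

// Sketch of assumed supporting declarations for the excerpt above.
// Header paths follow the standard AWS SDK for C++ layout; depending on the
// SDK version, additional model headers may be needed. The constant values
// are placeholders and must match your own audio file.
#include <aws/core/Aws.h>
#include <aws/core/utils/threading/Semaphore.h>
#include <aws/transcribestreaming/TranscribeStreamingServiceClient.h>
#include <aws/transcribestreaming/model/StartStreamTranscriptionHandler.h>
#include <aws/transcribestreaming/model/StartStreamTranscriptionRequest.h>
#include <array>
#include <chrono>
#include <iostream>
#include <thread>

using namespace Aws;
using namespace Aws::TranscribeStreamingService;
using namespace Aws::TranscribeStreamingService::Model;

static const int SAMPLE_RATE = 16000;                  // Hertz; must match the source audio.
static const int BUFFER_SIZE = SAMPLE_RATE * 2 / 10;   // ~100 ms of 16-bit mono PCM per chunk.
static const char FILE_NAME[] = "transcribe-test.wav"; // TODO(User): path to your PCM/WAV audio file.

With declarations like these in place, the program reads the file in BUFFER_SIZE-byte chunks, streams them to Amazon Transcribe, and prints partial and final transcripts as they arrive.
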
/// An example that demonstrates how to watch a transcribe event stream to
/// transcribe audio from a file to the console.

import ArgumentParser
import AWSClientRuntime
import AWSTranscribeStreaming
import Foundation

/// Identify one of the media file formats supported by Amazon Transcribe.
enum TranscribeFormat: String, ExpressibleByArgument {
    case ogg = "ogg"
    case pcm = "pcm"
    case flac = "flac"
}

// -MARK: - Async command line tool

struct ExampleCommand: ParsableCommand {
    // -MARK: Command arguments
    @Flag(help: "Show partial results")
    var showPartial = false
    @Option(help: "Language code to transcribe into")
    var lang: String = "en-US"
    @Option(help: "Format of the source audio file")
    var format: TranscribeFormat
    @Option(help: "Sample rate of the source audio file in Hertz")
    var sampleRate: Int = 16000
    @Option(help: "Path of the source audio file")
    var path: String
    @Option(help: "Name of the AWS Region to use (default: us-east-1)")
    var region = "us-east-1"

    static var configuration = CommandConfiguration(
        commandName: "tsevents",
        abstract: """
        This example shows how to use event streaming with Amazon Transcribe.
        """,
        discussion: """
        """
    )

    /// Create and return an Amazon Transcribe audio stream from the file
    /// specified in the arguments.
    ///
    /// - Throws: Errors from `TranscribeError`.
    ///
    /// - Returns: `AsyncThrowingStream<TranscribeStreamingClientTypes.AudioStream, Error>`
    func createAudioStream() async throws
                -> AsyncThrowingStream<TranscribeStreamingClientTypes.AudioStream, Error> {
        let fileURL: URL = URL(fileURLWithPath: path)
        let audioData = try Data(contentsOf: fileURL)

        // Properties defining the size of audio chunks and the total size of
        // the audio file in bytes. You should try to send chunks that last on
        // average 125 milliseconds.
        let chunkSizeInMilliseconds = 125.0
        let chunkSize = Int(chunkSizeInMilliseconds / 1000.0 * Double(sampleRate) * 2.0)
        let audioDataSize = audioData.count

        // Create an audio stream from the source data. The stream's job is
        // to send the audio in chunks to Amazon Transcribe as
        // `AudioStream.audioevent` events.
        let audioStream = AsyncThrowingStream<TranscribeStreamingClientTypes.AudioStream,
                Error> { continuation in
            Task {
                var currentStart = 0
                var currentEnd = min(chunkSize, audioDataSize - currentStart)

                // Generate and send chunks of audio data as `audioevent`
                // events until the entire file has been sent. Each event is
                // yielded to the SDK after being created.
                while currentStart < audioDataSize {
                    let dataChunk = audioData[currentStart ..< currentEnd]

                    let audioEvent = TranscribeStreamingClientTypes.AudioStream.audioevent(
                        .init(audioChunk: dataChunk)
                    )
                    let yieldResult = continuation.yield(audioEvent)

                    switch yieldResult {
                    case .enqueued(_):
                        // The chunk was successfully enqueued into the
                        // stream. The `remaining` parameter estimates how
                        // much room is left in the queue, but is ignored here.
                        break
                    case .dropped(_):
                        // The chunk was dropped because the queue buffer
                        // is full. This will cause transcription errors.
                        print("Warning: Dropped audio! The transcription will be incomplete.")
                    case .terminated:
                        print("Audio stream terminated.")
                        continuation.finish()
                        return
                    default:
                        print("Warning: Unrecognized response during audio streaming.")
                    }

                    currentStart = currentEnd
                    currentEnd = min(currentStart + chunkSize, audioDataSize)
                }

                // Let the SDK's continuation block know the stream is over.
                continuation.finish()
            }
        }

        return audioStream
    }

    /// Run the transcription process.
    ///
    /// - Throws: An error from `TranscribeError`.
    func transcribe(encoding: TranscribeStreamingClientTypes.MediaEncoding) async throws {
        // Create the Transcribe Streaming client.
        let client = TranscribeStreamingClient(
            config: try await TranscribeStreamingClient.TranscribeStreamingClientConfiguration(
                region: region
            )
        )

        // Start the transcription running on the audio stream.
        let output = try await client.startStreamTranscription(
            input: StartStreamTranscriptionInput(
                audioStream: try await createAudioStream(),
                languageCode: TranscribeStreamingClientTypes.LanguageCode(rawValue: lang),
                mediaEncoding: encoding,
                mediaSampleRateHertz: sampleRate
            )
        )

        // Iterate over the events in the returned transcript result stream.
        // Each `transcriptevent` contains a list of result fragments which
        // need to be concatenated together to build the final transcript.
        for try await event in output.transcriptResultStream! {
            switch event {
            case .transcriptevent(let event):
                for result in event.transcript?.results ?? [] {
                    guard let transcript = result.alternatives?.first?.transcript else {
                        continue
                    }

                    // If showing partial results is enabled and the result is
                    // partial, show it. Partial results may be incomplete or
                    // inaccurate; upcoming audio can complete the transcription
                    // or add context that refines it.
                    if (result.isPartial && showPartial) {
                        print("[Partial] \(transcript)")
                    }

                    // When the complete fragment of transcribed text is ready,
                    // print it. This could just as easily be used to draw the
                    // text as a subtitle over a playing video, though timing
                    // would need to be managed.
                    if !result.isPartial {
                        if (showPartial) {
                            print("[Final ] ", terminator: "")
                        }
                        print(transcript)
                    }
                }
            default:
                print("Error: Unexpected message from Amazon Transcribe:")
            }
        }
    }

    /// Convert the value of the `--format` command line option into the
    /// corresponding Transcribe Streaming `MediaEncoding` type.
    ///
    /// - Returns: The `MediaEncoding` equivalent of the format specified on
    ///   the command line.
    func getMediaEncoding() -> TranscribeStreamingClientTypes.MediaEncoding {
        let mediaEncoding: TranscribeStreamingClientTypes.MediaEncoding

        switch format {
        case .flac:
            mediaEncoding = .flac
        case .ogg:
            mediaEncoding = .oggOpus
        case .pcm:
            mediaEncoding = .pcm
        }

        return mediaEncoding
    }
}

// -MARK: - Entry point

/// The program's asynchronous entry point.
@main
struct Main {
    static func main() async {
        let args = Array(CommandLine.arguments.dropFirst())

        do {
            let command = try ExampleCommand.parse(args)
            try await command.transcribe(encoding: command.getMediaEncoding())
        } catch let error as TranscribeError {
            print("ERROR: \(error.errorDescription ?? "Unknown error")")
        } catch {
            ExampleCommand.exit(withError: error)
        }
    }
}

/// Errors thrown by the example's functions.
enum TranscribeError: Error {
    /// No transcription stream available.
    case noTranscriptionStream
    /// The source media file couldn't be read.
    case readError

    var errorDescription: String? {
        switch self {
        case .noTranscriptionStream:
            return "No transcription stream returned by Amazon Transcribe."
        case .readError:
            return "Unable to read the source audio file."
        }
    }
}