

There are more AWS SDK examples available in the AWS Doc SDK Examples GitHub repo.


Use StartMedicalStreamTranscription with an AWS SDK

The following code example shows how to use StartMedicalStreamTranscription.

Java
SDK for Java 2.x
Note

There's more on GitHub. Find the complete example and learn how to set up and run in the AWS Code Examples Repository.

/*
 To run this AWS code example, ensure that you have set up your development
 environment, including your AWS credentials. For information, see this
 documentation topic:
 https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html

 This code demonstrates the process of starting a medical transcription job
 using the AWS Transcribe Streaming service, including setting up the audio
 input stream, configuring the transcription request, and handling the
 transcription response.
*/

import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.transcribestreaming.TranscribeStreamingAsyncClient;
import software.amazon.awssdk.services.transcribestreaming.model.AudioEvent;
import software.amazon.awssdk.services.transcribestreaming.model.AudioStream;
import software.amazon.awssdk.services.transcribestreaming.model.LanguageCode;
import software.amazon.awssdk.services.transcribestreaming.model.MediaEncoding;
import software.amazon.awssdk.services.transcribestreaming.model.MedicalResult;
import software.amazon.awssdk.services.transcribestreaming.model.MedicalTranscriptEvent;
import software.amazon.awssdk.services.transcribestreaming.model.Specialty;
import software.amazon.awssdk.services.transcribestreaming.model.StartMedicalStreamTranscriptionRequest;
import software.amazon.awssdk.services.transcribestreaming.model.StartMedicalStreamTranscriptionResponseHandler;
import software.amazon.awssdk.services.transcribestreaming.model.Type;

public class TranscribeMedicalStreamingDemoApp {
    private static TranscribeStreamingAsyncClient client;

    public static void main(String[] args)
            throws ExecutionException, InterruptedException, LineUnavailableException {
        client = TranscribeStreamingAsyncClient.builder()
                .credentialsProvider(getCredentials())
                .build();

        CompletableFuture<Void> result = client.startMedicalStreamTranscription(
                getMedicalRequest(16_000),
                new AudioStreamPublisher(getStreamFromMic()),
                getMedicalResponseHandler());

        result.get();
        client.close();
    }

    private static InputStream getStreamFromMic() throws LineUnavailableException {
        // Signed PCM AudioFormat with 16 kHz sample rate, 16-bit sample size, mono.
        int sampleRate = 16000;
        AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

        if (!AudioSystem.isLineSupported(info)) {
            System.out.println("Line not supported");
            throw new LineUnavailableException("The audio system microphone line is not supported.");
        }

        TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
        line.open(format);
        line.start();

        return new AudioInputStream(line);
    }

    private static AwsCredentialsProvider getCredentials() {
        return DefaultCredentialsProvider.create();
    }

    private static StartMedicalStreamTranscriptionRequest getMedicalRequest(Integer mediaSampleRateHertz) {
        return StartMedicalStreamTranscriptionRequest.builder()
                .languageCode(LanguageCode.EN_US.toString()) // For medical transcription, EN_US is typically used.
                .mediaEncoding(MediaEncoding.PCM)
                .mediaSampleRateHertz(mediaSampleRateHertz)
                .specialty(Specialty.PRIMARYCARE) // Specify the medical specialty.
                .type(Type.CONVERSATION) // Set the type as CONVERSATION or DICTATION.
                .build();
    }

    private static StartMedicalStreamTranscriptionResponseHandler getMedicalResponseHandler() {
        return StartMedicalStreamTranscriptionResponseHandler.builder()
                .onResponse(r -> {
                    System.out.println("Received Initial response");
                })
                .onError(e -> {
                    System.out.println(e.getMessage());
                    StringWriter sw = new StringWriter();
                    e.printStackTrace(new PrintWriter(sw));
                    System.out.println("Error Occurred: " + sw.toString());
                })
                .onComplete(() -> {
                    System.out.println("=== All records streamed successfully ===");
                })
                .subscriber(event -> {
                    // Print the first alternative of the first result for each transcript event.
                    List<MedicalResult> results = ((MedicalTranscriptEvent) event).transcript().results();
                    if (results.size() > 0) {
                        if (!results.get(0).alternatives().get(0).transcript().isEmpty()) {
                            System.out.println(results.get(0).alternatives().get(0).transcript());
                        }
                    }
                })
                .build();
    }

    private static class AudioStreamPublisher implements Publisher<AudioStream> {
        private final InputStream inputStream;
        private static Subscription currentSubscription;

        private AudioStreamPublisher(InputStream inputStream) {
            this.inputStream = inputStream;
        }

        @Override
        public void subscribe(Subscriber<? super AudioStream> s) {
            if (this.currentSubscription == null) {
                this.currentSubscription = new SubscriptionImpl(s, inputStream);
            } else {
                this.currentSubscription.cancel();
                this.currentSubscription = new SubscriptionImpl(s, inputStream);
            }
            s.onSubscribe(currentSubscription);
        }
    }

    public static class SubscriptionImpl implements Subscription {
        private static final int CHUNK_SIZE_IN_BYTES = 1024 * 1;
        private final Subscriber<? super AudioStream> subscriber;
        private final InputStream inputStream;
        private ExecutorService executor = Executors.newFixedThreadPool(1);
        private AtomicLong demand = new AtomicLong(0);

        SubscriptionImpl(Subscriber<? super AudioStream> s, InputStream inputStream) {
            this.subscriber = s;
            this.inputStream = inputStream;
        }

        @Override
        public void request(long n) {
            if (n <= 0) {
                subscriber.onError(new IllegalArgumentException("Demand must be positive"));
                return;
            }

            demand.getAndAdd(n);

            executor.submit(() -> {
                try {
                    do {
                        ByteBuffer audioBuffer = getNextEvent();
                        if (audioBuffer.remaining() > 0) {
                            AudioEvent audioEvent = audioEventFromBuffer(audioBuffer);
                            subscriber.onNext(audioEvent);
                        } else {
                            subscriber.onComplete();
                            break;
                        }
                    } while (demand.decrementAndGet() > 0);
                } catch (Exception e) {
                    subscriber.onError(e);
                }
            });
        }

        @Override
        public void cancel() {
            executor.shutdown();
        }

        private ByteBuffer getNextEvent() {
            // Read the next chunk of audio bytes and wrap it in a ByteBuffer.
            // An empty buffer signals the end of the stream.
            ByteBuffer audioBuffer;
            byte[] audioBytes = new byte[CHUNK_SIZE_IN_BYTES];

            try {
                int len = inputStream.read(audioBytes);
                if (len <= 0) {
                    audioBuffer = ByteBuffer.allocate(0);
                } else {
                    audioBuffer = ByteBuffer.wrap(audioBytes, 0, len);
                }
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }

            return audioBuffer;
        }

        private AudioEvent audioEventFromBuffer(ByteBuffer bb) {
            return AudioEvent.builder()
                    .audioChunk(SdkBytes.fromByteBuffer(bb))
                    .build();
        }
    }
}
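The example above captures live audio from the system microphone. If you don't have a microphone available, you can feed the same AudioStreamPublisher from a pre-recorded file instead. The following is a minimal sketch, not part of the official example: it assumes a raw, headerless PCM file (16 kHz, 16-bit, signed, little-endian, mono) at a hypothetical path.

// Hypothetical variation (not part of the official example): stream a pre-recorded
// raw PCM file through the same AudioStreamPublisher instead of the live microphone.
// In main(), replace getStreamFromMic() with getStreamFromFile("path/to/audio.pcm").
private static InputStream getStreamFromFile(String audioFilePath) {
    try {
        // Open the raw PCM file; its sample rate must match the value passed to getMedicalRequest().
        return new java.io.FileInputStream(new java.io.File(audioFilePath));
    } catch (java.io.FileNotFoundException e) {
        throw new RuntimeException(e);
    }
}

Because the request sets MediaEncoding.PCM, the file must contain raw audio samples rather than a WAV or other container format, and the sample rate you pass to getMedicalRequest must match the file's actual rate.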