1{
2  "version":"2.0",
3  "metadata":{
4    "apiVersion":"2017-10-26",
5    "endpointPrefix":"transcribestreaming",
6    "protocol":"rest-json",
7    "protocolSettings":{"h2":"eventstream"},
8    "serviceFullName":"Amazon Transcribe Streaming Service",
9    "serviceId":"Transcribe Streaming",
10    "signatureVersion":"v4",
11    "signingName":"transcribe",
12    "uid":"transcribe-streaming-2017-10-26"
13  },
14  "operations":{
15    "StartCallAnalyticsStreamTranscription":{
16      "name":"StartCallAnalyticsStreamTranscription",
17      "http":{
18        "method":"POST",
19        "requestUri":"/call-analytics-stream-transcription"
20      },
21      "input":{"shape":"StartCallAnalyticsStreamTranscriptionRequest"},
22      "output":{"shape":"StartCallAnalyticsStreamTranscriptionResponse"},
23      "errors":[
24        {"shape":"BadRequestException"},
25        {"shape":"LimitExceededException"},
26        {"shape":"InternalFailureException"},
27        {"shape":"ConflictException"},
28        {"shape":"ServiceUnavailableException"}
29      ],
30      "documentation":"<p>Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to Amazon Transcribe and the transcription results are streamed to your application. Use this operation for <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/call-analytics.html\">Call Analytics</a> transcriptions.</p> <p>The following parameters are required:</p> <ul> <li> <p> <code>language-code</code> </p> </li> <li> <p> <code>media-encoding</code> </p> </li> <li> <p> <code>sample-rate</code> </p> </li> </ul> <p>For more information on streaming with Amazon Transcribe, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html\">Transcribing streaming audio</a>.</p>"
31    },
32    "StartMedicalStreamTranscription":{
33      "name":"StartMedicalStreamTranscription",
34      "http":{
35        "method":"POST",
36        "requestUri":"/medical-stream-transcription"
37      },
38      "input":{"shape":"StartMedicalStreamTranscriptionRequest"},
39      "output":{"shape":"StartMedicalStreamTranscriptionResponse"},
40      "errors":[
41        {"shape":"BadRequestException"},
42        {"shape":"LimitExceededException"},
43        {"shape":"InternalFailureException"},
44        {"shape":"ConflictException"},
45        {"shape":"ServiceUnavailableException"}
46      ],
47      "documentation":"<p>Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to Amazon Transcribe Medical and the transcription results are streamed to your application.</p> <p>The following parameters are required:</p> <ul> <li> <p> <code>language-code</code> </p> </li> <li> <p> <code>media-encoding</code> </p> </li> <li> <p> <code>sample-rate</code> </p> </li> </ul> <p>For more information on streaming with Amazon Transcribe Medical, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html\">Transcribing streaming audio</a>.</p>"
48    },
49    "StartStreamTranscription":{
50      "name":"StartStreamTranscription",
51      "http":{
52        "method":"POST",
53        "requestUri":"/stream-transcription"
54      },
55      "input":{"shape":"StartStreamTranscriptionRequest"},
56      "output":{"shape":"StartStreamTranscriptionResponse"},
57      "errors":[
58        {"shape":"BadRequestException"},
59        {"shape":"LimitExceededException"},
60        {"shape":"InternalFailureException"},
61        {"shape":"ConflictException"},
62        {"shape":"ServiceUnavailableException"}
63      ],
64      "documentation":"<p>Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to Amazon Transcribe and the transcription results are streamed to your application.</p> <p>The following parameters are required:</p> <ul> <li> <p> <code>language-code</code> or <code>identify-language</code> or <code>identify-multiple-language</code> </p> </li> <li> <p> <code>media-encoding</code> </p> </li> <li> <p> <code>sample-rate</code> </p> </li> </ul> <p>For more information on streaming with Amazon Transcribe, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html\">Transcribing streaming audio</a>.</p>"
65    }
66  },
67  "shapes":{
68    "Alternative":{
69      "type":"structure",
70      "members":{
71        "Transcript":{
72          "shape":"String",
73          "documentation":"<p>Contains transcribed text.</p>"
74        },
75        "Items":{
76          "shape":"ItemList",
77          "documentation":"<p>Contains words, phrases, or punctuation marks in your transcription output.</p>"
78        },
79        "Entities":{
80          "shape":"EntityList",
81          "documentation":"<p>Contains entities identified as personally identifiable information (PII) in your transcription output.</p>"
82        }
83      },
84      "documentation":"<p>A list of possible alternative transcriptions for the input audio. Each alternative may contain one or more of <code>Items</code>, <code>Entities</code>, or <code>Transcript</code>.</p>"
85    },
86    "AlternativeList":{
87      "type":"list",
88      "member":{"shape":"Alternative"}
89    },
90    "AudioChunk":{"type":"blob"},
91    "AudioEvent":{
92      "type":"structure",
93      "members":{
94        "AudioChunk":{
95          "shape":"AudioChunk",
96          "documentation":"<p>An audio blob that contains the next part of the audio that you want to transcribe. The maximum audio chunk size is 32 KB.</p>",
97          "eventpayload":true
98        }
99      },
100      "documentation":"<p>A wrapper for your audio chunks. Your audio stream consists of one or more audio events, which consist of one or more audio chunks.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/event-stream.html\">Event stream encoding</a>.</p>",
101      "event":true
102    },
103    "AudioStream":{
104      "type":"structure",
105      "members":{
106        "AudioEvent":{
107          "shape":"AudioEvent",
108          "documentation":"<p>A blob of audio from your application. Your audio stream consists of one or more audio events.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/event-stream.html\">Event stream encoding</a>.</p>"
109        },
110        "ConfigurationEvent":{
111          "shape":"ConfigurationEvent",
112          "documentation":"<p>Contains audio channel definitions and post-call analytics settings.</p>"
113        }
114      },
115      "documentation":"<p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html\">Transcribing streaming audio</a>.</p>",
116      "eventstream":true
117    },
118    "BadRequestException":{
119      "type":"structure",
120      "members":{
121        "Message":{"shape":"String"}
122      },
123      "documentation":"<p>One or more arguments to the <code>StartStreamTranscription</code>, <code>StartMedicalStreamTranscription</code>, or <code>StartCallAnalyticsStreamTranscription</code> operation was not valid. For example, <code>MediaEncoding</code> or <code>LanguageCode</code> used not valid values. Check the specified parameters and try your request again.</p>",
124      "error":{"httpStatusCode":400},
125      "exception":true
126    },
127    "Boolean":{"type":"boolean"},
128    "CallAnalyticsEntity":{
129      "type":"structure",
130      "members":{
131        "BeginOffsetMillis":{
132          "shape":"Long",
133          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the start of the identified entity.</p>"
134        },
135        "EndOffsetMillis":{
136          "shape":"Long",
137          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the end of the identified entity.</p>"
138        },
139        "Category":{
140          "shape":"String",
141          "documentation":"<p>The category of information identified. For example, <code>PII</code>.</p>"
142        },
143        "Type":{
144          "shape":"String",
145          "documentation":"<p>The type of PII identified. For example, <code>NAME</code> or <code>CREDIT_DEBIT_NUMBER</code>.</p>"
146        },
147        "Content":{
148          "shape":"String",
149          "documentation":"<p>The word or words that represent the identified entity.</p>"
150        },
151        "Confidence":{
152          "shape":"Confidence",
153          "documentation":"<p>The confidence score associated with the identification of an entity in your transcript.</p> <p>Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified entity correctly matches the entity spoken in your media.</p>"
154        }
155      },
156      "documentation":"<p>Contains entities identified as personally identifiable information (PII) in your transcription output, along with various associated attributes. Examples include category, confidence score, content, type, and start and end times.</p>"
157    },
158    "CallAnalyticsEntityList":{
159      "type":"list",
160      "member":{"shape":"CallAnalyticsEntity"}
161    },
162    "CallAnalyticsItem":{
163      "type":"structure",
164      "members":{
165        "BeginOffsetMillis":{
166          "shape":"Long",
167          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the start of the identified item.</p>"
168        },
169        "EndOffsetMillis":{
170          "shape":"Long",
171          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the end of the identified item.</p>"
172        },
173        "Type":{
174          "shape":"ItemType",
175          "documentation":"<p>The type of item identified. Options are: <code>PRONUNCIATION</code> (spoken words) and <code>PUNCTUATION</code>.</p>"
176        },
177        "Content":{
178          "shape":"String",
179          "documentation":"<p>The word or punctuation that was transcribed.</p>"
180        },
181        "Confidence":{
182          "shape":"Confidence",
183          "documentation":"<p>The confidence score associated with a word or phrase in your transcript.</p> <p>Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified item correctly matches the item spoken in your media.</p>"
184        },
185        "VocabularyFilterMatch":{
186          "shape":"Boolean",
187          "documentation":"<p>Indicates whether the specified item matches a word in the vocabulary filter included in your Call Analytics request. If <code>true</code>, there is a vocabulary filter match.</p>"
188        },
189        "Stable":{
190          "shape":"Stable",
191          "documentation":"<p>If partial result stabilization is enabled, <code>Stable</code> indicates whether the specified item is stable (<code>true</code>) or if it may change when the segment is complete (<code>false</code>).</p>"
192        }
193      },
194      "documentation":"<p>A word, phrase, or punctuation mark in your Call Analytics transcription output, along with various associated attributes, such as confidence score, type, and start and end times.</p>"
195    },
196    "CallAnalyticsItemList":{
197      "type":"list",
198      "member":{"shape":"CallAnalyticsItem"}
199    },
200    "CallAnalyticsLanguageCode":{
201      "type":"string",
202      "enum":[
203        "en-US",
204        "en-GB",
205        "es-US",
206        "fr-CA",
207        "fr-FR",
208        "en-AU",
209        "it-IT",
210        "de-DE",
211        "pt-BR"
212      ]
213    },
214    "CallAnalyticsTranscriptResultStream":{
215      "type":"structure",
216      "members":{
217        "UtteranceEvent":{
218          "shape":"UtteranceEvent",
219          "documentation":"<p>Contains set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to channel definitions, partial result stabilization, sentiment, issue detection, and other transcription-related data.</p>"
220        },
221        "CategoryEvent":{
222          "shape":"CategoryEvent",
223          "documentation":"<p>Provides information on matched categories that were used to generate real-time supervisor alerts.</p>"
224        },
225        "BadRequestException":{"shape":"BadRequestException"},
226        "LimitExceededException":{"shape":"LimitExceededException"},
227        "InternalFailureException":{"shape":"InternalFailureException"},
228        "ConflictException":{"shape":"ConflictException"},
229        "ServiceUnavailableException":{"shape":"ServiceUnavailableException"}
230      },
231      "documentation":"<p>Contains detailed information about your Call Analytics streaming session. These details are provided in the <code>UtteranceEvent</code> and <code>CategoryEvent</code> objects.</p>",
232      "eventstream":true
233    },
234    "CategoryEvent":{
235      "type":"structure",
236      "members":{
237        "MatchedCategories":{
238          "shape":"StringList",
239          "documentation":"<p>Lists the categories that were matched in your audio segment.</p>"
240        },
241        "MatchedDetails":{
242          "shape":"MatchedCategoryDetails",
243          "documentation":"<p>Contains information about the matched categories, including category names and timestamps.</p>"
244        }
245      },
246      "documentation":"<p>Provides information on any <code>TranscriptFilterType</code> categories that matched your transcription output. Matches are identified for each segment upon completion of that segment.</p>",
247      "event":true
248    },
249    "ChannelDefinition":{
250      "type":"structure",
251      "required":[
252        "ChannelId",
253        "ParticipantRole"
254      ],
255      "members":{
256        "ChannelId":{
257          "shape":"ChannelId",
258          "documentation":"<p>Specify the audio channel you want to define.</p>"
259        },
260        "ParticipantRole":{
261          "shape":"ParticipantRole",
262          "documentation":"<p>Specify the speaker you want to define. Omitting this parameter is equivalent to specifying both participants.</p>"
263        }
264      },
265      "documentation":"<p>Makes it possible to specify which speaker is on which audio channel. For example, if your agent is the first participant to speak, you would set <code>ChannelId</code> to <code>0</code> (to indicate the first channel) and <code>ParticipantRole</code> to <code>AGENT</code> (to indicate that it's the agent speaking).</p>"
266    },
267    "ChannelDefinitions":{
268      "type":"list",
269      "member":{"shape":"ChannelDefinition"},
270      "max":2,
271      "min":2
272    },
273    "ChannelId":{
274      "type":"integer",
275      "max":1,
276      "min":0
277    },
278    "CharacterOffsets":{
279      "type":"structure",
280      "members":{
281        "Begin":{
282          "shape":"Integer",
283          "documentation":"<p>Provides the character count of the first character where a match is identified. For example, the first character associated with an issue or a category match in a segment transcript.</p>"
284        },
285        "End":{
286          "shape":"Integer",
287          "documentation":"<p>Provides the character count of the last character where a match is identified. For example, the last character associated with an issue or a category match in a segment transcript.</p>"
288        }
289      },
290      "documentation":"<p>Provides the location, using character count, in your transcript where a match is identified. For example, the location of an issue or a category match within a segment.</p>"
291    },
292    "Confidence":{"type":"double"},
293    "ConfigurationEvent":{
294      "type":"structure",
295      "members":{
296        "ChannelDefinitions":{
297          "shape":"ChannelDefinitions",
298          "documentation":"<p>Indicates which speaker is on which audio channel.</p>"
299        },
300        "PostCallAnalyticsSettings":{
301          "shape":"PostCallAnalyticsSettings",
302          "documentation":"<p>Provides additional optional settings for your Call Analytics post-call request, including encryption and output locations for your redacted and unredacted transcript.</p>"
303        }
304      },
305      "documentation":"<p>Allows you to set audio channel definitions and post-call analytics settings.</p>",
306      "event":true
307    },
308    "ConflictException":{
309      "type":"structure",
310      "members":{
311        "Message":{"shape":"String"}
312      },
313      "documentation":"<p>A new stream started with the same session ID. The current stream has been terminated.</p>",
314      "error":{"httpStatusCode":409},
315      "exception":true
316    },
317    "ContentIdentificationType":{
318      "type":"string",
319      "enum":["PII"]
320    },
321    "ContentRedactionOutput":{
322      "type":"string",
323      "enum":[
324        "redacted",
325        "redacted_and_unredacted"
326      ]
327    },
328    "ContentRedactionType":{
329      "type":"string",
330      "enum":["PII"]
331    },
332    "Double":{"type":"double"},
333    "Entity":{
334      "type":"structure",
335      "members":{
336        "StartTime":{
337          "shape":"Double",
338          "documentation":"<p>The start time, in milliseconds, of the utterance that was identified as PII.</p>"
339        },
340        "EndTime":{
341          "shape":"Double",
342          "documentation":"<p>The end time, in milliseconds, of the utterance that was identified as PII.</p>"
343        },
344        "Category":{
345          "shape":"String",
346          "documentation":"<p>The category of information identified. The only category is <code>PII</code>.</p>"
347        },
348        "Type":{
349          "shape":"String",
350          "documentation":"<p>The type of PII identified. For example, <code>NAME</code> or <code>CREDIT_DEBIT_NUMBER</code>.</p>"
351        },
352        "Content":{
353          "shape":"String",
354          "documentation":"<p>The word or words identified as PII.</p>"
355        },
356        "Confidence":{
357          "shape":"Confidence",
358          "documentation":"<p>The confidence score associated with the identified PII entity in your audio.</p> <p>Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified entity correctly matches the entity spoken in your media.</p>"
359        }
360      },
361      "documentation":"<p>Contains entities identified as personally identifiable information (PII) in your transcription output, along with various associated attributes. Examples include category, confidence score, type, stability score, and start and end times.</p>"
362    },
363    "EntityList":{
364      "type":"list",
365      "member":{"shape":"Entity"}
366    },
367    "Integer":{"type":"integer"},
368    "InternalFailureException":{
369      "type":"structure",
370      "members":{
371        "Message":{"shape":"String"}
372      },
373      "documentation":"<p>A problem occurred while processing the audio. Amazon Transcribe terminated processing.</p>",
374      "error":{"httpStatusCode":500},
375      "exception":true,
376      "fault":true
377    },
378    "IssueDetected":{
379      "type":"structure",
380      "members":{
381        "CharacterOffsets":{
382          "shape":"CharacterOffsets",
383          "documentation":"<p>Provides the timestamps that identify when in an audio segment the specified issue occurs.</p>"
384        }
385      },
386      "documentation":"<p>Lists the issues that were identified in your audio segment.</p>"
387    },
388    "IssuesDetected":{
389      "type":"list",
390      "member":{"shape":"IssueDetected"}
391    },
392    "Item":{
393      "type":"structure",
394      "members":{
395        "StartTime":{
396          "shape":"Double",
397          "documentation":"<p>The start time, in milliseconds, of the transcribed item.</p>"
398        },
399        "EndTime":{
400          "shape":"Double",
401          "documentation":"<p>The end time, in milliseconds, of the transcribed item.</p>"
402        },
403        "Type":{
404          "shape":"ItemType",
405          "documentation":"<p>The type of item identified. Options are: <code>PRONUNCIATION</code> (spoken words) and <code>PUNCTUATION</code>.</p>"
406        },
407        "Content":{
408          "shape":"String",
409          "documentation":"<p>The word or punctuation that was transcribed.</p>"
410        },
411        "VocabularyFilterMatch":{
412          "shape":"Boolean",
413          "documentation":"<p>Indicates whether the specified item matches a word in the vocabulary filter included in your request. If <code>true</code>, there is a vocabulary filter match.</p>"
414        },
415        "Speaker":{
416          "shape":"String",
417          "documentation":"<p>If speaker partitioning is enabled, <code>Speaker</code> labels the speaker of the specified item.</p>"
418        },
419        "Confidence":{
420          "shape":"Confidence",
421          "documentation":"<p>The confidence score associated with a word or phrase in your transcript.</p> <p>Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified item correctly matches the item spoken in your media.</p>"
422        },
423        "Stable":{
424          "shape":"Stable",
425          "documentation":"<p>If partial result stabilization is enabled, <code>Stable</code> indicates whether the specified item is stable (<code>true</code>) or if it may change when the segment is complete (<code>false</code>).</p>"
426        }
427      },
428      "documentation":"<p>A word, phrase, or punctuation mark in your transcription output, along with various associated attributes, such as confidence score, type, and start and end times.</p>"
429    },
430    "ItemList":{
431      "type":"list",
432      "member":{"shape":"Item"}
433    },
434    "ItemType":{
435      "type":"string",
436      "enum":[
437        "pronunciation",
438        "punctuation"
439      ]
440    },
441    "LanguageCode":{
442      "type":"string",
443      "enum":[
444        "en-US",
445        "en-GB",
446        "es-US",
447        "fr-CA",
448        "fr-FR",
449        "en-AU",
450        "it-IT",
451        "de-DE",
452        "pt-BR",
453        "ja-JP",
454        "ko-KR",
455        "zh-CN",
456        "hi-IN",
457        "th-TH"
458      ]
459    },
460    "LanguageIdentification":{
461      "type":"list",
462      "member":{"shape":"LanguageWithScore"}
463    },
464    "LanguageOptions":{
465      "type":"string",
466      "max":200,
467      "min":1,
468      "pattern":"^[a-zA-Z-,]+"
469    },
470    "LanguageWithScore":{
471      "type":"structure",
472      "members":{
473        "LanguageCode":{
474          "shape":"LanguageCode",
475          "documentation":"<p>The language code of the identified language.</p>"
476        },
477        "Score":{
478          "shape":"Double",
479          "documentation":"<p>The confidence score associated with the identified language code. Confidence scores are values between zero and one; larger values indicate a higher confidence in the identified language.</p>"
480        }
481      },
482      "documentation":"<p>The language code that represents the language identified in your audio, including the associated confidence score. If you enabled channel identification in your request and each channel contained a different language, you will have more than one <code>LanguageWithScore</code> result.</p>"
483    },
484    "LimitExceededException":{
485      "type":"structure",
486      "members":{
487        "Message":{"shape":"String"}
488      },
489      "documentation":"<p>Your client has exceeded one of the Amazon Transcribe limits. This is typically the audio length limit. Break your audio stream into smaller chunks and try your request again.</p>",
490      "error":{"httpStatusCode":429},
491      "exception":true
492    },
493    "Long":{"type":"long"},
494    "MatchedCategoryDetails":{
495      "type":"map",
496      "key":{"shape":"String"},
497      "value":{"shape":"PointsOfInterest"}
498    },
499    "MediaEncoding":{
500      "type":"string",
501      "enum":[
502        "pcm",
503        "ogg-opus",
504        "flac"
505      ]
506    },
507    "MediaSampleRateHertz":{
508      "type":"integer",
509      "max":48000,
510      "min":8000
511    },
512    "MedicalAlternative":{
513      "type":"structure",
514      "members":{
515        "Transcript":{
516          "shape":"String",
517          "documentation":"<p>Contains transcribed text.</p>"
518        },
519        "Items":{
520          "shape":"MedicalItemList",
521          "documentation":"<p>Contains words, phrases, or punctuation marks in your transcription output.</p>"
522        },
523        "Entities":{
524          "shape":"MedicalEntityList",
525          "documentation":"<p>Contains entities identified as personal health information (PHI) in your transcription output.</p>"
526        }
527      },
528      "documentation":"<p>A list of possible alternative transcriptions for the input audio. Each alternative may contain one or more of <code>Items</code>, <code>Entities</code>, or <code>Transcript</code>.</p>"
529    },
530    "MedicalAlternativeList":{
531      "type":"list",
532      "member":{"shape":"MedicalAlternative"}
533    },
534    "MedicalContentIdentificationType":{
535      "type":"string",
536      "enum":["PHI"]
537    },
538    "MedicalEntity":{
539      "type":"structure",
540      "members":{
541        "StartTime":{
542          "shape":"Double",
543          "documentation":"<p>The start time, in milliseconds, of the utterance that was identified as PHI.</p>"
544        },
545        "EndTime":{
546          "shape":"Double",
547          "documentation":"<p>The end time, in milliseconds, of the utterance that was identified as PHI.</p>"
548        },
549        "Category":{
550          "shape":"String",
551          "documentation":"<p>The category of information identified. The only category is <code>PHI</code>.</p>"
552        },
553        "Content":{
554          "shape":"String",
555          "documentation":"<p>The word or words identified as PHI.</p>"
556        },
557        "Confidence":{
558          "shape":"Confidence",
559          "documentation":"<p>The confidence score associated with the identified PHI entity in your audio.</p> <p>Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified entity correctly matches the entity spoken in your media.</p>"
560        }
561      },
562      "documentation":"<p>Contains entities identified as personal health information (PHI) in your transcription output, along with various associated attributes. Examples include category, confidence score, type, stability score, and start and end times.</p>"
563    },
564    "MedicalEntityList":{
565      "type":"list",
566      "member":{"shape":"MedicalEntity"}
567    },
568    "MedicalItem":{
569      "type":"structure",
570      "members":{
571        "StartTime":{
572          "shape":"Double",
573          "documentation":"<p>The start time, in milliseconds, of the transcribed item.</p>"
574        },
575        "EndTime":{
576          "shape":"Double",
577          "documentation":"<p>The end time, in milliseconds, of the transcribed item.</p>"
578        },
579        "Type":{
580          "shape":"ItemType",
581          "documentation":"<p>The type of item identified. Options are: <code>PRONUNCIATION</code> (spoken words) and <code>PUNCTUATION</code>.</p>"
582        },
583        "Content":{
584          "shape":"String",
585          "documentation":"<p>The word or punctuation that was transcribed.</p>"
586        },
587        "Confidence":{
588          "shape":"Confidence",
589          "documentation":"<p>The confidence score associated with a word or phrase in your transcript.</p> <p>Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified item correctly matches the item spoken in your media.</p>"
590        },
591        "Speaker":{
592          "shape":"String",
593          "documentation":"<p>If speaker partitioning is enabled, <code>Speaker</code> labels the speaker of the specified item.</p>"
594        }
595      },
596      "documentation":"<p>A word, phrase, or punctuation mark in your transcription output, along with various associated attributes, such as confidence score, type, and start and end times.</p>"
597    },
598    "MedicalItemList":{
599      "type":"list",
600      "member":{"shape":"MedicalItem"}
601    },
602    "MedicalResult":{
603      "type":"structure",
604      "members":{
605        "ResultId":{
606          "shape":"String",
607          "documentation":"<p>Provides a unique identifier for the <code>Result</code>.</p>"
608        },
609        "StartTime":{
610          "shape":"Double",
611          "documentation":"<p>The start time, in milliseconds, of the <code>Result</code>.</p>"
612        },
613        "EndTime":{
614          "shape":"Double",
615          "documentation":"<p>The end time, in milliseconds, of the <code>Result</code>.</p>"
616        },
617        "IsPartial":{
618          "shape":"Boolean",
619          "documentation":"<p>Indicates if the segment is complete.</p> <p>If <code>IsPartial</code> is <code>true</code>, the segment is not complete. If <code>IsPartial</code> is <code>false</code>, the segment is complete.</p>"
620        },
621        "Alternatives":{
622          "shape":"MedicalAlternativeList",
623          "documentation":"<p>A list of possible alternative transcriptions for the input audio. Each alternative may contain one or more of <code>Items</code>, <code>Entities</code>, or <code>Transcript</code>.</p>"
624        },
625        "ChannelId":{
626          "shape":"String",
627          "documentation":"<p>Indicates the channel identified for the <code>Result</code>.</p>"
628        }
629      },
630      "documentation":"<p>The <code>Result</code> associated with a <code/>.</p> <p>Contains a set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.</p>"
631    },
632    "MedicalResultList":{
633      "type":"list",
634      "member":{"shape":"MedicalResult"}
635    },
636    "MedicalTranscript":{
637      "type":"structure",
638      "members":{
639        "Results":{
640          "shape":"MedicalResultList",
641          "documentation":"<p>Contains a set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.</p>"
642        }
643      },
644      "documentation":"<p>The <code>MedicalTranscript</code> associated with a <code/>.</p> <p> <code>MedicalTranscript</code> contains <code>Results</code>, which contains a set of transcription results from one or more audio segments, along with additional information per your request parameters.</p>"
645    },
646    "MedicalTranscriptEvent":{
647      "type":"structure",
648      "members":{
649        "Transcript":{
650          "shape":"MedicalTranscript",
651          "documentation":"<p>Contains <code>Results</code>, which contains a set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.</p>"
652        }
653      },
654      "documentation":"<p>The <code>MedicalTranscriptEvent</code> associated with a <code>MedicalTranscriptResultStream</code>.</p> <p>Contains a set of transcription results from one or more audio segments, along with additional information per your request parameters.</p>",
655      "event":true
656    },
657    "MedicalTranscriptResultStream":{
658      "type":"structure",
659      "members":{
660        "TranscriptEvent":{
661          "shape":"MedicalTranscriptEvent",
662          "documentation":"<p>The <code>MedicalTranscriptEvent</code> associated with a <code>MedicalTranscriptResultStream</code>.</p> <p>Contains a set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.</p>"
663        },
664        "BadRequestException":{"shape":"BadRequestException"},
665        "LimitExceededException":{"shape":"LimitExceededException"},
666        "InternalFailureException":{"shape":"InternalFailureException"},
667        "ConflictException":{"shape":"ConflictException"},
668        "ServiceUnavailableException":{"shape":"ServiceUnavailableException"}
669      },
670      "documentation":"<p>Contains detailed information about your streaming session.</p>",
671      "eventstream":true
672    },
673    "ModelName":{
674      "type":"string",
675      "max":200,
676      "min":1,
677      "pattern":"^[0-9a-zA-Z._-]+"
678    },
679    "NumberOfChannels":{
680      "type":"integer",
681      "min":2
682    },
683    "PartialResultsStability":{
684      "type":"string",
685      "enum":[
686        "high",
687        "medium",
688        "low"
689      ]
690    },
691    "ParticipantRole":{
692      "type":"string",
693      "enum":[
694        "AGENT",
695        "CUSTOMER"
696      ]
697    },
698    "PiiEntityTypes":{
699      "type":"string",
700      "max":300,
701      "min":1,
702      "pattern":"^[A-Z_, ]+"
703    },
704    "PointsOfInterest":{
705      "type":"structure",
706      "members":{
707        "TimestampRanges":{
708          "shape":"TimestampRanges",
709          "documentation":"<p>Contains the timestamp ranges (start time through end time) of matched categories and rules.</p>"
710        }
711      },
712      "documentation":"<p>Contains the timestamps of matched categories.</p>"
713    },
714    "PostCallAnalyticsSettings":{
715      "type":"structure",
716      "required":[
717        "OutputLocation",
718        "DataAccessRoleArn"
719      ],
720      "members":{
721        "OutputLocation":{
722          "shape":"String",
723          "documentation":"<p>The Amazon S3 location where you want your Call Analytics post-call transcription output stored. You can use any of the following formats to specify the output location:</p> <ol> <li> <p>s3://DOC-EXAMPLE-BUCKET</p> </li> <li> <p>s3://DOC-EXAMPLE-BUCKET/my-output-folder/</p> </li> <li> <p>s3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json</p> </li> </ol>"
724        },
725        "DataAccessRoleArn":{
726          "shape":"String",
727          "documentation":"<p>The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role that you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.</p> <p>IAM role ARNs have the format <code>arn:partition:iam::account:role/role-name-with-path</code>. For example: <code>arn:aws:iam::111122223333:role/Admin</code>. For more information, see <a href=\"https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns\">IAM ARNs</a>.</p>"
728        },
729        "ContentRedactionOutput":{
730          "shape":"ContentRedactionOutput",
731          "documentation":"<p>Specify whether you want only a redacted transcript or both a redacted and an unredacted transcript. If you choose redacted and unredacted, two JSON files are generated and stored in the Amazon S3 output location you specify.</p> <p>Note that to include <code>ContentRedactionOutput</code> in your request, you must enable content redaction (<code>ContentRedactionType</code>).</p>"
732        },
733        "OutputEncryptionKMSKeyId":{
734          "shape":"String",
735          "documentation":"<p>The KMS key you want to use to encrypt your Call Analytics post-call output.</p> <p>If using a key located in the <b>current</b> Amazon Web Services account, you can specify your KMS key in one of four ways:</p> <ol> <li> <p>Use the KMS key ID itself. For example, <code>1234abcd-12ab-34cd-56ef-1234567890ab</code>.</p> </li> <li> <p>Use an alias for the KMS key ID. For example, <code>alias/ExampleAlias</code>.</p> </li> <li> <p>Use the Amazon Resource Name (ARN) for the KMS key ID. For example, <code>arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>.</p> </li> <li> <p>Use the ARN for the KMS key alias. For example, <code>arn:aws:kms:region:account-ID:alias/ExampleAlias</code>.</p> </li> </ol> <p>If using a key located in a <b>different</b> Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways:</p> <ol> <li> <p>Use the ARN for the KMS key ID. For example, <code>arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>.</p> </li> <li> <p>Use the ARN for the KMS key alias. For example, <code>arn:aws:kms:region:account-ID:alias/ExampleAlias</code>.</p> </li> </ol> <p>Note that the user making the request must have permission to use the specified KMS key.</p>"
736        }
737      },
738      "documentation":"<p>Allows you to specify additional settings for your streaming Call Analytics post-call request, including output locations for your redacted and unredacted transcript, which IAM role to use, and, optionally, which encryption key to use.</p> <p> <code>ContentRedactionOutput</code>, <code>DataAccessRoleArn</code>, and <code>OutputLocation</code> are required fields.</p>"
739    },
740    "RequestId":{"type":"string"},
741    "Result":{
742      "type":"structure",
743      "members":{
744        "ResultId":{
745          "shape":"String",
746          "documentation":"<p>Provides a unique identifier for the <code>Result</code>.</p>"
747        },
748        "StartTime":{
749          "shape":"Double",
750          "documentation":"<p>The start time, in milliseconds, of the <code>Result</code>.</p>"
751        },
752        "EndTime":{
753          "shape":"Double",
754          "documentation":"<p>The end time, in milliseconds, of the <code>Result</code>.</p>"
755        },
756        "IsPartial":{
757          "shape":"Boolean",
758          "documentation":"<p>Indicates if the segment is complete.</p> <p>If <code>IsPartial</code> is <code>true</code>, the segment is not complete. If <code>IsPartial</code> is <code>false</code>, the segment is complete.</p>"
759        },
760        "Alternatives":{
761          "shape":"AlternativeList",
762          "documentation":"<p>A list of possible alternative transcriptions for the input audio. Each alternative may contain one or more of <code>Items</code>, <code>Entities</code>, or <code>Transcript</code>.</p>"
763        },
764        "ChannelId":{
765          "shape":"String",
766          "documentation":"<p>Indicates which audio channel is associated with the <code>Result</code>.</p>"
767        },
768        "LanguageCode":{
769          "shape":"LanguageCode",
770          "documentation":"<p>The language code that represents the language spoken in your audio stream.</p>"
771        },
772        "LanguageIdentification":{
773          "shape":"LanguageIdentification",
774          "documentation":"<p>The language code of the dominant language identified in your stream.</p> <p>If you enabled channel identification and each channel of your audio contains a different language, you may have more than one result.</p>"
775        }
776      },
777      "documentation":"<p>The <code>Result</code> associated with a <code/>.</p> <p>Contains a set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.</p>"
778    },
779    "ResultList":{
780      "type":"list",
781      "member":{"shape":"Result"}
782    },
783    "Sentiment":{
784      "type":"string",
785      "enum":[
786        "POSITIVE",
787        "NEGATIVE",
788        "MIXED",
789        "NEUTRAL"
790      ]
791    },
792    "ServiceUnavailableException":{
793      "type":"structure",
794      "members":{
795        "Message":{"shape":"String"}
796      },
797      "documentation":"<p>The service is currently unavailable. Try your request later.</p>",
798      "error":{"httpStatusCode":503},
799      "exception":true
800    },
801    "SessionId":{
802      "type":"string",
803      "max":36,
804      "min":36,
805      "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"
806    },
807    "Specialty":{
808      "type":"string",
809      "enum":[
810        "PRIMARYCARE",
811        "CARDIOLOGY",
812        "NEUROLOGY",
813        "ONCOLOGY",
814        "RADIOLOGY",
815        "UROLOGY"
816      ]
817    },
818    "Stable":{"type":"boolean"},
819    "StartCallAnalyticsStreamTranscriptionRequest":{
820      "type":"structure",
821      "required":[
822        "LanguageCode",
823        "MediaSampleRateHertz",
824        "MediaEncoding",
825        "AudioStream"
826      ],
827      "members":{
828        "LanguageCode":{
829          "shape":"CallAnalyticsLanguageCode",
830          "documentation":"<p>Specify the language code that represents the language spoken in your audio.</p> <p>If you're unsure of the language spoken in your audio, consider using <code>IdentifyLanguage</code> to enable automatic language identification.</p> <p>For a list of languages supported with streaming Call Analytics, refer to the <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html\">Supported languages</a> table.</p>",
831          "location":"header",
832          "locationName":"x-amzn-transcribe-language-code"
833        },
834        "MediaSampleRateHertz":{
835          "shape":"MediaSampleRateHertz",
836          "documentation":"<p>The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>",
837          "location":"header",
838          "locationName":"x-amzn-transcribe-sample-rate"
839        },
840        "MediaEncoding":{
841          "shape":"MediaEncoding",
842          "documentation":"<p>Specify the encoding of your input audio. Supported formats are:</p> <ul> <li> <p>FLAC</p> </li> <li> <p>OPUS-encoded audio in an Ogg container</p> </li> <li> <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p> </li> </ul> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio\">Media formats</a>.</p>",
843          "location":"header",
844          "locationName":"x-amzn-transcribe-media-encoding"
845        },
846        "VocabularyName":{
847          "shape":"VocabularyName",
848          "documentation":"<p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p> <p>If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html\">Custom vocabularies</a>.</p>",
849          "location":"header",
850          "locationName":"x-amzn-transcribe-vocabulary-name"
851        },
852        "SessionId":{
853          "shape":"SessionId",
854          "documentation":"<p>Specify a name for your Call Analytics transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response.</p> <p>You can use a session ID to retry a streaming session.</p>",
855          "location":"header",
856          "locationName":"x-amzn-transcribe-session-id"
857        },
858        "AudioStream":{"shape":"AudioStream"},
859        "VocabularyFilterName":{
860          "shape":"VocabularyFilterName",
861          "documentation":"<p>Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p> <p>If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html\">Using vocabulary filtering with unwanted words</a>.</p>",
862          "location":"header",
863          "locationName":"x-amzn-transcribe-vocabulary-filter-name"
864        },
865        "VocabularyFilterMethod":{
866          "shape":"VocabularyFilterMethod",
867          "documentation":"<p>Specify how you want your vocabulary filter applied to your transcript.</p> <p>To replace words with <code>***</code>, choose <code>mask</code>.</p> <p>To delete words, choose <code>remove</code>.</p> <p>To flag words without changing them, choose <code>tag</code>.</p>",
868          "location":"header",
869          "locationName":"x-amzn-transcribe-vocabulary-filter-method"
870        },
871        "LanguageModelName":{
872          "shape":"ModelName",
873          "documentation":"<p>Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive.</p> <p>The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html\">Custom language models</a>.</p>",
874          "location":"header",
875          "locationName":"x-amzn-transcribe-language-model-name"
876        },
877        "EnablePartialResultsStabilization":{
878          "shape":"Boolean",
879          "documentation":"<p>Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization\">Partial-result stabilization</a>.</p>",
880          "location":"header",
881          "locationName":"x-amzn-transcribe-enable-partial-results-stabilization"
882        },
883        "PartialResultsStability":{
884          "shape":"PartialResultsStability",
885          "documentation":"<p>Specify the level of stability to use when you enable partial results stabilization (<code>EnablePartialResultsStabilization</code>).</p> <p>Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization\">Partial-result stabilization</a>.</p>",
886          "location":"header",
887          "locationName":"x-amzn-transcribe-partial-results-stability"
888        },
889        "ContentIdentificationType":{
890          "shape":"ContentIdentificationType",
891          "documentation":"<p>Labels all personally identifiable information (PII) identified in your transcript.</p> <p>Content identification is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is flagged upon complete transcription of an audio segment.</p> <p>You can’t set <code>ContentIdentificationType</code> and <code>ContentRedactionType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html\">Redacting or identifying personally identifiable information</a>.</p>",
892          "location":"header",
893          "locationName":"x-amzn-transcribe-content-identification-type"
894        },
895        "ContentRedactionType":{
896          "shape":"ContentRedactionType",
897          "documentation":"<p>Redacts all personally identifiable information (PII) identified in your transcript.</p> <p>Content redaction is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is redacted upon complete transcription of an audio segment.</p> <p>You can’t set <code>ContentRedactionType</code> and <code>ContentIdentificationType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html\">Redacting or identifying personally identifiable information</a>.</p>",
898          "location":"header",
899          "locationName":"x-amzn-transcribe-content-redaction-type"
900        },
901        "PiiEntityTypes":{
902          "shape":"PiiEntityTypes",
903          "documentation":"<p>Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select <code>ALL</code>.</p> <p>To include <code>PiiEntityTypes</code> in your Call Analytics request, you must also include either <code>ContentIdentificationType</code> or <code>ContentRedactionType</code>.</p> <p>Values must be comma-separated and can include: <code>BANK_ACCOUNT_NUMBER</code>, <code>BANK_ROUTING</code>, <code>CREDIT_DEBIT_NUMBER</code>, <code>CREDIT_DEBIT_CVV</code>, <code>CREDIT_DEBIT_EXPIRY</code>, <code>PIN</code>, <code>EMAIL</code>, <code>ADDRESS</code>, <code>NAME</code>, <code>PHONE</code>, <code>SSN</code>, or <code>ALL</code>.</p>",
904          "location":"header",
905          "locationName":"x-amzn-transcribe-pii-entity-types"
906        }
907      },
908      "payload":"AudioStream"
909    },
910    "StartCallAnalyticsStreamTranscriptionResponse":{
911      "type":"structure",
912      "members":{
913        "RequestId":{
914          "shape":"RequestId",
915          "documentation":"<p>Provides the identifier for your Call Analytics streaming request.</p>",
916          "location":"header",
917          "locationName":"x-amzn-request-id"
918        },
919        "LanguageCode":{
920          "shape":"CallAnalyticsLanguageCode",
921          "documentation":"<p>Provides the language code that you specified in your Call Analytics request.</p>",
922          "location":"header",
923          "locationName":"x-amzn-transcribe-language-code"
924        },
925        "MediaSampleRateHertz":{
926          "shape":"MediaSampleRateHertz",
927          "documentation":"<p>Provides the sample rate that you specified in your Call Analytics request.</p>",
928          "location":"header",
929          "locationName":"x-amzn-transcribe-sample-rate"
930        },
931        "MediaEncoding":{
932          "shape":"MediaEncoding",
933          "documentation":"<p>Provides the media encoding you specified in your Call Analytics request.</p>",
934          "location":"header",
935          "locationName":"x-amzn-transcribe-media-encoding"
936        },
937        "VocabularyName":{
938          "shape":"VocabularyName",
939          "documentation":"<p>Provides the name of the custom vocabulary that you specified in your Call Analytics request.</p>",
940          "location":"header",
941          "locationName":"x-amzn-transcribe-vocabulary-name"
942        },
943        "SessionId":{
944          "shape":"SessionId",
945          "documentation":"<p>Provides the identifier for your Call Analytics transcription session.</p>",
946          "location":"header",
947          "locationName":"x-amzn-transcribe-session-id"
948        },
949        "CallAnalyticsTranscriptResultStream":{
950          "shape":"CallAnalyticsTranscriptResultStream",
951          "documentation":"<p>Provides detailed information about your Call Analytics streaming session.</p>"
952        },
953        "VocabularyFilterName":{
954          "shape":"VocabularyFilterName",
955          "documentation":"<p>Provides the name of the custom vocabulary filter that you specified in your Call Analytics request.</p>",
956          "location":"header",
957          "locationName":"x-amzn-transcribe-vocabulary-filter-name"
958        },
959        "VocabularyFilterMethod":{
960          "shape":"VocabularyFilterMethod",
961          "documentation":"<p>Provides the vocabulary filtering method used in your Call Analytics transcription.</p>",
962          "location":"header",
963          "locationName":"x-amzn-transcribe-vocabulary-filter-method"
964        },
965        "LanguageModelName":{
966          "shape":"ModelName",
967          "documentation":"<p>Provides the name of the custom language model that you specified in your Call Analytics request.</p>",
968          "location":"header",
969          "locationName":"x-amzn-transcribe-language-model-name"
970        },
971        "EnablePartialResultsStabilization":{
972          "shape":"Boolean",
973          "documentation":"<p>Shows whether partial results stabilization was enabled for your Call Analytics transcription.</p>",
974          "location":"header",
975          "locationName":"x-amzn-transcribe-enable-partial-results-stabilization"
976        },
977        "PartialResultsStability":{
978          "shape":"PartialResultsStability",
979          "documentation":"<p>Provides the stabilization level used for your transcription.</p>",
980          "location":"header",
981          "locationName":"x-amzn-transcribe-partial-results-stability"
982        },
983        "ContentIdentificationType":{
984          "shape":"ContentIdentificationType",
985          "documentation":"<p>Shows whether content identification was enabled for your Call Analytics transcription.</p>",
986          "location":"header",
987          "locationName":"x-amzn-transcribe-content-identification-type"
988        },
989        "ContentRedactionType":{
990          "shape":"ContentRedactionType",
991          "documentation":"<p>Shows whether content redaction was enabled for your Call Analytics transcription.</p>",
992          "location":"header",
993          "locationName":"x-amzn-transcribe-content-redaction-type"
994        },
995        "PiiEntityTypes":{
996          "shape":"PiiEntityTypes",
997          "documentation":"<p>Lists the PII entity types you specified in your Call Analytics request.</p>",
998          "location":"header",
999          "locationName":"x-amzn-transcribe-pii-entity-types"
1000        }
1001      },
1002      "payload":"CallAnalyticsTranscriptResultStream"
1003    },
1004    "StartMedicalStreamTranscriptionRequest":{
1005      "type":"structure",
1006      "required":[
1007        "LanguageCode",
1008        "MediaSampleRateHertz",
1009        "MediaEncoding",
1010        "Specialty",
1011        "Type",
1012        "AudioStream"
1013      ],
1014      "members":{
1015        "LanguageCode":{
1016          "shape":"LanguageCode",
1017          "documentation":"<p>Specify the language code that represents the language spoken in your audio.</p> <important> <p>Amazon Transcribe Medical only supports US English (<code>en-US</code>).</p> </important>",
1018          "location":"header",
1019          "locationName":"x-amzn-transcribe-language-code"
1020        },
1021        "MediaSampleRateHertz":{
1022          "shape":"MediaSampleRateHertz",
1023          "documentation":"<p>The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>",
1024          "location":"header",
1025          "locationName":"x-amzn-transcribe-sample-rate"
1026        },
1027        "MediaEncoding":{
1028          "shape":"MediaEncoding",
1029          "documentation":"<p>Specify the encoding used for the input audio. Supported formats are:</p> <ul> <li> <p>FLAC</p> </li> <li> <p>OPUS-encoded audio in an Ogg container</p> </li> <li> <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p> </li> </ul> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio\">Media formats</a>.</p>",
1030          "location":"header",
1031          "locationName":"x-amzn-transcribe-media-encoding"
1032        },
1033        "VocabularyName":{
1034          "shape":"VocabularyName",
1035          "documentation":"<p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p>",
1036          "location":"header",
1037          "locationName":"x-amzn-transcribe-vocabulary-name"
1038        },
1039        "Specialty":{
1040          "shape":"Specialty",
1041          "documentation":"<p>Specify the medical specialty contained in your audio.</p>",
1042          "location":"header",
1043          "locationName":"x-amzn-transcribe-specialty"
1044        },
1045        "Type":{
1046          "shape":"Type",
1047          "documentation":"<p>Specify the type of input audio. For example, choose <code>DICTATION</code> for a provider dictating patient notes and <code>CONVERSATION</code> for a dialogue between a patient and a medical professional.</p>",
1048          "location":"header",
1049          "locationName":"x-amzn-transcribe-type"
1050        },
1051        "ShowSpeakerLabel":{
1052          "shape":"Boolean",
1053          "documentation":"<p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html\">Partitioning speakers (diarization)</a>.</p>",
1054          "location":"header",
1055          "locationName":"x-amzn-transcribe-show-speaker-label"
1056        },
1057        "SessionId":{
1058          "shape":"SessionId",
1059          "documentation":"<p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.</p> <p>You can use a session ID to retry a streaming session.</p>",
1060          "location":"header",
1061          "locationName":"x-amzn-transcribe-session-id"
1062        },
1063        "AudioStream":{"shape":"AudioStream"},
1064        "EnableChannelIdentification":{
1065          "shape":"Boolean",
1066          "documentation":"<p>Enables channel identification in multi-channel audio.</p> <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p> <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html\">Transcribing multi-channel audio</a>.</p>",
1067          "location":"header",
1068          "locationName":"x-amzn-transcribe-enable-channel-identification"
1069        },
1070        "NumberOfChannels":{
1071          "shape":"NumberOfChannels",
1072          "documentation":"<p>Specify the number of channels in your audio stream. Up to two channels are supported.</p>",
1073          "location":"header",
1074          "locationName":"x-amzn-transcribe-number-of-channels"
1075        },
1076        "ContentIdentificationType":{
1077          "shape":"MedicalContentIdentificationType",
1078          "documentation":"<p>Labels all personal health information (PHI) identified in your transcript.</p> <p>Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html\">Identifying personal health information (PHI) in a transcription</a>.</p>",
1079          "location":"header",
1080          "locationName":"x-amzn-transcribe-content-identification-type"
1081        }
1082      },
1083      "payload":"AudioStream"
1084    },
1085    "StartMedicalStreamTranscriptionResponse":{
1086      "type":"structure",
1087      "members":{
1088        "RequestId":{
1089          "shape":"RequestId",
1090          "documentation":"<p>Provides the identifier for your streaming request.</p>",
1091          "location":"header",
1092          "locationName":"x-amzn-request-id"
1093        },
1094        "LanguageCode":{
1095          "shape":"LanguageCode",
1096          "documentation":"<p>Provides the language code that you specified in your request. This must be <code>en-US</code>.</p>",
1097          "location":"header",
1098          "locationName":"x-amzn-transcribe-language-code"
1099        },
1100        "MediaSampleRateHertz":{
1101          "shape":"MediaSampleRateHertz",
1102          "documentation":"<p>Provides the sample rate that you specified in your request.</p>",
1103          "location":"header",
1104          "locationName":"x-amzn-transcribe-sample-rate"
1105        },
1106        "MediaEncoding":{
1107          "shape":"MediaEncoding",
1108          "documentation":"<p>Provides the media encoding you specified in your request.</p>",
1109          "location":"header",
1110          "locationName":"x-amzn-transcribe-media-encoding"
1111        },
1112        "VocabularyName":{
1113          "shape":"VocabularyName",
1114          "documentation":"<p>Provides the name of the custom vocabulary that you specified in your request.</p>",
1115          "location":"header",
1116          "locationName":"x-amzn-transcribe-vocabulary-name"
1117        },
1118        "Specialty":{
1119          "shape":"Specialty",
1120          "documentation":"<p>Provides the medical specialty that you specified in your request.</p>",
1121          "location":"header",
1122          "locationName":"x-amzn-transcribe-specialty"
1123        },
1124        "Type":{
1125          "shape":"Type",
1126          "documentation":"<p>Provides the type of audio you specified in your request.</p>",
1127          "location":"header",
1128          "locationName":"x-amzn-transcribe-type"
1129        },
1130        "ShowSpeakerLabel":{
1131          "shape":"Boolean",
1132          "documentation":"<p>Shows whether speaker partitioning was enabled for your transcription.</p>",
1133          "location":"header",
1134          "locationName":"x-amzn-transcribe-show-speaker-label"
1135        },
1136        "SessionId":{
1137          "shape":"SessionId",
1138          "documentation":"<p>Provides the identifier for your transcription session.</p>",
1139          "location":"header",
1140          "locationName":"x-amzn-transcribe-session-id"
1141        },
1142        "TranscriptResultStream":{
1143          "shape":"MedicalTranscriptResultStream",
1144          "documentation":"<p>Provides detailed information about your streaming session.</p>"
1145        },
1146        "EnableChannelIdentification":{
1147          "shape":"Boolean",
1148          "documentation":"<p>Shows whether channel identification was enabled for your transcription.</p>",
1149          "location":"header",
1150          "locationName":"x-amzn-transcribe-enable-channel-identification"
1151        },
1152        "NumberOfChannels":{
1153          "shape":"NumberOfChannels",
1154          "documentation":"<p>Provides the number of channels that you specified in your request.</p>",
1155          "location":"header",
1156          "locationName":"x-amzn-transcribe-number-of-channels"
1157        },
1158        "ContentIdentificationType":{
1159          "shape":"MedicalContentIdentificationType",
1160          "documentation":"<p>Shows whether content identification was enabled for your transcription.</p>",
1161          "location":"header",
1162          "locationName":"x-amzn-transcribe-content-identification-type"
1163        }
1164      },
1165      "payload":"TranscriptResultStream"
1166    },
1167    "StartStreamTranscriptionRequest":{
1168      "type":"structure",
1169      "required":[
1170        "MediaSampleRateHertz",
1171        "MediaEncoding",
1172        "AudioStream"
1173      ],
1174      "members":{
1175        "LanguageCode":{
1176          "shape":"LanguageCode",
1177          "documentation":"<p>Specify the language code that represents the language spoken in your audio.</p> <p>If you're unsure of the language spoken in your audio, consider using <code>IdentifyLanguage</code> to enable automatic language identification.</p> <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html\">Supported languages</a> table.</p>",
1178          "location":"header",
1179          "locationName":"x-amzn-transcribe-language-code"
1180        },
1181        "MediaSampleRateHertz":{
1182          "shape":"MediaSampleRateHertz",
1183          "documentation":"<p>The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.</p>",
1184          "location":"header",
1185          "locationName":"x-amzn-transcribe-sample-rate"
1186        },
1187        "MediaEncoding":{
1188          "shape":"MediaEncoding",
1189          "documentation":"<p>Specify the encoding of your input audio. Supported formats are:</p> <ul> <li> <p>FLAC</p> </li> <li> <p>OPUS-encoded audio in an Ogg container</p> </li> <li> <p>PCM (only signed 16-bit little-endian audio formats, which does not include WAV)</p> </li> </ul> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio\">Media formats</a>.</p>",
1190          "location":"header",
1191          "locationName":"x-amzn-transcribe-media-encoding"
1192        },
1193        "VocabularyName":{
1194          "shape":"VocabularyName",
1195          "documentation":"<p>Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p> <p>If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription.</p> <important> <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more custom vocabularies with your transcription, use the <code>VocabularyNames</code> parameter instead.</p> </important> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html\">Custom vocabularies</a>.</p>",
1196          "location":"header",
1197          "locationName":"x-amzn-transcribe-vocabulary-name"
1198        },
1199        "SessionId":{
1200          "shape":"SessionId",
1201          "documentation":"<p>Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response.</p> <p>You can use a session ID to retry a streaming session.</p>",
1202          "location":"header",
1203          "locationName":"x-amzn-transcribe-session-id"
1204        },
1205        "AudioStream":{
1206          "shape":"AudioStream",
1207          "documentation":"<p>An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html\">Transcribing streaming audio</a>.</p>"
1208        },
1209        "VocabularyFilterName":{
1210          "shape":"VocabularyFilterName",
1211          "documentation":"<p>Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p> <p>If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription.</p> <important> <p>This parameter is <b>not</b> intended for use with the <code>IdentifyLanguage</code> parameter. If you're including <code>IdentifyLanguage</code> in your request and want to use one or more vocabulary filters with your transcription, use the <code>VocabularyFilterNames</code> parameter instead.</p> </important> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html\">Using vocabulary filtering with unwanted words</a>.</p>",
1212          "location":"header",
1213          "locationName":"x-amzn-transcribe-vocabulary-filter-name"
1214        },
1215        "VocabularyFilterMethod":{
1216          "shape":"VocabularyFilterMethod",
1217          "documentation":"<p>Specify how you want your vocabulary filter applied to your transcript.</p> <p>To replace words with <code>***</code>, choose <code>mask</code>.</p> <p>To delete words, choose <code>remove</code>.</p> <p>To flag words without changing them, choose <code>tag</code>.</p>",
1218          "location":"header",
1219          "locationName":"x-amzn-transcribe-vocabulary-filter-method"
1220        },
1221        "ShowSpeakerLabel":{
1222          "shape":"Boolean",
1223          "documentation":"<p>Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html\">Partitioning speakers (diarization)</a>.</p>",
1224          "location":"header",
1225          "locationName":"x-amzn-transcribe-show-speaker-label"
1226        },
1227        "EnableChannelIdentification":{
1228          "shape":"Boolean",
1229          "documentation":"<p>Enables channel identification in multi-channel audio.</p> <p>Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.</p> <p>If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html\">Transcribing multi-channel audio</a>.</p>",
1230          "location":"header",
1231          "locationName":"x-amzn-transcribe-enable-channel-identification"
1232        },
1233        "NumberOfChannels":{
1234          "shape":"NumberOfChannels",
1235          "documentation":"<p>Specify the number of channels in your audio stream. Up to two channels are supported.</p>",
1236          "location":"header",
1237          "locationName":"x-amzn-transcribe-number-of-channels"
1238        },
1239        "EnablePartialResultsStabilization":{
1240          "shape":"Boolean",
1241          "documentation":"<p>Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization\">Partial-result stabilization</a>.</p>",
1242          "location":"header",
1243          "locationName":"x-amzn-transcribe-enable-partial-results-stabilization"
1244        },
1245        "PartialResultsStability":{
1246          "shape":"PartialResultsStability",
1247          "documentation":"<p>Specify the level of stability to use when you enable partial results stabilization (<code>EnablePartialResultsStabilization</code>).</p> <p>Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization\">Partial-result stabilization</a>.</p>",
1248          "location":"header",
1249          "locationName":"x-amzn-transcribe-partial-results-stability"
1250        },
1251        "ContentIdentificationType":{
1252          "shape":"ContentIdentificationType",
1253          "documentation":"<p>Labels all personally identifiable information (PII) identified in your transcript.</p> <p>Content identification is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is flagged upon complete transcription of an audio segment.</p> <p>You can’t set <code>ContentIdentificationType</code> and <code>ContentRedactionType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html\">Redacting or identifying personally identifiable information</a>.</p>",
1254          "location":"header",
1255          "locationName":"x-amzn-transcribe-content-identification-type"
1256        },
1257        "ContentRedactionType":{
1258          "shape":"ContentRedactionType",
1259          "documentation":"<p>Redacts all personally identifiable information (PII) identified in your transcript.</p> <p>Content redaction is performed at the segment level; PII specified in <code>PiiEntityTypes</code> is redacted upon complete transcription of an audio segment.</p> <p>You can’t set <code>ContentRedactionType</code> and <code>ContentIdentificationType</code> in the same request. If you set both, your request returns a <code>BadRequestException</code>.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html\">Redacting or identifying personally identifiable information</a>.</p>",
1260          "location":"header",
1261          "locationName":"x-amzn-transcribe-content-redaction-type"
1262        },
1263        "PiiEntityTypes":{
1264          "shape":"PiiEntityTypes",
1265          "documentation":"<p>Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select <code>ALL</code>.</p> <p>To include <code>PiiEntityTypes</code> in your request, you must also include either <code>ContentIdentificationType</code> or <code>ContentRedactionType</code>.</p> <p>Values must be comma-separated and can include: <code>BANK_ACCOUNT_NUMBER</code>, <code>BANK_ROUTING</code>, <code>CREDIT_DEBIT_NUMBER</code>, <code>CREDIT_DEBIT_CVV</code>, <code>CREDIT_DEBIT_EXPIRY</code>, <code>PIN</code>, <code>EMAIL</code>, <code>ADDRESS</code>, <code>NAME</code>, <code>PHONE</code>, <code>SSN</code>, or <code>ALL</code>.</p>",
1266          "location":"header",
1267          "locationName":"x-amzn-transcribe-pii-entity-types"
1268        },
1269        "LanguageModelName":{
1270          "shape":"ModelName",
1271          "documentation":"<p>Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive.</p> <p>The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.</p> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html\">Custom language models</a>.</p>",
1272          "location":"header",
1273          "locationName":"x-amzn-transcribe-language-model-name"
1274        },
1275        "IdentifyLanguage":{
1276          "shape":"Boolean",
1277          "documentation":"<p>Enables automatic language identification for your transcription.</p> <p>If you include <code>IdentifyLanguage</code>, you can optionally include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your audio stream. Including language options can improve transcription accuracy.</p> <p>You can also include a preferred language using <code>PreferredLanguage</code>. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter.</p> <p>If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel.</p> <p>Note that you must include either <code>LanguageCode</code> or <code>IdentifyLanguage</code> or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p> <p>Streaming language identification can't be combined with custom language models or redaction.</p>",
1278          "location":"header",
1279          "locationName":"x-amzn-transcribe-identify-language"
1280        },
1281        "LanguageOptions":{
1282          "shape":"LanguageOptions",
1283          "documentation":"<p>Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.</p> <p>Including language options can improve the accuracy of language identification.</p> <p>If you include <code>LanguageOptions</code> in your request, you must also include <code>IdentifyLanguage</code>.</p> <p>For a list of languages supported with Amazon Transcribe streaming, refer to the <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html\">Supported languages</a> table.</p> <important> <p>You can only include one language dialect per language per stream. For example, you cannot include <code>en-US</code> and <code>en-AU</code> in the same request.</p> </important>",
1284          "location":"header",
1285          "locationName":"x-amzn-transcribe-language-options"
1286        },
1287        "PreferredLanguage":{
1288          "shape":"LanguageCode",
1289          "documentation":"<p>Specify a preferred language from the subset of languages codes you specified in <code>LanguageOptions</code>.</p> <p>You can only use this parameter if you've included <code>IdentifyLanguage</code> and <code>LanguageOptions</code> in your request.</p>",
1290          "location":"header",
1291          "locationName":"x-amzn-transcribe-preferred-language"
1292        },
1293        "IdentifyMultipleLanguages":{
1294          "shape":"Boolean",
1295          "documentation":"<p>Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead.</p> <p>If you include <code>IdentifyMultipleLanguages</code>, you can optionally include a list of language codes, using <code>LanguageOptions</code>, that you think may be present in your stream. Including <code>LanguageOptions</code> restricts <code>IdentifyMultipleLanguages</code> to only the language options that you specify, which can improve transcription accuracy.</p> <p>If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include <code>VocabularyNames</code> or <code>VocabularyFilterNames</code>.</p> <p>Note that you must include one of <code>LanguageCode</code>, <code>IdentifyLanguage</code>, or <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your transcription job fails.</p>",
1296          "location":"header",
1297          "locationName":"x-amzn-transcribe-identify-multiple-languages"
1298        },
1299        "VocabularyNames":{
1300          "shape":"VocabularyNames",
1301          "documentation":"<p>Specify the names of the custom vocabularies that you want to use when processing your transcription. Note that vocabulary names are case sensitive.</p> <p>If none of the languages of the specified custom vocabularies match the language identified in your media, your job fails.</p> <important> <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary with your transcription, use the <code>VocabularyName</code> parameter instead.</p> </important> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html\">Custom vocabularies</a>.</p>",
1302          "location":"header",
1303          "locationName":"x-amzn-transcribe-vocabulary-names"
1304        },
1305        "VocabularyFilterNames":{
1306          "shape":"VocabularyFilterNames",
1307          "documentation":"<p>Specify the names of the custom vocabulary filters that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive.</p> <p>If none of the languages of the specified custom vocabulary filters match the language identified in your media, your job fails.</p> <important> <p>This parameter is only intended for use <b>with</b> the <code>IdentifyLanguage</code> parameter. If you're <b>not</b> including <code>IdentifyLanguage</code> in your request and want to use a custom vocabulary filter with your transcription, use the <code>VocabularyFilterName</code> parameter instead.</p> </important> <p>For more information, see <a href=\"https://docs.aws.amazon.com/transcribe/latest/dg/vocabulary-filtering.html\">Using vocabulary filtering with unwanted words</a>.</p>",
1308          "location":"header",
1309          "locationName":"x-amzn-transcribe-vocabulary-filter-names"
1310        }
1311      },
1312      "payload":"AudioStream"
1313    },
1314    "StartStreamTranscriptionResponse":{
1315      "type":"structure",
1316      "members":{
1317        "RequestId":{
1318          "shape":"RequestId",
1319          "documentation":"<p>Provides the identifier for your streaming request.</p>",
1320          "location":"header",
1321          "locationName":"x-amzn-request-id"
1322        },
1323        "LanguageCode":{
1324          "shape":"LanguageCode",
1325          "documentation":"<p>Provides the language code that you specified in your request.</p>",
1326          "location":"header",
1327          "locationName":"x-amzn-transcribe-language-code"
1328        },
1329        "MediaSampleRateHertz":{
1330          "shape":"MediaSampleRateHertz",
1331          "documentation":"<p>Provides the sample rate that you specified in your request.</p>",
1332          "location":"header",
1333          "locationName":"x-amzn-transcribe-sample-rate"
1334        },
1335        "MediaEncoding":{
1336          "shape":"MediaEncoding",
1337          "documentation":"<p>Provides the media encoding you specified in your request.</p>",
1338          "location":"header",
1339          "locationName":"x-amzn-transcribe-media-encoding"
1340        },
1341        "VocabularyName":{
1342          "shape":"VocabularyName",
1343          "documentation":"<p>Provides the name of the custom vocabulary that you specified in your request.</p>",
1344          "location":"header",
1345          "locationName":"x-amzn-transcribe-vocabulary-name"
1346        },
1347        "SessionId":{
1348          "shape":"SessionId",
1349          "documentation":"<p>Provides the identifier for your transcription session.</p>",
1350          "location":"header",
1351          "locationName":"x-amzn-transcribe-session-id"
1352        },
1353        "TranscriptResultStream":{
1354          "shape":"TranscriptResultStream",
1355          "documentation":"<p>Provides detailed information about your streaming session.</p>"
1356        },
1357        "VocabularyFilterName":{
1358          "shape":"VocabularyFilterName",
1359          "documentation":"<p>Provides the name of the custom vocabulary filter that you specified in your request.</p>",
1360          "location":"header",
1361          "locationName":"x-amzn-transcribe-vocabulary-filter-name"
1362        },
1363        "VocabularyFilterMethod":{
1364          "shape":"VocabularyFilterMethod",
1365          "documentation":"<p>Provides the vocabulary filtering method used in your transcription.</p>",
1366          "location":"header",
1367          "locationName":"x-amzn-transcribe-vocabulary-filter-method"
1368        },
1369        "ShowSpeakerLabel":{
1370          "shape":"Boolean",
1371          "documentation":"<p>Shows whether speaker partitioning was enabled for your transcription.</p>",
1372          "location":"header",
1373          "locationName":"x-amzn-transcribe-show-speaker-label"
1374        },
1375        "EnableChannelIdentification":{
1376          "shape":"Boolean",
1377          "documentation":"<p>Shows whether channel identification was enabled for your transcription.</p>",
1378          "location":"header",
1379          "locationName":"x-amzn-transcribe-enable-channel-identification"
1380        },
1381        "NumberOfChannels":{
1382          "shape":"NumberOfChannels",
1383          "documentation":"<p>Provides the number of channels that you specified in your request.</p>",
1384          "location":"header",
1385          "locationName":"x-amzn-transcribe-number-of-channels"
1386        },
1387        "EnablePartialResultsStabilization":{
1388          "shape":"Boolean",
1389          "documentation":"<p>Shows whether partial results stabilization was enabled for your transcription.</p>",
1390          "location":"header",
1391          "locationName":"x-amzn-transcribe-enable-partial-results-stabilization"
1392        },
1393        "PartialResultsStability":{
1394          "shape":"PartialResultsStability",
1395          "documentation":"<p>Provides the stabilization level used for your transcription.</p>",
1396          "location":"header",
1397          "locationName":"x-amzn-transcribe-partial-results-stability"
1398        },
1399        "ContentIdentificationType":{
1400          "shape":"ContentIdentificationType",
1401          "documentation":"<p>Shows whether content identification was enabled for your transcription.</p>",
1402          "location":"header",
1403          "locationName":"x-amzn-transcribe-content-identification-type"
1404        },
1405        "ContentRedactionType":{
1406          "shape":"ContentRedactionType",
1407          "documentation":"<p>Shows whether content redaction was enabled for your transcription.</p>",
1408          "location":"header",
1409          "locationName":"x-amzn-transcribe-content-redaction-type"
1410        },
1411        "PiiEntityTypes":{
1412          "shape":"PiiEntityTypes",
1413          "documentation":"<p>Lists the PII entity types you specified in your request.</p>",
1414          "location":"header",
1415          "locationName":"x-amzn-transcribe-pii-entity-types"
1416        },
1417        "LanguageModelName":{
1418          "shape":"ModelName",
1419          "documentation":"<p>Provides the name of the custom language model that you specified in your request.</p>",
1420          "location":"header",
1421          "locationName":"x-amzn-transcribe-language-model-name"
1422        },
1423        "IdentifyLanguage":{
1424          "shape":"Boolean",
1425          "documentation":"<p>Shows whether automatic language identification was enabled for your transcription.</p>",
1426          "location":"header",
1427          "locationName":"x-amzn-transcribe-identify-language"
1428        },
1429        "LanguageOptions":{
1430          "shape":"LanguageOptions",
1431          "documentation":"<p>Provides the language codes that you specified in your request.</p>",
1432          "location":"header",
1433          "locationName":"x-amzn-transcribe-language-options"
1434        },
1435        "PreferredLanguage":{
1436          "shape":"LanguageCode",
1437          "documentation":"<p>Provides the preferred language that you specified in your request.</p>",
1438          "location":"header",
1439          "locationName":"x-amzn-transcribe-preferred-language"
1440        },
1441        "IdentifyMultipleLanguages":{
1442          "shape":"Boolean",
1443          "documentation":"<p>Shows whether automatic multi-language identification was enabled for your transcription.</p>",
1444          "location":"header",
1445          "locationName":"x-amzn-transcribe-identify-multiple-languages"
1446        },
1447        "VocabularyNames":{
1448          "shape":"VocabularyNames",
1449          "documentation":"<p>Provides the names of the custom vocabularies that you specified in your request.</p>",
1450          "location":"header",
1451          "locationName":"x-amzn-transcribe-vocabulary-names"
1452        },
1453        "VocabularyFilterNames":{
1454          "shape":"VocabularyFilterNames",
1455          "documentation":"<p>Provides the names of the custom vocabulary filters that you specified in your request.</p>",
1456          "location":"header",
1457          "locationName":"x-amzn-transcribe-vocabulary-filter-names"
1458        }
1459      },
1460      "payload":"TranscriptResultStream"
1461    },
1462    "String":{"type":"string"},
1463    "StringList":{
1464      "type":"list",
1465      "member":{"shape":"String"}
1466    },
1467    "TimestampRange":{
1468      "type":"structure",
1469      "members":{
1470        "BeginOffsetMillis":{
1471          "shape":"Long",
1472          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the start of the category match.</p>"
1473        },
1474        "EndOffsetMillis":{
1475          "shape":"Long",
1476          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the end of the category match.</p>"
1477        }
1478      },
1479      "documentation":"<p>Contains the timestamp range (start time through end time) of a matched category.</p>"
1480    },
1481    "TimestampRanges":{
1482      "type":"list",
1483      "member":{"shape":"TimestampRange"}
1484    },
1485    "Transcript":{
1486      "type":"structure",
1487      "members":{
1488        "Results":{
1489          "shape":"ResultList",
1490          "documentation":"<p>Contains a set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.</p>"
1491        }
1492      },
1493      "documentation":"<p>The <code>Transcript</code> associated with a <code/>.</p> <p> <code>Transcript</code> contains <code>Results</code>, which contains a set of transcription results from one or more audio segments, along with additional information per your request parameters.</p>"
1494    },
1495    "TranscriptEvent":{
1496      "type":"structure",
1497      "members":{
1498        "Transcript":{
1499          "shape":"Transcript",
1500          "documentation":"<p>Contains <code>Results</code>, which contains a set of transcription results from one or more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.</p>"
1501        }
1502      },
1503      "documentation":"<p>The <code>TranscriptEvent</code> associated with a <code>TranscriptResultStream</code>.</p> <p>Contains a set of transcription results from one or more audio segments, along with additional information per your request parameters.</p>",
1504      "event":true
1505    },
1506    "TranscriptResultStream":{
1507      "type":"structure",
1508      "members":{
1509        "TranscriptEvent":{
1510          "shape":"TranscriptEvent",
1511          "documentation":"<p>Contains <code>Transcript</code>, which contains <code>Results</code>. The <code/> object contains a set of transcription results from one or more audio segments, along with additional information per your request parameters.</p>"
1512        },
1513        "BadRequestException":{
1514          "shape":"BadRequestException",
1515          "documentation":"<p>A client error occurred when the stream was created. Check the parameters of the request and try your request again.</p>"
1516        },
1517        "LimitExceededException":{
1518          "shape":"LimitExceededException",
1519          "documentation":"<p>Your client has exceeded one of the Amazon Transcribe limits. This is typically the audio length limit. Break your audio stream into smaller chunks and try your request again.</p>"
1520        },
1521        "InternalFailureException":{
1522          "shape":"InternalFailureException",
1523          "documentation":"<p>A problem occurred while processing the audio. Amazon Transcribe terminated processing.</p>"
1524        },
1525        "ConflictException":{
1526          "shape":"ConflictException",
1527          "documentation":"<p>A new stream started with the same session ID. The current stream has been terminated.</p>"
1528        },
1529        "ServiceUnavailableException":{
1530          "shape":"ServiceUnavailableException",
1531          "documentation":"<p>The service is currently unavailable. Try your request later.</p>"
1532        }
1533      },
1534      "documentation":"<p>Contains detailed information about your streaming session.</p>",
1535      "eventstream":true
1536    },
1537    "Type":{
1538      "type":"string",
1539      "enum":[
1540        "CONVERSATION",
1541        "DICTATION"
1542      ]
1543    },
1544    "UtteranceEvent":{
1545      "type":"structure",
1546      "members":{
1547        "UtteranceId":{
1548          "shape":"String",
1549          "documentation":"<p>The unique identifier that is associated with the specified <code>UtteranceEvent</code>.</p>"
1550        },
1551        "IsPartial":{
1552          "shape":"Boolean",
1553          "documentation":"<p>Indicates whether the segment in the <code>UtteranceEvent</code> is complete (<code>FALSE</code>) or partial (<code>TRUE</code>).</p>"
1554        },
1555        "ParticipantRole":{
1556          "shape":"ParticipantRole",
1557          "documentation":"<p>Provides the role of the speaker for each audio channel, either <code>CUSTOMER</code> or <code>AGENT</code>.</p>"
1558        },
1559        "BeginOffsetMillis":{
1560          "shape":"Long",
1561          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the start of the <code>UtteranceEvent</code>.</p>"
1562        },
1563        "EndOffsetMillis":{
1564          "shape":"Long",
1565          "documentation":"<p>The time, in milliseconds, from the beginning of the audio stream to the start of the <code>UtteranceEvent</code>.</p>"
1566        },
1567        "Transcript":{
1568          "shape":"String",
1569          "documentation":"<p>Contains transcribed text.</p>"
1570        },
1571        "Items":{
1572          "shape":"CallAnalyticsItemList",
1573          "documentation":"<p>Contains words, phrases, or punctuation marks that are associated with the specified <code>UtteranceEvent</code>.</p>"
1574        },
1575        "Entities":{
1576          "shape":"CallAnalyticsEntityList",
1577          "documentation":"<p>Contains entities identified as personally identifiable information (PII) in your transcription output.</p>"
1578        },
1579        "Sentiment":{
1580          "shape":"Sentiment",
1581          "documentation":"<p>Provides the sentiment that was detected in the specified segment.</p>"
1582        },
1583        "IssuesDetected":{
1584          "shape":"IssuesDetected",
1585          "documentation":"<p>Provides the issue that was detected in the specified segment.</p>"
1586        }
1587      },
1588      "documentation":"<p>Contains set of transcription results from one or more audio segments, along with additional information about the parameters included in your request. For example, channel definitions, partial result stabilization, sentiment, and issue detection.</p>",
1589      "event":true
1590    },
1591    "VocabularyFilterMethod":{
1592      "type":"string",
1593      "enum":[
1594        "remove",
1595        "mask",
1596        "tag"
1597      ]
1598    },
1599    "VocabularyFilterName":{
1600      "type":"string",
1601      "max":200,
1602      "min":1,
1603      "pattern":"^[0-9a-zA-Z._-]+"
1604    },
1605    "VocabularyFilterNames":{
1606      "type":"string",
1607      "max":3000,
1608      "min":1,
1609      "pattern":"^[a-zA-Z0-9,-._]+"
1610    },
1611    "VocabularyName":{
1612      "type":"string",
1613      "max":200,
1614      "min":1,
1615      "pattern":"^[0-9a-zA-Z._-]+"
1616    },
1617    "VocabularyNames":{
1618      "type":"string",
1619      "max":3000,
1620      "min":1,
1621      "pattern":"^[a-zA-Z0-9,-._]+"
1622    }
1623  },
1624  "documentation":"<p>Amazon Transcribe streaming offers three main types of real-time transcription: <b>Standard</b>, <b>Medical</b>, and <b>Call Analytics</b>.</p> <ul> <li> <p> <b>Standard transcriptions</b> are the most common option. Refer to for details.</p> </li> <li> <p> <b>Medical transcriptions</b> are tailored to medical professionals and incorporate medical terms. A common use case for this service is transcribing doctor-patient dialogue in real time, so doctors can focus on their patient instead of taking notes. Refer to for details.</p> </li> <li> <p> <b>Call Analytics transcriptions</b> are designed for use with call center audio on two different channels; if you're looking for insight into customer service calls, use this option. Refer to for details.</p> </li> </ul>"
1625}
1626