diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.AutoHighlightsResult2.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.AutoHighlightsResult2.g.cs
index 26da6c8..514d0a5 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.AutoHighlightsResult2.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.AutoHighlightsResult2.g.cs
@@ -5,7 +5,7 @@ namespace AssemblyAI
{
///
/// An array of results for the Key Phrases model, if it is enabled.
- /// See [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.
+ /// See [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.
/// Example: {"status":"success","results":[{"count":1,"rank":0.08,"text":"air quality alerts","timestamps":[{"start":3978,"end":5114}]},{"count":1,"rank":0.08,"text":"wide ranging air quality consequences","timestamps":[{"start":235388,"end":238694}]},{"count":1,"rank":0.07,"text":"more wildfires","timestamps":[{"start":230972,"end":232354}]},{"count":1,"rank":0.07,"text":"air pollution","timestamps":[{"start":156004,"end":156910}]},{"count":3,"rank":0.07,"text":"weather systems","timestamps":[{"start":47344,"end":47958},{"start":205268,"end":205818},{"start":211588,"end":213434}]},{"count":2,"rank":0.06,"text":"high levels","timestamps":[{"start":121128,"end":121646},{"start":155412,"end":155866}]},{"count":1,"rank":0.06,"text":"health conditions","timestamps":[{"start":152138,"end":152666}]},{"count":2,"rank":0.06,"text":"Peter de Carlo","timestamps":[{"start":18948,"end":19930},{"start":268298,"end":269194}]},{"count":1,"rank":0.06,"text":"New York City","timestamps":[{"start":125768,"end":126274}]},{"count":1,"rank":0.05,"text":"respiratory conditions","timestamps":[{"start":152964,"end":153786}]},{"count":3,"rank":0.05,"text":"New York","timestamps":[{"start":125768,"end":126034},{"start":171448,"end":171938},{"start":176008,"end":176322}]},{"count":3,"rank":0.05,"text":"climate change","timestamps":[{"start":229548,"end":230230},{"start":244576,"end":245162},{"start":263348,"end":263950}]},{"count":1,"rank":0.05,"text":"Johns Hopkins University Varsity","timestamps":[{"start":23972,"end":25490}]},{"count":1,"rank":0.05,"text":"heart conditions","timestamps":[{"start":153988,"end":154506}]},{"count":1,"rank":0.05,"text":"air quality warnings","timestamps":[{"start":12308,"end":13434}]}]}
///
public sealed partial class AutoHighlightsResult2
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.ContentSafetyLabelsResult.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.ContentSafetyLabelsResult.g.cs
index 9403b1d..026cc81 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.ContentSafetyLabelsResult.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.ContentSafetyLabelsResult.g.cs
@@ -5,7 +5,7 @@ namespace AssemblyAI
{
///
/// An array of results for the Content Moderation model, if it is enabled.
- /// See [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.
+ /// See [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.
/// Example: {"status":"success","results":[{"text":"Smoke from hundreds of wildfires in Canada is triggering air quality alerts throughout the US. Skylines from Maine to Maryland to Minnesota are gray and smoggy. And in some places, the air quality warnings include the warning to stay inside. We wanted to better understand what\u0027s happening here and why, so we called Peter de Carlo, an associate professor in the Department of Environmental Health and Engineering at Johns Hopkins University Varsity. Good morning, professor. Good morning.","labels":[{"label":"disasters","confidence":0.8142836093902588,"severity":0.4093044400215149}],"sentences_idx_start":0,"sentences_idx_end":5,"timestamp":{"start":250,"end":28840}}],"summary":{"disasters":0.9940800441842205,"health_issues":0.9216489289040967},"severity_score_summary":{"disasters":{"low":0.5733263024656846,"medium":0.42667369753431533,"high":0.0},"health_issues":{"low":0.22863814977924785,"medium":0.45014154926938227,"high":0.32122030095136983}}}
///
public sealed partial class ContentSafetyLabelsResult
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBody.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBody.g.cs
index 13803fd..85cb070 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBody.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBody.g.cs
@@ -4,7 +4,7 @@
namespace AssemblyAI
{
///
- ///
+ /// Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting).
///
public sealed partial class CustomFormattingRequestBody
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBodyCustomFormatting.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBodyCustomFormatting.g.cs
index 22eab9c..1cd45b5 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBodyCustomFormatting.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.CustomFormattingRequestBodyCustomFormatting.g.cs
@@ -9,19 +9,19 @@ namespace AssemblyAI
public sealed partial class CustomFormattingRequestBodyCustomFormatting
{
///
- /// Date format pattern (e.g., `"mm/dd/yyyy"`)
+ /// Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("date")]
public string? Date { get; set; }
///
- /// Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`)
+ /// Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("phone_number")]
public string? PhoneNumber { get; set; }
///
- /// Email format pattern (e.g., `"username@domain.com"`)
+ /// Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("email")]
public string? Email { get; set; }
@@ -36,13 +36,13 @@ public sealed partial class CustomFormattingRequestBodyCustomFormatting
/// Initializes a new instance of the class.
///
///
- /// Date format pattern (e.g., `"mm/dd/yyyy"`)
+ /// Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
///
///
- /// Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`)
+ /// Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
///
///
- /// Email format pattern (e.g., `"username@domain.com"`)
+ /// Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
///
#if NET7_0_OR_GREATER
[global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.RedactPiiAudioQuality.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.RedactPiiAudioQuality.g.cs
index ebc49f7..8cce009 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.RedactPiiAudioQuality.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.RedactPiiAudioQuality.g.cs
@@ -4,17 +4,17 @@
namespace AssemblyAI
{
///
- /// Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
/// Example: mp3
///
public enum RedactPiiAudioQuality
{
///
- /// //www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
///
Mp3,
///
- /// //www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
///
Wav,
}
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SentimentAnalysisResult.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SentimentAnalysisResult.g.cs
index 1305e51..7bf2717 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SentimentAnalysisResult.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SentimentAnalysisResult.g.cs
@@ -52,7 +52,7 @@ public sealed partial class SentimentAnalysisResult
public string? Channel { get; set; }
///
- /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
///
[global::System.Text.Json.Serialization.JsonPropertyName("speaker")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -86,7 +86,7 @@ public sealed partial class SentimentAnalysisResult
/// The channel of this utterance. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially.
///
///
- /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
///
#if NET7_0_OR_GREATER
[global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBody.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBody.g.cs
index 73bbdfa..40296f8 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBody.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBody.g.cs
@@ -4,7 +4,7 @@
namespace AssemblyAI
{
///
- ///
+ /// Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification).
///
public sealed partial class SpeakerIdentificationRequestBody
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentification.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentification.g.cs
index 3712711..d356632 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentification.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentification.g.cs
@@ -9,7 +9,7 @@ namespace AssemblyAI
public sealed partial class SpeakerIdentificationRequestBodySpeakerIdentification
{
///
- /// Type of speaker identification
+ /// Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speaker_type")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::AssemblyAI.JsonConverters.SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerTypeJsonConverter))]
@@ -32,7 +32,7 @@ public sealed partial class SpeakerIdentificationRequestBodySpeakerIdentificatio
/// Initializes a new instance of the class.
///
///
- /// Type of speaker identification
+ /// Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type.
///
///
/// Required if speaker_type is "role". Each value must be 35 characters or less.
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType.g.cs
index 10dc778..5847113 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType.g.cs
@@ -4,7 +4,7 @@
namespace AssemblyAI
{
///
- /// Type of speaker identification
+ /// Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type.
///
public enum SpeakerIdentificationRequestBodySpeakerIdentificationSpeakerType
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SubstitutionPolicy.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SubstitutionPolicy.g.cs
index 32f5df6..fbbb1d6 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SubstitutionPolicy.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.SubstitutionPolicy.g.cs
@@ -4,16 +4,16 @@
namespace AssemblyAI
{
///
- /// The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
///
public enum SubstitutionPolicy
{
///
- /// //www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
///
EntityName,
///
- /// //www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
///
Hash,
}
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TopicDetectionModelResult.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TopicDetectionModelResult.g.cs
index 47a14fc..eff8980 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TopicDetectionModelResult.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TopicDetectionModelResult.g.cs
@@ -5,7 +5,7 @@ namespace AssemblyAI
{
///
/// The result of the Topic Detection model, if it is enabled.
- /// See [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.
+ /// See [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.
/// Example: {"status":"success","results":[{"text":"Smoke from hundreds of wildfires in Canada is triggering air quality alerts throughout the US. Skylines from Maine to Maryland to Minnesota are gray and smoggy. And in some places, the air quality warnings include the warning to stay inside. We wanted to better understand what\u0027s happening here and why, so we called Peter de Carlo, an associate professor in the Department of Environmental Health and Engineering at Johns Hopkins University Varsity. Good morning, professor. Good morning.","labels":[{"relevance":0.988274097442627,"label":"Home\u0026Garden\u003EIndoorEnvironmentalQuality"},{"relevance":0.5821335911750793,"label":"NewsAndPolitics\u003EWeather"},{"relevance":0.0042327106930315495,"label":"MedicalHealth\u003EDiseasesAndConditions\u003ELungAndRespiratoryHealth"},{"relevance":0.0033971222583204508,"label":"NewsAndPolitics\u003EDisasters"},{"relevance":0.002469958271831274,"label":"BusinessAndFinance\u003EBusiness\u003EGreenSolutions"},{"relevance":0.0014376690378412604,"label":"MedicalHealth\u003EDiseasesAndConditions\u003ECancer"},{"relevance":0.0014294233405962586,"label":"Science\u003EEnvironment"},{"relevance":0.001234519761055708,"label":"Travel\u003ETravelLocations\u003EPolarTravel"},{"relevance":0.0010231725173071027,"label":"MedicalHealth\u003EDiseasesAndConditions\u003EColdAndFlu"},{"relevance":0.0007445293595083058,"label":"BusinessAndFinance\u003EIndustries\u003EPowerAndEnergyIndustry"}],"timestamp":{"start":250,"end":28840}}],"summary":{"NewsAndPolitics\u003EWeather":1.0,"Home\u0026Garden\u003EIndoorEnvironmentalQuality":0.9043831825256348,"Science\u003EEnvironment":0.16117265820503235,"BusinessAndFinance\u003EIndustries\u003EEnvironmentalServicesIndustry":0.14393523335456848,"MedicalHealth\u003EDiseasesAndConditions\u003ELungAndRespiratoryHealth":0.11401086300611496,"BusinessAndFinance\u003EBusiness\u003EGreenSolutions":0.06348437070846558,"NewsAndPolitics\u003EDisasters":0.05041387677192688,"Travel\u003ETravelLocations\u003EPolarTravel":0.01308488193899393,"HealthyLiving":0.008222488686442375,"MedicalHealth\u003EDiseasesAndConditions\u003EColdAndFlu":0.0022315620444715023,"MedicalHealth\u003EDiseasesAndConditions\u003EHeartAndCardiovascularDiseases":0.00213034451007843,"HealthyLiving\u003EWellness\u003ESmokingCessation":0.001540527562610805,"MedicalHealth\u003EDiseasesAndConditions\u003EInjuries":0.0013950627762824297,"BusinessAndFinance\u003EIndustries\u003EPowerAndEnergyIndustry":0.0012570273829624057,"MedicalHealth\u003EDiseasesAndConditions\u003ECancer":0.001097781932912767,"MedicalHealth\u003EDiseasesAndConditions\u003EAllergies":0.0010148967849090695,"MedicalHealth\u003EDiseasesAndConditions\u003EMentalHealth":0.000717321818228811,"Style\u0026Fashion\u003EPersonalCare\u003EDeodorantAndAntiperspirant":0.0006022014422342181,"Technology\u0026Computing\u003EComputing\u003EComputerNetworking":0.0005461975233629346,"MedicalHealth\u003EDiseasesAndConditions\u003EInjuries\u003EFirstAid":0.0004885646631009877}}
///
public sealed partial class TopicDetectionModelResult
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.Transcript.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.Transcript.g.cs
index 0ef9045..de01f3d 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.Transcript.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.Transcript.g.cs
@@ -12,7 +12,7 @@ namespace AssemblyAI
public sealed partial class Transcript
{
///
- /// The number of audio channels in the audio file. This is only present when multichannel is enabled.
+ /// The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled.
///
[global::System.Text.Json.Serialization.JsonPropertyName("audio_channels")]
public int? AudioChannels { get; set; }
@@ -24,13 +24,13 @@ public sealed partial class Transcript
public int? AudioDuration { get; set; }
///
- /// The point in time, in milliseconds, in the file at which the transcription was terminated
+ /// The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("audio_end_at")]
public int? AudioEndAt { get; set; }
///
- /// The point in time, in milliseconds, in the file at which the transcription was started
+ /// The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("audio_start_from")]
public int? AudioStartFrom { get; set; }
@@ -43,13 +43,13 @@ public sealed partial class Transcript
public required string AudioUrl { get; set; }
///
- /// Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false
+ /// Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("auto_chapters")]
public bool? AutoChapters { get; set; }
///
- /// Whether Key Phrases is enabled, either true or false
+ /// Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("auto_highlights")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -57,14 +57,14 @@ public sealed partial class Transcript
///
/// An array of results for the Key Phrases model, if it is enabled.
- /// See [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.
+ /// See [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("auto_highlights_result")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::AssemblyAI.JsonConverters.OneOfJsonConverter))]
public global::AssemblyAI.OneOf? AutoHighlightsResult { get; set; }
///
- /// An array of temporally sequential chapters for the audio file
+ /// An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("chapters")]
public global::System.Collections.Generic.IList? Chapters { get; set; }
@@ -76,40 +76,40 @@ public sealed partial class Transcript
public double? Confidence { get; set; }
///
- /// Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false
+ /// Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("content_safety")]
public bool? ContentSafety { get; set; }
///
/// An array of results for the Content Moderation model, if it is enabled.
- /// See [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.
+ /// See [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("content_safety_labels")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::AssemblyAI.JsonConverters.OneOfJsonConverter))]
public global::AssemblyAI.OneOf? ContentSafetyLabels { get; set; }
///
- /// Customize how words are spelled and formatted using to and from values
+ /// Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("custom_spelling")]
public global::System.Collections.Generic.IList? CustomSpelling { get; set; }
///
- /// Transcribe Filler Words, like "umm", in your media file; can be true or false
+ /// Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("disfluencies")]
public bool? Disfluencies { get; set; }
///
/// An array of results for the Entity Detection model, if it is enabled.
- /// See [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.
+ /// See [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("entities")]
public global::System.Collections.Generic.IList? Entities { get; set; }
///
- /// Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false
+ /// Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("entity_detection")]
public bool? EntityDetection { get; set; }
@@ -121,26 +121,26 @@ public sealed partial class Transcript
public string? Error { get; set; }
///
- /// Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false
+ /// Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("filter_profanity")]
public bool? FilterProfanity { get; set; }
///
- /// Whether Text Formatting is enabled, either true or false
+ /// Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("format_text")]
public bool? FormatText { get; set; }
///
- /// Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false
+ /// Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("iab_categories")]
public bool? IabCategories { get; set; }
///
/// The result of the Topic Detection model, if it is enabled.
- /// See [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.
+ /// See [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("iab_categories_result")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::AssemblyAI.JsonConverters.OneOfJsonConverter))]
@@ -154,14 +154,14 @@ public sealed partial class Transcript
public required global::System.Guid Id { get; set; }
///
- /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase).
+ /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("keyterms_prompt")]
public global::System.Collections.Generic.IList? KeytermsPrompt { get; set; }
///
/// The language of your audio file.
- /// Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ /// Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
/// The default value is 'en_us'.
///
[global::System.Text.Json.Serialization.JsonPropertyName("language_code")]
@@ -176,7 +176,7 @@ public sealed partial class Transcript
public global::System.Collections.Generic.IList? LanguageCodes { get; set; }
///
- /// The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)
+ /// The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("language_confidence")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -184,7 +184,8 @@ public sealed partial class Transcript
///
/// The confidence threshold for the automatically detected language.
- /// An error will be returned if the language confidence is below this threshold.
+ /// An error will be returned if the language confidence is below this threshold.
+ /// See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("language_confidence_threshold")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -197,32 +198,32 @@ public sealed partial class Transcript
public bool? LanguageDetection { get; set; }
///
- /// Specify options for Automatic Language Detection.
+ /// Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
///
[global::System.Text.Json.Serialization.JsonPropertyName("language_detection_options")]
public global::AssemblyAI.TranscriptLanguageDetectionOptions? LanguageDetectionOptions { get; set; }
///
- /// Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false
+ /// Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("multichannel")]
public bool? Multichannel { get; set; }
///
- /// Provide natural language prompting of up to 1,500 words of contextual information to the model.
+ /// Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.
/// Note: This parameter is only supported for the Universal-3-Pro model.
///
[global::System.Text.Json.Serialization.JsonPropertyName("prompt")]
public string? Prompt { get; set; }
///
- /// Whether Automatic Punctuation is enabled, either true or false
+ /// Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("punctuate")]
public bool? Punctuate { get; set; }
///
- /// Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false
+ /// Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -230,14 +231,14 @@ public sealed partial class Transcript
///
/// Whether a redacted version of the audio file was generated,
- /// either true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ /// either true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_audio")]
public bool? RedactPiiAudio { get; set; }
///
/// The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.
- /// See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_audio_quality")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::AssemblyAI.JsonConverters.OneOfJsonConverter))]
@@ -245,64 +246,64 @@ public sealed partial class Transcript
///
/// The list of PII Redaction policies that were enabled, if PII Redaction is enabled.
- /// See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_policies")]
public global::System.Collections.Generic.IList? RedactPiiPolicies { get; set; }
///
- /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_sub")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::AssemblyAI.JsonConverters.SubstitutionPolicyJsonConverter))]
public global::AssemblyAI.SubstitutionPolicy? RedactPiiSub { get; set; }
///
- /// Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false
+ /// Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("sentiment_analysis")]
public bool? SentimentAnalysis { get; set; }
///
/// An array of results for the Sentiment Analysis model, if it is enabled.
- /// See [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.
+ /// See [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("sentiment_analysis_results")]
public global::System.Collections.Generic.IList? SentimentAnalysisResults { get; set; }
///
- /// Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false
+ /// Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("speaker_labels")]
public bool? SpeakerLabels { get; set; }
///
- /// Tell the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
+ /// Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speakers_expected")]
public int? SpeakersExpected { get; set; }
///
- /// The speech model that was actually used for the transcription.
+ /// The speech model that was actually used for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speech_model_used")]
public string? SpeechModelUsed { get; set; }
///
- /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option.
+ /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speech_models")]
public global::System.Collections.Generic.IList? SpeechModels { get; set; }
///
/// Defaults to null. Reject audio files that contain less than this fraction of speech.
- /// Valid values are in the range [0, 1] inclusive.
+ /// Valid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speech_threshold")]
public float? SpeechThreshold { get; set; }
///
- /// Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ /// Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speech_understanding")]
public global::AssemblyAI.TranscriptSpeechUnderstanding? SpeechUnderstanding { get; set; }
@@ -316,33 +317,33 @@ public sealed partial class Transcript
public required global::AssemblyAI.TranscriptStatus Status { get; set; }
///
- /// Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false
+ /// Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false
///
[global::System.Text.Json.Serialization.JsonPropertyName("summarization")]
[global::System.Text.Json.Serialization.JsonRequired]
public required bool Summarization { get; set; }
///
- /// The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ /// The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled
///
[global::System.Text.Json.Serialization.JsonPropertyName("summary")]
public string? Summary { get; set; }
///
/// The Summarization model used to generate the summary,
- /// if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ /// if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled
///
[global::System.Text.Json.Serialization.JsonPropertyName("summary_model")]
public string? SummaryModel { get; set; }
///
- /// The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ /// The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled
///
[global::System.Text.Json.Serialization.JsonPropertyName("summary_type")]
public string? SummaryType { get; set; }
///
- /// The temperature that was used for the model's response.
+ /// The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
/// Note: This parameter can only be used with the Universal-3-Pro model.
///
[global::System.Text.Json.Serialization.JsonPropertyName("temperature")]
@@ -362,39 +363,38 @@ public sealed partial class Transcript
///
/// When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.
- /// See [Speaker diarization](https://www.assemblyai.com/docs/speech-to-text/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#multichannel-transcription) for more information.
+ /// See [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.
///
[global::System.Text.Json.Serialization.JsonPropertyName("utterances")]
public global::System.Collections.Generic.IList? Utterances { get; set; }
///
- /// Whether webhook authentication details were provided
+ /// Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided
///
[global::System.Text.Json.Serialization.JsonPropertyName("webhook_auth")]
[global::System.Text.Json.Serialization.JsonRequired]
public required bool WebhookAuth { get; set; }
///
- /// The header name to be sent with the transcript completed or failed webhook requests
+ /// The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests
///
[global::System.Text.Json.Serialization.JsonPropertyName("webhook_auth_header_name")]
public string? WebhookAuthHeaderName { get; set; }
///
- /// The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided
+ /// The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided
///
[global::System.Text.Json.Serialization.JsonPropertyName("webhook_status_code")]
public int? WebhookStatusCode { get; set; }
///
- /// The URL to which we send webhook requests.
+ /// The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.
///
[global::System.Text.Json.Serialization.JsonPropertyName("webhook_url")]
public string? WebhookUrl { get; set; }
///
- /// An array of temporally-sequential word objects, one for each word in the transcript.
- /// See [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.
+ /// An array of temporally-sequential word objects, one for each word in the transcript.
///
[global::System.Text.Json.Serialization.JsonPropertyName("words")]
public global::System.Collections.Generic.IList? Words { get; set; }
@@ -443,7 +443,7 @@ public sealed partial class Transcript
public global::System.Collections.Generic.IList? Topics { get; set; }
///
- /// Translated text keyed by language code
+ /// Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("translated_texts")]
public global::AssemblyAI.TranscriptTranslatedTexts? TranslatedTexts { get; set; }
@@ -458,81 +458,81 @@ public sealed partial class Transcript
/// Initializes a new instance of the class.
///
///
- /// The number of audio channels in the audio file. This is only present when multichannel is enabled.
+ /// The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled.
///
///
/// The duration of this transcript object's media file, in seconds
///
///
- /// The point in time, in milliseconds, in the file at which the transcription was terminated
+ /// The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
///
- /// The point in time, in milliseconds, in the file at which the transcription was started
+ /// The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
///
/// The URL of the media that was transcribed
///
///
- /// Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false
+ /// Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false
///
///
- /// Whether Key Phrases is enabled, either true or false
+ /// Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false
///
///
/// An array of results for the Key Phrases model, if it is enabled.
- /// See [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.
+ /// See [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.
///
///
- /// An array of temporally sequential chapters for the audio file
+ /// An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information.
///
///
/// The confidence score for the transcript, between 0.0 (low confidence) and 1.0 (high confidence)
///
///
- /// Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false
+ /// Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false
///
///
/// An array of results for the Content Moderation model, if it is enabled.
- /// See [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.
+ /// See [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.
///
///
- /// Customize how words are spelled and formatted using to and from values
+ /// Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details.
///
///
- /// Transcribe Filler Words, like "umm", in your media file; can be true or false
+ /// Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false
///
///
/// An array of results for the Entity Detection model, if it is enabled.
- /// See [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.
+ /// See [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.
///
///
- /// Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false
+ /// Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false
///
///
/// Error message of why the transcript failed
///
///
- /// Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false
+ /// Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false
///
///
- /// Whether Text Formatting is enabled, either true or false
+ /// Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false
///
///
- /// Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false
+ /// Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false
///
///
/// The result of the Topic Detection model, if it is enabled.
- /// See [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.
+ /// See [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.
///
///
/// The unique identifier of your transcript
///
///
- /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase).
+ /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.
///
///
/// The language of your audio file.
- /// Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ /// Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
/// The default value is 'en_us'.
///
///
@@ -540,90 +540,91 @@ public sealed partial class Transcript
/// One of the values specified must be `en`.
///
///
- /// The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)
+ /// The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
///
/// The confidence threshold for the automatically detected language.
- /// An error will be returned if the language confidence is below this threshold.
+ /// An error will be returned if the language confidence is below this threshold.
+ /// See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
///
/// Whether [Automatic language detection](/docs/pre-recorded-audio/automatic-language-detection) is enabled, either true or false
///
///
- /// Specify options for Automatic Language Detection.
+ /// Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
///
///
- /// Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false
+ /// Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false
///
///
- /// Provide natural language prompting of up to 1,500 words of contextual information to the model.
+ /// Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.
/// Note: This parameter is only supported for the Universal-3-Pro model.
///
///
- /// Whether Automatic Punctuation is enabled, either true or false
+ /// Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false
///
///
- /// Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false
+ /// Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false
///
///
/// Whether a redacted version of the audio file was generated,
- /// either true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ /// either true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.
///
///
/// The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.
- /// See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.
///
///
/// The list of PII Redaction policies that were enabled, if PII Redaction is enabled.
- /// See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ /// See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.
///
///
- /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
///
///
- /// Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false
+ /// Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false
///
///
/// An array of results for the Sentiment Analysis model, if it is enabled.
- /// See [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.
+ /// See [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.
///
///
- /// Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false
+ /// Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false
///
///
- /// Tell the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
+ /// Tell the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details.
///
///
- /// The speech model that was actually used for the transcription.
+ /// The speech model that was actually used for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models.
///
///
- /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option.
+ /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.
///
///
/// Defaults to null. Reject audio files that contain less than this fraction of speech.
- /// Valid values are in the range [0, 1] inclusive.
+ /// Valid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.
///
///
- /// Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ /// Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
///
///
/// The status of your transcript. Possible values are queued, processing, completed, or error.
///
///
- /// Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false
+ /// Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false
///
///
- /// The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ /// The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled
///
///
/// The Summarization model used to generate the summary,
- /// if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ /// if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled
///
///
- /// The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ /// The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled
///
///
- /// The temperature that was used for the model's response.
+ /// The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
/// Note: This parameter can only be used with the Universal-3-Pro model.
///
///
@@ -634,23 +635,22 @@ public sealed partial class Transcript
///
///
/// When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.
- /// See [Speaker diarization](https://www.assemblyai.com/docs/speech-to-text/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#multichannel-transcription) for more information.
+ /// See [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.
///
///
- /// Whether webhook authentication details were provided
+ /// Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided
///
///
- /// The header name to be sent with the transcript completed or failed webhook requests
+ /// The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests
///
///
- /// The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided
+ /// The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided
///
///
- /// The URL to which we send webhook requests.
+ /// The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.
///
///
- /// An array of temporally-sequential word objects, one for each word in the transcript.
- /// See [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.
+ /// An array of temporally-sequential word objects, one for each word in the transcript.
///
///
/// This parameter does not currently have any functionality attached to it.
@@ -662,7 +662,7 @@ public sealed partial class Transcript
/// This parameter has been replaced with the `speech_models` parameter, learn more about the `speech_models` parameter [here](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model).
///
///
- /// Translated text keyed by language code
+ /// Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details.
///
#if NET7_0_OR_GREATER
[global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageCode.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageCode.g.cs
index 476e54f..abaf89d 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageCode.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageCode.g.cs
@@ -4,7 +4,7 @@
namespace AssemblyAI
{
///
- /// The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ /// The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
/// The default value is 'en_us'.
///
public enum TranscriptLanguageCode
@@ -26,7 +26,7 @@ public enum TranscriptLanguageCode
///
EnUs,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Es,
///
@@ -34,7 +34,7 @@ public enum TranscriptLanguageCode
///
Fr,
///
- ///
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
De,
///
@@ -42,7 +42,7 @@ public enum TranscriptLanguageCode
///
It,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ ///
///
Pt,
///
@@ -62,7 +62,7 @@ public enum TranscriptLanguageCode
///
Am,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Ar,
///
@@ -70,7 +70,7 @@ public enum TranscriptLanguageCode
///
Hy,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
As,
///
@@ -122,7 +122,7 @@ public enum TranscriptLanguageCode
///
Hr,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Cs,
///
@@ -134,11 +134,11 @@ public enum TranscriptLanguageCode
///
Et,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Fo,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Fi,
///
@@ -154,11 +154,11 @@ public enum TranscriptLanguageCode
///
El,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Gu,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Ht,
///
@@ -170,7 +170,7 @@ public enum TranscriptLanguageCode
///
Haw,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
He,
///
@@ -218,7 +218,7 @@ public enum TranscriptLanguageCode
///
Lo,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
La,
///
@@ -282,7 +282,7 @@ public enum TranscriptLanguageCode
///
Nn,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Oc,
///
@@ -290,7 +290,7 @@ public enum TranscriptLanguageCode
///
Pa,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Ps,
///
@@ -326,7 +326,7 @@ public enum TranscriptLanguageCode
///
Sd,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Si,
///
@@ -342,7 +342,7 @@ public enum TranscriptLanguageCode
///
So,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Su,
///
@@ -366,11 +366,11 @@ public enum TranscriptLanguageCode
///
Ta,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Tt,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Te,
///
@@ -394,7 +394,7 @@ public enum TranscriptLanguageCode
///
Uk,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Ur,
///
@@ -414,7 +414,7 @@ public enum TranscriptLanguageCode
///
Yi,
///
- /// //www.assemblyai.com/docs/concepts/supported-languages).
+ /// //www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
///
Yo,
}
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageDetectionOptions.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageDetectionOptions.g.cs
index 6426900..677de73 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageDetectionOptions.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptLanguageDetectionOptions.g.cs
@@ -4,32 +4,32 @@
namespace AssemblyAI
{
///
- /// Specify options for Automatic Language Detection.
+ /// Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
///
public sealed partial class TranscriptLanguageDetectionOptions
{
///
- /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified.
+ /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("expected_languages")]
public global::System.Collections.Generic.IList? ExpectedLanguages { get; set; }
///
- /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score.
+ /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
/// Default Value: auto
///
[global::System.Text.Json.Serialization.JsonPropertyName("fallback_language")]
public string? FallbackLanguage { get; set; }
///
- /// Whether code switching should be detected.
+ /// Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("code_switching")]
public bool? CodeSwitching { get; set; }
///
- /// The confidence threshold for code switching detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
+ /// The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
/// Default Value: 0.3
///
[global::System.Text.Json.Serialization.JsonPropertyName("code_switching_confidence_threshold")]
@@ -45,18 +45,18 @@ public sealed partial class TranscriptLanguageDetectionOptions
/// Initializes a new instance of the class.
///
///
- /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified.
+ /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
///
- /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score.
+ /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
/// Default Value: auto
///
///
- /// Whether code switching should be detected.
+ /// Whether [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.
/// Default Value: false
///
///
- /// The confidence threshold for code switching detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
+ /// The confidence threshold for [code switching](/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
/// Default Value: 0.3
///
#if NET7_0_OR_GREATER
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParams.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParams.g.cs
index b2cd0ad..0565194 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParams.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParams.g.cs
@@ -12,94 +12,94 @@ namespace AssemblyAI
public sealed partial class TranscriptOptionalParams
{
///
- /// The point in time, in milliseconds, to stop transcribing in your media file
+ /// The point in time, in milliseconds, to stop transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("audio_end_at")]
public int? AudioEndAt { get; set; }
///
- /// The point in time, in milliseconds, to begin transcribing in your media file
+ /// The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("audio_start_from")]
public int? AudioStartFrom { get; set; }
///
- /// Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false
+ /// Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("auto_chapters")]
public bool? AutoChapters { get; set; }
///
- /// Enable Key Phrases, either true or false
+ /// Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("auto_highlights")]
public bool? AutoHighlights { get; set; }
///
- /// Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
+ /// Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("content_safety")]
public bool? ContentSafety { get; set; }
///
- /// The confidence threshold for the Content Moderation model. Values must be between 25 and 100.
+ /// The confidence threshold for the [Content Moderation](https://www.assemblyai.com/docs/content-moderation) model. Values must be between 25 and 100.
/// Default Value: 50
///
[global::System.Text.Json.Serialization.JsonPropertyName("content_safety_confidence")]
public int? ContentSafetyConfidence { get; set; }
///
- /// Customize how words are spelled and formatted using to and from values
+ /// Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("custom_spelling")]
public global::System.Collections.Generic.IList? CustomSpelling { get; set; }
///
- /// Transcribe Filler Words, like "umm", in your media file; can be true or false
+ /// Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("disfluencies")]
public bool? Disfluencies { get; set; }
///
- /// Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false
+ /// Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("entity_detection")]
public bool? EntityDetection { get; set; }
///
- /// Filter profanity from the transcribed text, can be true or false
+ /// Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details.
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("filter_profanity")]
public bool? FilterProfanity { get; set; }
///
- /// Enable Text Formatting, can be true or false
+ /// Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false
/// Default Value: true
///
[global::System.Text.Json.Serialization.JsonPropertyName("format_text")]
public bool? FormatText { get; set; }
///
- /// Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false
+ /// Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("iab_categories")]
public bool? IabCategories { get; set; }
///
- /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase).
+ /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("keyterms_prompt")]
public global::System.Collections.Generic.IList? KeytermsPrompt { get; set; }
///
- /// The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ /// The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
/// The default value is 'en_us'.
/// Default Value: en_us
///
@@ -117,68 +117,68 @@ public sealed partial class TranscriptOptionalParams
///
/// The confidence threshold for the automatically detected language.
/// An error will be returned if the language confidence is below this threshold.
- /// Defaults to 0.
+ /// Defaults to 0. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
/// Default Value: 0
///
[global::System.Text.Json.Serialization.JsonPropertyName("language_confidence_threshold")]
public float? LanguageConfidenceThreshold { get; set; }
///
- /// Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.
+ /// Enable [Automatic language detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection), either true or false.
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("language_detection")]
public bool? LanguageDetection { get; set; }
///
- /// Specify options for Automatic Language Detection.
+ /// Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
///
[global::System.Text.Json.Serialization.JsonPropertyName("language_detection_options")]
public global::AssemblyAI.TranscriptOptionalParamsLanguageDetectionOptions? LanguageDetectionOptions { get; set; }
///
- /// Enable [Multichannel](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) transcription, can be true or false.
+ /// Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false.
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("multichannel")]
public bool? Multichannel { get; set; }
///
- /// Provide natural language prompting of up to 1,500 words of contextual information to the model.
+ /// Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.
/// Note: This parameter is only supported for the Universal-3-Pro model.
///
[global::System.Text.Json.Serialization.JsonPropertyName("prompt")]
public string? Prompt { get; set; }
///
- /// Enable Automatic Punctuation, can be true or false
+ /// Enable [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false
/// Default Value: true
///
[global::System.Text.Json.Serialization.JsonPropertyName("punctuate")]
public bool? Punctuate { get; set; }
///
- /// Redact PII from the transcribed text using the Redact PII model, can be true or false
+ /// Redact PII from the transcribed text using the Redact PII model, can be true or false. See [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii")]
public bool? RedactPii { get; set; }
///
- /// Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_audio")]
public bool? RedactPiiAudio { get; set; }
///
- /// Specify options for PII redacted audio files.
+ /// Specify options for [PII redacted audio](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) files.
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_audio_options")]
public global::AssemblyAI.TranscriptOptionalParamsRedactPiiAudioOptions? RedactPiiAudioOptions { get; set; }
///
- /// Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
/// Default Value: mp3
/// Example: mp3
///
@@ -188,13 +188,13 @@ public sealed partial class TranscriptOptionalParams
public global::AssemblyAI.RedactPiiAudioQuality? RedactPiiAudioQuality { get; set; }
///
- /// The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_policies")]
public global::System.Collections.Generic.IList? RedactPiiPolicies { get; set; }
///
- /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
/// Default Value: hash
///
[global::System.Text.Json.Serialization.JsonPropertyName("redact_pii_sub")]
@@ -202,34 +202,34 @@ public sealed partial class TranscriptOptionalParams
public global::AssemblyAI.OneOf? RedactPiiSub { get; set; }
///
- /// Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false
+ /// Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("sentiment_analysis")]
public bool? SentimentAnalysis { get; set; }
///
- /// Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
+ /// Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("speaker_labels")]
public bool? SpeakerLabels { get; set; }
///
- /// Specify options for speaker diarization.
+ /// Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speaker_options")]
public global::AssemblyAI.TranscriptOptionalParamsSpeakerOptions? SpeakerOptions { get; set; }
///
- /// Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
+ /// Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details.
/// Default Value: openapi-json-null-sentinel-value-2BF93600-0FE4-4250-987A-E5DDB203E464
///
[global::System.Text.Json.Serialization.JsonPropertyName("speakers_expected")]
public int? SpeakersExpected { get; set; }
///
- /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option.
+ /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speech_models")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -237,27 +237,27 @@ public sealed partial class TranscriptOptionalParams
///
/// Reject audio files that contain less than this fraction of speech.
- /// Valid values are in the range [0, 1] inclusive.
+ /// Valid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.
/// Default Value: 0
///
[global::System.Text.Json.Serialization.JsonPropertyName("speech_threshold")]
public float? SpeechThreshold { get; set; }
///
- /// Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ /// Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
///
[global::System.Text.Json.Serialization.JsonPropertyName("speech_understanding")]
public global::AssemblyAI.TranscriptOptionalParamsSpeechUnderstanding? SpeechUnderstanding { get; set; }
///
- /// Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
+ /// Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("summarization")]
public bool? Summarization { get; set; }
///
- /// The model to summarize the transcript
+ /// The model to summarize the transcript. See [Summary models](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) for available models and when to use each.
/// Default Value: informative
///
[global::System.Text.Json.Serialization.JsonPropertyName("summary_model")]
@@ -265,7 +265,7 @@ public sealed partial class TranscriptOptionalParams
public global::AssemblyAI.SummaryModel? SummaryModel { get; set; }
///
- /// The type of summary
+ /// The type of summary. See [Summary types](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) for descriptions of the available summary types.
/// Default Value: bullets
///
[global::System.Text.Json.Serialization.JsonPropertyName("summary_type")]
@@ -273,7 +273,7 @@ public sealed partial class TranscriptOptionalParams
public global::AssemblyAI.SummaryType? SummaryType { get; set; }
///
- /// Control the amount of randomness injected into the model's response.
+ /// Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
/// Note: This parameter can only be used with the Universal-3-Pro model.
/// Default Value: 0.0
///
@@ -281,21 +281,21 @@ public sealed partial class TranscriptOptionalParams
public double? Temperature { get; set; }
///
- /// The header name to be sent with the transcript completed or failed webhook requests
+ /// The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests
/// Default Value: openapi-json-null-sentinel-value-2BF93600-0FE4-4250-987A-E5DDB203E464
///
[global::System.Text.Json.Serialization.JsonPropertyName("webhook_auth_header_name")]
public string? WebhookAuthHeaderName { get; set; }
///
- /// The header value to send back with the transcript completed or failed webhook requests for added security
+ /// The header value to send back with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests for added security
/// Default Value: openapi-json-null-sentinel-value-2BF93600-0FE4-4250-987A-E5DDB203E464
///
[global::System.Text.Json.Serialization.JsonPropertyName("webhook_auth_header_value")]
public string? WebhookAuthHeaderValue { get; set; }
///
- /// The URL to which we send webhook requests.
+ /// The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.
///
[global::System.Text.Json.Serialization.JsonPropertyName("webhook_url")]
public string? WebhookUrl { get; set; }
@@ -332,55 +332,55 @@ public sealed partial class TranscriptOptionalParams
/// Initializes a new instance of the class.
///
///
- /// The point in time, in milliseconds, to stop transcribing in your media file
+ /// The point in time, in milliseconds, to stop transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
///
- /// The point in time, in milliseconds, to begin transcribing in your media file
+ /// The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
///
///
- /// Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false
+ /// Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false
/// Default Value: false
///
///
- /// Enable Key Phrases, either true or false
+ /// Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false
/// Default Value: false
///
///
- /// Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
+ /// Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false
/// Default Value: false
///
///
- /// The confidence threshold for the Content Moderation model. Values must be between 25 and 100.
+ /// The confidence threshold for the [Content Moderation](https://www.assemblyai.com/docs/content-moderation) model. Values must be between 25 and 100.
/// Default Value: 50
///
///
- /// Customize how words are spelled and formatted using to and from values
+ /// Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details.
///
///
- /// Transcribe Filler Words, like "umm", in your media file; can be true or false
+ /// Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false
/// Default Value: false
///
///
- /// Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false
+ /// Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false
/// Default Value: false
///
///
- /// Filter profanity from the transcribed text, can be true or false
+ /// Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details.
/// Default Value: false
///
///
- /// Enable Text Formatting, can be true or false
+ /// Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false
/// Default Value: true
///
///
- /// Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false
+ /// Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false
/// Default Value: false
///
///
- /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase).
+ /// Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.
///
///
- /// The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ /// The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
/// The default value is 'en_us'.
/// Default Value: en_us
///
@@ -391,104 +391,104 @@ public sealed partial class TranscriptOptionalParams
///
/// The confidence threshold for the automatically detected language.
/// An error will be returned if the language confidence is below this threshold.
- /// Defaults to 0.
+ /// Defaults to 0. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
/// Default Value: 0
///
///
- /// Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.
+ /// Enable [Automatic language detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection), either true or false.
/// Default Value: false
///
///
- /// Specify options for Automatic Language Detection.
+ /// Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
///
///
- /// Enable [Multichannel](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) transcription, can be true or false.
+ /// Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false.
/// Default Value: false
///
///
- /// Provide natural language prompting of up to 1,500 words of contextual information to the model.
+ /// Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.
/// Note: This parameter is only supported for the Universal-3-Pro model.
///
///
- /// Enable Automatic Punctuation, can be true or false
+ /// Enable [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false
/// Default Value: true
///
///
- /// Redact PII from the transcribed text using the Redact PII model, can be true or false
+ /// Redact PII from the transcribed text using the Redact PII model, can be true or false. See [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
/// Default Value: false
///
///
- /// Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
/// Default Value: false
///
///
- /// Specify options for PII redacted audio files.
+ /// Specify options for [PII redacted audio](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) files.
///
///
- /// Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
/// Default Value: mp3
/// Example: mp3
///
///
- /// The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
///
///
- /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ /// The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
/// Default Value: hash
///
///
- /// Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false
+ /// Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false
/// Default Value: false
///
///
- /// Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
+ /// Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false
/// Default Value: false
///
///
- /// Specify options for speaker diarization.
+ /// Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers.
///
///
- /// Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
+ /// Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details.
/// Default Value: openapi-json-null-sentinel-value-2BF93600-0FE4-4250-987A-E5DDB203E464
///
///
- /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option.
+ /// List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.
///
///
/// Reject audio files that contain less than this fraction of speech.
- /// Valid values are in the range [0, 1] inclusive.
+ /// Valid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.
/// Default Value: 0
///
///
- /// Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ /// Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
///
///
- /// Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
+ /// Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false
/// Default Value: false
///
///
- /// The model to summarize the transcript
+ /// The model to summarize the transcript. See [Summary models](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) for available models and when to use each.
/// Default Value: informative
///
///
- /// The type of summary
+ /// The type of summary. See [Summary types](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) for descriptions of the available summary types.
/// Default Value: bullets
///
///
- /// Control the amount of randomness injected into the model's response.
+ /// Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
/// Note: This parameter can only be used with the Universal-3-Pro model.
/// Default Value: 0.0
///
///
- /// The header name to be sent with the transcript completed or failed webhook requests
+ /// The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests
/// Default Value: openapi-json-null-sentinel-value-2BF93600-0FE4-4250-987A-E5DDB203E464
///
///
- /// The header value to send back with the transcript completed or failed webhook requests for added security
+ /// The header value to send back with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests for added security
/// Default Value: openapi-json-null-sentinel-value-2BF93600-0FE4-4250-987A-E5DDB203E464
///
///
- /// The URL to which we send webhook requests.
+ /// The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.
///
#if NET7_0_OR_GREATER
[global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsLanguageDetectionOptions.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsLanguageDetectionOptions.g.cs
index f5f1eb7..55453fe 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsLanguageDetectionOptions.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsLanguageDetectionOptions.g.cs
@@ -4,32 +4,32 @@
namespace AssemblyAI
{
///
- /// Specify options for Automatic Language Detection.
+ /// Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
///
public sealed partial class TranscriptOptionalParamsLanguageDetectionOptions
{
///
- /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified.
+ /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
[global::System.Text.Json.Serialization.JsonPropertyName("expected_languages")]
public global::System.Collections.Generic.IList? ExpectedLanguages { get; set; }
///
- /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score.
+ /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
/// Default Value: auto
///
[global::System.Text.Json.Serialization.JsonPropertyName("fallback_language")]
public string? FallbackLanguage { get; set; }
///
- /// Whether code switching should be detected.
+ /// Whether [code switching](https://www.assemblyai.com/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.
/// Default Value: false
///
[global::System.Text.Json.Serialization.JsonPropertyName("code_switching")]
public bool? CodeSwitching { get; set; }
///
- /// The confidence threshold for code switching detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
+ /// The confidence threshold for [code switching](https://www.assemblyai.com/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
/// Default Value: 0.3
///
[global::System.Text.Json.Serialization.JsonPropertyName("code_switching_confidence_threshold")]
@@ -45,18 +45,18 @@ public sealed partial class TranscriptOptionalParamsLanguageDetectionOptions
/// Initializes a new instance of the class.
///
///
- /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified.
+ /// List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
///
///
- /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score.
+ /// If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
/// Default Value: auto
///
///
- /// Whether code switching should be detected.
+ /// Whether [code switching](https://www.assemblyai.com/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.
/// Default Value: false
///
///
- /// The confidence threshold for code switching detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
+ /// The confidence threshold for [code switching](https://www.assemblyai.com/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
/// Default Value: 0.3
///
#if NET7_0_OR_GREATER
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsRedactPiiAudioOptions.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsRedactPiiAudioOptions.g.cs
index 55dabb0..5a0647f 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsRedactPiiAudioOptions.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsRedactPiiAudioOptions.g.cs
@@ -4,7 +4,7 @@
namespace AssemblyAI
{
///
- /// Specify options for PII redacted audio files.
+ /// Specify options for [PII redacted audio](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) files.
///
public sealed partial class TranscriptOptionalParamsRedactPiiAudioOptions
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeakerOptions.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeakerOptions.g.cs
index 350bf7f..f61fec0 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeakerOptions.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeakerOptions.g.cs
@@ -4,12 +4,12 @@
namespace AssemblyAI
{
///
- /// Specify options for speaker diarization.
+ /// Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers.
///
public sealed partial class TranscriptOptionalParamsSpeakerOptions
{
///
- /// The minimum number of speakers expected in the audio file.
+ /// The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.
/// Default Value: 1
///
[global::System.Text.Json.Serialization.JsonPropertyName("min_speakers_expected")]
@@ -17,7 +17,7 @@ public sealed partial class TranscriptOptionalParamsSpeakerOptions
///
/// <Warning>Setting this parameter too high may hurt model accuracy</Warning>
- /// The maximum number of speakers expected in the audio file.
+ /// The maximum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.
/// Default Value: 10
///
[global::System.Text.Json.Serialization.JsonPropertyName("max_speakers_expected")]
@@ -33,12 +33,12 @@ public sealed partial class TranscriptOptionalParamsSpeakerOptions
/// Initializes a new instance of the class.
///
///
- /// The minimum number of speakers expected in the audio file.
+ /// The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.
/// Default Value: 1
///
///
/// <Warning>Setting this parameter too high may hurt model accuracy</Warning>
- /// The maximum number of speakers expected in the audio file.
+ /// The maximum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.
/// Default Value: 10
///
#if NET7_0_OR_GREATER
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeechUnderstanding.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeechUnderstanding.g.cs
index b03d1eb..fad1109 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeechUnderstanding.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptOptionalParamsSpeechUnderstanding.g.cs
@@ -6,7 +6,7 @@
namespace AssemblyAI
{
///
- /// Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ /// Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
///
public sealed partial class TranscriptOptionalParamsSpeechUnderstanding
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSentence.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSentence.g.cs
index fa7c969..dd6bf6f 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSentence.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSentence.g.cs
@@ -50,7 +50,7 @@ public sealed partial class TranscriptSentence
public string? Channel { get; set; }
///
- /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
///
[global::System.Text.Json.Serialization.JsonPropertyName("speaker")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -84,7 +84,7 @@ public sealed partial class TranscriptSentence
/// The channel of the sentence. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially.
///
///
- /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ /// The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
///
#if NET7_0_OR_GREATER
[global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSpeechUnderstanding.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSpeechUnderstanding.g.cs
index 8bd8fb4..9a85e9b 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSpeechUnderstanding.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptSpeechUnderstanding.g.cs
@@ -6,7 +6,7 @@
namespace AssemblyAI
{
///
- /// Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ /// Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
///
public sealed partial class TranscriptSpeechUnderstanding
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptTranslatedTexts.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptTranslatedTexts.g.cs
index 97571a8..84dcffb 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptTranslatedTexts.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptTranslatedTexts.g.cs
@@ -4,7 +4,7 @@
namespace AssemblyAI
{
///
- /// Translated text keyed by language code
+ /// Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details.
///
public sealed partial class TranscriptTranslatedTexts
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptWord.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptWord.g.cs
index db8b047..b670cdd 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptWord.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranscriptWord.g.cs
@@ -43,7 +43,7 @@ public sealed partial class TranscriptWord
public string? Channel { get; set; }
///
- /// The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ /// The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
///
[global::System.Text.Json.Serialization.JsonPropertyName("speaker")]
[global::System.Text.Json.Serialization.JsonRequired]
@@ -74,7 +74,7 @@ public sealed partial class TranscriptWord
/// The channel of the word. The left and right channels are channels 1 and 2. Additional channels increment the channel number sequentially.
///
///
- /// The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ /// The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
///
#if NET7_0_OR_GREATER
[global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBody.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBody.g.cs
index a2ef239..f6e8729 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBody.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBody.g.cs
@@ -4,7 +4,7 @@
namespace AssemblyAI
{
///
- ///
+ /// Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation).
///
public sealed partial class TranslationRequestBody
{
diff --git a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBodyTranslation.g.cs b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBodyTranslation.g.cs
index 6cf9bf8..96a53de 100644
--- a/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBodyTranslation.g.cs
+++ b/src/libs/AssemblyAI/Generated/AssemblyAI.Models.TranslationRequestBodyTranslation.g.cs
@@ -9,14 +9,14 @@ namespace AssemblyAI
public sealed partial class TranslationRequestBodyTranslation
{
///
- /// List of target language codes (e.g., `["es", "de"]`)
+ /// List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.
///
[global::System.Text.Json.Serialization.JsonPropertyName("target_languages")]
[global::System.Text.Json.Serialization.JsonRequired]
public required global::System.Collections.Generic.IList TargetLanguages { get; set; }
///
- /// Use formal language style
+ /// Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details.
/// Default Value: true
///
[global::System.Text.Json.Serialization.JsonPropertyName("formal")]
@@ -39,10 +39,10 @@ public sealed partial class TranslationRequestBodyTranslation
/// Initializes a new instance of the class.
///
///
- /// List of target language codes (e.g., `["es", "de"]`)
+ /// List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.
///
///
- /// Use formal language style
+ /// Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details.
/// Default Value: true
///
///
diff --git a/src/libs/AssemblyAI/openapi.yaml b/src/libs/AssemblyAI/openapi.yaml
index b5de035..aa4dbcc 100644
--- a/src/libs/AssemblyAI/openapi.yaml
+++ b/src/libs/AssemblyAI/openapi.yaml
@@ -27,7 +27,7 @@ tags:
- name: streaming
description: Streaming Speech-to-Text
externalDocs:
- url: https://www.assemblyai.com/docs/speech-to-text/streaming
+ url: https://www.assemblyai.com/docs/streaming/universal-streaming
security:
- ApiKey: []
@@ -1250,35 +1250,35 @@ components:
properties:
audio_end_at:
x-label: Audio end at
- description: The point in time, in milliseconds, to stop transcribing in your media file
+ description: The point in time, in milliseconds, to stop transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
type: integer
audio_start_from:
x-label: Audio start from
- description: The point in time, in milliseconds, to begin transcribing in your media file
+ description: The point in time, in milliseconds, to begin transcribing in your media file. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
type: integer
auto_chapters:
x-label: Auto chapters
- description: Enable [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters), can be true or false
+ description: Enable [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters), can be true or false
type: boolean
default: false
auto_highlights:
x-label: Key phrases
- description: Enable Key Phrases, either true or false
+ description: Enable [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases), either true or false
type: boolean
default: false
content_safety:
x-label: Content Moderation
- description: Enable [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation), can be true or false
+ description: Enable [Content Moderation](https://www.assemblyai.com/docs/content-moderation), can be true or false
type: boolean
default: false
content_safety_confidence:
x-label: Content Moderation confidence
- description: The confidence threshold for the Content Moderation model. Values must be between 25 and 100.
+ description: The confidence threshold for the [Content Moderation](https://www.assemblyai.com/docs/content-moderation) model. Values must be between 25 and 100.
type: integer
default: 50
minimum: 25
@@ -1286,7 +1286,7 @@ components:
custom_spelling:
x-label: Custom spellings
- description: Customize how words are spelled and formatted using to and from values
+ description: Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details.
type: array
items:
x-label: Custom spelling
@@ -1294,38 +1294,38 @@ components:
disfluencies:
x-label: Disfluencies
- description: Transcribe Filler Words, like "umm", in your media file; can be true or false
+ description: Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false
type: boolean
default: false
entity_detection:
x-label: Entity Detection
- description: Enable [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection), can be true or false
+ description: Enable [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection), can be true or false
type: boolean
default: false
filter_profanity:
x-label: Filter profanity
- description: Filter profanity from the transcribed text, can be true or false
+ description: Filter profanity from the transcribed text, can be true or false. See [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) for more details.
type: boolean
default: false
format_text:
x-label: Format text
- description: Enable Text Formatting, can be true or false
+ description: Enable [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false
type: boolean
default: true
iab_categories:
x-label: Topic Detection
- description: Enable [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection), can be true or false
+ description: Enable [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection), can be true or false
type: boolean
default: false
keyterms_prompt:
x-label: Keyterms prompt
description: |
- Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase).
+ Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.
type: array
items:
x-label: Keyterm
@@ -1334,7 +1334,7 @@ components:
language_code:
x-label: Language code
description: |
- The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
The default value is 'en_us'.
oneOf:
- anyOf:
@@ -1359,7 +1359,7 @@ components:
description: |
The confidence threshold for the automatically detected language.
An error will be returned if the language confidence is below this threshold.
- Defaults to 0.
+ Defaults to 0. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
type: number
format: float
minimum: 0
@@ -1368,19 +1368,19 @@ components:
language_detection:
x-label: Language detection
- description: Enable [Automatic language detection](https://www.assemblyai.com/docs/models/speech-recognition#automatic-language-detection), either true or false.
+ description: Enable [Automatic language detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection), either true or false.
type: boolean
default: false
language_detection_options:
x-label: Specify options for Automatic Language Detection.
- description: Specify options for Automatic Language Detection.
+ description: Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
type: object
additionalProperties: false
properties:
expected_languages:
x-label: Expected languages
- description: List of languages expected in the audio file. Defaults to `["all"]` when unspecified.
+ description: List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
type: array
items:
x-label: language
@@ -1388,19 +1388,19 @@ components:
fallback_language:
x-label: Fallback language
description: |
- If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score.
+ If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
type: string
default: "auto"
code_switching:
x-label: Code switching
description: |
- Whether code switching should be detected.
+ Whether [code switching](https://www.assemblyai.com/docs/speech-to-text/pre-recorded-audio/code-switching) should be detected.
type: boolean
default: false
code_switching_confidence_threshold:
x-label: Code switching confidence threshold
description: |
- The confidence threshold for code switching detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
+ The confidence threshold for [code switching](https://www.assemblyai.com/docs/speech-to-text/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
type: number
minimum: 0
maximum: 1
@@ -1408,39 +1408,39 @@ components:
multichannel:
x-label: Multichannel
- description: Enable [Multichannel](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) transcription, can be true or false.
+ description: Enable [Multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) transcription, can be true or false.
type: boolean
default: false
prompt:
x-label: Prompt
description: |
- Provide natural language prompting of up to 1,500 words of contextual information to the model.
+ Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.
Note: This parameter is only supported for the Universal-3-Pro model.
type: string
punctuate:
x-label: Punctuate
- description: Enable Automatic Punctuation, can be true or false
+ description: Enable [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio), can be true or false
type: boolean
default: true
redact_pii:
x-label: Redact PII
- description: Redact PII from the transcribed text using the Redact PII model, can be true or false
+ description: Redact PII from the transcribed text using the Redact PII model, can be true or false. See [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
type: boolean
default: false
redact_pii_audio:
x-label: Redact PII audio
- description: Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ description: Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
type: boolean
default: false
redact_pii_audio_options:
x-label: Specify options for PII redacted audio files.
- description: Specify options for PII redacted audio files.
+ description: Specify options for [PII redacted audio](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) files.
type: object
additionalProperties: false
properties:
@@ -1452,13 +1452,13 @@ components:
redact_pii_audio_quality:
x-label: Redact PII audio quality
- description: Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ description: Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
default: mp3
$ref: "#/components/schemas/RedactPiiAudioQuality"
redact_pii_policies:
x-label: Redact PII policies
- description: The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ description: The list of PII Redaction policies to enable. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
type: array
items:
x-label: PII policy
@@ -1466,7 +1466,7 @@ components:
redact_pii_sub:
x-label: Redact PII substitution
- description: The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ description: The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
oneOf:
- $ref: "#/components/schemas/SubstitutionPolicy"
- type: "null"
@@ -1474,45 +1474,45 @@ components:
sentiment_analysis:
x-label: Sentiment Analysis
- description: Enable [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis), can be true or false
+ description: Enable [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis), can be true or false
type: boolean
default: false
speaker_labels:
x-label: Speaker labels
- description: Enable [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization), can be true or false
+ description: Enable [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization), can be true or false
type: boolean
default: false
speaker_options:
x-label: Specify options for speaker diarization.
- description: Specify options for speaker diarization.
+ description: Specify options for [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers). Use this to set a range of possible speakers.
type: object
additionalProperties: false
properties:
min_speakers_expected:
x-label: Minimum speakers expected
- description: The minimum number of speakers expected in the audio file.
+ description: The minimum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.
type: integer
default: 1
max_speakers_expected:
x-label: Maximum speakers expected
description: |
Setting this parameter too high may hurt model accuracy
- The maximum number of speakers expected in the audio file.
+ The maximum number of speakers expected in the audio file. See [Set a range of possible speakers](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-a-range-of-possible-speakers) for more details.
type: integer
default: 10
speakers_expected:
x-label: Speakers expected
- description: Tells the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
+ description: Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details.
type: [integer, "null"]
default: null
speech_models:
x-label: Speech models
description: |
- List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option.
+ List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.
type: array
items:
x-label: Speech model
@@ -1522,7 +1522,7 @@ components:
x-label: Speech threshold
description: |
Reject audio files that contain less than this fraction of speech.
- Valid values are in the range [0, 1] inclusive.
+ Valid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.
type: [number, "null"]
format: float
minimum: 0
@@ -1531,7 +1531,8 @@ components:
speech_understanding:
x-label: Speech Understanding
- description: Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ description: |
+ Enable speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
type: object
properties:
request:
@@ -1543,26 +1544,26 @@ components:
- request
summarization:
x-label: Enable Summarization
- description: Enable [Summarization](https://www.assemblyai.com/docs/models/summarization), can be true or false
+ description: Enable [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization), can be true or false
type: boolean
default: false
summary_model:
x-label: Summary model
- description: The model to summarize the transcript
+ description: The model to summarize the transcript. See [Summary models](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) for available models and when to use each.
default: informative
$ref: "#/components/schemas/SummaryModel"
summary_type:
x-label: Summary type
- description: The type of summary
+ description: The type of summary. See [Summary types](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) for descriptions of the available summary types.
default: bullets
$ref: "#/components/schemas/SummaryType"
temperature:
x-label: Temperature
description: |
- Control the amount of randomness injected into the model's response.
+ Control the amount of randomness injected into the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
Note: This parameter can only be used with the Universal-3-Pro model.
type: number
@@ -1572,20 +1573,20 @@ components:
webhook_auth_header_name:
x-label: Webhook auth header name
- description: The header name to be sent with the transcript completed or failed webhook requests
+ description: The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests
type: [string, "null"]
default: null
webhook_auth_header_value:
x-label: Webhook auth header value
- description: The header value to send back with the transcript completed or failed webhook requests for added security
+ description: The header value to send back with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests for added security
type: [string, "null"]
default: null
webhook_url:
x-label: Webhook URL
description: |
- The URL to which we send webhook requests.
+ The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.
type: string
format: url
@@ -1987,7 +1988,7 @@ components:
x-label: Redact PII substitution
type: string
x-fern-sdk-group-name: transcripts
- description: The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ description: The replacement logic for detected PII, can be `entity_name` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
enum:
- entity_name
- hash
@@ -2000,7 +2001,7 @@ components:
RedactPiiAudioQuality:
x-label: Redact PII audio quality
type: string
- description: Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ description: Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more details.
x-fern-sdk-group-name: transcripts
enum:
- mp3
@@ -2291,14 +2292,14 @@ components:
SpeechModel:
x-label: Speech model
type: string
- description: The speech model to use for the transcription.
+ description: The speech model to use for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models.
x-fern-sdk-group-name: transcripts
TranscriptLanguageCode:
x-label: Language code
type: string
description: |
- The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ The language of your audio file. Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
The default value is 'en_us'.
x-fern-sdk-group-name: transcripts
enum:
@@ -2667,7 +2668,7 @@ components:
properties:
audio_channels:
x-label: Audio channels
- description: The number of audio channels in the audio file. This is only present when multichannel is enabled.
+ description: The number of audio channels in the audio file. This is only present when [multichannel](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) is enabled.
type: integer
audio_duration:
@@ -2677,12 +2678,12 @@ components:
audio_end_at:
x-label: Audio end at
- description: The point in time, in milliseconds, in the file at which the transcription was terminated
+ description: The point in time, in milliseconds, in the file at which the transcription was terminated. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
type: [integer, "null"]
audio_start_from:
x-label: Audio start from
- description: The point in time, in milliseconds, in the file at which the transcription was started
+ description: The point in time, in milliseconds, in the file at which the transcription was started. See [Set the start and end of the transcript](https://www.assemblyai.com/docs/pre-recorded-audio/set-the-start-and-end-of-the-transcript) for more details.
type: [integer, "null"]
audio_url:
@@ -2693,26 +2694,26 @@ components:
auto_chapters:
x-label: Auto Chapters enabled
- description: Whether [Auto Chapters](https://www.assemblyai.com/docs/models/auto-chapters) is enabled, can be true or false
+ description: Whether [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) is enabled, can be true or false
type: [boolean, "null"]
auto_highlights:
x-label: Key Phrases
- description: Whether Key Phrases is enabled, either true or false
+ description: Whether [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) is enabled, either true or false
type: boolean
auto_highlights_result:
x-label: Key Phrases result
description: |
An array of results for the Key Phrases model, if it is enabled.
- See [Key Phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.
+ See [Key Phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.
oneOf:
- $ref: "#/components/schemas/AutoHighlightsResult"
- type: "null"
chapters:
x-label: Chapters
- description: An array of temporally sequential chapters for the audio file
+ description: An array of temporally sequential chapters for the audio file. See [Auto Chapters](https://www.assemblyai.com/docs/speech-understanding/auto-chapters) for more information.
type: [array, "null"]
items:
x-label: Chapter
@@ -2728,21 +2729,21 @@ components:
content_safety:
x-label: Content Moderation
- description: Whether [Content Moderation](https://www.assemblyai.com/docs/models/content-moderation) is enabled, can be true or false
+ description: Whether [Content Moderation](https://www.assemblyai.com/docs/content-moderation) is enabled, can be true or false
type: [boolean, "null"]
content_safety_labels:
x-label: Content Moderation labels
description: |
An array of results for the Content Moderation model, if it is enabled.
- See [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.
+ See [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.
oneOf:
- $ref: "#/components/schemas/ContentSafetyLabelsResult"
- type: "null"
custom_spelling:
x-label: Custom spellings
- description: Customize how words are spelled and formatted using to and from values
+ description: Customize how words are spelled and formatted using to and from values. See [Custom Spelling](https://www.assemblyai.com/docs/pre-recorded-audio/custom-spelling) for more details.
type: [array, "null"]
items:
x-label: Custom spelling
@@ -2750,14 +2751,14 @@ components:
disfluencies:
x-label: Disfluencies
- description: Transcribe Filler Words, like "umm", in your media file; can be true or false
+ description: Transcribe [Filler Words](https://www.assemblyai.com/docs/pre-recorded-audio/filler-words), like "umm", in your media file; can be true or false
type: [boolean, "null"]
entities:
x-label: Entities
description: |
An array of results for the Entity Detection model, if it is enabled.
- See [Entity detection](https://www.assemblyai.com/docs/models/entity-detection) for more information.
+ See [Entity detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) for more information.
type: [array, "null"]
items:
x-label: Entity
@@ -2765,7 +2766,7 @@ components:
entity_detection:
x-label: Entity Detection
- description: Whether [Entity Detection](https://www.assemblyai.com/docs/models/entity-detection) is enabled, can be true or false
+ description: Whether [Entity Detection](https://www.assemblyai.com/docs/speech-understanding/entity-detection) is enabled, can be true or false
type: [boolean, "null"]
error:
@@ -2775,24 +2776,24 @@ components:
filter_profanity:
x-label: Filter profanity
- description: Whether [Profanity Filtering](https://www.assemblyai.com/docs/models/speech-recognition#profanity-filtering) is enabled, either true or false
+ description: Whether [Profanity Filtering](https://www.assemblyai.com/docs/profanity-filtering) is enabled, either true or false
type: [boolean, "null"]
format_text:
x-label: Format text
- description: Whether Text Formatting is enabled, either true or false
+ description: Whether [Text Formatting](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false
type: [boolean, "null"]
iab_categories:
x-label: Topic Detection
- description: Whether [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) is enabled, can be true or false
+ description: Whether [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) is enabled, can be true or false
type: [boolean, "null"]
iab_categories_result:
x-label: Topic Detection result
description: |
The result of the Topic Detection model, if it is enabled.
- See [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.
+ See [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.
oneOf:
- $ref: "#/components/schemas/TopicDetectionModelResult"
- type: "null"
@@ -2806,7 +2807,7 @@ components:
keyterms_prompt:
x-label: Keyterms prompt
description: |
- Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase).
+ Improve accuracy with up to 200 (for Universal-2) or 1000 (for Universal-3-Pro) domain-specific words or phrases (maximum 6 words per phrase). See [Keyterms Prompting](https://www.assemblyai.com/docs/pre-recorded-audio/keyterms-prompting) for more details.
type: array
items:
x-label: Keyterm
@@ -2816,7 +2817,7 @@ components:
x-label: Language code
description: |
The language of your audio file.
- Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/concepts/supported-languages).
+ Possible values are found in [Supported Languages](https://www.assemblyai.com/docs/pre-recorded-audio/supported-languages).
The default value is 'en_us'.
anyOf:
- $ref: "#/components/schemas/TranscriptLanguageCode"
@@ -2835,7 +2836,7 @@ components:
language_confidence:
x-label: Language confidence
- description: The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence)
+ description: The confidence score for the detected language, between 0.0 (low confidence) and 1.0 (high confidence). See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
type: [number, "null"]
format: double
minimum: 0
@@ -2846,6 +2847,7 @@ components:
description: |
The confidence threshold for the automatically detected language.
An error will be returned if the language confidence is below this threshold.
+ See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
type: [number, "null"]
format: float
minimum: 0
@@ -2858,13 +2860,13 @@ components:
language_detection_options:
x-label: Specify options for Automatic Language Detection.
- description: Specify options for Automatic Language Detection.
+ description: Specify options for [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection).
type: object
additionalProperties: false
properties:
expected_languages:
x-label: Expected languages
- description: List of languages expected in the audio file. Defaults to `["all"]` when unspecified.
+ description: List of languages expected in the audio file. Defaults to `["all"]` when unspecified. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
type: array
items:
x-label: language
@@ -2872,19 +2874,19 @@ components:
fallback_language:
x-label: Fallback language
description: |
- If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score.
+ If the detected language of the audio file is not in the list of expected languages, the `fallback_language` is used. Specify `["auto"]` to let our model choose the fallback language from `expected_languages` with the highest confidence score. See [Automatic Language Detection](https://www.assemblyai.com/docs/pre-recorded-audio/language-detection) for more details.
type: string
default: "auto"
code_switching:
x-label: Code switching
description: |
- Whether code switching should be detected.
+ Whether [code switching](https://www.assemblyai.com/docs/pre-recorded-audio/code-switching) should be detected.
type: boolean
default: false
code_switching_confidence_threshold:
x-label: Code switching confidence threshold
description: |
- The confidence threshold for code switching detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
+ The confidence threshold for [code switching](https://www.assemblyai.com/docs/pre-recorded-audio/code-switching) detection. If the code switching confidence is below this threshold, the transcript will be processed in the language with the highest `language_detection_confidence` score.
type: number
minimum: 0
maximum: 1
@@ -2892,39 +2894,39 @@ components:
multichannel:
x-label: Multichannel
- description: Whether [Multichannel transcription](https://www.assemblyai.com/docs/models/speech-recognition#multichannel-transcription) was enabled in the transcription request, either true or false
+ description: Whether [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) was enabled in the transcription request, either true or false
type: [boolean, "null"]
prompt:
x-label: Prompt
description: |
- Provide natural language prompting of up to 1,500 words of contextual information to the model.
+ Provide natural language prompting of up to 1,500 words of contextual information to the model. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for best practices.
Note: This parameter is only supported for the Universal-3-Pro model.
type: string
punctuate:
x-label: Punctuate
- description: Whether Automatic Punctuation is enabled, either true or false
+ description: Whether [Automatic Punctuation](https://www.assemblyai.com/docs/pre-recorded-audio) is enabled, either true or false
type: [boolean, "null"]
redact_pii:
x-label: Redact PII
- description: Whether [PII Redaction](https://www.assemblyai.com/docs/models/pii-redaction) is enabled, either true or false
+ description: Whether [PII Redaction](https://www.assemblyai.com/docs/pii-redaction) is enabled, either true or false
type: boolean
redact_pii_audio:
x-label: Redact PII audio
description: |
Whether a redacted version of the audio file was generated,
- either true or false. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ either true or false. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.
type: [boolean, "null"]
redact_pii_audio_quality:
x-label: Redact PII audio quality
description: |
The audio quality of the PII-redacted audio file, if redact_pii_audio is enabled.
- See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ See [PII redaction](https://www.assemblyai.com/docs/pii-redaction#request-for-redacted-audio) for more information.
oneOf:
- $ref: "#/components/schemas/RedactPiiAudioQuality"
- type: "null"
@@ -2933,7 +2935,7 @@ components:
x-label: Redact PII policies
description: |
The list of PII Redaction policies that were enabled, if PII Redaction is enabled.
- See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more information.
+ See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more information.
type: [array, "null"]
items:
x-label: PII policy
@@ -2941,19 +2943,19 @@ components:
redact_pii_sub:
x-label: Redact PII substitution
- description: The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/models/pii-redaction) for more details.
+ description: The replacement logic for detected PII, can be `entity_type` or `hash`. See [PII redaction](https://www.assemblyai.com/docs/pii-redaction) for more details.
$ref: "#/components/schemas/SubstitutionPolicy"
sentiment_analysis:
x-label: Sentiment Analysis
- description: Whether [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) is enabled, can be true or false
+ description: Whether [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) is enabled, can be true or false
type: [boolean, "null"]
sentiment_analysis_results:
x-label: Sentiment Analysis results
description: |
An array of results for the Sentiment Analysis model, if it is enabled.
- See [Sentiment Analysis](https://www.assemblyai.com/docs/models/sentiment-analysis) for more information.
+ See [Sentiment Analysis](https://www.assemblyai.com/docs/speech-understanding/sentiment-analysis) for more information.
type: [array, "null"]
items:
x-label: Sentiment Analysis result
@@ -2961,24 +2963,24 @@ components:
speaker_labels:
x-label: Speaker labels
- description: Whether [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, can be true or false
+ description: Whether [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, can be true or false
type: [boolean, "null"]
speakers_expected:
x-label: Speakers expected
- description: Tell the speaker label model how many speakers it should attempt to identify. See [Speaker diarization](https://www.assemblyai.com/docs/models/speaker-diarization) for more details.
+ description: Tells the speaker label model how many speakers it should attempt to identify. See [Set number of speakers expected](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization#set-number-of-speakers-expected) for more details.
type: [integer, "null"]
speech_model_used:
x-label: Speech model used
- description: The speech model that was actually used for the transcription.
+ description: The speech model that was actually used for the transcription. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models.
type: string
$ref: "#/components/schemas/SpeechModel"
speech_models:
x-label: Speech models
description: |
- List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option.
+ List multiple speech models in priority order, allowing our system to automatically route your audio to the best available option. See [Model Selection](https://www.assemblyai.com/docs/pre-recorded-audio/select-the-speech-model) for available models and routing behavior.
type: [array, "null"]
items:
x-label: Speech model
@@ -2988,7 +2990,7 @@ components:
x-label: Speech threshold
description: |
Defaults to null. Reject audio files that contain less than this fraction of speech.
- Valid values are in the range [0, 1] inclusive.
+ Valid values are in the range [0, 1] inclusive. See [Speech Threshold](https://www.assemblyai.com/docs/speech-threshold) for more details.
type: [number, "null"]
minimum: 0
maximum: 1
@@ -2996,7 +2998,8 @@ components:
speech_understanding:
x-label: Speech Understanding
- description: Enable speech understanding tasks like translation, speaker identification, and custom formatting
+ description: |
+ Speech understanding tasks like [Translation](https://www.assemblyai.com/docs/speech-understanding/translation), [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification), and [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting). See the task-specific docs for available options and configuration.
type: object
properties:
request:
@@ -3017,30 +3020,30 @@ components:
summarization:
x-label: Summarization enabled
- description: Whether [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled, either true or false
+ description: Whether [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled, either true or false
type: boolean
summary:
x-label: Summary
- description: The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ description: The generated summary of the media file, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization) is enabled
type: [string, "null"]
summary_model:
x-label: Summary model
description: |
The Summarization model used to generate the summary,
- if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-models) is enabled
type: [string, "null"]
summary_type:
x-label: Summary type
- description: The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/models/summarization) is enabled
+ description: The type of summary generated, if [Summarization](https://www.assemblyai.com/docs/speech-understanding/summarization#summary-types) is enabled
type: [string, "null"]
temperature:
x-label: Temperature
description: |
- The temperature that was used for the model's response.
+ The temperature that was used for the model's response. See the [Prompting Guide](https://www.assemblyai.com/docs/pre-recorded-audio/prompting) for more details.
Note: This parameter can only be used with the Universal-3-Pro model.
type: [number, "null"]
@@ -3061,7 +3064,7 @@ components:
x-label: Utterances
description: |
When multichannel or speaker_labels is enabled, a list of turn-by-turn utterance objects.
- See [Speaker diarization](https://www.assemblyai.com/docs/speech-to-text/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/speech-to-text/speech-recognition#multichannel-transcription) for more information.
+ See [Speaker diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) and [Multichannel transcription](https://www.assemblyai.com/docs/pre-recorded-audio/multichannel) for more information.
type: [array, "null"]
items:
x-label: Utterance
@@ -3069,23 +3072,23 @@ components:
webhook_auth:
x-label: Webhook auth enabled
- description: Whether webhook authentication details were provided
+ description: Whether [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) authentication details were provided
type: boolean
webhook_auth_header_name:
x-label: Webhook auth header name
- description: The header name to be sent with the transcript completed or failed webhook requests
+ description: The header name to be sent with the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests
type: [string, "null"]
webhook_status_code:
x-label: Webhook HTTP status code
- description: The status code we received from your server when delivering the transcript completed or failed webhook request, if a webhook URL was provided
+ description: The status code we received from your server when delivering the transcript completed or failed [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) request, if a webhook URL was provided
type: [integer, "null"]
webhook_url:
x-label: Webhook URL
description: |
- The URL to which we send webhook requests.
+ The URL to which we send [webhook](https://www.assemblyai.com/docs/deployment/webhooks-for-pre-recorded-audio) requests.
type: [string, "null"]
format: url
@@ -3093,7 +3096,6 @@ components:
x-label: Words
description: |
An array of temporally-sequential word objects, one for each word in the transcript.
- See [Speech recognition](https://www.assemblyai.com/docs/models/speech-recognition) for more information.
type: [array, "null"]
items:
x-label: Word
@@ -3147,7 +3149,7 @@ components:
translated_texts:
# x-label: Translated text
type: object
- description: Translated text keyed by language code
+ description: Translated text keyed by language code. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details.
properties:
language_code:
x-label: Language code
@@ -3779,7 +3781,7 @@ components:
x-label: Topic Detection result
description: |
The result of the Topic Detection model, if it is enabled.
- See [Topic Detection](https://www.assemblyai.com/docs/models/topic-detection) for more information.
+ See [Topic Detection](https://www.assemblyai.com/docs/speech-understanding/topic-detection) for more information.
x-fern-sdk-group-name: transcripts
type: object
required:
@@ -3889,7 +3891,7 @@ components:
x-label: Content Moderation labels result
description: |
An array of results for the Content Moderation model, if it is enabled.
- See [Content moderation](https://www.assemblyai.com/docs/models/content-moderation) for more information.
+ See [Content moderation](https://www.assemblyai.com/docs/content-moderation) for more information.
x-fern-sdk-group-name: transcripts
type: object
required:
@@ -4349,7 +4351,7 @@ components:
type: [string, "null"]
speaker:
x-label: Speaker
- description: The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ description: The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
type: [string, "null"]
example:
{
@@ -4584,7 +4586,7 @@ components:
x-label: Auto highlights result
description: |
An array of results for the Key Phrases model, if it is enabled.
- See [Key phrases](https://www.assemblyai.com/docs/models/key-phrases) for more information.
+ See [Key phrases](https://www.assemblyai.com/docs/speech-understanding/key-phrases) for more information.
x-fern-sdk-group-name: transcripts
type: object
required:
@@ -4801,7 +4803,7 @@ components:
type: [string, "null"]
speaker:
x-label: Speaker
- description: The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ description: The speaker of the word if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
type: [string, "null"]
example:
{
@@ -4858,7 +4860,7 @@ components:
type: [string, "null"]
speaker:
x-label: Speaker
- description: The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/models/speaker-diarization) is enabled, else null
+ description: The speaker of the sentence if [Speaker Diarization](https://www.assemblyai.com/docs/pre-recorded-audio/speaker-diarization) is enabled, else null
type: [string, "null"]
example:
{
@@ -6091,6 +6093,7 @@ components:
# Speech Understanding Request Schemas
TranslationRequestBody:
x-label: Translation request body
+ description: Request body for [Translation](https://www.assemblyai.com/docs/speech-understanding/translation).
type: object
properties:
translation:
@@ -6100,10 +6103,10 @@ components:
type: array
items:
type: string
- description: List of target language codes (e.g., `["es", "de"]`)
+ description: List of target language codes (e.g., `["es", "de"]`). See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for supported languages.
formal:
type: boolean
- description: Use formal language style
+ description: Use formal language style. See [Translation](https://www.assemblyai.com/docs/speech-understanding/translation) for more details.
default: true
match_original_utterance:
type: boolean
@@ -6116,6 +6119,7 @@ components:
SpeakerIdentificationRequestBody:
type: object
+ description: Request body for [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification).
properties:
speaker_identification:
type: object
@@ -6123,7 +6127,7 @@ components:
speaker_type:
type: string
enum: [role, name]
- description: Type of speaker identification
+ description: Type of speaker identification. See [Speaker Identification](https://www.assemblyai.com/docs/speech-understanding/speaker-identification) for details on each type.
known_values:
type: array
items:
@@ -6136,19 +6140,20 @@ components:
CustomFormattingRequestBody:
type: object
+ description: Request body for [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting).
properties:
custom_formatting:
type: object
properties:
date:
type: string
- description: Date format pattern (e.g., `"mm/dd/yyyy"`)
+ description: Date format pattern (e.g., `"mm/dd/yyyy"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
phone_number:
type: string
- description: Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`)
+ description: Phone number format pattern (e.g., `"(xxx)xxx-xxxx"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
email:
type: string
- description: Email format pattern (e.g., `"username@domain.com"`)
+ description: Email format pattern (e.g., `"username@domain.com"`). See [Custom Formatting](https://www.assemblyai.com/docs/speech-understanding/custom-formatting) for more details.
required:
- custom_formatting