curl --request GET \
--url https://api.gladia.io/v2/live \
--header 'x-gladia-key: <api-key>'

{
"first": "https://api.gladia.io/v2/transcription?status=done&offset=0&limit=20",
"current": "https://api.gladia.io/v2/transcription?status=done&offset=0&limit=20",
"next": "https://api.gladia.io/v2/transcription?status=done&offset=20&limit=20",
"items": [
{
"id": "45463597-20b7-4af7-b3b3-f5fb778203ab",
"request_id": "G-45463597",
"version": 2,
"status": "queued",
"created_at": "2023-12-28T09:04:17.210Z",
"post_session_metadata": {},
"kind": "live",
"completed_at": "2023-12-28T09:04:37.210Z",
"custom_metadata": {
"user": "John Doe"
},
"error_code": 500,
"file": {
"id": "<string>",
"filename": "<string>",
"source": "<string>",
"audio_duration": 3600,
"number_of_channels": 1
},
"request_params": {
"encoding": "wav/pcm",
"bit_depth": 16,
"sample_rate": 16000,
"channels": 1,
"model": "solaria-1",
"endpointing": 0.05,
"maximum_duration_without_endpointing": 5,
"language_config": {
"languages": [],
"code_switching": false
},
"pre_processing": {
"audio_enhancer": false,
"speech_threshold": 0.6
},
"realtime_processing": {
"custom_vocabulary": false,
"custom_vocabulary_config": {
"vocabulary": [
"Westeros",
{
"value": "Stark"
},
{
"value": "Night's Watch",
"pronunciations": [
"Nightz Watch"
],
"intensity": 0.4,
"language": "en"
}
],
"default_intensity": 0.5
},
"custom_spelling": false,
"custom_spelling_config": {
"spelling_dictionary": {
"Gettleman": [
"gettleman"
],
"SQL": [
"Sequel"
]
}
},
"translation": false,
"translation_config": {
"target_languages": [
"en"
],
"model": "base",
"match_original_utterances": true,
"lipsync": true,
"context_adaptation": true,
"context": "<string>",
"informal": false
},
"named_entity_recognition": false,
"sentiment_analysis": false
},
"post_processing": {
"summarization": false,
"summarization_config": {
"type": "general"
},
"chapterization": false
},
"messages_config": {
"receive_partial_transcripts": false,
"receive_final_transcripts": true,
"receive_speech_events": true,
"receive_pre_processing_events": true,
"receive_realtime_processing_events": true,
"receive_post_processing_events": true,
"receive_acknowledgments": true,
"receive_errors": true,
"receive_lifecycle_events": false
},
"callback": false,
"callback_config": {
"url": "https://callback.example",
"receive_partial_transcripts": false,
"receive_final_transcripts": true,
"receive_speech_events": false,
"receive_pre_processing_events": true,
"receive_realtime_processing_events": true,
"receive_post_processing_events": true,
"receive_acknowledgments": false,
"receive_errors": false,
"receive_lifecycle_events": true
}
},
"result": {
"metadata": {
"audio_duration": 3600,
"number_of_distinct_channels": 1,
"billing_time": 3600,
"transcription_time": 20
},
"transcription": {
"full_transcript": "<string>",
"languages": [
"en"
],
"utterances": [
{
"start": 123,
"end": 123,
"confidence": 123,
"channel": 1,
"words": [
{
"word": "<string>",
"start": 123,
"end": 123,
"confidence": 123
}
],
"text": "<string>",
"language": "en",
"speaker": 1
}
],
"sentences": [
{
"success": true,
"is_empty": true,
"exec_time": 123,
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"results": [
"<string>"
]
}
],
"subtitles": [
{
"format": "srt",
"subtitles": "<string>"
}
]
},
"translation": {
"success": true,
"is_empty": true,
"exec_time": 123,
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"results": [
{
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"full_transcript": "<string>",
"languages": [
"en"
],
"utterances": [
{
"start": 123,
"end": 123,
"confidence": 123,
"channel": 1,
"words": [
{
"word": "<string>",
"start": 123,
"end": 123,
"confidence": 123
}
],
"text": "<string>",
"language": "en",
"speaker": 1
}
],
"sentences": [
{
"success": true,
"is_empty": true,
"exec_time": 123,
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"results": [
"<string>"
]
}
],
"subtitles": [
{
"format": "srt",
"subtitles": "<string>"
}
]
}
]
},
"summarization": {
"success": true,
"is_empty": true,
"exec_time": 123,
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"results": "<string>"
},
"named_entity_recognition": {
"success": true,
"is_empty": true,
"exec_time": 123,
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"entity": "<string>"
},
"sentiment_analysis": {
"success": true,
"is_empty": true,
"exec_time": 123,
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"results": "<string>"
},
"chapterization": {
"success": true,
"is_empty": true,
"exec_time": 123,
"error": {
"status_code": 500,
"exception": "<string>",
"message": "<string>"
},
"results": {}
},
"messages": [
"<string>"
]
}
}
]
}

List all the live transcriptions matching the parameters.
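As an illustration, the query parameters documented below can be combined on the request URL. This is a hedged sketch: the offset, limit, and status names appear in the pagination links above, while after_date and before_date are referenced in the parameter descriptions.

curl --request GET \
  --url 'https://api.gladia.io/v2/live?status=done&offset=0&limit=20&after_date=2025-12-01T00:00:00.000Z' \
  --header 'x-gladia-key: <api-key>'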
Your personal Gladia API key
The starting point for pagination. A value of 0 starts from the first item.
x >= 0
The maximum number of items to return. Useful for pagination and controlling data payload size.
x >= 1
Filter items relevant to a specific date in ISO format (YYYY-MM-DD).
"2025-12-09"
Include items that occurred before the specified date in ISO format.
"2025-12-09T15:26:24.922Z"
Filter for items after the specified date. Use with before_date for a range. Date in ISO format.
"2025-12-09T15:26:24.922Z"
Filter the list based on item status. Accepts multiple values from the predefined list.
queued, processing, done, error
["done"]
{ "user": "John Doe" }
A list of live jobs matching the parameters.
URL to fetch the first page
"https://api.gladia.io/v2/transcription?status=done&offset=0&limit=20"
URL to fetch the current page
"https://api.gladia.io/v2/transcription?status=done&offset=0&limit=20"
URL to fetch the next page
"https://api.gladia.io/v2/transcription?status=done&offset=20&limit=20"
List of live transcriptions
Id of the job
"45463597-20b7-4af7-b3b3-f5fb778203ab"
Debug id
"G-45463597"
API version
2
"queued": the job has been queued. "processing": the job is being processed. "done": the job has been processed and the result is available. "error": an error occurred during the job's processing.
queued, processing, done, error
Creation date
"2023-12-28T09:04:17.210Z"
For debugging purposes, send data that could help identify issues
live
"live"
Completion date when status is "done" or "error"
"2023-12-28T09:04:37.210Z"
Custom metadata given in the initial request
{ "user": "John Doe" }HTTP status code of the error if status is "error"
400 <= x <= 599500
The file data you uploaded. Can be null if status is "error"
The file id
The name of the uploaded file
The link used to download the file if audio_url was used
Duration of the audio file
3600
Number of channels in the audio file
x >= 1
1
Parameters used for this live transcription. Can be null if status is "error"
The encoding format of the audio stream. Supported formats:
Note: No need to add WAV headers to raw audio as the API supports both formats.
wav/pcm, wav/alaw, wav/ulaw
The bit depth of the audio stream
8, 16, 24, 32
The sample rate of the audio stream
8000, 16000, 32000, 44100, 48000
The number of channels of the audio stream
1 <= x <= 8
The model used to process the audio. "solaria-1" is used by default.
solaria-1
The endpointing duration in seconds. Endpointing is the duration of silence after which an utterance is considered finished
0.01 <= x <= 10
The maximum duration in seconds without endpointing. If no endpoint is detected after this duration, the current utterance is considered finished
5 <= x <= 60
Specify the language configuration
If one language is set, it will be used for the transcription. Otherwise, language will be auto-detected by the model.
Specify the language in which it will be pronounced when sound comparison occurs. Defaults to the transcription language.
af, am, ar, as, az, ba, be, bg, bn, bo, br, bs, ca, cs, cy, da, de, el, en, es, et, eu, fa, fi, fo, fr, gl, gu, ha, haw, he, hi, hr, ht, hu, hy, id, is, it, ja, jw, ka, kk, km, kn, ko, la, lb, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, no, oc, pa, pl, ps, pt, ro, ru, sa, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ur, uz, vi, yi, yo, zh
If true, language will be auto-detected on each utterance. Otherwise, language will be auto-detected on the first utterance and then used for the rest of the transcription. If one language is set, this option is ignored.
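Taken together, a typical audio and language configuration recorded in request_params might look as follows; an illustrative sketch, with values picked from the enumerations and ranges above:

{
  "encoding": "wav/pcm",
  "bit_depth": 16,
  "sample_rate": 16000,
  "channels": 1,
  "model": "solaria-1",
  "endpointing": 0.05,
  "maximum_duration_without_endpointing": 10,
  "language_config": {
    "languages": ["en"],
    "code_switching": false
  }
}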
Specify the pre-processing configuration
If true, apply pre-processing to the audio stream to enhance the quality.
Sensitivity configuration for the speech threshold. A value close to 1 applies stricter thresholds, making it less likely to detect background sounds as speech.
0 <= x <= 1
Specify the realtime processing configuration
If true, enable custom vocabulary for the transcription.
Custom vocabulary configuration, if custom_vocabulary is enabled
Specific vocabulary list to feed the transcription model with. Each item can be a string or an object with the following properties: value, intensity, pronunciations, language.
The text to be used as a replacement in the transcription.
"Gladia"
The global intensity of the feature.
0 <= x <= 1
0.5
The pronunciations used in the transcription.
Specify the language in which it will be pronounced when sound comparison occurs. Defaults to the transcription language.
af, am, ar, as, az, ba, be, bg, bn, bo, br, bs, ca, cs, cy, da, de, el, en, es, et, eu, fa, fi, fo, fr, gl, gu, ha, haw, he, hi, hr, ht, hu, hy, id, is, it, ja, jw, ka, kk, km, kn, ko, la, lb, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, no, oc, pa, pl, ps, pt, ro, ru, sa, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ur, uz, vi, yi, yo, zh
"en"
[
"Westeros",
{ "value": "Stark" },
{
"value": "Night's Watch",
"pronunciations": ["Nightz Watch"],
"intensity": 0.4,
"language": "en"
}
]
Default intensity for the custom vocabulary
0 <= x <= 1
0.5
If true, enable custom spelling for the transcription.
Custom spelling configuration, if custom_spelling is enabled
The list of spellings applied to the audio transcription
{
"Gettleman": ["gettleman"],
"SQL": ["Sequel"]
}
If true, enable translation for the transcription
Translation configuration, if translation is enabled
Target language in iso639-1 format you want the transcription translated to
af, am, ar, as, az, ba, be, bg, bn, bo, br, bs, ca, cs, cy, da, de, el, en, es, et, eu, fa, fi, fo, fr, gl, gu, ha, haw, he, hi, hr, ht, hu, hy, id, is, it, ja, jw, ka, kk, km, kn, ko, la, lb, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, no, oc, pa, pl, ps, pt, ro, ru, sa, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ur, uz, vi, wo, yi, yo, zh
["en"]
The model to use for the translation
base, enhanced
Align translated utterances with the original ones
Whether to apply lipsync to the translated transcription.
Enables or disables context-aware translation features that allow the model to adapt translations based on provided context.
Context information to improve translation accuracy
Forces the translation to use informal language forms when available in the target language.
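As a sketch, a translation configuration combining the options above could be recorded like this (illustrative values only; "fr" is one of the supported target languages, and the context string is a made-up example):

{
  "translation": true,
  "translation_config": {
    "target_languages": ["en", "fr"],
    "model": "base",
    "match_original_utterances": true,
    "lipsync": false,
    "context_adaptation": true,
    "context": "Weekly engineering stand-up of a robotics team",
    "informal": false
  }
}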
If true, enable named entity recognition for the transcription.
If true, enable sentiment analysis for the transcription.
Specify the post-processing configuration
If true, generates summarization for the whole transcription.
Summarization configuration, if summarization is enabled
The type of summarization to apply
general, bullet_points, concise
If true, generates chapters for the whole transcription.
Specify the websocket messages configuration
If true, partial transcripts will be sent to the websocket.
If true, final transcripts will be sent to the websocket.
If true, begin and end speech events will be sent to the websocket.
If true, pre-processing events will be sent to the websocket.
If true, realtime processing events will be sent to the websocket.
If true, post-processing events will be sent to the websocket.
If true, acknowledgments will be sent to the websocket.
If true, errors will be sent to the websocket.
If true, lifecycle events will be sent to the websocket.
If true, messages will be sent to the configured URL.
Specify the callback configuration
URL to which we will send a POST request with the configured messages
"https://callback.example"
If true, partial transcript will be sent to the defined callback.
If true, final transcript will be sent to the defined callback.
If true, begin and end speech events will be sent to the defined callback.
If true, pre-processing events will be sent to the defined callback.
If true, realtime processing events will be sent to the defined callback.
If true, post-processing events will be sent to the defined callback.
If true, acknowledgments will be sent to the defined callback.
If true, errors will be sent to the defined callback.
If true, lifecycle events will be sent to the defined callback.
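Putting the two blocks together, a session that forwards only final transcripts and post-processing events to a callback might be recorded as below; an illustrative sketch, with flags not shown keeping the defaults documented above:

{
  "messages_config": {
    "receive_partial_transcripts": false,
    "receive_final_transcripts": true
  },
  "callback": true,
  "callback_config": {
    "url": "https://callback.example",
    "receive_final_transcripts": true,
    "receive_post_processing_events": true
  }
}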
Live transcription's result when status is "done"
Metadata for the given transcription & audio file
Duration of the transcribed audio file
3600
Number of distinct channels in the transcribed audio file
x >= 1
1
Billed duration in seconds (audio_duration * number_of_distinct_channels)
3600
Duration of the transcription in seconds
20
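Since billing_time is audio_duration multiplied by number_of_distinct_channels (3600 * 1 = 3600 in the example above), the metadata of finished jobs is easy to spot-check; a sketch assuming curl and jq, with the key in a hypothetical GLADIA_API_KEY variable:

curl --silent --request GET \
  --url 'https://api.gladia.io/v2/live?status=done' \
  --header "x-gladia-key: $GLADIA_API_KEY" \
| jq '.items[] | {id, duration: .result.metadata.audio_duration, channels: .result.metadata.number_of_distinct_channels, billed: .result.metadata.billing_time}'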
Transcription of the audio speech
The whole transcription in text format, without any other information
All the detected languages in the audio, sorted from most to least detected
af, am, ar, as, az, ba, be, bg, bn, bo, br, bs, ca, cs, cy, da, de, el, en, es, et, eu, fa, fi, fo, fr, gl, gu, ha, haw, he, hi, hr, ht, hu, hy, id, is, it, ja, jw, ka, kk, km, kn, ko, la, lb, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, no, oc, pa, pl, ps, pt, ro, ru, sa, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ur, uz, vi, yi, yo, zh
["en"]
Transcribed speech utterances present in the audio
Start timestamp in seconds of this utterance
End timestamp in seconds of this utterance
Confidence in the transcribed utterance (1 = 100% confident)
Audio channel this utterance was transcribed from
x >= 0
List of words of the utterance, split by timestamp
Spoken word
Start timestamp in seconds of the spoken word
End timestamp in seconds of the spoken word
Confidence in the transcribed word (1 = 100% confident)
Transcription for this utterance
Spoken language in this utterance
af, am, ar, as, az, ba, be, bg, bn, bo, br, bs, ca, cs, cy, da, de, el, en, es, et, eu, fa, fi, fo, fr, gl, gu, ha, haw, he, hi, hr, ht, hu, hy, id, is, it, ja, jw, ka, kk, km, kn, ko, la, lb, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, no, oc, pa, pl, ps, pt, ro, ru, sa, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ur, uz, vi, yi, yo, zh
"en"
If diarization is enabled, the speaker identification number
x >= 0
If sentences has been enabled, sentences results
The audio intelligence model succeeded in producing a valid output
The audio intelligence model returned an empty value
Time the audio intelligence model took to complete the task
null if success is true. Contains the error details of the failed model
Status code of the addon error
500
Reason of the addon error
Detailed message of the addon error
If sentences has been enabled, transcription as sentences.
If subtitles has been enabled, subtitles results
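The nested utterances structure flattens nicely for display; a jq sketch that prints one line per utterance of the first returned job, reusing the page variable from the pagination sketch above and assuming the job finished with status "done":

echo "$page" | jq -r '
  .items[0].result.transcription.utterances[]
  | "[\(.start)s-\(.end)s] speaker \(.speaker): \(.text)"'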
If translation has been enabled, translation of the audio speech transcription
The audio intelligence model succeeded in producing a valid output
The audio intelligence model returned an empty value
Time the audio intelligence model took to complete the task
null if success is true. Contains the error details of the failed model
List of translated transcriptions, one for each target language
Contains the error details of the failed addon
The whole transcription in text format, without any other information
All the detected languages in the audio, sorted from most to least detected
Target language in iso639-1 format you want the transcription translated to
af, am, ar, as, az, ba, be, bg, bn, bo, br, bs, ca, cs, cy, da, de, el, en, es, et, eu, fa, fi, fo, fr, gl, gu, ha, haw, he, hi, hr, ht, hu, hy, id, is, it, ja, jw, ka, kk, km, kn, ko, la, lb, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, no, oc, pa, pl, ps, pt, ro, ru, sa, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ur, uz, vi, wo, yi, yo, zh
["en"]
Transcribed speech utterances present in the audio
Start timestamp in seconds of this utterance
End timestamp in seconds of this utterance
Confidence in the transcribed utterance (1 = 100% confident)
Audio channel this utterance was transcribed from
x >= 0
List of words of the utterance, split by timestamp
Spoken word
Start timestamp in seconds of the spoken word
End timestamp in seconds of the spoken word
Confidence in the transcribed word (1 = 100% confident)
Transcription for this utterance
Spoken language in this utterance
af, am, ar, as, az, ba, be, bg, bn, bo, br, bs, ca, cs, cy, da, de, el, en, es, et, eu, fa, fi, fo, fr, gl, gu, ha, haw, he, hi, hr, ht, hu, hy, id, is, it, ja, jw, ka, kk, km, kn, ko, la, lb, ln, lo, lt, lv, mg, mi, mk, ml, mn, mr, ms, mt, my, ne, nl, nn, no, oc, pa, pl, ps, pt, ro, ru, sa, sd, si, sk, sl, sn, so, sq, sr, su, sv, sw, ta, te, tg, th, tk, tl, tr, tt, uk, ur, uz, vi, yi, yo, zh
"en"
If diarization is enabled, the speaker identification number
x >= 0
If sentences has been enabled, sentences results for this translation
The audio intelligence model succeeded in producing a valid output
The audio intelligence model returned an empty value
Time the audio intelligence model took to complete the task
null if success is true. Contains the error details of the failed model
Status code of the addon error
500
Reason of the addon error
Detailed message of the addon error
If sentences has been enabled, transcription as sentences.
If subtitles has been enabled, subtitles results for this translation
If summarization has been enabled, summarization of the audio speech transcription
The audio intelligence model succeeded in producing a valid output
The audio intelligence model returned an empty value
Time the audio intelligence model took to complete the task
null if success is true. Contains the error details of the failed model
If summarization has been enabled, summary of the transcription
If named_entity_recognition has been enabled, the detected entities
The audio intelligence model succeeded in producing a valid output
The audio intelligence model returned an empty value
Time the audio intelligence model took to complete the task
null if success is true. Contains the error details of the failed model
Status code of the addon error
500
Reason of the addon error
Detailed message of the addon error
If named_entity_recognition has been enabled, the detected entities.
If sentiment_analysis has been enabled, sentiment analysis of the audio speech transcription
The audio intelligence model succeeded in producing a valid output
The audio intelligence model returned an empty value
Time the audio intelligence model took to complete the task
null if success is true. Contains the error details of the failed model
If sentiment_analysis has been enabled, Gladia will analyze the sentiments and emotions of the audio
If chapterization has been enabled, chapter names will be generated for the different parts of the given audio.
The audio intelligence model succeeded in producing a valid output
The audio intelligence model returned an empty value
Time the audio intelligence model took to complete the task
null if success is true. Contains the error details of the failed model
If chapterization has been enabled, the generated chapter names for the different parts of the given audio.
Real-time messages sent by the server during the live transcription