diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7532c65..9021c7b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,6 +22,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + - uses: actions/setup-java@v3 + with: + distribution: 'temurin' + java-version: '11' - uses: nttld/setup-ndk@v1 id: setup-ndk with: @@ -193,7 +197,7 @@ jobs: - uses: subosito/flutter-action@v2 with: - flutter-version: 3.0.0 + flutter-version: 3.10.0 cache: true - run: flutter pub get @@ -217,14 +221,15 @@ jobs: steps: - uses: actions/checkout@v3 - - uses: actions/setup-java@v1 + - uses: actions/setup-java@v3 if: ${{ matrix.platform == 'apk' }} with: - java-version: "11" + distribution: 'temurin' + java-version: '11' - uses: subosito/flutter-action@v2 with: - flutter-version: 3.0.0 + flutter-version: 3.10.0 cache: true - run: flutter pub get diff --git a/cxx/AudioFrameObserver.cpp b/cxx/AudioFrameObserver.cpp index dc88fc1..8b82bd8 100644 --- a/cxx/AudioFrameObserver.cpp +++ b/cxx/AudioFrameObserver.cpp @@ -39,13 +39,23 @@ bool AudioFrameObserver::onPlaybackAudioFrameBeforeMixing( bool AudioFrameObserver::onRecordAudioFrame(const char *channelId, AudioFrame &audioFrame) { - auto *buffer = (char *) audioFrame.buffer; - for (int i = 0; i < audioFrame.bytesPerSample * audioFrame.channels - * audioFrame.samplesPerChannel; - i += audioFrame.bytesPerSample) { - // set the audio frame to noise, if you set the audio frame to silence, you can remove this if statement - if (i % 2 == 0) { buffer[i] = 0; } + static std::random_device rd; + static std::mt19937 gen(rd()); + + auto *buffer = static_cast<int16_t *>(audioFrame.buffer); + int totalSamples = audioFrame.channels * audioFrame.samplesPerChannel; + + // Generate white noise: fill audio buffer with random values + // Noise amplitude can be adjusted via noiseAmplitude (0.0 - 1.0) + constexpr float noiseAmplitude = 0.3f; + std::uniform_int_distribution<int16_t> dist( + static_cast<int16_t>(-32768 * 
noiseAmplitude), + static_cast<int16_t>(32767 * noiseAmplitude)); + + for (int i = 0; i < totalSamples; ++i) { + buffer[i] = dist(gen); } + return true; } diff --git a/include/AgoraBase.h b/include/AgoraBase.h index 7ccb689..3e2b34a 100644 --- a/include/AgoraBase.h +++ b/include/AgoraBase.h @@ -262,19 +262,15 @@ class AList { } // namespace util /** - * The channel profile. + * @brief The channel profile. */ enum CHANNEL_PROFILE_TYPE { /** - * 0: Communication. - * - * This profile prioritizes smoothness and applies to the one-to-one scenario. + * 0: Communication. Use this profile when there are only two users in the channel. */ CHANNEL_PROFILE_COMMUNICATION = 0, /** - * 1: (Default) Live Broadcast. - * - * This profile prioritizes supporting a large audience in a live broadcast channel. + * 1: Live streaming. Use this profile when there are more than two users in the channel. */ CHANNEL_PROFILE_LIVE_BROADCASTING = 1, /** @@ -283,8 +279,8 @@ enum CHANNEL_PROFILE_TYPE { */ CHANNEL_PROFILE_GAME __deprecated = 2, /** - * 3: Cloud Gaming. - * + * 3: Cloud gaming. The scenario is optimized for latency. Use this profile if the use case requires + * frequent interactions between users. * @deprecated This profile is deprecated. */ CHANNEL_PROFILE_CLOUD_GAMING __deprecated = 3, @@ -427,6 +423,10 @@ enum WARN_CODE_TYPE { * 1053: Audio Device Module: The settings are improper. */ WARN_ADM_IMPROPER_SETTINGS = 1053, + /** + * 1055: Audio Device Module: The audio device is in a pop state. + */ + WARN_ADM_POP_STATE = 1055, /** * 1322: No recording device. */ @@ -447,59 +447,75 @@ enum WARN_CODE_TYPE { }; /** - * The error codes. + * @brief Error codes. + * + * @details + * An error code indicates that the SDK encountered an unrecoverable error that requires application + * intervention. For example, an error is returned when the camera fails to open, and the app needs + * to inform the user that the camera cannot be used. + * */ enum ERROR_CODE_TYPE { /** - * 0: No error occurs. 
+ * 0: No error. */ ERR_OK = 0, // 1~1000 /** - * 1: A general error occurs (no specified reason). + * 1: General error with no classified reason. Try calling the method again. */ ERR_FAILED = 1, /** - * 2: The argument is invalid. For example, the specific channel name - * includes illegal characters. + * 2: An invalid parameter is used. For example, the specified channel name includes illegal + * characters. Reset the parameter. */ ERR_INVALID_ARGUMENT = 2, /** - * 3: The SDK module is not ready. Choose one of the following solutions: - * - Check the audio device. - * - Check the completeness of the app. - * - Reinitialize the RTC engine. + * 3: The SDK is not ready. Possible reasons include the following: + * - The initialization of `IRtcEngine` fails. Reinitialize the `IRtcEngine`. + * - No user has joined the channel when the method is called. Check the code logic. + * - The user has not left the channel when the `rate` or `complain` method is called. Check the + * code logic. + * - The audio module is disabled. + * - The program is not complete. */ ERR_NOT_READY = 3, /** - * 4: The SDK does not support this function. + * 4: The `IRtcEngine` does not support the request. Possible reasons include the following: + * - The built-in encryption mode is incorrect, or the SDK fails to load the external encryption + * library. Check the encryption mode setting, or reload the external encryption library. */ ERR_NOT_SUPPORTED = 4, /** - * 5: The request is rejected. + * 5: The request is rejected. Possible reasons include the following: + * - The `IRtcEngine` initialization fails. Reinitialize the `IRtcEngine`. + * - The channel name is set as the empty string `""` when joining the channel. Reset the channel + * name. + * - When the `joinChannelEx` method is called to join multiple channels, the specified channel name + * is already in use. Reset the channel name. */ ERR_REFUSED = 5, /** - * 6: The buffer size is not big enough to store the returned data. 
+ * 6: The buffer size is insufficient to store the returned data. */ ERR_BUFFER_TOO_SMALL = 6, /** - * 7: The SDK is not initialized before calling this method. + * 7: A method is called before the initialization of `IRtcEngine`. Ensure that the `IRtcEngine` + * object is initialized before using this method. */ ERR_NOT_INITIALIZED = 7, /** - * 8: The state is invalid. + * 8: Invalid state. */ ERR_INVALID_STATE = 8, /** - * 9: No permission. This is for internal use only, and does - * not return to the app through any method or callback. + * 9: Permission to access is not granted. Check whether your app has access to the audio and video + * device. */ ERR_NO_PERMISSION = 9, /** - * 10: An API timeout occurs. Some API methods require the SDK to return the - * execution result, and this error occurs if the request takes too long - * (more than 10 seconds) for the SDK to process. + * 10: A timeout occurs. Some API calls require the SDK to return the execution result. This error + * occurs if the SDK takes too long (more than 10 seconds) to return the result. */ ERR_TIMEDOUT = 10, /** @@ -525,125 +541,112 @@ enum ERROR_CODE_TYPE { */ ERR_NET_DOWN = 14, /** - * 17: The request to join the channel is rejected. This error usually occurs - * when the user is already in the channel, and still calls the method to join - * the channel, for example, \ref agora::rtc::IRtcEngine::joinChannel "joinChannel()". + * 17: The request to join the channel is rejected. Possible reasons include the following: + * - The user is already in the channel. Agora recommends that you use the + * `onConnectionStateChanged` callback to see whether the user is in the channel. Do not call this + * method to join the channel unless you receive the `CONNECTION_STATE_DISCONNECTED` (1) state. + * - After calling `startEchoTest` for the call test, the user tries to join the channel without + * calling `stopEchoTest` to end the current test. 
To join a channel, the call test must be ended by + * calling `stopEchoTest`. */ ERR_JOIN_CHANNEL_REJECTED = 17, /** - * 18: The request to leave the channel is rejected. This error usually - * occurs when the user has already left the channel, and still calls the - * method to leave the channel, for example, \ref agora::rtc::IRtcEngine::leaveChannel - * "leaveChannel". + * 18: Fails to leave the channel. Possible reasons include the following: + * - The user has left the channel before calling the `leaveChannel(const LeaveChannelOptions& + * options)` method. Stop calling this + * method to clear this error. + * - The user calls the `leaveChannel(const LeaveChannelOptions& options)` method to leave the + * channel before joining the channel. + * In this case, no extra operation is needed. */ ERR_LEAVE_CHANNEL_REJECTED = 18, /** - * 19: The resources have been occupied and cannot be reused. + * 19: Resources are already in use. */ ERR_ALREADY_IN_USE = 19, /** - * 20: The SDK gives up the request due to too many requests. This is for - * internal use only, and does not return to the app through any method or callback. + * 20: The request is abandoned by the SDK, possibly because the request has been sent too + * frequently. */ ERR_ABORTED = 20, /** - * 21: On Windows, specific firewall settings can cause the SDK to fail to - * initialize and crash. + * 21: The `IRtcEngine` fails to initialize and has crashed because of specific Windows firewall + * settings. */ ERR_INIT_NET_ENGINE = 21, /** - * 22: The app uses too much of the system resource and the SDK - * fails to allocate any resource. + * 22: The SDK fails to allocate resources because your app uses too many system resources or system + * resources are insufficient. */ ERR_RESOURCE_LIMITED = 22, /** - * 101: The App ID is invalid, usually because the data format of the App ID is incorrect. - * - * Solution: Check the data format of your App ID. 
Ensure that you use the correct App ID to initialize the Agora service. + * 23: The function is prohibited. Please allow it in the console, or contact the Agora technical support. + * @technical preview + */ + ERR_FUNC_IS_PROHIBITED = 23, + /** + * 101: The specified App ID is invalid. Rejoin the channel with a valid App ID. */ ERR_INVALID_APP_ID = 101, /** - * 102: The specified channel name is invalid. Please try to rejoin the - * channel with a valid channel name. + * 102: The specified channel name is invalid. A possible reason is that the parameter's data type + * is incorrect. Rejoin the channel with a valid channel name. */ ERR_INVALID_CHANNEL_NAME = 102, /** - * 103: Fails to get server resources in the specified region. Please try to - * specify another region when calling \ref agora::rtc::IRtcEngine::initialize - * "initialize". + * 103: Fails to get server resources in the specified region. Try another region when initializing + * `IRtcEngine`. */ ERR_NO_SERVER_RESOURCES = 103, /** - * 109: The token has expired, usually for the following reasons: - * - Timeout for token authorization: Once a token is generated, you must use it to access the - * Agora service within 24 hours. Otherwise, the token times out and you can no longer use it. - * - The token privilege expires: To generate a token, you need to set a timestamp for the token - * privilege to expire. For example, If you set it as seven days, the token expires seven days after - * its usage. In that case, you can no longer access the Agora service. The users cannot make calls, - * or are kicked out of the channel. - * - * Solution: Regardless of whether token authorization times out or the token privilege expires, - * you need to generate a new token on your server, and try to join the channel. + * 109: The current token has expired. Apply for a new token on the server and call `renewToken`. 
*/ ERR_TOKEN_EXPIRED = 109, /** - * 110: The token is invalid, usually for one of the following reasons: - * - Did not provide a token when joining a channel in a situation where the project has enabled the - * App Certificate. - * - Tried to join a channel with a token in a situation where the project has not enabled the App - * Certificate. - * - The App ID, user ID and channel name that you use to generate the token on the server do not match - * those that you use when joining a channel. - * - * Solution: - * - Before joining a channel, check whether your project has enabled the App certificate. If yes, you - * must provide a token when joining a channel; if no, join a channel without a token. - * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that you - * use to generate the token is the same as the App ID that you use to initialize the Agora service, and - * the user ID and channel name that you use to join the channel. + * 110: Invalid token. Typical reasons include the following: + * - App Certificate is enabled in Agora Console, but the code still uses App ID for authentication. + * Once App Certificate is enabled for a project, you must use token-based authentication. + * - The `uid` used to generate the token is not the same as the `uid` used to join the channel. */ ERR_INVALID_TOKEN = 110, /** - * 111: The internet connection is interrupted. This applies to the Agora Web - * SDK only. + * 111: The network connection is interrupted. The SDK triggers this callback when it loses + * connection with the server for more than four seconds after the connection is established. */ ERR_CONNECTION_INTERRUPTED = 111, // only used in web sdk /** - * 112: The internet connection is lost. This applies to the Agora Web SDK - * only. + * 112: The network connection is lost. Occurs when the SDK cannot reconnect to Agora's edge server + * 10 seconds after its connection to the server is interrupted. 
*/ ERR_CONNECTION_LOST = 112, // only used in web sdk /** - * 113: The user is not in the channel when calling the - * \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()" method. + * 113: The user is not in the channel when calling the `sendStreamMessage` method. */ ERR_NOT_IN_CHANNEL = 113, /** - * 114: The data size is over 1024 bytes when the user calls the - * \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()" method. + * 114: The data size exceeds 1 KB when calling the `sendStreamMessage` method. */ ERR_SIZE_TOO_LARGE = 114, /** - * 115: The bitrate of the sent data exceeds the limit of 6 Kbps when the - * user calls the \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage()". + * 115: The data bitrate exceeds 6 KB/s when calling the `sendStreamMessage` method. */ ERR_BITRATE_LIMIT = 115, /** - * 116: Too many data streams (over 5) are created when the user - * calls the \ref agora::rtc::IRtcEngine::createDataStream "createDataStream()" method. + * 116: More than five data streams are created when calling the `createDataStream(int* streamId, + * const DataStreamConfig& config)` method. */ ERR_TOO_MANY_DATA_STREAMS = 116, /** - * 117: A timeout occurs for the data stream transmission. + * 117: The data stream transmission times out. */ ERR_STREAM_MESSAGE_TIMEOUT = 117, /** - * 119: Switching the user role fails. Please try to rejoin the channel. + * 119: Switching roles fails, try rejoining the channel. */ ERR_SET_CLIENT_ROLE_NOT_AUTHORIZED = 119, /** - * 120: MediaStream decryption fails. The user may have tried to join the channel with a wrong - * password. Check your settings or try rejoining the channel. + * 120: Media streams decryption fails. The user might use an incorrect password to join the + * channel. Check the entered password, or tell the user to try rejoining the channel. 
*/ ERR_DECRYPTION_FAILED = 120, /** @@ -651,18 +654,16 @@ enum ERROR_CODE_TYPE { */ ERR_INVALID_USER_ID = 121, /** - * 122: DataStream decryption fails. The peer may have tried to join the channel with a wrong - * password, or did't enable datastream encryption + * 122: Data streams decryption fails. The user might use an incorrect password to join the channel. + * Check the entered password, or tell the user to try rejoining the channel. */ ERR_DATASTREAM_DECRYPTION_FAILED = 122, /** - * 123: The app is banned by the server. + * 123: The user is banned from the server. */ ERR_CLIENT_IS_BANNED_BY_SERVER = 123, /** - * 130: Encryption is enabled when the user calls the - * \ref agora::rtc::IRtcEngine::addPublishStreamUrl "addPublishStreamUrl()" method - * (CDN live streaming does not support encrypted streams). + * 130: The SDK does not support pushing encrypted streams to CDN. */ ERR_ENCRYPTED_STREAM_NOT_ALLOWED_PUBLISH = 130, @@ -672,13 +673,14 @@ enum ERROR_CODE_TYPE { ERR_LICENSE_CREDENTIAL_INVALID = 131, /** - * 134: The user account is invalid, usually because the data format of the user account is incorrect. + * 134: The user account is invalid, possibly because it contains invalid parameters. */ ERR_INVALID_USER_ACCOUNT = 134, /** 157: The necessary dynamical library is not integrated. For example, if you call - * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do not integrate the dynamical - * library for the deep-learning noise reduction into your project, the SDK reports this error code. + * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do + * not integrate the dynamical library for the deep-learning noise reduction into your project, + * the SDK reports this error code. 
* */ ERR_MODULE_NOT_FOUND = 157, @@ -698,9 +700,49 @@ enum ERROR_CODE_TYPE { ERR_CERT_REQUEST = 168, // PcmSend Error num - ERR_PCMSEND_FORMAT = 200, // unsupport pcm format + /** + * 200: Unsupported PCM format. + */ + ERR_PCMSEND_FORMAT = 200, // unsupport pcm format + /** + * 201: Buffer overflow, the PCM send rate too quickly. + */ ERR_PCMSEND_BUFFEROVERFLOW = 201, // buffer overflow, the pcm send rate too quickly + /// @cond + // RDT error code: 250~270 + /** + * 250: The user does not exist + * @technical preview + */ + ERR_RDT_USER_NOT_EXIST = 250, + /** + * 251: The RDT state with the user is not ready + * @technical preview + */ + ERR_RDT_USER_NOT_READY = 251, + /** + * 252: The RDT data stream is blocked + * @technical preview + */ + ERR_RDT_DATA_BLOCKED = 252, + /** + * 253: The RDT CMD stream exceeds the limit (size <= 256 Bytes, freq <= 100/sec) + * @technical preview + */ + ERR_RDT_CMD_EXCEED_LIMIT = 253, + /** + * 254: The RDT DATA stream exceeds the limit (size <= 128 KBytes, speed <= 4 Mbps) + * @technical preview + */ + ERR_RDT_DATA_EXCEED_LIMIT = 254, + /** + * 255: The RDT encryption error. The SDK Failed to process RDT data encryption/decryption + * @technical preview + */ + ERR_RDT_ENCRYPTION = 255, + /// @endcond + /// @cond // signaling: 400~600 ERR_LOGIN_ALREADY_LOGIN = 428, @@ -708,43 +750,43 @@ enum ERROR_CODE_TYPE { /// @endcond // 1001~2000 /** - * 1001: Fails to load the media engine. + * 1001: The SDK fails to load the media engine. */ ERR_LOAD_MEDIA_ENGINE = 1001, /** - * 1005: Audio device module: A general error occurs in the Audio Device Module (no specified - * reason). Check if the audio device is used by another app, or try - * rejoining the channel. + * 1005: A general error occurs (no specified reason). Check whether the audio device is already in + * use by another app, or try rejoining the channel. */ ERR_ADM_GENERAL_ERROR = 1005, /** - * 1008: Audio Device Module: An error occurs in initializing the playback - * device. 
+ * 1008: An error occurs when initializing the playback device. Check whether the playback device is + * already in use by another app, or try rejoining the channel. */ ERR_ADM_INIT_PLAYOUT = 1008, /** - * 1009: Audio Device Module: An error occurs in starting the playback device. + * 1009: An error occurs when starting the playback device. Check the playback device. */ ERR_ADM_START_PLAYOUT = 1009, /** - * 1010: Audio Device Module: An error occurs in stopping the playback device. + * 1010: An error occurs when stopping the playback device. */ ERR_ADM_STOP_PLAYOUT = 1010, /** - * 1011: Audio Device Module: An error occurs in initializing the recording - * device. + * 1011: An error occurs when initializing the recording device. Check the recording device, or try + * rejoining the channel. */ ERR_ADM_INIT_RECORDING = 1011, /** - * 1012: Audio Device Module: An error occurs in starting the recording device. + * 1012: An error occurs when starting the recording device. Check the recording device. */ ERR_ADM_START_RECORDING = 1012, /** - * 1013: Audio Device Module: An error occurs in stopping the recording device. + * 1013: An error occurs when stopping the recording device. */ ERR_ADM_STOP_RECORDING = 1013, /** - * 1501: Video Device Module: The camera is not authorized. + * 1501: Permission to access the camera is not granted. Check whether permission to access the + * camera is granted. 
*/ ERR_VDM_CAMERA_NOT_AUTHORIZED = 1501, }; @@ -752,36 +794,36 @@ enum ERROR_CODE_TYPE { enum LICENSE_ERROR_TYPE { /** * 1: Invalid license - */ + */ LICENSE_ERR_INVALID = 1, /** * 2: License expired - */ + */ LICENSE_ERR_EXPIRE = 2, /** * 3: Exceed license minutes limit - */ + */ LICENSE_ERR_MINUTES_EXCEED = 3, /** * 4: License use in limited period - */ + */ LICENSE_ERR_LIMITED_PERIOD = 4, /** * 5: Same license used in different devices at the same time - */ + */ LICENSE_ERR_DIFF_DEVICES = 5, /** * 99: SDK internal error - */ + */ LICENSE_ERR_INTERNAL = 99, }; /** - * The operational permission of the SDK on the audio session. + * @brief The operation permissions of the SDK on the audio session. */ enum AUDIO_SESSION_OPERATION_RESTRICTION { /** - * 0: No restriction; the SDK can change the audio session. + * 0: No restriction, the SDK can change the audio session. */ AUDIO_SESSION_OPERATION_RESTRICTION_NONE = 0, /** @@ -793,13 +835,13 @@ enum AUDIO_SESSION_OPERATION_RESTRICTION { */ AUDIO_SESSION_OPERATION_RESTRICTION_CONFIGURE_SESSION = 1 << 1, /** - * 4: The SDK keeps the audio session active when the user leaves the - * channel, for example, to play an audio file in the background. + * 4: The SDK keeps the audio session active when the user leaves the channel, for example, to play + * an audio file in the background. */ AUDIO_SESSION_OPERATION_RESTRICTION_DEACTIVATE_SESSION = 1 << 2, /** - * 128: Completely restricts the operational permission of the SDK on the - * audio session; the SDK cannot change the audio session. + * 128: Completely restricts the operation permissions of the SDK on the audio session; the SDK + * cannot change the audio session. */ AUDIO_SESSION_OPERATION_RESTRICTION_ALL = 1 << 7, }; @@ -808,7 +850,7 @@ typedef const char* user_id_t; typedef void* view_t; /** - * The definition of the UserInfo struct. + * @brief The information of the user. 
*/ struct UserInfo { /** @@ -837,17 +879,18 @@ typedef util::AList UserList; namespace rtc { /** - * Reasons for a user being offline. + * @brief Reasons for a user being offline. */ enum USER_OFFLINE_REASON_TYPE { /** - * 0: The user leaves the current channel. + * 0: The user quits the call. */ USER_OFFLINE_QUIT = 0, /** - * 1: The SDK times out and the user drops offline because no data packet was received within a certain - * period of time. If a user quits the call and the message is not passed to the SDK (due to an - * unreliable channel), the SDK assumes that the user drops offline. + * 1: The SDK times out and the user drops offline because no data packet is received within a + * certain period of time. + * @note If the user quits the call and the message is not passed to the SDK (due to an unreliable + * channel), the SDK assumes the user dropped offline. */ USER_OFFLINE_DROPPED = 1, /** @@ -856,25 +899,43 @@ enum USER_OFFLINE_REASON_TYPE { USER_OFFLINE_BECOME_AUDIENCE = 2, }; +/** + * @brief The interface class. + */ enum INTERFACE_ID_TYPE { + /** + * 1: The `IAudioDeviceManager` interface class. + */ AGORA_IID_AUDIO_DEVICE_MANAGER = 1, + /** + * 2: The `IVideoDeviceManager` interface class. + */ AGORA_IID_VIDEO_DEVICE_MANAGER = 2, + /** + * This interface class is deprecated. + */ AGORA_IID_PARAMETER_ENGINE = 3, + /** + * 4: The `IMediaEngine` interface class. + */ AGORA_IID_MEDIA_ENGINE = 4, AGORA_IID_AUDIO_ENGINE = 5, AGORA_IID_VIDEO_ENGINE = 6, AGORA_IID_RTC_CONNECTION = 7, + /** + * This interface class is deprecated. + */ AGORA_IID_SIGNALING_ENGINE = 8, AGORA_IID_MEDIA_ENGINE_REGULATOR = 9, AGORA_IID_LOCAL_SPATIAL_AUDIO = 11, AGORA_IID_STATE_SYNC = 13, AGORA_IID_META_SERVICE = 14, AGORA_IID_MUSIC_CONTENT_CENTER = 15, - AGORA_IID_H265_TRANSCODER = 16, + AGORA_IID_H265_TRANSCODER = 16, }; /** - * The network quality types. + * @brief Network quality types. 
*/ enum QUALITY_TYPE { /** @@ -883,16 +944,15 @@ enum QUALITY_TYPE { */ QUALITY_UNKNOWN __deprecated = 0, /** - * 1: The quality is excellent. + * 1: The network quality is excellent. */ QUALITY_EXCELLENT = 1, /** - * 2: The quality is quite good, but the bitrate may be slightly - * lower than excellent. + * 2: The network quality is quite good, but the bitrate may be slightly lower than excellent. */ QUALITY_GOOD = 2, /** - * 3: Users can feel the communication slightly impaired. + * 3: Users can feel the communication is slightly impaired. */ QUALITY_POOR = 3, /** @@ -900,11 +960,11 @@ enum QUALITY_TYPE { */ QUALITY_BAD = 4, /** - * 5: Users can barely communicate. + * 5: The quality is so bad that users can barely communicate. */ QUALITY_VBAD = 5, /** - * 6: Users cannot communicate at all. + * 6: The network is down and users cannot communicate at all. */ QUALITY_DOWN = 6, /** @@ -912,7 +972,7 @@ enum QUALITY_TYPE { */ QUALITY_UNSUPPORTED = 7, /** - * 8: Detecting the network quality. + * 8: The last-mile network probe test is in progress. */ QUALITY_DETECTING = 8, }; @@ -936,29 +996,29 @@ enum FIT_MODE_TYPE { }; /** - * The rotation information. + * @brief The clockwise rotation of the video. */ enum VIDEO_ORIENTATION { /** - * 0: Rotate the video by 0 degree clockwise. + * 0: (Default) No rotation. */ VIDEO_ORIENTATION_0 = 0, /** - * 90: Rotate the video by 90 degrees clockwise. + * 90: 90 degrees. */ VIDEO_ORIENTATION_90 = 90, /** - * 180: Rotate the video by 180 degrees clockwise. + * 180: 180 degrees. */ VIDEO_ORIENTATION_180 = 180, /** - * 270: Rotate the video by 270 degrees clockwise. + * 270: 270 degrees. */ VIDEO_ORIENTATION_270 = 270 }; /** - * The video frame rate. + * @brief The video frame rate. */ enum FRAME_RATE { /** @@ -986,7 +1046,8 @@ enum FRAME_RATE { */ FRAME_RATE_FPS_30 = 30, /** - * 60: 60 fps. Applies to Windows and macOS only. + * 60: 60 fps. + * @note For Windows and macOS only. 
*/ FRAME_RATE_FPS_60 = 60, }; @@ -999,80 +1060,98 @@ enum FRAME_HEIGHT { FRAME_HEIGHT_540 = 540, }; - /** - * Types of the video frame. + * @brief The video frame type. */ enum VIDEO_FRAME_TYPE { - /** 0: A black frame. */ + /** + * 0: A black frame. + */ VIDEO_FRAME_TYPE_BLANK_FRAME = 0, - /** 3: Key frame. */ + /** + * 3: Key frame. + */ VIDEO_FRAME_TYPE_KEY_FRAME = 3, - /** 4: Delta frame. */ + /** + * 4: Delta frame. + */ VIDEO_FRAME_TYPE_DELTA_FRAME = 4, - /** 5: The B frame.*/ + /** + * 5: The B frame. + */ VIDEO_FRAME_TYPE_B_FRAME = 5, - /** 6: A discarded frame. */ + /** + * 6: A discarded frame. + */ VIDEO_FRAME_TYPE_DROPPABLE_FRAME = 6, - /** Unknown frame. */ + /** + * Unknown frame. + */ VIDEO_FRAME_TYPE_UNKNOW }; /** - * Video output orientation modes. + * @brief Video output orientation mode. */ enum ORIENTATION_MODE { /** - * 0: The output video always follows the orientation of the captured video. The receiver takes - * the rotational information passed on from the video encoder. This mode applies to scenarios - * where video orientation can be adjusted on the receiver: + * 0: (Default) The output video always follows the orientation of the captured video. The receiver + * takes the rotational information passed on from the video encoder. This mode applies to scenarios + * where video orientation can be adjusted on the receiver. * - If the captured video is in landscape mode, the output video is in landscape mode. * - If the captured video is in portrait mode, the output video is in portrait mode. */ ORIENTATION_MODE_ADAPTIVE = 0, /** - * 1: Landscape mode. In this mode, the SDK always outputs videos in landscape (horizontal) mode. - * If the captured video is in portrait mode, the video encoder crops it to fit the output. Applies - * to situations where the receiving end cannot process the rotational information. For example, - * CDN live streaming. + * 1: In this mode, the SDK always outputs videos in landscape (horizontal) mode. 
If the captured + * video is in portrait mode, the video encoder crops it to fit the output. Applies to situations + * where the receiving end cannot process the rotational information. For example, CDN live + * streaming. */ ORIENTATION_MODE_FIXED_LANDSCAPE = 1, /** - * 2: Portrait mode. In this mode, the SDK always outputs video in portrait (portrait) mode. If - * the captured video is in landscape mode, the video encoder crops it to fit the output. Applies - * to situations where the receiving end cannot process the rotational information. For example, - * CDN live streaming. + * 2: In this mode, the SDK always outputs video in portrait (portrait) mode. If the captured video + * is in landscape mode, the video encoder crops it to fit the output. Applies to situations where + * the receiving end cannot process the rotational information. For example, CDN live streaming. */ ORIENTATION_MODE_FIXED_PORTRAIT = 2, }; /** - * (For future use) Video degradation preferences under limited bandwidth. + * @brief Video degradation preferences when the bandwidth is a constraint. */ enum DEGRADATION_PREFERENCE { /** - * 0: (Default) Prefers to reduce the video frame rate while maintaining video quality during video + * -1: (Default) Automatic mode. The SDK will automatically select MAINTAIN_FRAMERATE, + * MAINTAIN_BALANCED or MAINTAIN_RESOLUTION based on the video scenario you set, in order to achieve + * the best overall quality of experience (QoE). + */ + MAINTAIN_AUTO = -1, + /** + * 0: Prefers to reduce the video frame rate while maintaining video resolution during video * encoding under limited bandwidth. This degradation preference is suitable for scenarios where * video quality is prioritized. - * @note In the COMMUNICATION channel profile, the resolution of the video sent may change, so - * remote users need to handle this issue. 
*/ MAINTAIN_QUALITY = 0, /** - * 1: Prefers to reduce the video quality while maintaining the video frame rate during video - * encoding under limited bandwidth. This degradation preference is suitable for scenarios where - * smoothness is prioritized and video quality is allowed to be reduced. + * 1: Reduces the video resolution while maintaining the video frame rate during video encoding + * under limited bandwidth. This degradation preference is suitable for scenarios where smoothness + * is prioritized and video quality is allowed to be reduced. */ MAINTAIN_FRAMERATE = 1, /** - * 2: Reduces the video frame rate and video quality simultaneously during video encoding under - * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_QUALITY and MAINTAIN_FRAMERATE, - * and this preference is suitable for scenarios where both smoothness and video quality are a - * priority. + * 2: Reduces the video frame rate and video resolution simultaneously during video encoding under + * limited bandwidth. The MAINTAIN_BALANCED has a lower reduction than MAINTAIN_QUALITY and + * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and video + * quality are a priority. + * @note The resolution of the video sent may change, so remote users need to handle this issue. See + * `onVideoSizeChanged`. */ MAINTAIN_BALANCED = 2, /** - * 3: Degrade framerate in order to maintain resolution. + * 3: Reduces the video frame rate while maintaining the video resolution during video encoding + * under limited bandwidth. This degradation preference is suitable for scenarios where video + * quality is prioritized. */ MAINTAIN_RESOLUTION = 3, /** @@ -1082,15 +1161,15 @@ enum DEGRADATION_PREFERENCE { }; /** - * The definition of the VideoDimensions struct. + * @brief The video dimension. */ struct VideoDimensions { /** - * The width of the video, in pixels. + * The width (pixels) of the video. 
 */ int width; /** - * The height of the video, in pixels. + * The height (pixels) of the video. */ int height; VideoDimensions() : width(640), height(480) {} @@ -1127,34 +1206,58 @@ const int DEFAULT_MIN_BITRATE = -1; const int DEFAULT_MIN_BITRATE_EQUAL_TO_TARGET_BITRATE = -2; /** - * screen sharing supported capability level. + * @brief The highest frame rate supported by the screen sharing device. */ enum SCREEN_CAPTURE_FRAMERATE_CAPABILITY { + /** + * 0: The device supports the frame rate of up to 15 fps. + */ SCREEN_CAPTURE_FRAMERATE_CAPABILITY_15_FPS = 0, + /** + * 1: The device supports the frame rate of up to 30 fps. + */ SCREEN_CAPTURE_FRAMERATE_CAPABILITY_30_FPS = 1, + /** + * 2: The device supports the frame rate of up to 60 fps. + */ SCREEN_CAPTURE_FRAMERATE_CAPABILITY_60_FPS = 2, }; /** - * Video codec capability levels. + * @brief The level of the codec capability. */ enum VIDEO_CODEC_CAPABILITY_LEVEL { - /** No specified level */ + /** + * -1: Unsupported video type. Currently, only H.264 and H.265 formats are supported. If the video + * is in another format, this value will be returned. + */ CODEC_CAPABILITY_LEVEL_UNSPECIFIED = -1, - /** Only provide basic support for the codec type */ + /** + * 5: Supports encoding and decoding videos up to 1080p and 30 fps. + */ CODEC_CAPABILITY_LEVEL_BASIC_SUPPORT = 5, - /** Can process 1080p video at a rate of approximately 30 fps. */ + /** + * 10: Supports encoding and decoding videos up to 1080p and 30 fps. + */ CODEC_CAPABILITY_LEVEL_1080P30FPS = 10, - /** Can process 1080p video at a rate of approximately 60 fps. */ + /** + * 20: Supports encoding and decoding videos up to 1080p and 60 fps. + */ CODEC_CAPABILITY_LEVEL_1080P60FPS = 20, - /** Can process 4k video at a rate of approximately 30 fps. */ + /** + * 30: Supports encoding and decoding videos up to 4K and 30 fps. + */ CODEC_CAPABILITY_LEVEL_4K60FPS = 30, }; /** - * The video codec types. + * @brief Video codec types. 
*/ enum VIDEO_CODEC_TYPE { + /** + * 0: (Default) Unspecified codec format. The SDK automatically matches the appropriate codec format + * based on the current video stream's resolution and device performance. + */ VIDEO_CODEC_NONE = 0, /** * 1: Standard VP8. @@ -1170,11 +1273,13 @@ enum VIDEO_CODEC_TYPE { VIDEO_CODEC_H265 = 3, /** * 6: Generic. This type is used for transmitting raw video data, such as encrypted video frames. - * The SDK returns this type of video frames in callbacks, and you need to decode and render the frames yourself. + * The SDK returns this type of video frames in callbacks, and you need to decode and render the + * frames yourself. */ VIDEO_CODEC_GENERIC = 6, /** * 7: Generic H264. + * @deprecated This codec type is deprecated. */ VIDEO_CODEC_GENERIC_H264 = 7, /** @@ -1193,23 +1298,26 @@ enum VIDEO_CODEC_TYPE { }; /** - * Camera focal length type. + * @brief The camera focal length types. + * + * @note This enumeration class applies to Android and iOS only. + * */ enum CAMERA_FOCAL_LENGTH_TYPE { /** - * By default, there are no wide-angle and ultra-wide-angle properties. + * 0: (Default) Standard lens. */ CAMERA_FOCAL_LENGTH_DEFAULT = 0, /** - * Lens with focal length from 24mm to 35mm. + * 1: Wide-angle lens. */ CAMERA_FOCAL_LENGTH_WIDE_ANGLE = 1, /** - * Lens with focal length of less than 24mm. + * 2: Ultra-wide-angle lens. */ CAMERA_FOCAL_LENGTH_ULTRA_WIDE = 2, /** - * Telephoto lens. + * 3: (For iOS only) Telephoto lens. */ CAMERA_FOCAL_LENGTH_TELEPHOTO = 3, }; @@ -1237,7 +1345,8 @@ struct SenderOptions { */ TCcMode ccMode; /** - * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE "VIDEO_CODEC_TYPE". + * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE + * "VIDEO_CODEC_TYPE". */ VIDEO_CODEC_TYPE codecType; @@ -1249,12 +1358,14 @@ struct SenderOptions { * - \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE": (Recommended) Standard bitrate. 
* - Communication profile: The encoding bitrate equals the base bitrate. * - Live-broadcast profile: The encoding bitrate is twice the base bitrate. - * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate stays the same + * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate + stays the same * regardless of the profile. * * The Communication profile prioritizes smoothness, while the Live Broadcast * profile prioritizes video quality (requiring a higher bitrate). Agora - * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or simply to + * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or + simply to * address this difference. * * The following table lists the recommended video encoder configurations, @@ -1262,7 +1373,8 @@ struct SenderOptions { * bitrate based on this table. If the bitrate you set is beyond the proper * range, the SDK automatically sets it to within the range. - | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live Bitrate (Kbps, for Live Broadcast)| + | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live + Bitrate (Kbps, for Live Broadcast)| |------------------------|------------------|----------------------------------------|----------------------------------------| | 160 × 120 | 15 | 65 | 130 | | 120 × 120 | 15 | 50 | 100 | @@ -1299,14 +1411,11 @@ struct SenderOptions { */ int targetBitrate; - SenderOptions() - : ccMode(CC_ENABLED), - codecType(VIDEO_CODEC_H265), - targetBitrate(6500) {} + SenderOptions() : ccMode(CC_ENABLED), codecType(VIDEO_CODEC_H265), targetBitrate(6500) {} }; /** - * Audio codec types. + * @brief The codec type of audio. */ enum AUDIO_CODEC_TYPE { /** @@ -1330,11 +1439,11 @@ enum AUDIO_CODEC_TYPE { /** 7: AAC. */ // AUDIO_CODEC_AAC = 7, /** - * 8: AAC LC. + * 8: LC-AAC. 
*/ AUDIO_CODEC_AACLC = 8, /** - * 9: HE AAC. + * 9: HE-AAC. */ AUDIO_CODEC_HEAAC = 9, /** @@ -1356,89 +1465,87 @@ enum AUDIO_CODEC_TYPE { }; /** - * Audio encoding types of the audio encoded frame observer. + * @brief Audio encoding type. */ enum AUDIO_ENCODING_TYPE { /** - * AAC encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10 - * minutes is approximately 1.2 MB after encoding. + * 0x010101: AAC encoding format, 16000 Hz sampling rate, bass quality. A file with an audio + * duration of 10 minutes is approximately 1.2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_16000_LOW = 0x010101, /** - * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * 0x010102: AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_16000_MEDIUM = 0x010102, /** - * AAC encoding format, 32000 Hz sampling rate, bass quality. A file with an audio duration of 10 - * minutes is approximately 1.2 MB after encoding. + * 0x010201: AAC encoding format, 32000 Hz sampling rate, bass quality. A file with an audio + * duration of 10 minutes is approximately 1.2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_LOW = 0x010201, /** - * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * 0x010202: AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_MEDIUM = 0x010202, /** - * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * 0x010203: AAC encoding format, 32000 Hz sampling rate, high sound quality. 
A file with an audio + * duration of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_HIGH = 0x010203, /** - * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * 0x010302: AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_48000_MEDIUM = 0x010302, /** - * AAC encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration - * of 10 minutes is approximately 3.5 MB after encoding. + * 0x010303: AAC encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio + * duration of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_48000_HIGH = 0x010303, /** - * OPUS encoding format, 16000 Hz sampling rate, bass quality. A file with an audio duration of 10 - * minutes is approximately 2 MB after encoding. + * 0x020101: OPUS encoding format, 16000 Hz sampling rate, bass quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_16000_LOW = 0x020101, /** - * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * 0x020102: OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an + * audio duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_16000_MEDIUM = 0x020102, /** - * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * 0x020302: OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an + * audio duration of 10 minutes is approximately 2 MB after encoding. 
*/ AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM = 0x020302, /** - * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * 0x020303: OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio + * duration of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_48000_HIGH = 0x020303, }; /** - * The adaptation mode of the watermark. + * @brief The adaptation mode of the watermark. */ enum WATERMARK_FIT_MODE { /** - * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in #WatermarkOptions. - * The settings in `WatermarkRatio` are invalid. + * 0: Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in + * `WatermarkOptions`. The settings in `WatermarkRatio` are invalid. */ - FIT_MODE_COVER_POSITION, + FIT_MODE_COVER_POSITION = 0, /** - * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and `positionInPortraitMode` - * in `WatermarkOptions` are invalid. + * 1: Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and + * `positionInPortraitMode` in `WatermarkOptions` are invalid. */ - FIT_MODE_USE_IMAGE_RATIO + FIT_MODE_USE_IMAGE_RATIO = 1, }; /** * The advanced settings of encoded audio frame. */ struct EncodedAudioFrameAdvancedSettings { - EncodedAudioFrameAdvancedSettings() - : speech(true), - sendEvenIfEmpty(true) {} + EncodedAudioFrameAdvancedSettings() : speech(true), sendEvenIfEmpty(true) {} /** * Determines whether the audio source is speech. @@ -1455,43 +1562,41 @@ struct EncodedAudioFrameAdvancedSettings { }; /** - * The definition of the EncodedAudioFrameInfo struct. + * @brief Audio information after encoding. 
*/ struct EncodedAudioFrameInfo { EncodedAudioFrameInfo() - : codec(AUDIO_CODEC_AACLC), - sampleRateHz(0), - samplesPerChannel(0), - numberOfChannels(0), - captureTimeMs(0) {} + : codec(AUDIO_CODEC_AACLC), + sampleRateHz(0), + samplesPerChannel(0), + numberOfChannels(0), + captureTimeMs(0) {} EncodedAudioFrameInfo(const EncodedAudioFrameInfo& rhs) - : codec(rhs.codec), - sampleRateHz(rhs.sampleRateHz), - samplesPerChannel(rhs.samplesPerChannel), - numberOfChannels(rhs.numberOfChannels), - advancedSettings(rhs.advancedSettings), - captureTimeMs(rhs.captureTimeMs) {} + : codec(rhs.codec), + sampleRateHz(rhs.sampleRateHz), + samplesPerChannel(rhs.samplesPerChannel), + numberOfChannels(rhs.numberOfChannels), + advancedSettings(rhs.advancedSettings), + captureTimeMs(rhs.captureTimeMs) {} /** - * The audio codec: #AUDIO_CODEC_TYPE. + * Audio Codec type: `AUDIO_CODEC_TYPE`. */ AUDIO_CODEC_TYPE codec; /** - * The sample rate (Hz) of the audio frame. + * Audio sample rate (Hz). */ int sampleRateHz; /** - * The number of samples per audio channel. - * - * If this value is not set, it is 1024 for AAC, or 960 for OPUS by default. + * The number of audio samples per channel. */ int samplesPerChannel; /** - * The number of audio channels of the audio frame. + * The number of audio channels. */ int numberOfChannels; /** - * The advanced settings of the audio frame. + * This function is currently not supported. */ EncodedAudioFrameAdvancedSettings advancedSettings; @@ -1504,14 +1609,15 @@ struct EncodedAudioFrameInfo { * The definition of the AudioPcmDataInfo struct. 
 */ struct AudioPcmDataInfo { - AudioPcmDataInfo() : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} + AudioPcmDataInfo() + : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} AudioPcmDataInfo(const AudioPcmDataInfo& rhs) - : samplesPerChannel(rhs.samplesPerChannel), - channelNum(rhs.channelNum), - samplesOut(rhs.samplesOut), - elapsedTimeMs(rhs.elapsedTimeMs), - ntpTimeMs(rhs.ntpTimeMs) {} + : samplesPerChannel(rhs.samplesPerChannel), + channelNum(rhs.channelNum), + samplesOut(rhs.samplesOut), + elapsedTimeMs(rhs.elapsedTimeMs), + ntpTimeMs(rhs.ntpTimeMs) {} /** * The sample count of the PCM data that you expect. @@ -1545,111 +1651,116 @@ enum H264PacketizeMode { /** * Single NAL unit mode. See RFC 6184. */ - SingleNalUnit, // Mode 0 - only single NALU allowed + SingleNalUnit, // Mode 0 - only single NALU allowed }; /** - * Video stream types. + * @brief The type of video streams. */ enum VIDEO_STREAM_TYPE { /** - * 0: The high-quality video stream, which has the highest resolution and bitrate. + * 0: High-quality video stream, that is, a video stream with the highest resolution and bitrate. */ VIDEO_STREAM_HIGH = 0, /** - * 1: The low-quality video stream, which has the lowest resolution and bitrate. + * 1: Low-quality video stream, that is, a video stream with the lowest resolution and bitrate. */ VIDEO_STREAM_LOW = 1, /** - * 4: The video stream of layer_1, which has a lower resolution and bitrate than VIDEO_STREAM_HIGH. + * 4: Video stream layer 1. The resolution of this quality level is only lower than that of + * VIDEO_STREAM_HIGH. */ VIDEO_STREAM_LAYER_1 = 4, /** - * 5: The video stream of layer_2, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_1. + * 5: Video stream layer 2. The resolution of this quality level is only lower than that of + * VIDEO_STREAM_LAYER_1. 
*/ VIDEO_STREAM_LAYER_2 = 5, /** - * 6: The video stream of layer_3, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_2. + * 6: Video stream layer 3. The resolution of this quality level is only lower than that of + * VIDEO_STREAM_LAYER_2. */ VIDEO_STREAM_LAYER_3 = 6, /** - * 7: The video stream of layer_4, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_3. + * 7: Video stream layer 4. The resolution of this quality level is only lower than that of + * VIDEO_STREAM_LAYER_3. */ VIDEO_STREAM_LAYER_4 = 7, /** - * 8: The video stream of layer_5, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_4. + * 8: Video stream layer 5. The resolution of this quality level is only lower than that of + * VIDEO_STREAM_LAYER_4. */ VIDEO_STREAM_LAYER_5 = 8, /** - * 9: The video stream of layer_6, which has a lower resolution and bitrate than VIDEO_STREAM_LAYER_5. + * 9: Video stream layer 6. The resolution of this quality level is only lower than that of + * VIDEO_STREAM_LAYER_5. */ VIDEO_STREAM_LAYER_6 = 9, }; +/** + * @brief Video subscription options. + */ struct VideoSubscriptionOptions { - /** - * The type of the video stream to subscribe to. - * - * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality - * video stream. - */ - Optional type; - /** - * Whether to subscribe to encoded video data only: - * - `true`: Subscribe to encoded video data only. - * - `false`: (Default) Subscribe to decoded video data. - */ - Optional encodedFrameOnly; + /** + * The video stream type that you want to subscribe to. The default value is VIDEO_STREAM_HIGH, + * indicating that the high-quality video streams are subscribed. See `VIDEO_STREAM_TYPE`. + */ + Optional type; + /** + * Whether to subscribe to encoded video frames only: + * - `true`: Subscribe to the encoded video data (structured data) only; the SDK does not decode or + * render raw video data. 
+ * - `false`: (Default) Subscribe to both raw video data and encoded video data. + */ + Optional encodedFrameOnly; - VideoSubscriptionOptions() {} + VideoSubscriptionOptions() {} }; - -/** The maximum length of the user account. +/** + * @brief The maximum length of the user account. */ -enum MAX_USER_ACCOUNT_LENGTH_TYPE -{ - /** The maximum length of the user account is 256 bytes. +enum MAX_USER_ACCOUNT_LENGTH_TYPE { + /** + * The maximum length of the user account is 256 bytes. */ MAX_USER_ACCOUNT_LENGTH = 256 }; /** - * The definition of the EncodedVideoFrameInfo struct, which contains the information of the external encoded video frame. + * @brief Information about externally encoded video frames. */ struct EncodedVideoFrameInfo { EncodedVideoFrameInfo() - : uid(0), - codecType(VIDEO_CODEC_H264), - width(0), - height(0), - framesPerSecond(0), - frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), - rotation(VIDEO_ORIENTATION_0), - trackId(0), - captureTimeMs(0), - decodeTimeMs(0), - streamType(VIDEO_STREAM_HIGH), - presentationMs(-1) {} + : codecType(VIDEO_CODEC_H264), + width(0), + height(0), + framesPerSecond(0), + frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), + rotation(VIDEO_ORIENTATION_0), + trackId(0), + captureTimeMs(0), + decodeTimeMs(0), + streamType(VIDEO_STREAM_HIGH), + presentationMs(-1) {} EncodedVideoFrameInfo(const EncodedVideoFrameInfo& rhs) - : uid(rhs.uid), - codecType(rhs.codecType), - width(rhs.width), - height(rhs.height), - framesPerSecond(rhs.framesPerSecond), - frameType(rhs.frameType), - rotation(rhs.rotation), - trackId(rhs.trackId), - captureTimeMs(rhs.captureTimeMs), - decodeTimeMs(rhs.decodeTimeMs), - streamType(rhs.streamType), - presentationMs(rhs.presentationMs) {} + : codecType(rhs.codecType), + width(rhs.width), + height(rhs.height), + framesPerSecond(rhs.framesPerSecond), + frameType(rhs.frameType), + rotation(rhs.rotation), + trackId(rhs.trackId), + captureTimeMs(rhs.captureTimeMs), + decodeTimeMs(rhs.decodeTimeMs), + 
streamType(rhs.streamType), + presentationMs(rhs.presentationMs) {} EncodedVideoFrameInfo& operator=(const EncodedVideoFrameInfo& rhs) { if (this == &rhs) return *this; - uid = rhs.uid; codecType = rhs.codecType; width = rhs.width; height = rhs.height; @@ -1665,50 +1776,47 @@ struct EncodedVideoFrameInfo { } /** - * ID of the user that pushes the the external encoded video frame.. - */ - uid_t uid; - /** - * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is `VIDEO_CODEC_H265 (3)`. + * The codec type of the local video stream. See `VIDEO_CODEC_TYPE`. The default value is + * `VIDEO_CODEC_H264 (2)`. */ VIDEO_CODEC_TYPE codecType; /** - * The width (px) of the video frame. + * Width (pixel) of the video frame. */ int width; /** - * The height (px) of the video frame. + * Height (pixel) of the video frame. */ int height; /** * The number of video frames per second. - * When this parameter is not 0, you can use it to calculate the Unix timestamp of the external + * When this parameter is not `0`, you can use it to calculate the Unix timestamp of externally * encoded video frames. */ int framesPerSecond; /** - * The video frame type: #VIDEO_FRAME_TYPE. + * The video frame type. See `VIDEO_FRAME_TYPE`. */ VIDEO_FRAME_TYPE frameType; /** - * The rotation information of the video frame: #VIDEO_ORIENTATION. + * The rotation information of the video frame. See `VIDEO_ORIENTATION`. */ VIDEO_ORIENTATION rotation; /** - * The track ID of the video frame. + * Reserved for future use. */ int trackId; // This can be reserved for multiple video tracks, we need to create different ssrc // and additional payload for later implementation. /** - * This is a input parameter which means the timestamp for capturing the video. + * The Unix timestamp (ms) for capturing the external encoded video frames. */ int64_t captureTimeMs; /** - * The timestamp for decoding the video. + * The Unix timestamp (ms) for decoding the external encoded video frames. 
*/ int64_t decodeTimeMs; /** - * The stream type of video frame. + * The type of video streams. See `VIDEO_STREAM_TYPE`. */ VIDEO_STREAM_TYPE streamType; @@ -1717,60 +1825,71 @@ struct EncodedVideoFrameInfo { }; /** -* Video compression preference. -*/ + * @brief Compression preference for video encoding. + */ enum COMPRESSION_PREFERENCE { /** - * (Default) Low latency is preferred, usually used in real-time communication where low latency is the number one priority. - */ - PREFER_LOW_LATENCY, + * -1: (Default) Automatic mode. The SDK will automatically select PREFER_LOW_LATENCY or + * PREFER_QUALITY based on the video scenario you set to achieve the best user experience. + */ + PREFER_COMPRESSION_AUTO = -1, /** - * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps - */ - PREFER_QUALITY, + * 0: Low latency preference. The SDK compresses video frames to reduce latency. This preference is + * suitable for scenarios where smoothness is prioritized and reduced video quality is acceptable. + */ + PREFER_LOW_LATENCY = 0, + /** + * 1: High quality preference. The SDK compresses video frames while maintaining video quality. This + * preference is suitable for scenarios where video quality is prioritized. + */ + PREFER_QUALITY = 1, }; /** -* The video encoder type preference. -*/ + * @brief Video encoder preference. + */ enum ENCODING_PREFERENCE { /** - *Default . + * -1: Adaptive preference. The SDK automatically selects the optimal encoding type for encoding + * based on factors such as platform and device type. */ PREFER_AUTO = -1, /** - * Software encoding. - */ + * 0: Software coding preference. The SDK prefers software encoders for video encoding. + */ PREFER_SOFTWARE = 0, /** - * Hardware encoding + * 1: Hardware encoding preference. The SDK prefers a hardware encoder for video encoding. 
When the + * device does not support hardware encoding, the SDK automatically uses software encoding and + * reports the currently used video encoder type through `hwEncoderAccelerating` in the + * `onLocalVideoStats` callback. */ PREFER_HARDWARE = 1, }; /** - * The definition of the AdvanceOptions struct. + * @brief Advanced options for video encoding. */ struct AdvanceOptions { - /** - * The video encoder type preference.. + * Video encoder preference. See `ENCODING_PREFERENCE`. */ ENCODING_PREFERENCE encodingPreference; /** - * Video compression preference. - */ + * Compression preference for video encoding. See `COMPRESSION_PREFERENCE`. + */ COMPRESSION_PREFERENCE compressionPreference; /** - * Whether to encode and send the alpha data to the remote when alpha data is present. - * The default value is false. - */ + * Whether to encode and send the Alpha data present in the video frame to the remote end: + * - `true`: Encode and send Alpha data. + * - `false`: (Default) Do not encode and send Alpha data. + */ bool encodeAlpha; AdvanceOptions() : encodingPreference(PREFER_AUTO), - compressionPreference(PREFER_LOW_LATENCY), + compressionPreference(PREFER_COMPRESSION_AUTO), encodeAlpha(false) {} AdvanceOptions(ENCODING_PREFERENCE encoding_preference, @@ -1785,23 +1904,25 @@ struct AdvanceOptions { compressionPreference == rhs.compressionPreference && encodeAlpha == rhs.encodeAlpha; } - }; /** - * Video mirror mode types. + * @brief Video mirror mode. */ enum VIDEO_MIRROR_MODE_TYPE { /** - * 0: The mirror mode determined by the SDK. + * 0: The SDK determines the mirror mode. + * - For the mirror mode of the local video view: If you use a front camera, the SDK enables the + * mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default. + * - For the remote user: The mirror mode is disabled by default. */ VIDEO_MIRROR_MODE_AUTO = 0, /** - * 1: Enable the mirror mode. + * 1: Enable mirror mode. 
 */ VIDEO_MIRROR_MODE_ENABLED = 1, /** - * 2: Disable the mirror mode. + * 2: Disable mirror mode. */ VIDEO_MIRROR_MODE_DISABLED = 2, }; @@ -1818,158 +1939,177 @@ enum CAMERA_FORMAT_TYPE { }; #endif -/** Supported codec type bit mask. */ +enum VIDEO_MODULE_TYPE { + /** Video capture module */ + VIDEO_MODULE_CAPTURER = 0, + /** Video software encoder module */ + VIDEO_MODULE_SOFTWARE_ENCODER = 1, + /** Video hardware encoder module */ + VIDEO_MODULE_HARDWARE_ENCODER = 2, + /** Video software decoder module */ + VIDEO_MODULE_SOFTWARE_DECODER = 3, + /** Video hardware decoder module */ + VIDEO_MODULE_HARDWARE_DECODER = 4, + /** Video render module */ + VIDEO_MODULE_RENDERER = 5, +}; + +enum HDR_CAPABILITY { + /** The result of static check is not reliable, by default*/ + HDR_CAPABILITY_UNKNOWN = -1, + /** The module you query doesn't support HDR */ + HDR_CAPABILITY_UNSUPPORTED = 0, + /** The module you query supports HDR */ + HDR_CAPABILITY_SUPPORTED = 1, +}; + +/** + * @brief The bit mask of the codec type. + */ enum CODEC_CAP_MASK { - /** 0: No codec support. */ + /** + * (0): The device does not support encoding or decoding. + */ CODEC_CAP_MASK_NONE = 0, - /** bit 1: Hardware decoder support flag. */ + /** + * (1 << 0): The device supports hardware decoding. + */ CODEC_CAP_MASK_HW_DEC = 1 << 0, - /** bit 2: Hardware encoder support flag. */ + /** + * (1 << 1): The device supports hardware encoding. + */ CODEC_CAP_MASK_HW_ENC = 1 << 1, - /** bit 3: Software decoder support flag. */ + /** + * (1 << 2): The device supports software decoding. + */ CODEC_CAP_MASK_SW_DEC = 1 << 2, - /** bit 4: Software encoder support flag. */ + /** + * (1 << 3): The device supports software encoding. + */ CODEC_CAP_MASK_SW_ENC = 1 << 3, }; +/** + * @brief The level of the codec capability. + */ struct CodecCapLevels { + /** + * Hardware decoding capability level, which represents the device's ability to perform hardware + * decoding on videos of different quality. 
See `VIDEO_CODEC_CAPABILITY_LEVEL`. + */ VIDEO_CODEC_CAPABILITY_LEVEL hwDecodingLevel; + /** + * Software decoding capability level, which represents the device's ability to perform software + * decoding on videos of different quality. See `VIDEO_CODEC_CAPABILITY_LEVEL`. + */ VIDEO_CODEC_CAPABILITY_LEVEL swDecodingLevel; - CodecCapLevels(): hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} + CodecCapLevels() + : hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), + swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} }; -/** The codec support information. */ +/** + * @brief The codec capability of the SDK. + */ struct CodecCapInfo { - /** The codec type: #VIDEO_CODEC_TYPE. */ + /** + * The video codec types. See `VIDEO_CODEC_TYPE`. + */ VIDEO_CODEC_TYPE codecType; - /** The codec support flag. */ + /** + * Bit mask of the codec types in SDK. See `CODEC_CAP_MASK`. + */ int codecCapMask; - /** The codec capability level, estimated based on the device hardware.*/ + /** + * Codec capability of the SDK. See `CodecCapLevels`. + */ CodecCapLevels codecLevels; - CodecCapInfo(): codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} + CodecCapInfo() : codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} }; -/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types. */ +/** + * @brief Focal length information supported by the camera, including the camera direction and focal + * length type. + * + * @note This enumeration class applies to Android and iOS only. + * + */ struct FocalLengthInfo { - /** The camera direction. */ + /** + * The camera direction. See `CAMERA_DIRECTION`. + */ int cameraDirection; - /** Camera focal segment type. */ + /** + * The focal length type. See `CAMERA_FOCAL_LENGTH_TYPE`. + */ CAMERA_FOCAL_LENGTH_TYPE focalLengthType; }; /** - * The definition of the VideoEncoderConfiguration struct. + * @brief Video encoder configurations. 
*/ struct VideoEncoderConfiguration { /** - * The video encoder code type: #VIDEO_CODEC_TYPE. + * The codec type of the local video stream. See `VIDEO_CODEC_TYPE`. */ VIDEO_CODEC_TYPE codecType; /** - * The video dimension: VideoDimensions. + * The dimensions of the encoded video (px). See `VideoDimensions`. This parameter measures the + * video encoding quality in the format of length × width. The default value is 960 × 540. You can + * set a custom value. */ VideoDimensions dimensions; /** - * The frame rate of the video. You can set it manually, or choose one from #FRAME_RATE. + * The frame rate (fps) of the encoding video frame. The default value is 15. See `FRAME_RATE`. */ int frameRate; /** - * The bitrate (Kbps) of the video. - * - * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond the - * proper range, the SDK automatically adjusts it to a value within the range. You can also choose - * from the following options: - * - * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ between - * the Live Broadcast and Communication profiles: - * - In the Communication profile, the video bitrate is the same as the base bitrate. - * - In the Live Broadcast profile, the video bitrate is twice the base bitrate. - * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the bitrate - * stays the same regardless of the profile. If you choose this mode for the Live Broadcast profile, - * the video frame rate may be lower than the set value. - * - * Agora uses different video codecs for different profiles to optimize the user experience. For example, - * the communication profile prioritizes the smoothness while the live-broadcast profile prioritizes the - * video quality (a higher bitrate). Therefore, We recommend setting this parameter as #STANDARD_BITRATE. 
- * - * | Resolution | Frame Rate (fps) | Base Bitrate (Kbps) | Live Bitrate (Kbps)| - * |------------------------|------------------|---------------------|--------------------| - * | 160 * 120 | 15 | 65 | 110 | - * | 120 * 120 | 15 | 50 | 90 | - * | 320 * 180 | 15 | 140 | 240 | - * | 180 * 180 | 15 | 100 | 160 | - * | 240 * 180 | 15 | 120 | 200 | - * | 320 * 240 | 15 | 200 | 300 | - * | 240 * 240 | 15 | 140 | 240 | - * | 424 * 240 | 15 | 220 | 370 | - * | 640 * 360 | 15 | 400 | 680 | - * | 360 * 360 | 15 | 260 | 440 | - * | 640 * 360 | 30 | 600 | 1030 | - * | 360 * 360 | 30 | 400 | 670 | - * | 480 * 360 | 15 | 320 | 550 | - * | 480 * 360 | 30 | 490 | 830 | - * | 640 * 480 | 15 | 500 | 750 | - * | 480 * 480 | 15 | 400 | 680 | - * | 640 * 480 | 30 | 750 | 1130 | - * | 480 * 480 | 30 | 600 | 1030 | - * | 848 * 480 | 15 | 610 | 920 | - * | 848 * 480 | 30 | 930 | 1400 | - * | 640 * 480 | 10 | 400 | 600 | - * | 960 * 540 | 15 | 750 | 1100 | - * | 960 * 540 | 30 | 1110 | 1670 | - * | 1280 * 720 | 15 | 1130 | 1600 | - * | 1280 * 720 | 30 | 1710 | 2400 | - * | 960 * 720 | 15 | 910 | 1280 | - * | 960 * 720 | 30 | 1380 | 2000 | - * | 1920 * 1080 | 15 | 2080 | 2500 | - * | 1920 * 1080 | 30 | 3150 | 3780 | - * | 1920 * 1080 | 60 | 4780 | 5730 | - * | 2560 * 1440 | 30 | 4850 | 4850 | - * | 2560 * 1440 | 60 | 7350 | 7350 | - * | 3840 * 2160 | 30 | 8910 | 8910 | - * | 3840 * 2160 | 60 | 13500 | 13500 | + * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the + * default value `STANDARD_BITRATE` is sufficient. The SDK automatically matches the most suitable + * bitrate based on the video resolution and frame rate you have set. For the correspondence between + * video resolution and frame rate, see `Video profile`. + * - STANDARD_BITRATE (0): (Recommended) Standard bitrate mode. + * - COMPATIBLE_BITRATE (-1): Adaptive bitrate mode. In general, Agora suggests that you do not use + * this value. 
*/ int bitrate; /** - * The minimum encoding bitrate (Kbps). - * - * The Agora SDK automatically adjusts the encoding bitrate to adapt to the - * network conditions. - * - * Using a value greater than the default value forces the video encoder to - * output high-quality images but may cause more packet loss and hence - * sacrifice the smoothness of the video transmission. That said, unless you - * have special requirements for image quality, Agora does not recommend - * changing this value. - * - * @note - * This parameter applies to the live-broadcast profile only. + * The minimum encoding bitrate (Kbps) of the video. + * The SDK automatically adjusts the encoding bitrate to adapt to the network conditions. Using a + * value greater than the default value forces the video encoder to output high-quality images but + * may cause more packet loss and sacrifice the smoothness of the video transmission. Unless you + * have special requirements for image quality, Agora does not recommend changing this value. + * @note This parameter only applies to the interactive streaming profile. */ int minBitrate; /** - * The video orientation mode: #ORIENTATION_MODE. + * The orientation mode of the encoded video. See `ORIENTATION_MODE`. */ ORIENTATION_MODE orientationMode; /** - * The video degradation preference under limited bandwidth: #DEGRADATION_PREFERENCE. + * Video degradation preference under limited bandwidth. See `DEGRADATION_PREFERENCE`. + * @note When this parameter is set to MAINTAIN_FRAMERATE (1) or MAINTAIN_BALANCED (2), + * `orientationMode` needs to be set to ORIENTATION_MODE_ADAPTIVE (0) at the same time, otherwise + * the setting will not take effect. */ DEGRADATION_PREFERENCE degradationPreference; /** - * The mirror mode is disabled by default - * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored before encoding. + * Sets the mirror mode of the published local video stream. 
It only affects the video that the + * remote user sees. See `VIDEO_MIRROR_MODE_TYPE`. + * @note By default, the video is not mirrored. */ VIDEO_MIRROR_MODE_TYPE mirrorMode; /** - * The advanced options for the video encoder configuration. See AdvanceOptions. + * Advanced options for video encoding. See `AdvanceOptions`. */ AdvanceOptions advanceOptions; @@ -1980,9 +2120,9 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(int width, int height, int f, int b, ORIENTATION_MODE m, VIDEO_MIRROR_MODE_TYPE mirror = VIDEO_MIRROR_MODE_DISABLED) : codecType(VIDEO_CODEC_NONE), dimensions(width, height), @@ -1990,19 +2130,19 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(const VideoEncoderConfiguration& config) - : codecType(config.codecType), - dimensions(config.dimensions), - frameRate(config.frameRate), - bitrate(config.bitrate), - minBitrate(config.minBitrate), - orientationMode(config.orientationMode), - degradationPreference(config.degradationPreference), - mirrorMode(config.mirrorMode), - advanceOptions(config.advanceOptions) {} + : codecType(config.codecType), + dimensions(config.dimensions), + frameRate(config.frameRate), + bitrate(config.bitrate), + minBitrate(config.minBitrate), + orientationMode(config.orientationMode), + degradationPreference(config.degradationPreference), + mirrorMode(config.mirrorMode), + advanceOptions(config.advanceOptions) {} 
VideoEncoderConfiguration() : codecType(VIDEO_CODEC_NONE), dimensions(FRAME_WIDTH_960, FRAME_HEIGHT_540), @@ -2010,9 +2150,9 @@ struct VideoEncoderConfiguration { bitrate(STANDARD_BITRATE), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(ORIENTATION_MODE_ADAPTIVE), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(VIDEO_MIRROR_MODE_DISABLED), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration& operator=(const VideoEncoderConfiguration& rhs) { if (this == &rhs) return *this; @@ -2030,63 +2170,77 @@ struct VideoEncoderConfiguration { }; /** - * The configurations for the data stream. + * @brief The configurations for the data stream. + * + * @details + * The following table shows the SDK behaviors under different parameter settings: + * | `syncWithAudio` | `ordered` | SDK behaviors | + * | --------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + * | `false` | `false` | The SDK triggers the `onStreamMessage` callback immediately after the receiver receives a data packet. | + * | `true` | `false` | If the data packet delay is within the audio delay, the SDK triggers the onStreamMessage callback when the synchronized audio packet is played out. If the data packet delay exceeds the audio delay, the SDK triggers the onStreamMessage callback as soon as the data packet is received. | + * | `false` | `true` | If the delay of a data packet is less than five seconds, the SDK corrects the order of the data packet. If the delay of a data packet exceeds five seconds, the SDK discards the data packet. 
| + * | `true` | `true` | If the delay of the data packet is within the range of the audio delay, the SDK corrects the order of the data packet. If the delay of a data packet exceeds the audio delay, the SDK discards this data packet. | + * */ struct DataStreamConfig { /** * Whether to synchronize the data packet with the published audio packet. - * - `true`: Synchronize the data packet with the audio packet. - * - `false`: Do not synchronize the data packet with the audio packet. - * + * - `true`: Synchronize the data packet with the audio packet. This setting is suitable for special + * scenarios such as lyrics synchronization. + * - `false`: Do not synchronize the data packet with the audio packet. This setting is suitable for + * scenarios where data packets need to arrive at the receiving end immediately. * When you set the data packet to synchronize with the audio, then if the data packet delay is * within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized - * audio packet is played out. Do not set this parameter as true if you need the receiver to receive - * the data packet immediately. Agora recommends that you set this parameter to `true` only when you - * need to implement specific functions, for example lyric synchronization. + * audio packet is played out. */ bool syncWithAudio; /** * Whether the SDK guarantees that the receiver receives the data in the sent order. * - `true`: Guarantee that the receiver receives the data in the sent order. * - `false`: Do not guarantee that the receiver receives the data in the sent order. - * - * Do not set this parameter as `true` if you need the receiver to receive the data packet immediately. + * Do not set this parameter as `true` if you need the receiver to receive the data packet + * immediately. */ bool ordered; }; /** - * The definition of SIMULCAST_STREAM_MODE + * @brief The mode in which the video stream is sent. 
*/ enum SIMULCAST_STREAM_MODE { - /* - * disable simulcast stream until receive request for enable simulcast stream by other broadcaster - */ + /** + * -1: By default, do not send the low-quality video stream until a subscription request for the + * low-quality video stream is received from the receiving end, then automatically start sending + * low-quality video stream. + */ AUTO_SIMULCAST_STREAM = -1, - /* - * disable simulcast stream - */ + /** + * 0: Never send low-quality video stream. + */ DISABLE_SIMULCAST_STREAM = 0, - /* - * always enable simulcast stream - */ + /** + * 1: Always send low-quality video stream. + */ ENABLE_SIMULCAST_STREAM = 1, }; /** - * The configuration of the low-quality video stream. + * @brief The configuration of the low-quality video stream. */ struct SimulcastStreamConfig { /** - * The video frame dimension: VideoDimensions. The default value is 160 × 120. + * The video dimension. See `VideoDimensions`. The default value is 50% of the high-quality video + * stream. */ VideoDimensions dimensions; /** - * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log level is 5. + * Video bitrate (Kbps). The default value is -1. This parameter does not need to be set. The SDK + * automatically matches the most suitable bitrate based on the video resolution and frame rate you + * set. */ int kBitrate; /** - * The capture frame rate (fps) of the local video. The default value is 5. + * The frame rate (fps) of the local video. The default value is 5. */ int framerate; SimulcastStreamConfig() : dimensions(160, 120), kBitrate(65), framerate(5) {} @@ -2097,70 +2251,93 @@ struct SimulcastStreamConfig { }; /** - * The configuration of the multi-layer video stream. + * @brief Configure video streams of different quality levels. + * + * @since v4.6.0 */ struct SimulcastConfig { /** - * The index of multi-layer video stream + * @brief Index of video streams of different quality levels. 
*/ enum StreamLayerIndex { /** - * 0: video stream index of layer_1 + * (0): Video stream layer_1, with lower resolution and bitrate than VIDEO_STREAM_HIGH. */ STREAM_LAYER_1 = 0, /** - * 1: video stream index of layer_2 + * (1): Video stream layer_2, with lower resolution and bitrate than VIDEO_STREAM_LAYER_1. */ STREAM_LAYER_2 = 1, /** - * 2: video stream index of layer_3 + * (2): Video stream layer_3, with lower resolution and bitrate than VIDEO_STREAM_LAYER_2. */ STREAM_LAYER_3 = 2, /** - * 3: video stream index of layer_4 + * (3): Video stream layer_4, with lower resolution and bitrate than VIDEO_STREAM_LAYER_3. */ STREAM_LAYER_4 = 3, /** - * 4: video stream index of layer_5 + * (4): Video stream layer_5, with lower resolution and bitrate than VIDEO_STREAM_LAYER_4. */ STREAM_LAYER_5 = 4, /** - * 5: video stream index of layer_6 + * (5): Video stream layer_6, with lower resolution and bitrate than VIDEO_STREAM_LAYER_5. */ STREAM_LAYER_6 = 5, /** - * 6: video stream index of low + * (6): Low-quality video stream, with the lowest resolution and bitrate. */ STREAM_LOW = 6, /** - * 7: max count of video stream layers + * (7): Maximum number of video stream layers. */ STREAM_LAYER_COUNT_MAX = 7 }; + /** + * @brief Configures the parameters of a specific layer in multi-quality video streams. + * + * @details + * Used to configure the resolution, frame rate, and enable status of a specific layer in + * multi-quality video streams. + * + */ struct StreamLayerConfig { /** - * The video frame dimension. The default value is 0. + * Video frame size. Default is 0. See `VideoDimensions`. */ VideoDimensions dimensions; /** - * The capture frame rate (fps) of the local video. The default value is 0. + * Frame rate (fps) of the local video capture. Default is 0. */ int framerate; /** - * Whether to enable the corresponding layer of video stream. The default value is false. + * Whether to enable the video stream for the corresponding layer. Default is false. 
+ * - `true`: Enables the video stream for the corresponding layer. + * - `false`: (Default) Disables the video stream for the corresponding layer. */ bool enable; StreamLayerConfig() : dimensions(0, 0), framerate(0), enable(false) {} }; /** - * The array of StreamLayerConfig, which contains STREAM_LAYER_COUNT_MAX layers of video stream at most. + * Configurations for multi-layer streaming: `StreamLayerConfig`. */ StreamLayerConfig configs[STREAM_LAYER_COUNT_MAX]; + /** + * Whether to enable fallback publishing: + * - `true`: Enable fallback publishing. When the device performance or network is poor at the + * publishing end, the SDK will dynamically disable multiple video streams of different quality + * levels, from layer1 to layer6. At least the video streams of the highest and lowest quality are + * retained to maintain basic video continuity. + * - `false`: (Default) Disable fallback publishing. + */ + bool publish_fallback_enable; + + SimulcastConfig(): publish_fallback_enable(false) {} }; /** - * The location of the target area relative to the screen or window. If you do not set this parameter, - * the SDK selects the whole screen or window. + * @brief The location of the target area relative to the screen or window. If you do not set this + * parameter, the SDK selects the whole screen or window. */ struct Rectangle { /** @@ -2172,11 +2349,11 @@ struct Rectangle { */ int y; /** - * The width of the region. + * The width of the target area. */ int width; /** - * The height of the region. + * The height of the target area. */ int height; @@ -2185,12 +2362,16 @@ struct Rectangle { }; /** - * The position and size of the watermark on the screen. + * @brief The position and size of the watermark on the screen. 
+ * + * @details + * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and + * `widthRatio`: + * - ( `xRatio`, `yRatio` ) refers to the coordinates of the upper left corner of the watermark, + * which determines the distance from the upper left corner of the watermark to the upper left + * corner of the screen. + * - The `widthRatio` determines the width of the watermark. * - * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and `widthRatio`: - * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which determines - * the distance from the upper left corner of the watermark to the upper left corner of the screen. - * The `widthRatio` determines the width of the watermark. */ struct WatermarkRatio { /** @@ -2201,14 +2382,15 @@ struct WatermarkRatio { float xRatio; /** * The y-coordinate of the upper left corner of the watermark. The vertical position relative to the - * origin, where the upper left corner of the screen is the origin, and the y-coordinate is the upper - * left corner of the screen. The value range is [0.0,1.0], and the default value is 0. + * origin, where the upper left corner of the screen is the origin, and the y-coordinate is the + * upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0. */ float yRatio; /** - * The width of the watermark. The SDK calculates the height of the watermark proportionally according - * to this parameter value to ensure that the enlarged or reduced watermark image is not distorted. - * The value range is [0,1], and the default value is 0, which means no watermark is displayed. + * The width of the watermark. The SDK calculates the height of the watermark proportionally + * according to this parameter value to ensure that the enlarged or reduced watermark image is not + * distorted. 
The value range is [0,1], and the default value is 0, which means no watermark is + * displayed. */ float widthRatio; @@ -2217,100 +2399,401 @@ struct WatermarkRatio { }; /** - * Configurations of the watermark image. + * @brief Watermark image configurations. + * + * @details + * Configuration options for setting the watermark image to be added. + * */ struct WatermarkOptions { /** - * Whether or not the watermark image is visible in the local video preview: - * - true: (Default) The watermark image is visible in preview. - * - false: The watermark image is not visible in preview. + * Whether the watermark is visible in the local preview view: + * - `true`: (Default) The watermark is visible in the local preview view. + * - `false`: The watermark is not visible in the local preview view. */ bool visibleInPreview; /** - * When the adaptation mode of the watermark is `FIT_MODE_COVER_POSITION`, it is used to set the - * area of the watermark image in landscape mode. See #FIT_MODE_COVER_POSITION for details. + * When the adaptation mode of the watermark is FIT_MODE_COVER_POSITION, it is used to set the area + * of the watermark image in landscape mode. See `Rectangle`. */ Rectangle positionInLandscapeMode; /** - * When the adaptation mode of the watermark is `FIT_MODE_COVER_POSITION`, it is used to set the - * area of the watermark image in portrait mode. See #FIT_MODE_COVER_POSITION for details. + * When the adaptation mode of the watermark is FIT_MODE_COVER_POSITION, it is used to set the area + * of the watermark image in portrait mode. See `Rectangle`. */ Rectangle positionInPortraitMode; /** - * When the watermark adaptation mode is `FIT_MODE_USE_IMAGE_RATIO`, this parameter is used to set - * the watermark coordinates. See WatermarkRatio for details. + * When the watermark adaptation mode is FIT_MODE_USE_IMAGE_RATIO, this parameter is used to set the + * watermark coordinates. See `WatermarkRatio`. 
*/ WatermarkRatio watermarkRatio; /** - * The adaptation mode of the watermark. See #WATERMARK_FIT_MODE for details. + * The adaptation mode of the watermark. See `WATERMARK_FIT_MODE`. */ WATERMARK_FIT_MODE mode; + /** + * Layer order of the watermark image. The default value is 0. + */ + int zOrder; WatermarkOptions() - : visibleInPreview(true), - positionInLandscapeMode(0, 0, 0, 0), - positionInPortraitMode(0, 0, 0, 0), - mode(FIT_MODE_COVER_POSITION) {} + : visibleInPreview(true), + positionInLandscapeMode(0, 0, 0, 0), + positionInPortraitMode(0, 0, 0, 0), + mode(FIT_MODE_COVER_POSITION), + zOrder(0) {} }; /** - * The definition of the RtcStats struct. + * @brief Type of watermark source. + * + * @since 4.6.0 */ -struct RtcStats { +enum WATERMARK_SOURCE_TYPE { /** - * The call duration (s), represented by an aggregate value. + * (0): The watermark source is an image. */ - unsigned int duration; + IMAGE = 0, /** - * The total number of bytes transmitted, represented by an aggregate value. + * (1): The watermark source is a buffer. */ - unsigned int txBytes; + BUFFER = 1, /** - * The total number of bytes received, represented by an aggregate value. + * 2: The watermark source is a literal. + * + * @note This is only supported in linux platform. */ - unsigned int rxBytes; + LITERAL = 2, /** - * The total number of audio bytes sent (bytes), represented by an aggregate value. + * 3: The watermark source is a timestamp. + * + * @note This is only supported in linux platform. */ - unsigned int txAudioBytes; + TIMESTAMPS = 3, +}; + +/** + * @brief The definition of the WatermarkTimestamp struct. + * + * @since 4.6.0 + * @note This is only supported in linux platform. + */ +struct WatermarkTimestamp{ /** - * The total number of video bytes sent (bytes), represented by an aggregate value. + * The font size of the timestamp. The default value is 10. 
*/ - unsigned int txVideoBytes; + int fontSize; /** - * The total number of audio bytes received (bytes), represented by an aggregate value. + * The path of the font file for the timestamp. The default value is NULL. + * The font file should be a .ttf file. If not set, the SDK uses the system default font if available. + * + * @note If used asynchronously, copy the path to memory that will not be released. */ - unsigned int rxAudioBytes; + const char* fontFilePath; /** - * The total number of video bytes received (bytes), represented by an aggregate value. + * The stroke width of the timestamp. The default value is 1. */ - unsigned int rxVideoBytes; + int strokeWidth; /** - * The transmission bitrate (Kbps), represented by an instantaneous value. + * The format of the timestamp. The default is '%F %X'. + * The format follows the standard C library function strftime. You can find it on the website: + * https://cplusplus.com/reference/ctime/strftime/?kw=strftime + * + * @note If used asynchronously, copy the format string to memory that will not be released. */ - unsigned short txKBitRate; + const char* format; + + WatermarkTimestamp() : fontSize(10), fontFilePath(NULL), strokeWidth(1), format(NULL) {} +}; + +/** + * @brief The definition of the WatermarkLiteral struct. + * + * @since 4.6.0 + * @note This is only supported in linux platform. + */ +struct WatermarkLiteral { + /** - * The receiving bitrate (Kbps), represented by an instantaneous value. + * The font size of the literal. The default value is 10. */ - unsigned short rxKBitRate; + int fontSize; /** - * Audio receiving bitrate (Kbps), represented by an instantaneous value. + * The stroke width of the literal. The default value is 1. */ - unsigned short rxAudioKBitRate; + int strokeWidth; /** - * The audio transmission bitrate (Kbps), represented by an instantaneous value. + * The literal content of the watermark. The default value is NULL. 
+ * + * @note If used asynchronously, copy the string to memory that will not be released. */ - unsigned short txAudioKBitRate; + const char* wmLiteral; /** - * The video receive bitrate (Kbps), represented by an instantaneous value. + * The path of the font file for the literal. The default value is NULL. + * The font file should be a .ttf file. If not set, the SDK uses the system default font if available. + * + * @note If used asynchronously, copy the string to memory that will not be released. */ - unsigned short rxVideoKBitRate; + const char* fontFilePath; + + WatermarkLiteral() : wmLiteral(NULL), fontFilePath(NULL), fontSize(10), strokeWidth(1) {} +}; + +/** + * @brief Configures the format, size, and pixel buffer of the watermark image. + * + * @since 4.6.0 + * + * @details + * Defines the buffer data structure of the watermark image, including image width, height, format, + * length, and image data buffer. + * + */ +struct WatermarkBuffer { + /** - * The video transmission bitrate (Kbps), represented by an instantaneous value. + * Width of the watermark buffer, in pixels. + */ + int width; + /** + * Height of the watermark buffer, in pixels. + */ + int height; + /** + * Length of the watermark buffer, in bytes. + */ + int length; + /** + * Format of the watermark buffer. See `VIDEO_PIXEL_FORMAT`. Default is VIDEO_PIXEL_I420. Currently + * supported formats include: VIDEO_PIXEL_I420, VIDEO_PIXEL_RGBA, VIDEO_PIXEL_BGRA, and + * VIDEO_PIXEL_NV21. + */ + media::base::VIDEO_PIXEL_FORMAT format; + + /** + * Buffer data of the watermark. + */ + const uint8_t* buffer; + + WatermarkBuffer() : buffer(NULL), width(0), height(0), length(0), format(media::base::VIDEO_PIXEL_I420) {} +}; + +/** + * @brief Used to configure watermark-related information. + * + * @since 4.6.0 + */ +struct WatermarkConfig { + /** + * Unique identifier for the watermark. It is recommended to use a UUID. + */ + const char* id; + /** + * Type of the watermark. See `WATERMARK_SOURCE_TYPE`. 
+ */ + WATERMARK_SOURCE_TYPE type; + union { + /** + * Buffer of the watermark. See `WatermarkBuffer`. + */ + WatermarkBuffer buffer; + /** + * The watermark timestamp. See WatermarkTimestamp. + * + * @note This is only supported in linux platform. + */ + WatermarkTimestamp timestamp; + /** + * The watermark literal. See WatermarkLiteral. + * + * @note This is only supported in linux platform. + */ + WatermarkLiteral literal; + /** + * URL of the watermark image file. Default value is NULL. + */ + const char* imageUrl; + }; + + /** + * Options for the watermark. See `WatermarkOptions`. + */ + WatermarkOptions options; + + WatermarkConfig() : id(NULL), type(IMAGE), imageUrl(NULL) {} +}; + +/** + * @brief The transmission mode of data over multiple network paths. + * + * @since 4.6.0 + */ +enum MultipathMode { + /** + * Duplicate mode, the same piece of data is redundantly transmitted over all available paths. + * @technical preview + */ + Duplicate= 0, + /** + * (1): Dynamic transmission mode. The SDK dynamically selects the optimal path for data + * transmission based on the current network conditions to improve transmission performance. + */ + Dynamic +}; + +/** + * @brief Network path types used in multipath transmission. + * + * @since 4.6.0 + */ +enum MultipathType { + /** + * (0): Local Area Network (LAN) path. + */ + LAN = 0, + /** + * (1): Wi-Fi path. + */ + WIFI, + /** + * (2): Mobile network path. + */ + Mobile, + /** + * (99): Unknown or unspecified network path. + */ + Unknown = 99 +}; + +/** + * @brief Statistical information about a specific network path. + * + * @since 4.6.0 + */ +struct PathStats { + /** + * Types of network path. See `MultipathType`. + */ + MultipathType type; + /** + * The transmission bitrate of the path in Kbps. + */ + int txKBitRate; + /** + * The receiving bitrate of the path in Kbps. 
+ */ + int rxKBitRate; + PathStats() : type(Unknown), txKBitRate(0), rxKBitRate(0) {} + PathStats(MultipathType t, int tx, int rx) : type(t), txKBitRate(tx), rxKBitRate(rx) {} +}; + +/** + * @brief Aggregates statistics of each network path in multipath transmission. + * + * @since 4.6.0 + */ +struct MultipathStats { + /** + * The total number of bytes sent over the LAN path. + */ + uint32_t lanTxBytes; + /** + * The total number of bytes received over the LAN path. + */ + uint32_t lanRxBytes; + /** + * The total number of bytes sent over the Wi-Fi path. + */ + uint32_t wifiTxBytes; + /** + * The total number of bytes received over the Wi-Fi path. + */ + uint32_t wifiRxBytes; + /** + * The total number of bytes sent over the mobile network path. + */ + uint32_t mobileTxBytes; + /** + * The total number of bytes received over the mobile network path. + */ + uint32_t mobileRxBytes; + /** + * The number of active transmission paths. + */ + int activePathNum; + /** + * An array of statistics for each active transmission path. See `PathStats`. + */ + const PathStats* pathStats; + MultipathStats() + : lanTxBytes(0), + lanRxBytes(0), + wifiTxBytes(0), + wifiRxBytes(0), + mobileTxBytes(0), + mobileRxBytes(0), + activePathNum(0), + pathStats(nullptr) {} +}; + +/** + * @brief Statistics of a call session. + */ +struct RtcStats { + /** + * Call duration of the local user in seconds, represented by an aggregate value. + */ + unsigned int duration; + /** + * The number of bytes sent. + */ + unsigned int txBytes; + /** + * The number of bytes received. + */ + unsigned int rxBytes; + /** + * The total number of audio bytes sent, represented by an aggregate value. + */ + unsigned int txAudioBytes; + /** + * The total number of video bytes sent, represented by an aggregate value. + */ + unsigned int txVideoBytes; + /** + * The total number of audio bytes received, represented by an aggregate value. 
+ */ + unsigned int rxAudioBytes; + /** + * The total number of video bytes received, represented by an aggregate value. + */ + unsigned int rxVideoBytes; + /** + * The actual bitrate (Kbps) while sending the local video stream. + */ + unsigned short txKBitRate; + /** + * The receiving bitrate (Kbps). + */ + unsigned short rxKBitRate; + /** + * The bitrate (Kbps) of receiving the audio. + */ + unsigned short rxAudioKBitRate; + /** + * The bitrate (Kbps) of sending the audio packet. + */ + unsigned short txAudioKBitRate; + /** + * The bitrate (Kbps) of receiving the video. + */ + unsigned short rxVideoKBitRate; + /** + * The bitrate (Kbps) of sending the video. */ unsigned short txVideoKBitRate; /** - * The VOS client-server latency (ms). + * The client-to-server delay (milliseconds). */ unsigned short lastmileDelay; /** @@ -2318,7 +2801,7 @@ struct RtcStats { */ unsigned int userCount; /** - * The app CPU usage (%). + * Application CPU usage (%). * @note * - The value of `cpuAppUsage` is always reported as 0 in the `onLeaveChannel` callback. * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. @@ -2326,7 +2809,6 @@ struct RtcStats { double cpuAppUsage; /** * The system CPU usage (%). - * * For Windows, in the multi-kernel environment, this member represents the average CPU usage. The * value = (100 - System Idle Progress in Task Manager)/100. * @note @@ -2335,29 +2817,33 @@ struct RtcStats { */ double cpuTotalUsage; /** - * The round-trip time delay from the client to the local router. - * @note On Android, to get `gatewayRtt`, ensure that you add the `android.permission.ACCESS_WIFI_STATE` + * The round-trip time delay (ms) from the client to the local router. + * @note + * This property is disabled on devices running iOS 14 or later, and enabled on devices running + * versions earlier than iOS 14 by default. + * To enable this property on devices running iOS 14 or later, contact `technical support`. 
+ * On Android, to get `gatewayRtt`, ensure that you add the `android.permission.ACCESS_WIFI_STATE` * permission after `` in the `AndroidManifest.xml` file in your project. */ int gatewayRtt; /** - * The memory usage ratio of the app (%). + * The memory ratio occupied by the app (%). * @note This value is for reference only. Due to system limitations, you may not get this value. */ double memoryAppUsageRatio; /** - * The memory usage ratio of the system (%). + * The memory occupied by the system (%). * @note This value is for reference only. Due to system limitations, you may not get this value. */ double memoryTotalUsageRatio; /** - * The memory usage of the app (KB). + * The memory size occupied by the app (KB). * @note This value is for reference only. Due to system limitations, you may not get this value. */ int memoryAppUsageInKbytes; /** - * The time elapsed from the when the app starts connecting to an Agora channel - * to when the connection is established. 0 indicates that this member does not apply. + * The duration (ms) between the SDK starts connecting and the connection is established. If the + * value reported is 0, it means invalid. */ int connectTimeMs; /** @@ -2406,127 +2892,140 @@ struct RtcStats { */ int firstVideoKeyFrameRenderedDurationAfterUnmute; /** - * The packet loss rate of sender(broadcaster). + * The packet loss rate (%) from the client to the Agora server before applying the anti-packet-loss + * algorithm. */ int txPacketLossRate; /** - * The packet loss rate of receiver(audience). + * The packet loss rate (%) from the Agora server to the client before using the anti-packet-loss + * method. */ int rxPacketLossRate; + /** + * The local network acceleration state. + * A value of 1 indicates that local network acceleration is active, while 0 indicates it is inactive. 
+ * @technical preview + */ + int lanAccelerateState; + RtcStats() - : duration(0), - txBytes(0), - rxBytes(0), - txAudioBytes(0), - txVideoBytes(0), - rxAudioBytes(0), - rxVideoBytes(0), - txKBitRate(0), - rxKBitRate(0), - rxAudioKBitRate(0), - txAudioKBitRate(0), - rxVideoKBitRate(0), - txVideoKBitRate(0), - lastmileDelay(0), - userCount(0), - cpuAppUsage(0.0), - cpuTotalUsage(0.0), - gatewayRtt(0), - memoryAppUsageRatio(0.0), - memoryTotalUsageRatio(0.0), - memoryAppUsageInKbytes(0), - connectTimeMs(0), - firstAudioPacketDuration(0), - firstVideoPacketDuration(0), - firstVideoKeyFramePacketDuration(0), - packetsBeforeFirstKeyFramePacket(0), - firstAudioPacketDurationAfterUnmute(0), - firstVideoPacketDurationAfterUnmute(0), - firstVideoKeyFramePacketDurationAfterUnmute(0), - firstVideoKeyFrameDecodedDurationAfterUnmute(0), - firstVideoKeyFrameRenderedDurationAfterUnmute(0), - txPacketLossRate(0), - rxPacketLossRate(0) {} -}; - -/** - * User role types. + : duration(0), + txBytes(0), + rxBytes(0), + txAudioBytes(0), + txVideoBytes(0), + rxAudioBytes(0), + rxVideoBytes(0), + txKBitRate(0), + rxKBitRate(0), + rxAudioKBitRate(0), + txAudioKBitRate(0), + rxVideoKBitRate(0), + txVideoKBitRate(0), + lastmileDelay(0), + userCount(0), + cpuAppUsage(0.0), + cpuTotalUsage(0.0), + gatewayRtt(0), + memoryAppUsageRatio(0.0), + memoryTotalUsageRatio(0.0), + memoryAppUsageInKbytes(0), + connectTimeMs(0), + firstAudioPacketDuration(0), + firstVideoPacketDuration(0), + firstVideoKeyFramePacketDuration(0), + packetsBeforeFirstKeyFramePacket(0), + firstAudioPacketDurationAfterUnmute(0), + firstVideoPacketDurationAfterUnmute(0), + firstVideoKeyFramePacketDurationAfterUnmute(0), + firstVideoKeyFrameDecodedDurationAfterUnmute(0), + firstVideoKeyFrameRenderedDurationAfterUnmute(0), + txPacketLossRate(0), + rxPacketLossRate(0), + lanAccelerateState(0) {} +}; + +/** + * @brief The user role in the interactive live streaming. */ enum CLIENT_ROLE_TYPE { /** - * 1: Broadcaster. 
A broadcaster can both send and receive streams. + * 1: Host. A host can both send and receive streams. */ CLIENT_ROLE_BROADCASTER = 1, /** - * 2: Audience. An audience member can only receive streams. + * 2: (Default) Audience. An audience member can only receive streams. */ CLIENT_ROLE_AUDIENCE = 2, }; /** - * Quality change of the local video in terms of target frame rate and target bit rate since last count. + * @brief Quality change of the local video in terms of target frame rate and target bit rate since + * last count. */ enum QUALITY_ADAPT_INDICATION { /** - * 0: The quality of the local video stays the same. + * 0: The local video quality stays the same. */ ADAPT_NONE = 0, /** - * 1: The quality improves because the network bandwidth increases. + * 1: The local video quality improves because the network bandwidth increases. */ ADAPT_UP_BANDWIDTH = 1, /** - * 2: The quality worsens because the network bandwidth decreases. + * 2: The local video quality deteriorates because the network bandwidth decreases. */ ADAPT_DOWN_BANDWIDTH = 2, }; /** - * The latency level of an audience member in interactive live streaming. This enum takes effect only - * when the user role is set to `CLIENT_ROLE_AUDIENCE`. + * @brief The latency level of an audience member in interactive live streaming. This enum takes + * effect only when the user role is set to CLIENT_ROLE_AUDIENCE . */ -enum AUDIENCE_LATENCY_LEVEL_TYPE -{ +enum AUDIENCE_LATENCY_LEVEL_TYPE { /** * 1: Low latency. */ AUDIENCE_LATENCY_LEVEL_LOW_LATENCY = 1, /** - * 2: Ultra low latency. + * 2: (Default) Ultra low latency. */ AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY = 2, }; /** - * The detailed options of a user. + * @brief Setting of user role properties. */ -struct ClientRoleOptions -{ +struct ClientRoleOptions { /** - * The latency level of an audience member in interactive live streaming. See `AUDIENCE_LATENCY_LEVEL_TYPE`. + * The latency level of an audience member in interactive live streaming. 
See + * `AUDIENCE_LATENCY_LEVEL_TYPE`. */ AUDIENCE_LATENCY_LEVEL_TYPE audienceLatencyLevel; - ClientRoleOptions() - : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} + ClientRoleOptions() : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} }; /** - * Quality of experience (QoE) of the local user when receiving a remote audio stream. + * @brief The Quality of Experience (QoE) of the local user when receiving a remote audio stream. */ enum EXPERIENCE_QUALITY_TYPE { - /** 0: QoE of the local user is good. */ + /** + * 0: The QoE of the local user is good. + */ EXPERIENCE_QUALITY_GOOD = 0, - /** 1: QoE of the local user is poor. */ + /** + * 1: The QoE of the local user is poor. + */ EXPERIENCE_QUALITY_BAD = 1, }; /** - * Reasons why the QoE of the local user when receiving a remote audio stream is poor. + * @brief Reasons why the QoE of the local user when receiving a remote audio stream is poor. */ enum EXPERIENCE_POOR_REASON { /** - * 0: No reason, indicating good QoE of the local user. + * 0: No reason, indicating a good QoE of the local user. */ EXPERIENCE_REASON_NONE = 0, /** @@ -2549,34 +3048,40 @@ enum EXPERIENCE_POOR_REASON { }; /** - * Audio AINS mode + * @brief AI noise suppression modes. */ enum AUDIO_AINS_MODE { - /** - * AINS mode with soft suppression level. - */ - AINS_MODE_BALANCED = 0, - /** - * AINS mode with high suppression level. - */ - AINS_MODE_AGGRESSIVE = 1, - /** - * AINS mode with high suppression level and ultra-low-latency - */ - AINS_MODE_ULTRALOWLATENCY = 2 + /** + * 0: (Default) Balance mode. This mode allows for a balanced performance on noise suppression and + * time delay. + */ + AINS_MODE_BALANCED = 0, + /** + * 1: Aggressive mode. In scenarios where high performance on noise suppression is required, such as + * live streaming outdoor events, this mode reduces noise more dramatically, but may sometimes + * affect the original character of the audio. 
+ */ + AINS_MODE_AGGRESSIVE = 1, + /** + * 2: Aggressive mode with low latency. The noise suppression delay of this mode is about only half + * of that of the balance and aggressive modes. It is suitable for scenarios that have high + * requirements on noise suppression with low latency, such as sing together online in real time. + */ + AINS_MODE_ULTRALOWLATENCY = 2 }; /** - * Audio profile types. + * @brief The audio profile. */ enum AUDIO_PROFILE_TYPE { /** * 0: The default audio profile. - * - For the Communication profile: + * - For the interactive streaming profile: A sample rate of 48 kHz, music encoding, mono, and a + * bitrate of up to 64 Kbps. + * - For the communication profile: * - Windows: A sample rate of 16 kHz, audio encoding, mono, and a bitrate of up to 16 Kbps. - * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 Kbps. - * of up to 16 Kbps. - * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate of up to 64 Kbps. + * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 + * Kbps. */ AUDIO_PROFILE_DEFAULT = 0, /** @@ -2588,9 +3093,8 @@ enum AUDIO_PROFILE_TYPE { */ AUDIO_PROFILE_MUSIC_STANDARD = 2, /** - * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. - * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` + * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. To implement + * stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_STANDARD_STEREO = 3, @@ -2599,77 +3103,87 @@ enum AUDIO_PROFILE_TYPE { */ AUDIO_PROFILE_MUSIC_HIGH_QUALITY = 4, /** - * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. 
- * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` + * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. To implement + * stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO = 5, /** - * 6: A sample rate of 16 kHz, audio encoding, mono, and Acoustic Echo Cancellation (AES) enabled. + * 6: A sample rate of 16 kHz, audio encoding, mono, and Acoustic Echo Cancellation (AEC) enabled. */ AUDIO_PROFILE_IOT = 6, + /** + * Enumerator boundary. + */ AUDIO_PROFILE_NUM = 7 }; /** - * The audio scenario. + * @brief The audio scenarios. */ enum AUDIO_SCENARIO_TYPE { /** - * 0: Automatic scenario, where the SDK chooses the appropriate audio quality according to the - * user role and audio route. + * 0: (Default) Automatic scenario match, where the SDK chooses the appropriate audio quality + * according to the user role and audio route. */ AUDIO_SCENARIO_DEFAULT = 0, /** - * 3: (Recommended) The live gaming scenario, which needs to enable gaming - * audio effects in the speaker. Choose this scenario to achieve high-fidelity - * music playback. + * 3: High-quality audio scenario, where users mainly play music. For example, instrument tutoring. */ AUDIO_SCENARIO_GAME_STREAMING = 3, /** - * 5: The chatroom scenario, which needs to keep recording when setClientRole to audience. - * Normally, app developer can also use mute api to achieve the same result, - * and we implement this 'non-orthogonal' behavior only to make API backward compatible. + * 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the + * microphone. For example, education scenarios. */ AUDIO_SCENARIO_CHATROOM = 5, /** - * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low latency. 
+ * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low + * latency. */ AUDIO_SCENARIO_CHORUS = 7, /** - * 8: Meeting + * 8: Meeting scenario that mainly contains the human voice. */ AUDIO_SCENARIO_MEETING = 8, /** - * 9: The number of enumerations. + * 9: AI Server. + * @technical preview + */ + AUDIO_SCENARIO_AI_SERVER = 9, + /** + * 10: AI conversation scenario, which is only applicable to scenarios where the user interacts with + * the conversational AI agent created by `Conversational AI Engine`. + */ + AUDIO_SCENARIO_AI_CLIENT = 10, + /** + * The number of enumerations. */ - AUDIO_SCENARIO_NUM = 9, + AUDIO_SCENARIO_NUM = 11, }; /** - * The format of the video frame. + * @brief The format of the video frame. */ struct VideoFormat { - OPTIONAL_ENUM_SIZE_T { - /** The maximum value (px) of the width. */ - kMaxWidthInPixels = 3840, - /** The maximum value (px) of the height. */ - kMaxHeightInPixels = 2160, - /** The maximum value (fps) of the frame rate. */ - kMaxFps = 60, + OPTIONAL_ENUM_SIZE_T{ + /** The maximum value (px) of the width. */ + kMaxWidthInPixels = 3840, + /** The maximum value (px) of the height. */ + kMaxHeightInPixels = 2160, + /** The maximum value (fps) of the frame rate. */ + kMaxFps = 60, }; /** - * The width (px) of the video. + * The width (px) of the video frame. The default value is 960. */ - int width; // Number of pixels. + int width; // Number of pixels. /** - * The height (px) of the video. + * The height (px) of the video frame. The default value is 540. */ int height; // Number of pixels. /** - * The video frame rate (fps). + * The video frame rate (fps). The default value is 15. 
*/ int fps; VideoFormat() : width(FRAME_WIDTH_960), height(FRAME_HEIGHT_540), fps(FRAME_RATE_FPS_15) {} @@ -2687,78 +3201,120 @@ struct VideoFormat { bool operator==(const VideoFormat& fmt) const { return width == fmt.width && height == fmt.height && fps == fmt.fps; } - bool operator!=(const VideoFormat& fmt) const { - return !operator==(fmt); - } + bool operator!=(const VideoFormat& fmt) const { return !operator==(fmt); } }; /** - * Video content hints. + * @brief The content hint for screen sharing. */ enum VIDEO_CONTENT_HINT { /** - * (Default) No content hint. In this case, the SDK balances smoothness with sharpness. + * (Default) No content hint. */ CONTENT_HINT_NONE, /** - * Choose this option if you prefer smoothness or when - * you are sharing motion-intensive content such as a video clip, movie, or video game. - * - * + * Motion-intensive content. Choose this option if you prefer smoothness or when you are sharing a + * video clip, movie, or video game. */ CONTENT_HINT_MOTION, /** - * Choose this option if you prefer sharpness or when you are - * sharing montionless content such as a picture, PowerPoint slide, ot text. - * + * Motionless content. Choose this option if you prefer sharpness or when you are sharing a picture, + * PowerPoint slides, or texts. */ CONTENT_HINT_DETAILS }; /** - * The screen sharing scenario. + * @brief The screen sharing scenario. */ enum SCREEN_SCENARIO_TYPE { /** - * 1: Document. This scenario prioritizes the video quality of screen sharing and reduces the - * latency of the shared video for the receiver. If you share documents, slides, and tables, - * you can set this scenario. + * 1: (Default) Document. This scenario prioritizes the video quality of screen sharing and reduces + * the latency of the shared video for the receiver. If you share documents, slides, and tables, you + * can set this scenario. */ SCREEN_SCENARIO_DOCUMENT = 1, /** - * 2: Game. This scenario prioritizes the smoothness of screen sharing. 
If you share games, you - * can set this scenario. + * 2: Game. This scenario prioritizes the smoothness of screen sharing. If you share games, you can + * set this scenario. */ SCREEN_SCENARIO_GAMING = 2, /** - * 3: Video. This scenario prioritizes the smoothness of screen sharing. If you share movies or - * live videos, you can set this scenario. + * 3: Video. This scenario prioritizes the smoothness of screen sharing. If you share movies or live + * videos, you can set this scenario. */ SCREEN_SCENARIO_VIDEO = 3, /** - * 4: Remote control. This scenario prioritizes the video quality of screen sharing and reduces - * the latency of the shared video for the receiver. If you share the device desktop being - * remotely controlled, you can set this scenario. + * 4: Remote control. This scenario prioritizes the video quality of screen sharing and reduces the + * latency of the shared video for the receiver. If you share the device desktop being remotely + * controlled, you can set this scenario. */ SCREEN_SCENARIO_RDC = 4, }; - /** - * The video application scenario type. + * @brief The video application scenarios. */ enum VIDEO_APPLICATION_SCENARIO_TYPE { /** - * 0: Default Scenario. + * 0: (Default) The general scenario. */ APPLICATION_SCENARIO_GENERAL = 0, /** - * 1: Meeting Scenario. This scenario is the best QoE practice of meeting application. + * 1: The meeting scenario. + * `APPLICATION_SCENARIO_MEETING` (1) is suitable for meeting scenarios. The SDK automatically + * enables the following strategies: + * - In meeting scenarios where low-quality video streams are required to have a high bitrate, the + * SDK automatically enables multiple technologies used to deal with network congestions, to enhance + * the performance of the low-quality streams and to ensure the smooth reception by subscribers. 
+ * - The SDK monitors the number of subscribers to the high-quality video stream in real time and + * dynamically adjusts its configuration based on the number of subscribers. + * - If nobody subscribes to the high-quality stream, the SDK automatically reduces its bitrate + * and frame rate to save upstream bandwidth. + * - If someone subscribes to the high-quality stream, the SDK resets the high-quality stream to + * the `VideoEncoderConfiguration` configuration used in the most recent calling of + * `setVideoEncoderConfiguration`. If no configuration has been set by the user previously, the + * following values are used: + * - Resolution: (Windows and macOS) 1280 × 720; (Android and iOS) 960 × 540 + * - Frame rate: 15 fps + * - Bitrate: (Windows and macOS) 1600 Kbps; (Android and iOS) 1000 Kbps + * - The SDK monitors the number of subscribers to the low-quality video stream in real time and + * dynamically enables or disables it based on the number of subscribers. + * - If nobody subscribes to the low-quality stream, the SDK automatically disables it to save + * upstream bandwidth. + * - If someone subscribes to the low-quality stream, the SDK enables the low-quality stream and + * resets it to the `SimulcastStreamConfig` configuration used in the most recent calling of + * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`. If no + * configuration has been set by the user previously, the following + * values are used: + * - Resolution: 480 × 272 + * - Frame rate: 15 fps + * - Bitrate: 500 Kbps + * @note If the user has called `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const + * SimulcastStreamConfig& streamConfig)` to set that never send low-quality video + * stream ( `DISABLE_SIMULCAST_STREAM` ), the dynamic adjustment of the low-quality stream in + * meeting scenarios will not take effect. */ APPLICATION_SCENARIO_MEETING = 1, /** - * 2: Video Call Scenario. 
This scenario is used to optimize the video experience in video application, like 1v1 video call. + * 2: 1v1 video call scenario. + * `APPLICATION_SCENARIO_1V1` (2) This is applicable to the `one to one live` scenario. To meet the + * requirements for low latency and high-quality video in this scenario, the SDK optimizes its + * strategies, improving performance in terms of video quality, first frame rendering, latency on + * mid-to-low-end devices, and smoothness under weak network conditions. + * @note This enumeration value is only applicable to the broadcaster vs. broadcaster scenario. */ APPLICATION_SCENARIO_1V1 = 2, + /** + * 3. Live show scenario. + * `APPLICATION_SCENARIO_LIVESHOW` (3) This is applicable to the `show room` scenario. In this + * scenario, fast video rendering and high image quality are crucial. The SDK implements several + * performance optimizations, including automatically enabling accelerated audio and video frame + * rendering to minimize first-frame latency (no need to call `enableInstantMediaRendering` ), and + * B-frame encoding to achieve better image quality and bandwidth efficiency. The SDK also provides + * enhanced video quality and smooth playback, even in poor network conditions or on lower-end + * devices. + */ + APPLICATION_SCENARIO_LIVESHOW = 3, }; /** @@ -2785,39 +3341,58 @@ enum VIDEO_QOE_PREFERENCE_TYPE { }; /** - * The brightness level of the video image captured by the local camera. + * @brief The brightness level of the video image captured by the local camera. */ enum CAPTURE_BRIGHTNESS_LEVEL_TYPE { - /** -1: The SDK does not detect the brightness level of the video image. - * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next callback. + /** + * -1: The SDK does not detect the brightness level of the video image. Wait a few seconds to get + * the brightness level from `captureBrightnessLevel` in the next callback. 
*/ CAPTURE_BRIGHTNESS_LEVEL_INVALID = -1, - /** 0: The brightness level of the video image is normal. + /** + * 0: The brightness level of the video image is normal. */ CAPTURE_BRIGHTNESS_LEVEL_NORMAL = 0, - /** 1: The brightness level of the video image is too bright. + /** + * 1: The brightness level of the video image is too bright. */ CAPTURE_BRIGHTNESS_LEVEL_BRIGHT = 1, - /** 2: The brightness level of the video image is too dark. + /** + * 2: The brightness level of the video image is too dark. */ CAPTURE_BRIGHTNESS_LEVEL_DARK = 2, }; +/** + * @brief Camera stabilization modes. + * + * @details + * The camera stabilization effect increases in the order of 1 < 2 < 3, and the latency will also + * increase accordingly. + * + */ enum CAMERA_STABILIZATION_MODE { - /** The camera stabilization mode is disabled. - */ + /** + * -1: (Default) Camera stabilization mode off. + */ CAMERA_STABILIZATION_MODE_OFF = -1, - /** device choose stabilization mode automatically. - */ + /** + * 0: Automatic camera stabilization. The system automatically selects a stabilization mode based on + * the status of the camera. However, the latency is relatively high in this mode, so it is + * recommended not to use this enumeration. + */ CAMERA_STABILIZATION_MODE_AUTO = 0, - /** stabilization mode level 1. - */ + /** + * 1: (Recommended) Level 1 camera stabilization. + */ CAMERA_STABILIZATION_MODE_LEVEL_1 = 1, - /** stabilization mode level 2. - */ + /** + * 2: Level 2 camera stabilization. + */ CAMERA_STABILIZATION_MODE_LEVEL_2 = 2, - /** stabilization mode level 3. - */ + /** + * 3: Level 3 camera stabilization. + */ CAMERA_STABILIZATION_MODE_LEVEL_3 = 3, /** The maximum level of the camera stabilization mode. */ @@ -2825,7 +3400,7 @@ enum CAMERA_STABILIZATION_MODE { }; /** - * Local audio states. + * @brief The state of the local audio. 
*/ enum LOCAL_AUDIO_STREAM_STATE { /** @@ -2833,7 +3408,7 @@ enum LOCAL_AUDIO_STREAM_STATE { */ LOCAL_AUDIO_STREAM_STATE_STOPPED = 0, /** - * 1: The capturing device starts successfully. + * 1: The local audio capturing device starts successfully. */ LOCAL_AUDIO_STREAM_STATE_RECORDING = 1, /** @@ -2847,7 +3422,7 @@ enum LOCAL_AUDIO_STREAM_STATE { }; /** - * Local audio state error codes. + * @brief Reasons for local audio state changes. */ enum LOCAL_AUDIO_STREAM_REASON { /** @@ -2855,47 +3430,61 @@ enum LOCAL_AUDIO_STREAM_REASON { */ LOCAL_AUDIO_STREAM_REASON_OK = 0, /** - * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the channel. + * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the + * channel. */ LOCAL_AUDIO_STREAM_REASON_FAILURE = 1, /** - * 2: No permission to use the local audio device. Remind your users to grant permission. + * 2: No permission to use the local audio capturing device. Remind your users to grant permission. */ LOCAL_AUDIO_STREAM_REASON_DEVICE_NO_PERMISSION = 2, /** - * 3: (Android and iOS only) The local audio capture device is used. Remind your users to check - * whether another application occupies the microphone. Local audio capture automatically resume - * after the microphone is idle for about five seconds. You can also try to rejoin the channel - * after the microphone is idle. + * 3: (Android and iOS only) The local audio capture device is already in use. Remind your users to + * check whether another application occupies the microphone. Local audio capture automatically + * resumes after the microphone is idle for about five seconds. You can also try to rejoin the + * channel after the microphone is idle. */ LOCAL_AUDIO_STREAM_REASON_DEVICE_BUSY = 3, /** - * 4: The local audio capture failed. + * 4: The local audio capture fails. */ LOCAL_AUDIO_STREAM_REASON_RECORD_FAILURE = 4, /** - * 5: The local audio encoding failed. 
+ * 5: The local audio encoding fails. */ LOCAL_AUDIO_STREAM_REASON_ENCODE_FAILURE = 5, - /** 6: The SDK cannot find the local audio recording device. + /** + * 6: (Windows and macOS only) No local audio capture device. Remind your users to check whether the + * microphone is connected to the device properly in the control panel of the device or if the + * microphone is working properly. */ LOCAL_AUDIO_STREAM_REASON_NO_RECORDING_DEVICE = 6, - /** 7: The SDK cannot find the local audio playback device. + /** + * 7: (Windows and macOS only) No local audio capture device. Remind your users to check whether the + * speaker is connected to the device properly in the control panel of the device or if the speaker + * is working properly. */ LOCAL_AUDIO_STREAM_REASON_NO_PLAYOUT_DEVICE = 7, /** - * 8: The local audio capturing is interrupted by the system call. + * 8: (Android and iOS only) The local audio capture is interrupted by a system call, smart + * assistants, or alarm clock. Prompt your users to end the phone call, smart assistants, or alarm + * clock if the local audio capture is required. */ LOCAL_AUDIO_STREAM_REASON_INTERRUPTED = 8, - /** 9: An invalid audio capture device ID. + /** + * 9: (Windows only) The ID of the local audio-capture device is invalid. Prompt the user to check + * the audio capture device ID. */ LOCAL_AUDIO_STREAM_REASON_RECORD_INVALID_ID = 9, - /** 10: An invalid audio playback device ID. + /** + * 10: (Windows only) The ID of the local audio-playback device is invalid. Prompt the user to check + * the audio playback device ID. */ LOCAL_AUDIO_STREAM_REASON_PLAYOUT_INVALID_ID = 10, }; -/** Local video state types. +/** + * @brief Local video state types. */ enum LOCAL_VIDEO_STREAM_STATE { /** @@ -2903,8 +3492,8 @@ enum LOCAL_VIDEO_STREAM_STATE { */ LOCAL_VIDEO_STREAM_STATE_STOPPED = 0, /** - * 1: The local video capturing device starts successfully. 
The SDK also reports this state when - * you call `startScreenCaptureByWindowId` to share a maximized window. + * 1: The local video capturing device starts successfully. The SDK also reports this state when you + * call `startScreenCaptureByWindowId` to share a maximized window. */ LOCAL_VIDEO_STREAM_STATE_CAPTURING = 1, /** @@ -2918,7 +3507,30 @@ enum LOCAL_VIDEO_STREAM_STATE { }; /** - * Local video state error codes. + * @brief The local video event type. + * @since v4.6.1 + */ +enum LOCAL_VIDEO_EVENT_TYPE { + /** + * 1: (Android only) The screen capture window is hidden. + */ + LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_HIDDEN = 1, + /** + * 2: (Android only) The screen capture window is recovered from hidden. + */ + LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 2, + /** + * 3: (Android only) The screen capture is stopped by user. + */ + LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_STOPPED_BY_USER = 3, + /** + * 4: (Android only) An internal error occurs during the screen capture. + */ + LOCAL_VIDEO_EVENT_TYPE_SCREEN_CAPTURE_SYSTEM_INTERNAL_ERROR = 4, +}; + +/** + * @brief Reasons for local video state changes. */ enum LOCAL_VIDEO_STREAM_REASON { /** @@ -2930,35 +3542,34 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_FAILURE = 1, /** - * 2: No permission to use the local video capturing device. Remind the user to grant permission + * 2: No permission to use the local video capturing device. Prompt the user to grant permissions * and rejoin the channel. */ LOCAL_VIDEO_STREAM_REASON_DEVICE_NO_PERMISSION = 2, /** - * 3: The local video capturing device is in use. Remind the user to check whether another - * application occupies the camera. + * 3: The local video capturing device is in use. Prompt the user to check if the camera is being + * used by another app, or try to rejoin the channel. */ LOCAL_VIDEO_STREAM_REASON_DEVICE_BUSY = 3, /** - * 4: The local video capture fails. 
Remind the user to check whether the video capture device - * is working properly or the camera is occupied by another application, and then to rejoin the - * channel. + * 4: The local video capture fails. Prompt the user to check whether the video capture device is + * working properly, whether the camera is used by another app, or try to rejoin the channel. */ LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE = 4, /** - * 5: The local video encoder is not supported. + * 5: The local video encoding fails. */ LOCAL_VIDEO_STREAM_REASON_CODEC_NOT_SUPPORT = 5, /** - * 6: (iOS only) The app is in the background. Remind the user that video capture cannot be + * 6: (iOS only) The app is in the background. Prompt the user that video capture cannot be * performed normally when the app is in the background. */ LOCAL_VIDEO_STREAM_REASON_CAPTURE_INBACKGROUND = 6, /** - * 7: (iOS only) The current application window is running in Slide Over, Split View, or Picture - * in Picture mode, and another app is occupying the camera. Remind the user that the application - * cannot capture video properly when the app is running in Slide Over, Split View, or Picture in - * Picture mode and another app is occupying the camera. + * 7: (iOS only) The current app window is running in Slide Over, Split View, or Picture in Picture + * mode, and another app is occupying the camera. Prompt the user that the app cannot capture video + * properly when it is running in Slide Over, Split View, or Picture in Picture mode and another app + * is occupying the camera. */ LOCAL_VIDEO_STREAM_REASON_CAPTURE_MULTIPLE_FOREGROUND_APPS = 7, /** @@ -2968,23 +3579,28 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_NOT_FOUND = 8, /** - * 9: (macOS only) The video capture device currently in use is disconnected (such as being - * unplugged). + * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as + * being unplugged). 
*/ LOCAL_VIDEO_STREAM_REASON_DEVICE_DISCONNECTED = 9, /** - * 10: (macOS and Windows only) The SDK cannot find the video device in the video device list. - * Check whether the ID of the video device is valid. + * 10: (macOS and Windows only) The SDK cannot find the video device in the video device list. Check + * whether the ID of the video device is valid. */ LOCAL_VIDEO_STREAM_REASON_DEVICE_INVALID_ID = 10, /** - * 14: (Android only) Video capture was interrupted, possibly due to the camera being occupied - * or some policy reasons such as background termination. + * 14: (Android only) Video capture is interrupted. Possible reasons include the following: + * - The camera is being used by another app. Prompt the user to check if the camera is being used + * by another app. + * - The current app has been switched to the background. You can use foreground services to notify + * the operating system and ensure that the app can still collect video when it switches to the + * background. */ LOCAL_VIDEO_STREAM_REASON_DEVICE_INTERRUPT = 14, /** - * 15: (Android only) The device may need to be shut down and restarted to restore camera function, - * or there may be a persistent hardware problem. + * 15: (Android only) The video capture device encounters an error. Prompt the user to close and + * restart the camera to restore functionality. If this operation does not solve the problem, check + * if the camera has a hardware failure. */ LOCAL_VIDEO_STREAM_REASON_DEVICE_FATAL_ERROR = 15, /** @@ -2992,101 +3608,129 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_SYSTEM_PRESSURE = 101, /** - * 11: (macOS only) The shared window is minimized when you call `startScreenCaptureByWindowId` - * to share a window. The SDK cannot share a minimized window. You can cancel the minimization - * of this window at the application layer, for example by maximizing this window. 
+ * 11: (macOS and Windows only) The shared window is minimized when you call the + * `startScreenCaptureByWindowId` method to share a window. The SDK cannot share a minimized window. + * Please prompt the user to unminimize the shared window. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_MINIMIZED = 11, /** - * 12: (macOS and Windows only) The error code indicates that a window shared by the window ID - * has been closed or a full-screen window shared by the window ID has exited full-screen mode. - * After exiting full-screen mode, remote users cannot see the shared window. To prevent remote - * users from seeing a black screen, Agora recommends that you immediately stop screen sharing. - * - * Common scenarios for reporting this error code: - * - When the local user closes the shared window, the SDK reports this error code. - * - The local user shows some slides in full-screen mode first, and then shares the windows of - * the slides. After the user exits full-screen mode, the SDK reports this error code. - * - The local user watches a web video or reads a web document in full-screen mode first, and - * then shares the window of the web video or document. After the user exits full-screen mode, - * the SDK reports this error code. + * 12: (macOS and Windows only) The error code indicates that a window shared by the window ID has + * been closed or a full-screen window shared by the window ID has exited full-screen mode. After + * exiting full-screen mode, remote users cannot see the shared window. To prevent remote users from + * seeing a black screen, Agora recommends that you immediately stop screen sharing. + * Common scenarios reporting this error code: + * - The local user closes the shared window. + * - The local user shows some slides in full-screen mode first, and then shares the windows of the + * slides. After the user exits full-screen mode, the SDK reports this error code. 
+ * - The local user watches a web video or reads a web document in full-screen mode first, and then + * shares the window of the web video or document. After the user exits full-screen mode, the SDK + * reports this error code. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_CLOSED = 12, - /** 13: The local screen capture window is occluded. */ + /** + * 13: (Windows only) The window being shared is overlapped by another window, so the overlapped + * area is blacked out by the SDK during window sharing. + */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_OCCLUDED = 13, /** 20: The local screen capture window is not supported. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_NOT_SUPPORTED = 20, - /** 21: The screen capture fails. */ + /** + * 21: (Windows and Android only) The currently captured window has no data. + */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_FAILURE = 21, - /** 22: No permision to capture screen. */ + /** + * 22: (Windows and macOS only) No permission for screen capture. + */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_NO_PERMISSION = 22, /** - * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the screen - * sharing process, resulting in performance degradation. However, the screen sharing process itself is - * functioning normally. + * 24: (Windows only) An unexpected error occurred during screen sharing (possibly due to window + * blocking failure), resulting in decreased performance, but the screen sharing process itself was + * not affected. + * @note During screen sharing, if blocking a specific window fails due to device driver issues, the + * SDK will report this event and automatically fall back to sharing the entire screen. If your use + * case requires masking specific windows to protect privacy, we recommend listening for this event + * and implementing additional privacy protection mechanisms when it is triggered. 
*/ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_AUTO_FALLBACK = 24, - /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the desktop. */ + /** + * 25: (Windows only) The window for the current screen capture is hidden and not visible on the + * current screen. + */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_HIDDEN = 25, - /** 26: (Windows only) The local screen capture window is recovered from its hidden state. */ + /** + * 26: (Windows only) The window for screen capture has been restored from hidden state. + */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 26, - /** 27: (Windows and macOS only) The window is recovered from miniminzed */ + /** + * 27: (macOS and Windows only) The window for screen capture has been restored from the minimized + * state. + */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_MINIMIZED = 27, - /** - * 28: The screen capture paused. - * - * Common scenarios for reporting this error code: - * - When the desktop switch to the secure desktop such as UAC dialog or the Winlogon desktop on - * Windows platform, the SDK reports this error code. + /** + * 28: (Windows only) Screen capture has been paused. Common scenarios reporting this error code: + * The current screen may have been switched to a secure desktop, such as a UAC dialog box or + * Winlogon desktop. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_PAUSED = 28, - /** 29: The screen capture is resumed. */ + /** + * 29: (Windows only) Screen capture has resumed from paused state. + */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_RESUMED = 29, - /** 30: The shared display has been disconnected */ + /** + * 30: (Windows and macOS only) The displayer used for screen capture is disconnected. The current + * screen sharing has been paused. Prompt the user to restart the screen sharing. 
+ */
   LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_DISPLAY_DISCONNECTED = 30,
-
+  /* 31: (HMOS only) ScreenCapture stopped by user */
+  LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_STOPPED_BY_USER = 31,
+  /* 32: (HMOS only) ScreenCapture interrupted by other screen capture */
+  LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_INTERRUPTED_BY_OTHER = 32,
+  /* 33: (HMOS only) ScreenCapture stopped by SIM call */
+  LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_STOPPED_BY_CALL = 33,
+  /** 34: (Windows only) Some windows of the exclude window list failed to be excluded from the screen capture. */
+  LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_EXCLUDE_WINDOW_FAILED = 34,
 };

 /**
- * Remote audio states.
+ * @brief Remote audio states.
  */
-enum REMOTE_AUDIO_STATE
-{
+enum REMOTE_AUDIO_STATE {
   /**
-   * 0: The remote audio is in the default state. The SDK reports this state in the case of
-   * `REMOTE_AUDIO_REASON_LOCAL_MUTED(3)`, `REMOTE_AUDIO_REASON_REMOTE_MUTED(5)`, or
-   * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE(7)`.
+   * 0: The remote audio is in the initial state. The SDK reports this state in the case of
+   * `REMOTE_AUDIO_REASON_LOCAL_MUTED`, `REMOTE_AUDIO_REASON_REMOTE_MUTED` or
+   * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE`.
    */
-  REMOTE_AUDIO_STATE_STOPPED = 0,  // Default state, audio is started or remote user disabled/muted audio stream
+  REMOTE_AUDIO_STATE_STOPPED =
+      0,  // Default state, audio is started or remote user disabled/muted audio stream
   /**
    * 1: The first remote audio packet is received.
    */
   REMOTE_AUDIO_STATE_STARTING = 1,  // The first audio frame packet has been received
   /**
-   * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the case of
-   * `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or
-   * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6)`.
+   * 2: The remote audio stream is decoded and plays normally.
The SDK reports this state in the case
+   * of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED` or
+   * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED`.
    */
-  REMOTE_AUDIO_STATE_DECODING = 2,  // The first remote audio frame has been decoded or fronzen state ends
+  REMOTE_AUDIO_STATE_DECODING =
+      2,  // The first remote audio frame has been decoded or frozen state ends
   /**
    * 3: The remote audio is frozen. The SDK reports this state in the case of
-   * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION(1)`.
+   * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION`.
    */
-  REMOTE_AUDIO_STATE_FROZEN = 3,  // Remote audio is frozen, probably due to network issue
+  REMOTE_AUDIO_STATE_FROZEN = 3,  // Remote audio is frozen, probably due to network issue
   /**
    * 4: The remote audio fails to start. The SDK reports this state in the case of
-   * `REMOTE_AUDIO_REASON_INTERNAL(0)`.
+   * `REMOTE_AUDIO_REASON_INTERNAL`.
    */
-  REMOTE_AUDIO_STATE_FAILED = 4,  // Remote audio play failed
+  REMOTE_AUDIO_STATE_FAILED = 4,  // Remote audio play failed
 };

 /**
- * Reasons for the remote audio state change.
+ * @brief The reason for the remote audio state change.
  */
-enum REMOTE_AUDIO_STATE_REASON
-{
+enum REMOTE_AUDIO_STATE_REASON {
   /**
-   * 0: The SDK reports this reason when the video state changes.
+   * 0: The SDK reports this reason when the audio state changes.
    */
   REMOTE_AUDIO_REASON_INTERNAL = 0,
   /**
@@ -3098,23 +3742,19 @@ enum REMOTE_AUDIO_STATE_REASON
    */
   REMOTE_AUDIO_REASON_NETWORK_RECOVERY = 2,
   /**
-   * 3: The local user stops receiving the remote audio stream or
-   * disables the audio module.
+   * 3: The local user stops receiving the remote audio stream or disables the audio module.
    */
   REMOTE_AUDIO_REASON_LOCAL_MUTED = 3,
   /**
-   * 4: The local user resumes receiving the remote audio stream or
-   * enables the audio module.
+   * 4: The local user resumes receiving the remote audio stream or enables the audio module.
*/ REMOTE_AUDIO_REASON_LOCAL_UNMUTED = 4, /** - * 5: The remote user stops sending the audio stream or disables the - * audio module. + * 5: The remote user stops sending the audio stream or disables the audio module. */ REMOTE_AUDIO_REASON_REMOTE_MUTED = 5, /** - * 6: The remote user resumes sending the audio stream or enables the - * audio module. + * 6: The remote user resumes sending the audio stream or enables the audio module. */ REMOTE_AUDIO_REASON_REMOTE_UNMUTED = 6, /** @@ -3132,13 +3772,13 @@ enum REMOTE_AUDIO_STATE_REASON }; /** - * The state of the remote video. + * @brief The state of the remote video stream. */ enum REMOTE_VIDEO_STATE { /** - * 0: The remote video is in the default state. The SDK reports this state in the case of - * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED (3)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED (5)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK (8)`. + * 0: The remote video is in the initial state. The SDK reports this state in the case of + * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED`, or + * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE`. */ REMOTE_VIDEO_STATE_STOPPED = 0, /** @@ -3146,62 +3786,67 @@ enum REMOTE_VIDEO_STATE { */ REMOTE_VIDEO_STATE_STARTING = 1, /** - * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case of - * `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`, `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`. + * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case + * of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY`, `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED`, + * `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED`, or + * `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY`. 
*/ REMOTE_VIDEO_STATE_DECODING = 2, - /** 3: The remote video is frozen, probably due to - * #REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION (1). + /** + * 3: The remote video is frozen. The SDK reports this state in the case of + * `REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION` or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK`. */ REMOTE_VIDEO_STATE_FROZEN = 3, - /** 4: The remote video fails to start. The SDK reports this state in the case of - * `REMOTE_VIDEO_STATE_REASON_INTERNAL (0)`. + /** + * 4: The remote video fails to start. The SDK reports this state in the case of + * `REMOTE_VIDEO_STATE_REASON_INTERNAL`. */ REMOTE_VIDEO_STATE_FAILED = 4, }; /** - * The reason for the remote video state change. + * @brief The reason for the remote video state change. */ enum REMOTE_VIDEO_STATE_REASON { /** - * 0: The SDK reports this reason when the video state changes. - */ + * 0: The SDK reports this reason when the video state changes. + */ REMOTE_VIDEO_STATE_REASON_INTERNAL = 0, /** - * 1: Network congestion. - */ + * 1: Network congestion. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION = 1, /** - * 2: Network recovery. - */ + * 2: Network is recovered. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY = 2, /** - * 3: The local user stops receiving the remote video stream or disables the video module. - */ + * 3: The local user stops receiving the remote video stream or disables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED = 3, /** - * 4: The local user resumes receiving the remote video stream or enables the video module. - */ + * 4: The local user resumes receiving the remote video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED = 4, /** - * 5: The remote user stops sending the video stream or disables the video module. - */ + * 5: The remote user stops sending the video stream or disables the video module. 
+ */ REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED = 5, /** - * 6: The remote user resumes sending the video stream or enables the video module. - */ + * 6: The remote user resumes sending the video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED = 6, /** - * 7: The remote user leaves the channel. - */ - REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE = 7, - /** 8: The remote audio-and-video stream falls back to the audio-only stream - * due to poor network conditions. + * 7: The remote user leaves the channel. + */ + REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE = 7, + /** + * 8: The remote audio-and-video stream falls back to the audio-only stream due to poor network + * conditions. */ REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK = 8, - /** 9: The remote audio-only stream switches back to the audio-and-video - * stream after the network conditions improve. + /** + * 9: The remote audio-only stream switches back to the audio-and-video stream after the network + * conditions improve. */ REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY = 9, /** (Internal use only) 10: The remote video stream type change to low stream type @@ -3210,11 +3855,13 @@ enum REMOTE_VIDEO_STATE_REASON { /** (Internal use only) 11: The remote video stream type change to high stream type */ REMOTE_VIDEO_STATE_REASON_VIDEO_STREAM_TYPE_CHANGE_TO_HIGH = 11, - /** (iOS only) 12: The app of the remote user is in background. + /** + * 12: (iOS only) The remote user's app has switched to the background. */ REMOTE_VIDEO_STATE_REASON_SDK_IN_BACKGROUND = 12, - /** 13: The remote video stream is not supported by the decoder + /** + * 13: The local video decoder does not support decoding the remote video stream. 
*/ REMOTE_VIDEO_STATE_REASON_CODEC_NOT_SUPPORT = 13, @@ -3248,10 +3895,14 @@ enum REMOTE_USER_STATE { */ struct VideoTrackInfo { VideoTrackInfo() - : isLocal(false), ownerUid(0), trackId(0), channelId(OPTIONAL_NULLPTR) - , codecType(VIDEO_CODEC_H265) - , encodedFrameOnly(false), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY) - , observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} + : isLocal(false), + ownerUid(0), + trackId(0), + channelId(OPTIONAL_NULLPTR), + codecType(VIDEO_CODEC_H265), + encodedFrameOnly(false), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} /** * Whether the video track is local or remote. * - true: The video track is local. @@ -3291,7 +3942,8 @@ struct VideoTrackInfo { }; /** - * The downscale level of the remote video stream . The higher the downscale level, the more the video downscales. + * The downscale level of the remote video stream . The higher the downscale level, the more the + * video downscales. */ enum REMOTE_VIDEO_DOWNSCALE_LEVEL { /** @@ -3317,19 +3969,22 @@ enum REMOTE_VIDEO_DOWNSCALE_LEVEL { }; /** - * The volume information of users. + * @brief The volume information of users. */ struct AudioVolumeInfo { /** - * User ID of the speaker. - * - In the local user's callback, `uid` = 0. - * - In the remote users' callback, `uid` is the user ID of a remote user whose instantaneous - * volume is one of the three highest. + * The user ID. + * - In the local user's callback, `uid` is 0. + * - In the remote users' callback, `uid` is the user ID of a remote user whose instantaneous volume + * is the highest. */ uid_t uid; /** * The volume of the user. The value ranges between 0 (the lowest volume) and 255 (the highest - * volume). If the user calls `startAudioMixing`, the value of volume is the volume after audio + * volume). 
If the local user enables audio capturing and calls `muteLocalAudioStream` and set it as + * `true` to mute, the value of `volume` indicates the volume of locally captured audio signal. If + * the user calls `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)`, + * the value of `volume` indicates the volume after audio * mixing. */ unsigned int volume; // [0,255] @@ -3340,13 +3995,14 @@ struct AudioVolumeInfo { * @note * - The `vad` parameter does not report the voice activity status of remote users. In a remote * user's callback, the value of `vad` is always 1. - * - To use this parameter, you must set `reportVad` to true when calling `enableAudioVolumeIndication`. + * - To use this parameter, you must set `reportVad` to `true` when calling + * `enableAudioVolumeIndication`. */ unsigned int vad; /** - * The voice pitch (Hz) of the local user. The value ranges between 0.0 and 4000.0. - * @note The `voicePitch` parameter does not report the voice pitch of remote users. In the - * remote users' callback, the value of `voicePitch` is always 0.0. + * The voice pitch of the local user. The value ranges between 0.0 and 4000.0. + * @note The `voicePitch` parameter does not report the voice pitch of remote users. In the remote + * users' callback, the value of `voicePitch` is always 0.0. */ double voicePitch; @@ -3354,10 +4010,13 @@ struct AudioVolumeInfo { }; /** - * The audio device information. + * @brief The audio device information. + * + * @note This class is for Android only. + * */ struct DeviceInfo { - /* + /** * Whether the audio device supports ultra-low-latency capture and playback: * - `true`: The device supports ultra-low-latency capture and playback. * - `false`: The device does not support ultra-low-latency capture and playback. @@ -3374,13 +4033,13 @@ class IPacketObserver { public: virtual ~IPacketObserver() {} /** - * The definition of the Packet struct. + * @brief Configurations for the `Packet` instance. 
*/ struct Packet { /** * The buffer address of the sent or received data. - * @note Agora recommends setting `buffer` to a value larger than 2048 bytes. Otherwise, you - * may encounter undefined behaviors (such as crashes). + * @note Agora recommends setting `buffer` to a value larger than 2048 bytes. Otherwise, you may + * encounter undefined behaviors (such as crashes). */ const unsigned char* buffer; /** @@ -3391,62 +4050,70 @@ class IPacketObserver { Packet() : buffer(OPTIONAL_NULLPTR), size(0) {} }; /** - * Occurs when the SDK is ready to send the audio packet. - * @param packet The audio packet to be sent: Packet. - * @return Whether to send the audio packet: - * - true: Send the packet. - * - false: Do not send the packet, in which case the audio packet will be discarded. + * @brief Occurs when the local user sends an audio packet. + * + * @param packet The sent audio packet, see `Packet`. + * + * @return + * - `true`: The audio packet is sent successfully. + * - `false`: The audio packet is discarded. */ virtual bool onSendAudioPacket(Packet& packet) = 0; /** - * Occurs when the SDK is ready to send the video packet. - * @param packet The video packet to be sent: Packet. - * @return Whether to send the video packet: - * - true: Send the packet. - * - false: Do not send the packet, in which case the audio packet will be discarded. + * @brief Occurs when the local user sends a video packet. + * + * @param packet The sent video packet, see `Packet`. + * + * @return + * - `true`: The video packet is sent successfully. + * - `false`: The video packet is discarded. */ virtual bool onSendVideoPacket(Packet& packet) = 0; /** - * Occurs when the audio packet is received. - * @param packet The received audio packet: Packet. - * @return Whether to process the audio packet: - * - true: Process the packet. - * - false: Do not process the packet, in which case the audio packet will be discarded. + * @brief Occurs when the local user receives an audio packet. 
+ * + * @param packet The received audio packet, see `Packet`. + * + * @return + * - `true`: The audio packet is received successfully. + * - `false`: The audio packet is discarded. */ virtual bool onReceiveAudioPacket(Packet& packet) = 0; /** - * Occurs when the video packet is received. - * @param packet The received video packet: Packet. - * @return Whether to process the audio packet: - * - true: Process the packet. - * - false: Do not process the packet, in which case the video packet will be discarded. + * @brief Occurs when the local user receives a video packet. + * + * @param packet The received video packet, see `Packet`. + * + * @return + * - `true`: The video packet is received successfully. + * - `false`: The video packet is discarded. */ virtual bool onReceiveVideoPacket(Packet& packet) = 0; }; /** - * Audio sample rate types. + * @brief The audio sampling rate of the stream to be pushed to the CDN. */ enum AUDIO_SAMPLE_RATE_TYPE { /** - * 32000: 32 KHz. + * 32000: 32 kHz */ AUDIO_SAMPLE_RATE_32000 = 32000, /** - * 44100: 44.1 KHz. + * 44100: 44.1 kHz */ AUDIO_SAMPLE_RATE_44100 = 44100, /** - * 48000: 48 KHz. + * 48000: (Default) 48 kHz */ AUDIO_SAMPLE_RATE_48000 = 48000, }; /** - * The codec type of the output video. + * @brief The codec type of the output video. */ enum VIDEO_CODEC_TYPE_FOR_STREAM { /** - * 1: H.264. + * 1: (Default) H.264. */ VIDEO_CODEC_H264_FOR_STREAM = 1, /** @@ -3456,30 +4123,31 @@ enum VIDEO_CODEC_TYPE_FOR_STREAM { }; /** - * Video codec profile types. + * @brief Video codec profile types. */ enum VIDEO_CODEC_PROFILE_TYPE { /** - * 66: Baseline video codec profile. Generally used in video calls on mobile phones. + * 66: Baseline video codec profile; generally used for video calls on mobile phones. */ VIDEO_CODEC_PROFILE_BASELINE = 66, /** - * 77: Main video codec profile. Generally used in mainstream electronics, such as MP4 players, portable video players, PSP, and iPads. 
+ * 77: Main video codec profile; generally used in mainstream electronics such as MP4 players, + * portable video players, PSP, and iPads. */ VIDEO_CODEC_PROFILE_MAIN = 77, /** - * 100: High video codec profile. Generally used in high-resolution broadcasts or television. + * 100: (Default) High video codec profile; generally used in high-resolution live streaming or + * television. */ VIDEO_CODEC_PROFILE_HIGH = 100, }; - /** - * Self-defined audio codec profile. + * @brief Self-defined audio codec profile. */ enum AUDIO_CODEC_PROFILE_TYPE { /** - * 0: LC-AAC. + * 0: (Default) LC-AAC. */ AUDIO_CODEC_PROFILE_LC_AAC = 0, /** @@ -3487,16 +4155,15 @@ enum AUDIO_CODEC_PROFILE_TYPE { */ AUDIO_CODEC_PROFILE_HE_AAC = 1, /** - * 2: HE-AAC v2. + * 2: HE-AAC v2. */ AUDIO_CODEC_PROFILE_HE_AAC_V2 = 2, }; /** - * Local audio statistics. + * @brief Local audio statistics. */ -struct LocalAudioStats -{ +struct LocalAudioStats { /** * The number of audio channels. */ @@ -3514,11 +4181,12 @@ struct LocalAudioStats */ int internalCodec; /** - * The packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies. + * The packet loss rate (%) from the local client to the Agora server before applying the + * anti-packet loss strategies. */ unsigned short txPacketLossRate; /** - * The audio delay of the device, contains record and playout delay + * The audio device module delay (ms) when playing or recording audio. */ int audioDeviceDelay; /** @@ -3526,59 +4194,65 @@ struct LocalAudioStats */ int audioPlayoutDelay; /** - * The signal delay estimated from audio in-ear monitoring (ms). + * The ear monitor delay (ms), which is the delay from microphone input to headphone output. */ int earMonitorDelay; /** - * The signal delay estimated during the AEC process from nearin and farin (ms). 
+ * Acoustic echo cancellation (AEC) module estimated delay (ms), which is the signal delay between + * when audio is played locally before being locally captured. */ int aecEstimatedDelay; }; - /** - * States of the Media Push. + * @brief States of the Media Push. */ enum RTMP_STREAM_PUBLISH_STATE { /** - * 0: The Media Push has not started or has ended. This state is also triggered after you remove a RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`. + * 0: The Media Push has not started or has ended. */ RTMP_STREAM_PUBLISH_STATE_IDLE = 0, /** - * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is triggered after you call the `addPublishStreamUrl` method. + * 1: The streaming server and CDN server are being connected. */ RTMP_STREAM_PUBLISH_STATE_CONNECTING = 1, /** - * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS streaming and returns this state. + * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS + * streaming and returns this state. */ RTMP_STREAM_PUBLISH_STATE_RUNNING = 2, /** - * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state. - * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2) returns. - * - If the streaming does not resume within 60 seconds or server errors occur, #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling the `removePublishStreamUrl` and `addPublishStreamUrl` methods. + * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming + * is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state. + * - If the SDK successfully resumes the streaming, RTMP_STREAM_PUBLISH_STATE_RUNNING (2) returns. 
+ * - If the streaming does not resume within 60 seconds or server errors occur, + * RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. If you feel that 60 seconds is too long, you can + * also actively try to reconnect. */ RTMP_STREAM_PUBLISH_STATE_RECOVERING = 3, /** - * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS streaming again. + * 4: The RTMP or RTMPS streaming fails. After a failure, you can troubleshoot the cause of the + * error through the returned error code. */ RTMP_STREAM_PUBLISH_STATE_FAILURE = 4, /** - * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is triggered after you call the `removePublishStreamUrl` method. + * 5: The SDK is disconnecting from the Agora streaming server and CDN. When you call + * `stopRtmpStream` to stop the Media Push normally, the SDK reports the Media Push state as + * `RTMP_STREAM_PUBLISH_STATE_DISCONNECTING` and `RTMP_STREAM_PUBLISH_STATE_IDLE` in sequence. */ RTMP_STREAM_PUBLISH_STATE_DISCONNECTING = 5, }; /** - * Error codes of the RTMP or RTMPS streaming. + * @brief Reasons for changes in the status of RTMP or RTMPS streaming. */ enum RTMP_STREAM_PUBLISH_REASON { /** - * 0: The RTMP or RTMPS streaming publishes successfully. + * 0: The RTMP or RTMPS streaming has not started or has ended. */ RTMP_STREAM_PUBLISH_REASON_OK = 0, /** - * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, - * the SDK returns this error. Check whether you set the parameters in the `setLiveTranscoding` method properly. + * 1: Invalid argument used. Check the parameter setting. 
*/ RTMP_STREAM_PUBLISH_REASON_INVALID_ARGUMENT = 1, /** @@ -3586,11 +4260,11 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_ENCRYPTED_STREAM_NOT_ALLOWED = 2, /** - * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish the streaming again. + * 3: Timeout for the RTMP or RTMPS streaming. */ RTMP_STREAM_PUBLISH_REASON_CONNECTION_TIMEOUT = 3, /** - * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to publish the streaming again. + * 4: An error occurs in Agora's streaming server. */ RTMP_STREAM_PUBLISH_REASON_INTERNAL_SERVER_ERROR = 4, /** @@ -3606,7 +4280,8 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_REACH_LIMIT = 7, /** - * 8: The host manipulates other hosts' URLs. Check your app logic. + * 8: The host manipulates other hosts' URLs. For example, the host updates or stops other hosts' + * streams. Check your app logic. */ RTMP_STREAM_PUBLISH_REASON_NOT_AUTHORIZED = 8, /** @@ -3614,17 +4289,22 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_STREAM_NOT_FOUND = 9, /** - * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format is correct. + * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format + * is correct. */ RTMP_STREAM_PUBLISH_REASON_FORMAT_NOT_SUPPORTED = 10, /** - * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your application code logic. + * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your + * application code logic. 
*/ - RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = + 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h /** - * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the transcoding configuration in a scenario where there is streaming without transcoding. Check your application code logic. + * 13: The `updateRtmpTranscoding` method is called to update the transcoding configuration in a + * scenario where there is streaming without transcoding. Check your application code logic. */ - RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = + 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h /** * 14: Errors occurred in the host's network. */ @@ -3632,23 +4312,30 @@ enum RTMP_STREAM_PUBLISH_REASON { /** * 15: Your App ID does not have permission to use the CDN live streaming function. */ - RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h - /** invalid privilege. */ + RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = + 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h + /** + * 16: Your project does not have permission to use streaming services. + */ RTMP_STREAM_PUBLISH_REASON_INVALID_PRIVILEGE = 16, /** - * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop streaming, the SDK returns this value. + * 100: The streaming has been stopped normally. After you stop the Media Push, the SDK returns this + * value. */ RTMP_STREAM_UNPUBLISH_REASON_OK = 100, }; -/** Events during the RTMP or RTMPS streaming. */ +/** + * @brief Events during the Media Push. 
+ */ enum RTMP_STREAMING_EVENT { /** - * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS stream. + * 1: An error occurs when you add a background image or a watermark image in the Media Push. */ RTMP_STREAMING_EVENT_FAILED_LOAD_IMAGE = 1, /** - * 2: The streaming URL is already being used for CDN live streaming. If you want to start new streaming, use a new streaming URL. + * 2: The streaming URL is already being used for Media Push. If you want to start new streaming, + * use a new streaming URL. */ RTMP_STREAMING_EVENT_URL_ALREADY_IN_USE = 2, /** @@ -3656,45 +4343,52 @@ enum RTMP_STREAMING_EVENT { */ RTMP_STREAMING_EVENT_ADVANCED_FEATURE_NOT_SUPPORT = 3, /** - * 4: Client request too frequently. + * 4: Reserved. */ RTMP_STREAMING_EVENT_REQUEST_TOO_OFTEN = 4, }; /** - * Image properties. + * @brief Image properties. + * + * @details + * This class sets the properties of the watermark and background images in the live video. + * */ typedef struct RtcImage { /** - *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter is 1024 bytes. + * The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter + * is 1024 bytes. */ const char* url; /** - * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The x-coordinate (px) of the image on the video frame (taking the upper left corner of the video + * frame as the origin). */ int x; /** - * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The y-coordinate (px) of the image on the video frame (taking the upper left corner of the video + * frame as the origin). */ int y; /** - * The width (pixel) of the image on the video frame. + * The width (px) of the image on the video frame. */ int width; /** - * The height (pixel) of the image on the video frame. 
+ * The height (px) of the image on the video frame. */ int height; /** - * The layer index of the watermark or background image. When you use the watermark array to add - * a watermark or multiple watermarks, you must pass a value to `zOrder` in the range [1,255]; - * otherwise, the SDK reports an error. In other cases, zOrder can optionally be passed in the + * The layer index of the watermark or background image. When you use the watermark array to add a + * watermark or multiple watermarks, you must pass a value to `zOrder` in the range [1,255]; + * otherwise, the SDK reports an error. In other cases, `zOrder` can optionally be passed in the * range [0,255], with 0 being the default value. 0 means the bottom layer and 255 means the top * layer. */ int zOrder; - /** The transparency level of the image. The value ranges between 0.0 and 1.0: - * + /** + * The transparency of the watermark or background image. The range of the value is [0.0,1.0]: * - 0.0: Completely transparent. * - 1.0: (Default) Opaque. */ @@ -3703,82 +4397,90 @@ typedef struct RtcImage { RtcImage() : url(OPTIONAL_NULLPTR), x(0), y(0), width(0), height(0), zOrder(0), alpha(1.0) {} } RtcImage; /** - * The configuration for advanced features of the RTMP or RTMPS streaming with transcoding. + * @brief The configuration for advanced features of the RTMP or RTMPS streaming with transcoding. + * + * @details + * If you want to enable the advanced features of streaming with transcoding, contact + * `support@agora.io`. * - * If you want to enable the advanced features of streaming with transcoding, contact support@agora.io. 
 */
 struct LiveStreamAdvancedFeature {
   LiveStreamAdvancedFeature() : featureName(OPTIONAL_NULLPTR), opened(false) {}
-  LiveStreamAdvancedFeature(const char* feat_name, bool open) : featureName(feat_name), opened(open) {}
+  LiveStreamAdvancedFeature(const char* feat_name, bool open)
+      : featureName(feat_name), opened(open) {}

   /** The advanced feature for high-quality video with a lower bitrate. */
   // static const char* LBHQ = "lbhq";

   /** The advanced feature for the optimized video encoder. */
   // static const char* VEO = "veo";

   /**
-   * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized video encoder).
+   * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized
+   * video encoder).
    */
   const char* featureName;

   /**
    * Whether to enable the advanced features of streaming with transcoding:
-   * - `true`: Enable the advanced feature.
-   * - `false`: (Default) Disable the advanced feature.
+   * - `true`: Enable the advanced features.
+   * - `false`: (Default) Do not enable the advanced features.
    */
   bool opened;
-} ;
+};

 /**
- * Connection state types.
+ * @brief Connection states.
  */
-enum CONNECTION_STATE_TYPE
-{
+enum CONNECTION_STATE_TYPE {
   /**
-   * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of the following phases:
-   * - The initial state before calling the `joinChannel` method.
-   * - The app calls the `leaveChannel` method.
+   * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of
+   * the following phases:
+   * - The initial state before calling the `joinChannel(const char* token, const char* channelId,
+   * uid_t uid, const ChannelMediaOptions& options)` method.
+   * - The app calls the `leaveChannel()` method.
    */
   CONNECTION_STATE_DISCONNECTED = 1,
   /**
   * 2: The SDK is connecting to the Agora edge server.
This state indicates that the SDK is - * establishing a connection with the specified channel after the app calls `joinChannel`. - * - If the SDK successfully joins the channel, it triggers the `onConnectionStateChanged` - * callback and the connection state switches to `CONNECTION_STATE_CONNECTED`. + * establishing a connection with the specified channel after the app calls `joinChannel(const char* + * token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)`. + * - If the SDK successfully joins the channel, it triggers the `onConnectionStateChanged` callback + * and the connection state switches to CONNECTION_STATE_CONNECTED. * - After the connection is established, the SDK also initializes the media and triggers * `onJoinChannelSuccess` when everything is ready. */ CONNECTION_STATE_CONNECTING = 2, /** - * 3: The SDK is connected to the Agora edge server. This state also indicates that the user - * has joined a channel and can now publish or subscribe to a media stream in the channel. - * If the connection to the Agora edge server is lost because, for example, the network is down - * or switched, the SDK automatically tries to reconnect and triggers `onConnectionStateChanged` - * that indicates the connection state switches to `CONNECTION_STATE_RECONNECTING`. + * 3: The SDK is connected to the Agora edge server. This state also indicates that the user has + * joined a channel and can now publish or subscribe to a media stream in the channel. If the + * connection to the channel is lost because, for example, if the network is down or switched, the + * SDK automatically tries to reconnect and triggers `onConnectionStateChanged` callback, notifying + * that the current network state becomes CONNECTION_STATE_RECONNECTING. */ CONNECTION_STATE_CONNECTED = 3, /** - * 4: The SDK keeps reconnecting to the Agora edge server. The SDK keeps rejoining the channel - * after being disconnected from a joined channel because of network issues. 
- * - If the SDK cannot rejoin the channel within 10 seconds, it triggers `onConnectionLost`, - * stays in the `CONNECTION_STATE_RECONNECTING` state, and keeps rejoining the channel. - * - If the SDK fails to rejoin the channel 20 minutes after being disconnected from the Agora - * edge server, the SDK triggers the `onConnectionStateChanged` callback, switches to the - * `CONNECTION_STATE_FAILED` state, and stops rejoining the channel. + * 4: The SDK keeps reconnecting to the Agora edge server. The SDK keeps rejoining the channel after + * being disconnected from a joined channel because of network issues. + * - If the SDK cannot rejoin the channel within 10 seconds, it triggers `onConnectionLost`, stays + * in the CONNECTION_STATE_RECONNECTING state, and keeps rejoining the channel. + * - If the SDK fails to rejoin the channel 20 minutes after being disconnected from the Agora edge + * server, the SDK triggers the `onConnectionStateChanged` callback, switches to the + * CONNECTION_STATE_FAILED state, and stops rejoining the channel. */ CONNECTION_STATE_RECONNECTING = 4, /** * 5: The SDK fails to connect to the Agora edge server or join the channel. This state indicates - * that the SDK stops trying to rejoin the channel. You must call `leaveChannel` to leave the + * that the SDK stops trying to rejoin the channel. You must call `leaveChannel()` to leave the * channel. - * - You can call `joinChannel` to rejoin the channel. - * - If the SDK is banned from joining the channel by the Agora edge server through the RESTful - * API, the SDK triggers the `onConnectionStateChanged` callback. + * - You can call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to rejoin the channel. + * - If the SDK is banned from joining the channel by the Agora edge server through the RESTful API, + * the SDK triggers the `onConnectionStateChanged` callback. 
*/ CONNECTION_STATE_FAILED = 5, }; /** - * Transcoding configurations of each host. + * @brief Transcoding configurations of each host. */ struct TranscodingUser { /** @@ -3786,11 +4488,15 @@ struct TranscodingUser { */ uid_t uid; /** - * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, width], where width is the `width` set in `LiveTranscoding`. + * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, width], where width is the + * `width` set in `LiveTranscoding`. */ int x; /** - * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, height], where height is the `height` set in `LiveTranscoding`. + * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, height], where height is the + * `height` set in `LiveTranscoding`. */ int y; /** @@ -3805,133 +4511,163 @@ struct TranscodingUser { * The layer index number of the host's video. The value range is [0, 100]. * - 0: (Default) The host's video is the bottom layer. * - 100: The host's video is the top layer. - * - * If the value is beyond this range, the SDK reports the error code `ERR_INVALID_ARGUMENT`. - */ + * @note + * - If the value is less than 0 or greater than 100, `ERR_INVALID_ARGUMENT` error is returned. + * - Setting zOrder to 0 is supported. + */ int zOrder; /** - * The transparency of the host's video. The value range is [0.0, 1.0]. + * The transparency of the host's video. The value range is [0.0,1.0]. * - 0.0: Completely transparent. * - 1.0: (Default) Opaque. */ double alpha; /** - * The audio channel used by the host's audio in the output audio. 
The default value is 0, and the value range is [0, 5]. - * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on the upstream of the host. - * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `4`: The host's audio uses the BL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the host's audio. - * + * The audio channel used by the host's audio in the output audio. The default value is 0, and the + * value range is [0, 5]. + * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on the + * upstream of the host. + * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `4`: The host's audio uses the BL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `5`: The host's audio uses the BR audio channel. 
If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `0xFF` or a value greater than `5`: The host's audio is muted, and the Agora server removes the + * host's audio. * @note If the value is not `0`, a special player is required. */ int audioChannel; TranscodingUser() - : uid(0), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - audioChannel(0) {} + : uid(0), x(0), y(0), width(0), height(0), zOrder(0), alpha(1.0), audioChannel(0) {} }; /** - * Transcoding configurations for Media Push. + * @brief Transcoding configurations for Media Push. */ struct LiveTranscoding { - /** The width of the video in pixels. The default value is 360. - * - When pushing video streams to the CDN, the value range of `width` is [64,1920]. - * If the value is less than 64, Agora server automatically adjusts it to 64; if the - * value is greater than 1920, Agora server automatically adjusts it to 1920. + /** + * The width of the video in pixels. The default value is 360. + * - When pushing video streams to the CDN, the value range of `width` is [64,1920]. If the value is + * less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1920, + * Agora server automatically adjusts it to 1920. * - When pushing audio streams to the CDN, set `width` and `height` as 0. */ int width; - /** The height of the video in pixels. The default value is 640. - * - When pushing video streams to the CDN, the value range of `height` is [64,1080]. - * If the value is less than 64, Agora server automatically adjusts it to 64; if the - * value is greater than 1080, Agora server automatically adjusts it to 1080. + /** + * The height of the video in pixels. The default value is 640. + * - When pushing video streams to the CDN, the value range of` height` is [64,1080]. 
If the value + * is less than 64, Agora server automatically adjusts it to 64; if the value is greater than 1080, + * Agora server automatically adjusts it to 1080. * - When pushing audio streams to the CDN, set `width` and `height` as 0. */ int height; - /** Bitrate of the CDN live output video stream. The default value is 400 Kbps. - - Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper range, the SDK automatically adapts it to a value within the range. - */ + /** + * The encoding bitrate (Kbps) of the video. This parameter does not need to be set; keeping the + * default value `STANDARD_BITRATE` is sufficient. The SDK automatically matches the most suitable + * bitrate based on the video resolution and frame rate you have set. For the correspondence between + * video resolution and frame rate, see `Video profile`. + */ int videoBitrate; - /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15 fps, and the value range is (0,30]. - - @note The Agora server adjusts any value over 30 to 30. - */ + /** + * Frame rate (fps) of the output video stream set for Media Push. The default value is 15. The + * value range is (0,30]. + * @note The Agora server adjusts any value over 30 to 30. + */ int videoFramerate; - /** **DEPRECATED** Latency mode: - - - true: Low latency with unassured quality. - - false: (Default) High latency with assured quality. + /** + * Deprecated + * This member is deprecated. + * Latency mode: + * - `true`: Low latency with unassured quality. + * - `false`: (Default) High latency with assured quality. */ bool lowLatency; - /** Video GOP in frames. The default value is 30 fps. + /** + * GOP (Group of Pictures) in fps of the video frames for Media Push. The default value is 30. */ int videoGop; - /** Self-defined video codec profile: #VIDEO_CODEC_PROFILE_TYPE. - - @note If you set this parameter to other values, Agora adjusts it to the default value of 100. 
- */ + /** + * Video codec profile type for Media Push. Set it as 66, 77, or 100 (default). See + * `VIDEO_CODEC_PROFILE_TYPE` for details. + * @note If you set this parameter to any other value, Agora adjusts it to the default value. + */ VIDEO_CODEC_PROFILE_TYPE videoCodecProfile; - /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example, 0xFFB6C1 (light pink). The default value is 0x000000 (black). + /** + * The background color in RGB hex value. Value only. Do not include a preceeding #. For example, + * 0xFFB6C1 (light pink). The default value is 0x000000 (black). */ unsigned int backgroundColor; - /** Video codec profile types for Media Push. See VIDEO_CODEC_TYPE_FOR_STREAM. */ + /** + * Video codec profile types for Media Push. See `VIDEO_CODEC_TYPE_FOR_STREAM`. + */ VIDEO_CODEC_TYPE_FOR_STREAM videoCodecType; - /** The number of users in the live interactive streaming. - * The value range is [0, 17]. + /** + * The number of users in the Media Push. The value range is [0,17]. */ unsigned int userCount; - /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 transcoding users in a Media Push channel. See `TranscodingUser`. + /** + * Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 + * transcoding users in a Media Push channel. See `TranscodingUser`. */ TranscodingUser* transcodingUsers; - /** Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to the CDN live client. Maximum length: 4096 Bytes. - - For more information on SEI frame, see [SEI-related questions](https://docs.agora.io/en/faq/sei). + /** + * Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to + * the CDN live client. Maximum length: 4096 bytes. For more information on SEI, see SEI-related + * questions. 
*/ const char* transcodingExtraInfo; - /** **DEPRECATED** The metadata sent to the CDN live client. + /** + * Deprecated + * Obsolete and not recommended for use. + * The metadata sent to the CDN client. */ const char* metadata; - /** The watermark on the live video. The image format needs to be PNG. See `RtcImage`. - - You can add one watermark, or add multiple watermarks using an array. This parameter is used with `watermarkCount`. - */ + /** + * The watermark on the live video. The image format needs to be PNG. See `RtcImage`. + * You can add one watermark, or add multiple watermarks using an array. This parameter is used with + * `watermarkCount`. + */ RtcImage* watermark; /** - * The number of watermarks on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with `watermark`. + * The number of watermarks on the live video. The total number of watermarks and background images + * can range from 0 to 10. This parameter is used with `watermark`. */ unsigned int watermarkCount; - /** The number of background images on the live video. The image format needs to be PNG. See `RtcImage`. - * - * You can add a background image or use an array to add multiple background images. This parameter is used with `backgroundImageCount`. + /** + * The number of background images on the live video. The image format needs to be PNG. See + * `RtcImage`. + * You can add a background image or use an array to add multiple background images. This parameter + * is used with `backgroundImageCount`. */ RtcImage* backgroundImage; /** - * The number of background images on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with `backgroundImage`. + * The number of background images on the live video. The total number of watermarks and background + * images can range from 0 to 10. This parameter is used with `backgroundImage`. 
*/ unsigned int backgroundImageCount; - /** The audio sampling rate (Hz) of the output media stream. See #AUDIO_SAMPLE_RATE_TYPE. + /** + * The audio sampling rate (Hz) of the output media stream. See `AUDIO_SAMPLE_RATE_TYPE`. */ AUDIO_SAMPLE_RATE_TYPE audioSampleRate; - /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the highest value is 128. + /** + * Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the + * highest value is 128. */ int audioBitrate; - /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) audio channels. Special players are required if you choose 3, 4, or 5. + /** + * The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) + * audio channels. Special players are required if you choose 3, 4, or 5. * - 1: (Default) Mono. * - 2: Stereo. * - 3: Three audio channels. @@ -3939,14 +4675,18 @@ struct LiveTranscoding { * - 5: Five audio channels. */ int audioChannels; - /** Audio codec profile type for Media Push. See #AUDIO_CODEC_PROFILE_TYPE. + /** + * Audio codec profile type for Media Push. See `AUDIO_CODEC_PROFILE_TYPE`. */ AUDIO_CODEC_PROFILE_TYPE audioCodecProfile; - /** Advanced features of the RTMP or RTMPS streaming with transcoding. See LiveStreamAdvancedFeature. + /** + * Advanced features of the Media Push with transcoding. See `LiveStreamAdvancedFeature`. */ LiveStreamAdvancedFeature* advancedFeatures; - /** The number of enabled advanced features. The default value is 0. */ + /** + * The number of enabled advanced features. The default value is 0. 
+ */ unsigned int advancedFeatureCount; LiveTranscoding() @@ -3959,7 +4699,7 @@ struct LiveTranscoding { videoCodecProfile(VIDEO_CODEC_PROFILE_HIGH), backgroundColor(0x000000), videoCodecType(VIDEO_CODEC_H264_FOR_STREAM), - userCount(0), + userCount(0), transcodingUsers(OPTIONAL_NULLPTR), transcodingExtraInfo(OPTIONAL_NULLPTR), metadata(OPTIONAL_NULLPTR), @@ -3976,76 +4716,84 @@ struct LiveTranscoding { }; /** - * The video streams for the video mixing on the local client. + * @brief The video streams for local video mixing. */ struct TranscodingVideoStream { /** - * The source type of video for the video mixing on the local client. See #VIDEO_SOURCE_TYPE. + * The video source type for local video mixing. See `VIDEO_SOURCE_TYPE`. */ VIDEO_SOURCE_TYPE sourceType; /** - * The ID of the remote user. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `VIDEO_SOURCE_REMOTE`. + * The user ID of the remote user. + * @note Use this parameter only when the source type is `VIDEO_SOURCE_REMOTE` for local video + * mixing. */ uid_t remoteUserUid; /** - * The URL of the image. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `RTC_IMAGE`. + * The file path of local images. + * Examples: + * - Windows: `C:\\Users\\{username}\\Pictures\\image.png` + * @note Use this parameter only when the source type is the image for local video mixing. */ const char* imageUrl; /** - * MediaPlayer id if sourceType is MEDIA_PLAYER_SOURCE. + * (Optional) Media player ID. Use the parameter only when you set `sourceType` to + * `VIDEO_SOURCE_MEDIA_PLAYER`. */ int mediaPlayerId; /** - * The horizontal displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. 
+ * The relative lateral displacement of the top left corner of the video for local video mixing to + * the origin (the top left corner of the canvas). */ int x; /** - * The vertical displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. + * The relative longitudinal displacement of the top left corner of the captured video to the origin + * (the top left corner of the canvas). */ int y; /** - * The width (px) of the video for the video mixing on the local client. + * The width (px) of the video for local video mixing on the canvas. */ int width; /** - * The height (px) of the video for the video mixing on the local client. + * The height (px) of the video for local video mixing on the canvas. */ int height; /** - * The number of the layer to which the video for the video mixing on the local client belongs. The value range is [0,100]. + * The number of the layer to which the video for the local video mixing belongs. The value range is + * [0, 100]. * - 0: (Default) The layer is at the bottom. * - 100: The layer is at the top. */ int zOrder; /** - * The transparency of the video for the video mixing on the local client. The value range is [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is opaque. + * The transparency of the video for local video mixing. The value range is [0.0, 1.0]. 0.0 + * indicates that the video is completely transparent, and 1.0 indicates that it is opaque. */ double alpha; /** - * Whether to mirror the video for the video mixing on the local client. - * - true: Mirroring. - * - false: (Default) Do not mirror. - * @note The paramter only works for videos with the source type `CAMERA`. + * Whether to mirror the video for the local video mixing. + * - `true`: Mirror the video for the local video mixing. + * - `false`: (Default) Do not mirror the video for the local video mixing. 
+ * @note This parameter only takes effect on video source types that are cameras. */ bool mirror; TranscodingVideoStream() - : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), - remoteUserUid(0), - imageUrl(OPTIONAL_NULLPTR), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - mirror(false) {} + : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + remoteUserUid(0), + imageUrl(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + zOrder(0), + alpha(1.0), + mirror(false) {} }; /** - * The configuration of the video mixing on the local client. + * @brief The configuration of the video mixing on the local client. */ struct LocalTranscoderConfiguration { /** @@ -4053,80 +4801,178 @@ struct LocalTranscoderConfiguration { */ unsigned int streamCount; /** - * The video streams for the video mixing on the local client. See TranscodingVideoStream. + * The video streams for local video mixing. See `TranscodingVideoStream`. */ TranscodingVideoStream* videoInputStreams; /** - * The encoding configuration of the mixed video stream after the video mixing on the local client. See VideoEncoderConfiguration. + * The encoding configuration of the mixed video stream after the local video mixing. See + * `VideoEncoderConfiguration`. */ VideoEncoderConfiguration videoOutputConfiguration; /** - * Whether to use the timestamp when the primary camera captures the video frame as the timestamp of the mixed video frame. - * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed video frame. - * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed video frame. Instead, use the timestamp when the mixed video frame is constructed. + * Whether to use the timestamp when the primary camera captures the video frame as the timestamp + * of the mixed video frame. + * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. 
+ * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. Instead, use the timestamp when the mixed video frame is constructed. */ bool syncWithPrimaryCamera; - LocalTranscoderConfiguration() : streamCount(0), videoInputStreams(OPTIONAL_NULLPTR), videoOutputConfiguration(), syncWithPrimaryCamera(true) {} + LocalTranscoderConfiguration() + : streamCount(0), + videoInputStreams(OPTIONAL_NULLPTR), + videoOutputConfiguration(), + syncWithPrimaryCamera(true) {} }; +/** + * @brief The error code of the local video mixing failure. + */ enum VIDEO_TRANSCODER_ERROR { /** - * The video track of the video source is not started. + * 1: The selected video source has not started video capture. You need to create a video track for + * it and start video capture. */ VT_ERR_VIDEO_SOURCE_NOT_READY = 1, /** - * The video source type is not supported. + * 2: The video source type is invalid. You need to re-specify the supported video source type. */ VT_ERR_INVALID_VIDEO_SOURCE_TYPE = 2, /** - * The image url is not correctly of image source. + * 3: The image path is invalid. You need to re-specify the correct image path. */ VT_ERR_INVALID_IMAGE_PATH = 3, /** - * The image format not the type png/jpeg/gif of image source. + * 4: The image format is invalid. Make sure the image format is one of PNG, JPEG, or GIF. */ VT_ERR_UNSUPPORT_IMAGE_FORMAT = 4, /** - * The layout is invalid such as width is zero. + * 5: The video encoding resolution after video mixing is invalid. */ VT_ERR_INVALID_LAYOUT = 5, /** - * Internal error. + * 20: Unknown internal error. */ VT_ERR_INTERNAL = 20 }; + +/** + * @brief The source of the audio streams that are mixed locally. + */ +struct MixedAudioStream { + /** + * The type of the audio source. See `AUDIO_SOURCE_TYPE`. + */ + AUDIO_SOURCE_TYPE sourceType; + /** + * The user ID of the remote user. 
+ * @note Set this parameter if the source type of the locally mixed audio steams is + * AUDIO_SOURCE_REMOTE_USER. + */ + uid_t remoteUserUid; + /** + * The channel name. This parameter signifies the channel in which users engage in real-time audio + * and video interaction. Under the premise of the same App ID, users who fill in the same channel + * ID enter the same channel for audio and video interaction. The string length must be less than 64 + * bytes. Supported characters (89 characters in total): + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," + * @note Set this parameter if the source type of the locally mixed audio streams is + * AUDIO_SOURCE_REMOTE_CHANNEL or AUDIO_SOURCE_REMOTE_USER. + */ + const char* channelId; + /** + * The audio track ID. Set this parameter to the custom audio track ID returned in + * `createCustomAudioTrack`. + * @note Set this parameter if the source type of the locally mixed audio steams is + * AUDIO_SOURCE_CUSTOM. + */ + track_id_t trackId; + + MixedAudioStream(AUDIO_SOURCE_TYPE source) + : sourceType(source), + remoteUserUid(0), + channelId(NULL), + trackId(-1) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, track_id_t track) + : sourceType(source), + trackId(track) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel) + : sourceType(source), + remoteUserUid(uid), + channelId(channel) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel, track_id_t track) + : sourceType(source), + remoteUserUid(uid), + channelId(channel), + trackId(track) {} + +}; + +/** + * @brief The configurations for mixing the lcoal audio. + */ +struct LocalAudioMixerConfiguration { + /** + * The number of the audio streams that are mixed locally. 
+ */ + unsigned int streamCount; + /** + * The source of the audio streams that are mixed locally. See `MixedAudioStream`. + */ + MixedAudioStream* audioInputStreams; + + /** + * Whether the mxied audio stream uses the timestamp of the audio frames captured by the local + * microphone. + * - `true`: (Default) Yes. Set to this value if you want all locally captured audio streams + * synchronized. + * - `false`: No. The SDK uses the timestamp of the audio frames at the time when they are mixed. + */ + bool syncWithLocalMic; + + LocalAudioMixerConfiguration() : streamCount(0), syncWithLocalMic(true) {} +}; + /** - * Configurations of the last-mile network test. + * @brief Configurations of the last-mile network test. */ struct LastmileProbeConfig { /** - * Determines whether to test the uplink network. Some users, for example, - * the audience in a live broadcast channel, do not need such a test: - * - true: Test. - * - false: Do not test. + * Sets whether to test the uplink network. Some users, for example, the audience members in a + * LIVE_BROADCASTING channel, do not need such a test. + * - `true`: Test the uplink network. + * - `false`: Do not test the uplink network. */ bool probeUplink; /** - * Determines whether to test the downlink network: - * - true: Test. - * - false: Do not test. + * Sets whether to test the downlink network: + * - `true`: Test the downlink network. + * - `false`: Do not test the downlink network. */ bool probeDownlink; /** - * The expected maximum sending bitrate (bps) of the local user. The value range is [100000, 5000000]. We recommend setting this parameter - * according to the bitrate value set by `setVideoEncoderConfiguration`. + * The expected maximum uplink bitrate (bps) of the local user. The value range is [100000, + * 5000000]. Agora recommends referring to `setVideoEncoderConfiguration` to set the value. */ unsigned int expectedUplinkBitrate; /** - * The expected maximum receiving bitrate (bps) of the local user. 
The value range is [100000,5000000]. + * The expected maximum downlink bitrate (bps) of the local user. The value range is + * [100000,5000000]. */ unsigned int expectedDownlinkBitrate; }; /** - * The status of the last-mile network tests. + * @brief The status of the last-mile probe test. */ enum LASTMILE_PROBE_RESULT_STATE { /** @@ -4134,17 +4980,19 @@ enum LASTMILE_PROBE_RESULT_STATE { */ LASTMILE_PROBE_RESULT_COMPLETE = 1, /** - * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not available due to limited test resources. + * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not + * available due to limited test resources. One possible reason is that testing resources are + * temporarily limited. */ LASTMILE_PROBE_RESULT_INCOMPLETE_NO_BWE = 2, /** - * 3: The last-mile network probe test is not carried out, probably due to poor network conditions. + * 3: The last-mile network probe test is not carried out. Probably due to poor network conditions. */ LASTMILE_PROBE_RESULT_UNAVAILABLE = 3 }; /** - * Results of the uplink or downlink last-mile network test. + * @brief Results of the uplink or downlink last-mile network test. */ struct LastmileProbeOneWayResult { /** @@ -4160,25 +5008,23 @@ struct LastmileProbeOneWayResult { */ unsigned int availableBandwidth; - LastmileProbeOneWayResult() : packetLossRate(0), - jitter(0), - availableBandwidth(0) {} + LastmileProbeOneWayResult() : packetLossRate(0), jitter(0), availableBandwidth(0) {} }; /** - * Results of the uplink and downlink last-mile network tests. + * @brief Results of the uplink and downlink last-mile network tests. */ struct LastmileProbeResult { /** - * The status of the last-mile network tests. See #LASTMILE_PROBE_RESULT_STATE. + * The status of the last-mile network tests. See `LASTMILE_PROBE_RESULT_STATE`. */ LASTMILE_PROBE_RESULT_STATE state; /** - * Results of the uplink last-mile network test. 
For details, see LastmileProbeOneWayResult. + * Results of the uplink last-mile network test. See `LastmileProbeOneWayResult`. */ LastmileProbeOneWayResult uplinkReport; /** - * Results of the downlink last-mile network test. For details, see LastmileProbeOneWayResult. + * Results of the downlink last-mile network test. See `LastmileProbeOneWayResult`. */ LastmileProbeOneWayResult downlinkReport; /** @@ -4186,18 +5032,15 @@ struct LastmileProbeResult { */ unsigned int rtt; - LastmileProbeResult() - : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), - rtt(0) {} + LastmileProbeResult() : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), rtt(0) {} }; /** - * Reasons causing the change of the connection state. + * @brief Reasons causing the change of the connection state. */ -enum CONNECTION_CHANGED_REASON_TYPE -{ +enum CONNECTION_CHANGED_REASON_TYPE { /** - * 0: The SDK is connecting to the server. + * 0: The SDK is connecting to the Agora edge server. */ CONNECTION_CHANGED_CONNECTING = 0, /** @@ -4205,15 +5048,18 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_JOIN_SUCCESS = 1, /** - * 2: The connection between the SDK and the server is interrupted. + * 2: The connection between the SDK and the Agora edge server is interrupted. */ CONNECTION_CHANGED_INTERRUPTED = 2, /** - * 3: The connection between the SDK and the server is banned by the server. This error occurs when the user is kicked out of the channel by the server. + * 3: The connection between the SDK and the Agora edge server is banned by the Agora edge server. + * For example, when a user is kicked out of the channel, this status will be returned. */ CONNECTION_CHANGED_BANNED_BY_SERVER = 3, /** - * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 minutes, this error occurs and the SDK stops reconnecting to the channel. + * 4: The SDK fails to join the channel. 
When the SDK fails to join the channel for more than 20 + * minutes, this code will be returned and the SDK stops reconnecting to the channel. You need to + * prompt the user to try to switch to another network and rejoin the channel. */ CONNECTION_CHANGED_JOIN_FAILED = 4, /** @@ -4221,31 +5067,51 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_LEAVE_CHANNEL = 5, /** - * 6: The connection fails because the App ID is not valid. + * 6: The App ID is invalid. You need to rejoin the channel with a valid APP ID and make sure the + * App ID you are using is consistent with the one generated in the Agora Console. */ CONNECTION_CHANGED_INVALID_APP_ID = 6, /** - * 7: The connection fails because the channel name is not valid. Please rejoin the channel with a valid channel name. + * 7: Invalid channel name. Rejoin the channel with a valid channel name. A valid channel name is a + * string of up to 64 bytes in length. Supported characters (89 characters in total): + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," */ CONNECTION_CHANGED_INVALID_CHANNEL_NAME = 7, /** - * 8: The connection fails because the token is not valid. Typical reasons include: - * - The App Certificate for the project is enabled in Agora Console, but you do not use a token when joining the channel. If you enable the App Certificate, you must use a token to join the channel. - * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the `uid` passed in when generating the token. + * 8: Invalid token. Possible reasons are as follows: + * - The App Certificate for the project is enabled in Agora Console, but you do not pass in a token + * when joining a channel. 
+ * - The uid specified when calling `joinChannel(const char* token, const char* channelId, uid_t + * uid, const ChannelMediaOptions& options)` to join the channel is inconsistent with the + * uid passed in when generating the token. + * - The generated token and the token used to join the channel are not consistent. + * Ensure the following: + * - When your project enables App Certificate, you need to pass in a token to join a channel. + * - The user ID specified when generating the token is consistent with the user ID used when + * joining the channel. + * - The generated token is the same as the token passed in to join the channel. */ CONNECTION_CHANGED_INVALID_TOKEN = 8, /** - * 9: The connection fails because the token has expired. + * 9: The token currently being used has expired. You need to generate a new token on your server + * and rejoin the channel with the new token. */ CONNECTION_CHANGED_TOKEN_EXPIRED = 9, /** - * 10: The connection is rejected by the server. Typical reasons include: - * - The user is already in the channel and still calls a method, for example, `joinChannel`, to join the channel. Stop calling this method to clear this error. - * - The user tries to join the channel when conducting a pre-call test. The user needs to call the channel after the call test ends. + * 10: The connection is rejected by server. Possible reasons are as follows: + * - The user is already in the channel and still calls a method, for example, `joinChannel(const + * char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)`, + * to join the channel. Stop calling this method to clear this error. + * - The user tries to join a channel while a test call is in progress. The user needs to join the + * channel after the call test ends. */ CONNECTION_CHANGED_REJECTED_BY_SERVER = 10, /** - * 11: The connection changes to reconnecting because the SDK has set a proxy server. 
+ * 11: The connection state changed to reconnecting because the SDK has set a proxy server. */ CONNECTION_CHANGED_SETTING_PROXY_SERVER = 11, /** @@ -4253,15 +5119,17 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_RENEW_TOKEN = 12, /** - * 13: The IP address of the client has changed, possibly because the network type, IP address, or port has been changed. + * 13: Client IP address changed. If you receive this code multiple times, You need to prompt the + * user to switch networks and try joining the channel again. */ CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED = 13, /** - * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The connection state changes to CONNECTION_STATE_RECONNECTING. + * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The + * SDK tries to reconnect to the server automatically. */ CONNECTION_CHANGED_KEEP_ALIVE_TIMEOUT = 14, /** - * 15: The SDK has rejoined the channel successfully. + * 15: The user has rejoined the channel successfully. */ CONNECTION_CHANGED_REJOIN_SUCCESS = 15, /** @@ -4269,19 +5137,19 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_LOST = 16, /** - * 17: The change of connection state is caused by echo test. + * 17: The connection state changes due to the echo test. */ CONNECTION_CHANGED_ECHO_TEST = 17, /** - * 18: The local IP Address is changed by user. + * 18: The local IP address was changed by the user. */ CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED_BY_USER = 18, /** - * 19: The connection is failed due to join the same channel on another device with the same uid. + * 19: The user joined the same channel from different devices with the same UID. */ CONNECTION_CHANGED_SAME_UID_LOGIN = 19, /** - * 20: The connection is failed due to too many broadcasters in the channel. + * 20: The number of hosts in the channel has reached the upper limit. 
*/ CONNECTION_CHANGED_TOO_MANY_BROADCASTERS = 20, @@ -4304,85 +5172,37 @@ enum CONNECTION_CHANGED_REASON_TYPE }; /** - * The reason of changing role's failure. + * @brief The reason for a user role switch failure. */ enum CLIENT_ROLE_CHANGE_FAILED_REASON { /** - * 1: Too many broadcasters in the channel. + * 1: The number of hosts in the channel exceeds the limit. + * @note This enumerator is reported only when the support for 128 users is enabled. The maximum + * number of hosts is based on the actual number of hosts configured when you enable the 128-user + * feature. */ CLIENT_ROLE_CHANGE_FAILED_TOO_MANY_BROADCASTERS = 1, /** - * 2: The operation of changing role is not authorized. + * 2: The request is rejected by the Agora server. Agora recommends you prompt the user to try to + * switch their user role again. */ CLIENT_ROLE_CHANGE_FAILED_NOT_AUTHORIZED = 2, /** - * 3: The operation of changing role is timeout. + * 3: The request is timed out. Agora recommends you prompt the user to check the network connection + * and try to switch their user role again. * @deprecated This reason is deprecated. */ CLIENT_ROLE_CHANGE_FAILED_REQUEST_TIME_OUT __deprecated = 3, /** - * 4: The operation of changing role is interrupted since we lost connection with agora service. + * 4: The SDK is disconnected from the Agora edge server. You can troubleshoot the failure through + * the `reason` reported by `onConnectionStateChanged`. * @deprecated This reason is deprecated. */ CLIENT_ROLE_CHANGE_FAILED_CONNECTION_FAILED __deprecated = 4, }; /** - * The reason of notifying the user of a message. - */ -enum WLACC_MESSAGE_REASON { - /** - * WIFI signal is weak. - */ - WLACC_MESSAGE_REASON_WEAK_SIGNAL = 0, - /** - * Channel congestion. - */ - WLACC_MESSAGE_REASON_CHANNEL_CONGESTION = 1, -}; - -/** - * Suggest an action for the user. - */ -enum WLACC_SUGGEST_ACTION { - /** - * Please get close to AP. 
- */ - WLACC_SUGGEST_ACTION_CLOSE_TO_WIFI = 0, - /** - * The user is advised to connect to the prompted SSID. - */ - WLACC_SUGGEST_ACTION_CONNECT_SSID = 1, - /** - * The user is advised to check whether the AP supports 5G band and enable 5G band (the aciton link is attached), or purchases an AP that supports 5G. AP does not support 5G band. - */ - WLACC_SUGGEST_ACTION_CHECK_5G = 2, - /** - * The user is advised to change the SSID of the 2.4G or 5G band (the aciton link is attached). The SSID of the 2.4G band AP is the same as that of the 5G band. - */ - WLACC_SUGGEST_ACTION_MODIFY_SSID = 3, -}; - -/** - * Indicator optimization degree. - */ -struct WlAccStats { - /** - * End-to-end delay optimization percentage. - */ - unsigned short e2eDelayPercent; - /** - * Frozen Ratio optimization percentage. - */ - unsigned short frozenRatioPercent; - /** - * Loss Rate optimization percentage. - */ - unsigned short lossRatePercent; -}; - -/** - * The network type. + * @brief Network type. */ enum NETWORK_TYPE { /** @@ -4420,455 +5240,852 @@ enum NETWORK_TYPE { }; /** - * The mode of setting up video views. + * @brief Setting mode of the view. */ enum VIDEO_VIEW_SETUP_MODE { /** - * 0: replace one view + * 0: (Default) Clear all added views and replace with a new view. */ VIDEO_VIEW_SETUP_REPLACE = 0, /** - * 1: add one view + * 1: Adds a view. */ VIDEO_VIEW_SETUP_ADD = 1, /** - * 2: remove one view + * 2: Deletes a view. + * @note When you no longer need to use a certain view, it is recommended to delete the view by + * setting `setupMode` to VIDEO_VIEW_SETUP_REMOVE, otherwise it may lead to leak of rendering + * resources. */ VIDEO_VIEW_SETUP_REMOVE = 2, }; /** - * Attributes of video canvas object. + * @brief Attributes of the video canvas object. */ struct VideoCanvas { /** - * The user id of local video. + * User ID that publishes the video source. 
*/ uid_t uid; /** - * The uid of video stream composing the video stream from transcoder which will be drawn on this video canvas. - */ + * The ID of the user who publishes a specific sub-video stream within the mixed video stream. + */ uid_t subviewUid; /** - * Video display window. + * The video display window. + * @note In one `VideoCanvas`, you can only choose to set either `view` or `surfaceTexture`. If both + * are set, only the settings in `view` take effect. */ view_t view; /** - * A RGBA value indicates background color of the render view. Defaults to 0x00000000. + * The background color of the video canvas in RGBA format. The default value is 0x00000000, which + * represents black. */ uint32_t backgroundColor; /** - * The video render mode. See \ref agora::media::base::RENDER_MODE_TYPE "RENDER_MODE_TYPE". - * The default value is RENDER_MODE_HIDDEN. + * The rendering mode of the video. See `RENDER_MODE_TYPE`. */ media::base::RENDER_MODE_TYPE renderMode; /** - * The video mirror mode. See \ref VIDEO_MIRROR_MODE_TYPE "VIDEO_MIRROR_MODE_TYPE". - * The default value is VIDEO_MIRROR_MODE_AUTO. + * The mirror mode of the view. See `VIDEO_MIRROR_MODE_TYPE`. * @note - * - For the mirror mode of the local video view: - * If you use a front camera, the SDK enables the mirror mode by default; - * if you use a rear camera, the SDK disables the mirror mode by default. + * - For the mirror mode of the local video view: If you use a front camera, the SDK enables the + * mirror mode by default; if you use a rear camera, the SDK disables the mirror mode by default. * - For the remote user: The mirror mode is disabled by default. */ VIDEO_MIRROR_MODE_TYPE mirrorMode; /** - * The mode of setting up video view. See \ref VIDEO_VIEW_SETUP_MODE "VIDEO_VIEW_SETUP_MODE" - * The default value is VIDEO_VIEW_SETUP_REPLACE. + * Setting mode of the view. See `VIDEO_VIEW_SETUP_MODE`. */ VIDEO_VIEW_SETUP_MODE setupMode; /** - * The video source type. 
See \ref VIDEO_SOURCE_TYPE "VIDEO_SOURCE_TYPE". - * The default value is VIDEO_SOURCE_CAMERA_PRIMARY. + * The type of the video source. See `VIDEO_SOURCE_TYPE`. */ VIDEO_SOURCE_TYPE sourceType; /** - * The media player id of AgoraMediaPlayer. It should set this parameter when the - * sourceType is VIDEO_SOURCE_MEDIA_PLAYER to show the video that AgoraMediaPlayer is playing. - * You can get this value by calling the method \ref getMediaPlayerId(). + * The ID of the media player. You can get the Device ID by calling `getMediaPlayerId`. */ int mediaPlayerId; /** - * If you want to display a certain part of a video frame, you can set - * this value to crop the video frame to show. - * The default value is empty(that is, if it has zero width or height), which means no cropping. + * (Optional) Display area of the video frame, see `Rectangle`. `width` and `height` represent the + * video pixel width and height of the area. The default value is null (width or height is 0), which + * means that the actual resolution of the video frame is displayed. */ Rectangle cropArea; /** - * Whether to apply alpha mask to the video frame if exsit: - * true: Apply alpha mask to video frame. - * false: (Default) Do not apply alpha mask to video frame. + * (Optional) Whether to enable alpha mask rendering: + * - `true`: Enable alpha mask rendering. + * - `false`: (Default) Disable alpha mask rendering. + * Alpha mask rendering can create images with transparent effects and extract portraits from + * videos. When used in combination with other methods, you can implement effects such as + * portrait-in-picture and watermarking. + * @note + * - The receiver can render alpha channel information only when the sender enables alpha + * transmission. + * - To enable alpha transmission, `technical support`. */ bool enableAlphaMask; /** - * The video frame position in pipeline. See \ref VIDEO_MODULE_POSITION "VIDEO_MODULE_POSITION". - * The default value is POSITION_POST_CAPTURER. 
+ * The observation position of the video frame in the video link. See `VIDEO_MODULE_POSITION`. */ media::base::VIDEO_MODULE_POSITION position; VideoCanvas() - : uid(0), subviewUid(0), view(NULL), backgroundColor(0x00000000), renderMode(media::base::RENDER_MODE_HIDDEN), mirrorMode(VIDEO_MIRROR_MODE_AUTO), - setupMode(VIDEO_VIEW_SETUP_REPLACE), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(NULL), + backgroundColor(0x00000000), + renderMode(media::base::RENDER_MODE_HIDDEN), + mirrorMode(VIDEO_MIRROR_MODE_AUTO), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt) - : uid(0), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u) - : uid(u), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} - - 
VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, uid_t subu) - : uid(u), subviewUid(subu), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} -}; - -/** Image enhancement options. + : uid(u), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} + + VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, + uid_t subu) + : uid(u), + subviewUid(subu), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} +}; + +/** + * @brief Image enhancement options. */ struct BeautyOptions { - /** The contrast level. - */ + /** + * @brief The contrast level. + */ enum LIGHTENING_CONTRAST_LEVEL { - /** Low contrast level. */ - LIGHTENING_CONTRAST_LOW = 0, - /** (Default) Normal contrast level. */ - LIGHTENING_CONTRAST_NORMAL = 1, - /** High contrast level. */ - LIGHTENING_CONTRAST_HIGH = 2, + /** + * 0: Low contrast level. + */ + LIGHTENING_CONTRAST_LOW = 0, + /** + * 1: (Default) Normal contrast level. + */ + LIGHTENING_CONTRAST_NORMAL = 1, + /** + * 2: High contrast level. + */ + LIGHTENING_CONTRAST_HIGH = 2, }; - /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL. 
- */ + /** + * The contrast level, used with the `lighteningLevel` parameter. The larger the value, the greater + * the contrast between light and dark. See `LIGHTENING_CONTRAST_LEVEL`. + */ LIGHTENING_CONTRAST_LEVEL lighteningContrastLevel; - /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of whitening. */ + /** + * The brightening level, in the range [0.0,1.0], where 0.0 means the original brightening. The + * default value is 0.0. The higher the value, the greater the degree of brightening. + */ float lighteningLevel; - /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of skin grinding. - */ + /** + * The smoothness level, in the range [0.0,1.0], where 0.0 means the original smoothness. The + * default value is 0.0. The greater the value, the greater the smoothness level. + */ float smoothnessLevel; - /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the rosy degree. - */ + /** + * The redness level, in the range [0.0,1.0], where 0.0 means the original redness. The default + * value is 0.0. The larger the value, the greater the redness level. + */ float rednessLevel; - /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the sharpening degree. - */ + /** + * The sharpness level, in the range [0.0,1.0], where 0.0 means the original sharpness. The default + * value is 0.0. The larger the value, the greater the sharpness level. 
+   */
  float sharpnessLevel;

-  BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness, float redness, float sharpness) : lighteningContrastLevel(contrastLevel), lighteningLevel(lightening), smoothnessLevel(smoothness), rednessLevel(redness), sharpnessLevel(sharpness) {}
+  BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness,
+                float redness, float sharpness)
+      : lighteningContrastLevel(contrastLevel),
+        lighteningLevel(lightening),
+        smoothnessLevel(smoothness),
+        rednessLevel(redness),
+        sharpnessLevel(sharpness) {}

-  BeautyOptions() : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL), lighteningLevel(0), smoothnessLevel(0), rednessLevel(0), sharpnessLevel(0) {}
+  BeautyOptions()
+      : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL),
+        lighteningLevel(0),
+        smoothnessLevel(0),
+        rednessLevel(0),
+        sharpnessLevel(0) {}
 };

-/** Face shape area options. This structure defines options for facial adjustments on different facial areas.
+/**
+ * @brief Face shape area options.
  *
- * @technical preview
+ * @since v4.4.0
  */
 struct FaceShapeAreaOptions {
-  /** The specific facial area to be adjusted.
-   */
-  enum FACE_SHAPE_AREA {
-    /** (Default) Invalid area. */
-    FACE_SHAPE_AREA_NONE = -1,
-    /** Head Scale, reduces the size of head. */
-    FACE_SHAPE_AREA_HEADSCALE = 0,
-    /** Forehead, adjusts the size of forehead. */
-    FACE_SHAPE_AREA_FOREHEAD = 1,
-    /** Face Contour, slims the facial contour. */
-    FACE_SHAPE_AREA_FACECONTOUR = 2,
-    /** Face Length, adjusts the length of face. */
-    FACE_SHAPE_AREA_FACELENGTH = 3,
-    /** Face Width, narrows the width of face. */
-    FACE_SHAPE_AREA_FACEWIDTH = 4,
-    /** Cheekbone, adjusts the size of cheekbone. */
-    FACE_SHAPE_AREA_CHEEKBONE = 5,
-    /** Cheek, adjusts the size of cheek. */
-    FACE_SHAPE_AREA_CHEEK = 6,
-    /** Chin, adjusts the length of chin. */
-    FACE_SHAPE_AREA_CHIN = 7,
-    /** Eye Scale, adjusts the size of eyes. 
*/ - FACE_SHAPE_AREA_EYESCALE = 8, - /** Nose Length, adjusts the length of nose. */ - FACE_SHAPE_AREA_NOSELENGTH = 9, - /** Nose Width, adjusts the width of nose. */ - FACE_SHAPE_AREA_NOSEWIDTH = 10, - /** Mouth Scale, adjusts the size of mouth. */ - FACE_SHAPE_AREA_MOUTHSCALE = 11, - }; - - /** The specific facial area to be adjusted, See #FACE_SHAPE_AREA. - */ - FACE_SHAPE_AREA shapeArea; - - /** The intensity of the pinching effect applied to the specified facial area. - * For the following area values: #FACE_SHAPE_AREA_FOREHEAD, #FACE_SHAPE_AREA_FACELENGTH, #FACE_SHAPE_AREA_CHIN, #FACE_SHAPE_AREA_NOSELENGTH, #FACE_SHAPE_AREA_NOSEWIDTH, #FACE_SHAPE_AREA_MOUTHSCALE, the value ranges from -100 to 100. - * The default value is 0. The greater the absolute value, the stronger the intensity applied to the specified facial area, and negative values indicate the opposite direction. - * For enumeration values other than the above, the value ranges from 0 to 100. The default value is 0. The greater the value, the stronger the intensity applied to the specified facial area. - */ - int shapeIntensity; - - FaceShapeAreaOptions(FACE_SHAPE_AREA shapeArea, int areaIntensity) : shapeArea(shapeArea), shapeIntensity(areaIntensity) {} - - FaceShapeAreaOptions() : shapeArea(FACE_SHAPE_AREA_NONE), shapeIntensity(0) {} -}; - -/** Face shape beauty options. This structure defines options for facial adjustments of different facial styles. - * - * @technical preview - */ -struct FaceShapeBeautyOptions { - /** The face shape style. - */ - enum FACE_SHAPE_BEAUTY_STYLE { - /** (Default) Female face shape style. */ - FACE_SHAPE_BEAUTY_STYLE_FEMALE = 0, - /** Male face shape style. */ - FACE_SHAPE_BEAUTY_STYLE_MALE = 1, - }; - - /** The face shape style, See #FACE_SHAPE_BEAUTY_STYLE. - */ - FACE_SHAPE_BEAUTY_STYLE shapeStyle; - - /** The intensity of the pinching effect applied to the specified facial style. The value ranges from 0 (original) to 100. The default value is 0. 
The greater the value, the stronger the intensity applied to face pinching. - */ - int styleIntensity; - - FaceShapeBeautyOptions(FACE_SHAPE_BEAUTY_STYLE shapeStyle, int styleIntensity) : shapeStyle(shapeStyle), styleIntensity(styleIntensity) {} - - FaceShapeBeautyOptions() : shapeStyle(FACE_SHAPE_BEAUTY_STYLE_FEMALE), styleIntensity(50) {} -}; - -struct LowlightEnhanceOptions { - /** - * The low-light enhancement mode. - */ - enum LOW_LIGHT_ENHANCE_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light enhancement feature according to the ambient light to compensate for the lighting level or prevent overexposure, as necessary. */ - LOW_LIGHT_ENHANCE_AUTO = 0, - /** Manual mode. Users need to enable or disable the low-light enhancement feature manually. */ - LOW_LIGHT_ENHANCE_MANUAL = 1, - }; /** - * The low-light enhancement level. + * @brief Chooses the specific facial areas that need to be adjusted. + * + * @since v4.4.0 */ - enum LOW_LIGHT_ENHANCE_LEVEL { + enum FACE_SHAPE_AREA { /** - * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness, details, and noise of the video image. The performance consumption is moderate, the processing speed is moderate, and the overall video quality is optimal. + * -1: (Default) Invalid area; facial enhancement effects do not take effect. */ - LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY = 0, + FACE_SHAPE_AREA_NONE = -1, /** - * Promotes performance during low-light enhancement. It processes the brightness and details of the video image. The processing speed is faster. + * (100): Head, used to achieve a smaller head effect. The value range is 0 to 100, and the default + * value is 50. The larger the value, the more noticeable the adjustment. + */ + FACE_SHAPE_AREA_HEADSCALE = 100, + /** + * (101): Forehead, used to adjust the hairline height. The range is [0, 100], with a default value + * of 0. The larger the value, the more noticeable the adjustment. 
+   */
+  FACE_SHAPE_AREA_FOREHEAD = 101,
+  /**
+   * (102): Face contour, used to achieve a slimmer face effect. The range is [0, 100], with a default
+   * value of 0. The larger the value, the more noticeable the adjustment.
+   */
+  FACE_SHAPE_AREA_FACECONTOUR = 102,
+  /**
+   * (103): Face length, used to achieve a longer face effect. The range is [-100, 100], with a
+   * default value of 0. The greater the absolute value, the more noticeable the adjustment. Negative
+   * values indicate the opposite direction.
+   */
+  FACE_SHAPE_AREA_FACELENGTH = 103,
+  /**
+   * (104): Face width, used to achieve a narrower face effect. The range is [0, 100], with a default
+   * value of 0. The larger the value, the more noticeable the adjustment.
+   */
+  FACE_SHAPE_AREA_FACEWIDTH = 104,
+  /**
+   * (105): Cheekbone, used to adjust cheekbone width. The range is [0, 100], with a default
+   * value of 0. The larger the value, the more noticeable the
+   * adjustment.
+   */
+  FACE_SHAPE_AREA_CHEEKBONE = 105,
+  /**
+   * (106): Cheek, used to adjust cheek width. The range is [0, 100], with a default value of 0. The
+   * larger the value, the more noticeable the adjustment.
+   */
+  FACE_SHAPE_AREA_CHEEK = 106,
+  /**
+   * (107): Adjustment of the mandible. The range is [0, 100], with a default value of 0. The larger
+   * the value, the more noticeable the adjustment.
+   * @since v4.6.0
+   */
+  FACE_SHAPE_AREA_MANDIBLE = 107,
+  /**
+   * (108): Chin, used to adjust chin length. The range is [-100, 100], with a default value of 0. The
+   * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+   * opposite direction.
+   */
+  FACE_SHAPE_AREA_CHIN = 108,
+  /**
+   * (200): Eyes, used to achieve a larger eye effect. The value range is 0 to 100, and the default
+   * value is 50. The larger the value, the more noticeable the adjustment.
+   */
+  FACE_SHAPE_AREA_EYESCALE = 200,
+  /**
+   * (201): Eye distance adjustment. 
The range is [-100, 100], with a default value of 0. The greater
+   * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+   * direction.
+   * @since v4.6.0
+   */
+  FACE_SHAPE_AREA_EYEDISTANCE = 201,
+  /**
+   * (202): Eye position adjustment. The range is [-100, 100], with a default value of 0. The greater
+   * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite
+   * direction.
+   * @since v4.6.0
+   */
+  FACE_SHAPE_AREA_EYEPOSITION = 202,
+  /**
+   * (203): Lower eyelid adjustment. The range is [0, 100], with a
+   * default value of 0. The larger the value, the more noticeable the adjustment.
+   * @since v4.6.0
+   */
+  FACE_SHAPE_AREA_LOWEREYELID = 203,
+  /**
+   * (204): Pupil size adjustment. The range is [0, 100], with a default value of 0. The larger the
+   * value, the more noticeable the adjustment.
+   * @since v4.6.0
+   */
+  FACE_SHAPE_AREA_EYEPUPILS = 204,
+  /**
+   * (205): Inner eye corner adjustment. The range is [-100, 100], with a default value of 0. The
+   * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+   * opposite direction.
+   * @since v4.6.0
+   */
+  FACE_SHAPE_AREA_EYEINNERCORNER = 205,
+  /**
+   * (206): Outer eye corner adjustment. The range is [-100, 100], with a default value of 0. The
+   * greater the absolute value, the more noticeable the adjustment. Negative values indicate the
+   * opposite direction.
+   * @since v4.6.0
+   */
+  FACE_SHAPE_AREA_EYEOUTERCORNER = 206,
+  /**
+   * (300): Nose length, used to achieve a longer nose effect. The range is [-100, 100], with a
+   * default value of 0.
+   */
+  FACE_SHAPE_AREA_NOSELENGTH = 300,
+  /**
+   * (301): Nose width, used to achieve a slimmer nose effect. The range is [0, 100], with a default
+   * value of 0. The larger the value, the more noticeable the effect of narrowing the nose. 
+ * @since v4.6.0 + */ + FACE_SHAPE_AREA_NOSEWIDTH = 301, + /** + * (302): Nose wing adjustment. The value range is 0 to 100, and the default value is 10. The larger + * the value, the more noticeable the adjustment. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_NOSEWING = 302, + /** + * (303): Nose root adjustment. The range is [0, 100], with a default value of 0. The larger the + * value, the more noticeable the adjustment. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_NOSEROOT = 303, + /** + * (304): Nose bridge adjustment. The value range is 0 to 100, and the default value is 50. The + * larger the value, the more noticeable the adjustment. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_NOSEBRIDGE = 304, + /** + * (305): Nose tip adjustment. The value range is 0 to 100, and the default value is 50. The larger + * the value, the more noticeable the adjustment. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_NOSETIP = 305, + /** + * (306): Overall nose adjustment. The range is [-100, 100], with a default value of 50. The greater + * the absolute value, the more noticeable the adjustment. Negative values indicate the opposite + * direction. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_NOSEGENERAL = 306, + /** + * (400): Mouth, used to achieve a larger mouth effect. The range is [-100, 100], with a default + * value of 20. The greater the absolute value, the more noticeable the adjustment. Negative values + * indicate the opposite direction. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_MOUTHSCALE = 400, + /** + * (401): Mouth position adjustment. The range is [0, 100], with a default value of 0. The larger + * the value, the more noticeable the adjustment. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_MOUTHPOSITION = 401, + /** + * (402): Mouth smile adjustment. The value range is [0,1], and the default value is 0. The larger + * the value, the more noticeable the adjustment. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_MOUTHSMILE = 402, + /** + * (403): Lip shape adjustment. 
The range is [0, 100], with a default value of 0. The larger the + * value, the more noticeable the adjustment. + * @note v.4.6.0. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_MOUTHLIP = 403, + /** + * (500): Eyebrow position adjustment. The range is [-100, 100], with a default value of 0. The + * greater the absolute value, the more noticeable the adjustment. Negative values indicate the + * opposite direction. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_EYEBROWPOSITION = 500, + /** + * (501): Eyebrow thickness adjustment. The range is [-100, 100], with a default value of 0. The + * larger the value, the more noticeable the adjustment. + * @since v4.6.0 + */ + FACE_SHAPE_AREA_EYEBROWTHICKNESS = 501, + }; + + /** + * Facial enhancement areas: `FACE_SHAPE_AREA` + */ + FACE_SHAPE_AREA shapeArea; + + /** + * The intensity of the enhancement. The definition of enhancement intensity varies according to the + * different face areas, such as its orientation, range, and preset value. See `FACE_SHAPE_AREA`. + */ + int shapeIntensity; + + FaceShapeAreaOptions(FACE_SHAPE_AREA shapeArea, int areaIntensity) : shapeArea(shapeArea), shapeIntensity(areaIntensity) {} + + FaceShapeAreaOptions() : shapeArea(FACE_SHAPE_AREA_NONE), shapeIntensity(0) {} +}; + +/** + * @brief The facial enhancement style options. + * + * @since v4.4.0 + */ +struct FaceShapeBeautyOptions { + /** + * @brief The facial enhancement style options. + * + * @since v4.4.0 + */ + enum FACE_SHAPE_BEAUTY_STYLE { + /** + * 0: (Default) Feminine style. + */ + FACE_SHAPE_BEAUTY_STYLE_FEMALE = 0, + /** + * 1: Masculine style. + */ + FACE_SHAPE_BEAUTY_STYLE_MALE = 1, + /** + * 2: The natural style beauty effect only makes minimal adjustments to facial features. + * @since v4.6.0 + */ + FACE_SHAPE_BEAUTY_STYLE_NATURAL = 2, + }; + + /** + * Facial enhancement style options: `FACE_SHAPE_BEAUTY_STYLE`. 
+ */
+ FACE_SHAPE_BEAUTY_STYLE shapeStyle;
+
+ /**
+ * The intensity of the facial enhancement style, with a value range of [0,100]. The default
+ * value is 50. The higher the value, the more obvious the
+ * facial enhancement effect.
+ */
+ int styleIntensity;
+
+ FaceShapeBeautyOptions(FACE_SHAPE_BEAUTY_STYLE shapeStyle, int styleIntensity) : shapeStyle(shapeStyle), styleIntensity(styleIntensity) {}
+
+ FaceShapeBeautyOptions() : shapeStyle(FACE_SHAPE_BEAUTY_STYLE_FEMALE), styleIntensity(50) {}
+};
+
+/**
+ * @brief Filter effect options.
+ *
+ * @since v4.4.1
+ */
+struct FilterEffectOptions {
+ /**
+ * The absolute path to the local cube map texture file, which can be used to customize the filter
+ * effect. The specified .cube file should strictly follow the Cube LUT Format Specification;
+ * otherwise, the filter options do not take effect. The following is a sample of the .cube file:
+ * ```
+ * LUT_3D_SIZE 32
+ * 0.0039215689 0 0.0039215682
+ * 0.0086021447 0.0037950677 0
+ * ...
+ * 0.0728652592 0.0039215689 0
+ * ```
+ * @note
+ * - The identifier `LUT_3D_SIZE` on the first line of the cube map file represents the size of the
+ * three-dimensional lookup table. The LUT size for filter effect can only be set to 32.
+ * - The SDK provides a built-in `built_in_whiten_filter.cube` file. You can pass the absolute path
+ * of this file to get the whitening filter effect.
+ */
+ const char * path;
+
+ /**
+ * The intensity of the filter effect, with a range value of [0.0,1.0], in which 0.0 represents no
+ * filter effect. The default value is 0.5. The higher the value, the stronger the filter effect.
+ */
+ float strength;
+
+ FilterEffectOptions(const char * lut3dPath, float filterStrength) : path(lut3dPath), strength(filterStrength) {}
+
+ FilterEffectOptions() : path(OPTIONAL_NULLPTR), strength(0.5) {}
+};
+
+/**
+ * @brief The low-light enhancement options.
+ */ +struct LowlightEnhanceOptions { + /** + * @brief The low-light enhancement mode. + */ + enum LOW_LIGHT_ENHANCE_MODE { + /** + * 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light enhancement + * feature according to the ambient light to compensate for the lighting level or prevent + * overexposure, as necessary. + */ + LOW_LIGHT_ENHANCE_AUTO = 0, + /** + * 1: Manual mode. Users need to enable or disable the low-light enhancement feature manually. + */ + LOW_LIGHT_ENHANCE_MANUAL = 1, + }; + /** + * @brief The low-light enhancement level. + */ + enum LOW_LIGHT_ENHANCE_LEVEL { + /** + * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness, + * details, and noise of the video image. The performance consumption is moderate, the processing + * speed is moderate, and the overall video quality is optimal. + */ + LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY = 0, + /** + * 1: Promotes performance during low-light enhancement. It processes the brightness and details of + * the video image. The processing speed is faster. + */ + LOW_LIGHT_ENHANCE_LEVEL_FAST = 1, + }; + + /** + * The low-light enhancement mode. See `LOW_LIGHT_ENHANCE_MODE`. + */ + LOW_LIGHT_ENHANCE_MODE mode; + + /** + * The low-light enhancement level. See `LOW_LIGHT_ENHANCE_LEVEL`. + */ + LOW_LIGHT_ENHANCE_LEVEL level; + + LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) + : mode(lowlightMode), level(lowlightLevel) {} + + LowlightEnhanceOptions() + : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} +}; +/** + * @brief Video noise reduction options. + * + * @since v4.0.0 + */ +struct VideoDenoiserOptions { + /** + * @brief Video noise reduction mode. + */ + enum VIDEO_DENOISER_MODE { + /** + * 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction + * feature according to the ambient light. 
+ */ + VIDEO_DENOISER_AUTO = 0, + /** + * 1: Manual mode. Users need to enable or disable the video noise reduction feature manually. */ - LOW_LIGHT_ENHANCE_LEVEL_FAST = 1, - }; - - /** The low-light enhancement mode. See #LOW_LIGHT_ENHANCE_MODE. - */ - LOW_LIGHT_ENHANCE_MODE mode; - - /** The low-light enhancement level. See #LOW_LIGHT_ENHANCE_LEVEL. - */ - LOW_LIGHT_ENHANCE_LEVEL level; - - LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) : mode(lowlightMode), level(lowlightLevel) {} - - LowlightEnhanceOptions() : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} -}; -/** - * The video noise reduction options. - * - * @since v4.0.0 - */ -struct VideoDenoiserOptions { - /** The video noise reduction mode. - */ - enum VIDEO_DENOISER_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction feature according to the ambient light. */ - VIDEO_DENOISER_AUTO = 0, - /** Manual mode. Users need to enable or disable the video noise reduction feature manually. */ VIDEO_DENOISER_MANUAL = 1, }; /** - * The video noise reduction level. + * @brief Video noise reduction level. */ enum VIDEO_DENOISER_LEVEL { /** - * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances performance consumption and video noise reduction quality. - * The performance consumption is moderate, the video noise reduction speed is moderate, and the overall video quality is optimal. + * 0: (Default) Promotes video quality during video noise reduction. balances performance + * consumption and video noise reduction quality. The performance consumption is moderate, the video + * noise reduction speed is moderate, and the overall video quality is optimal. */ VIDEO_DENOISER_LEVEL_HIGH_QUALITY = 0, /** - * Promotes reducing performance consumption during video noise reduction. 
`FAST` prioritizes reducing performance consumption over video noise reduction quality. - * The performance consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect (shadows trailing behind moving objects) in the processed video, Agora recommends that you use `FAST` when the camera is fixed. + * 1: Promotes reducing performance consumption during video noise reduction. It prioritizes + * reducing performance consumption over video noise reduction quality. The performance consumption + * is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect + * (shadows trailing behind moving objects) in the processed video, Agora recommends that you use + * this setting when the camera is fixed. */ VIDEO_DENOISER_LEVEL_FAST = 1, - /** - * Enhanced video noise reduction. `STRENGTH` prioritizes video noise reduction quality over reducing performance consumption. - * The performance consumption is higher, the video noise reduction speed is slower, and the video noise reduction quality is better. - * If `HIGH_QUALITY` is not enough for your video noise reduction needs, you can use `STRENGTH`. - */ - VIDEO_DENOISER_LEVEL_STRENGTH = 2, }; - /** The video noise reduction mode. See #VIDEO_DENOISER_MODE. + /** + * Video noise reduction mode. See `VIDEO_DENOISER_MODE`. */ VIDEO_DENOISER_MODE mode; - /** The video noise reduction level. See #VIDEO_DENOISER_LEVEL. + /** + * Video noise reduction level. See `VIDEO_DENOISER_LEVEL`. */ VIDEO_DENOISER_LEVEL level; - VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) : mode(denoiserMode), level(denoiserLevel) {} + VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) + : mode(denoiserMode), level(denoiserLevel) {} VideoDenoiserOptions() : mode(VIDEO_DENOISER_AUTO), level(VIDEO_DENOISER_LEVEL_HIGH_QUALITY) {} }; -/** The color enhancement options. 
+/** + * @brief The color enhancement options. * * @since v4.0.0 */ struct ColorEnhanceOptions { - /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value, which means no color enhancement is applied to the video. The higher the value, the higher the level of color enhancement. + /** + * The level of color enhancement. The value range is [0.0, 1.0]. `0.0` is the default value, which + * means no color enhancement is applied to the video. The higher the value, the higher the level of + * color enhancement. The default value is `0.5`. */ float strengthLevel; - /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone protection. The higher the value, the higher the level of skin tone protection. - * The default value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be significantly distorted, so you need to set the level of skin tone protection; when the level of skin tone protection is higher, the color enhancement effect can be slightly reduced. - * Therefore, to get the best color enhancement effect, Agora recommends that you adjust `strengthLevel` and `skinProtectLevel` to get the most appropriate values. + /** + * The level of skin tone protection. The value range is [0.0, 1.0]. `0.0` means no skin tone + * protection. The higher the value, the higher the level of skin tone protection. The default value + * is `1.0`. + * - When the level of color enhancement is higher, the portrait skin tone can be significantly + * distorted, so you need to set the level of skin tone protection. + * - When the level of skin tone protection is higher, the color enhancement effect can be slightly + * reduced. + * Therefore, to get the best color enhancement effect, Agora recommends that you adjust + * `strengthLevel` and `skinProtectLevel` to get the most appropriate values. 
*/ float skinProtectLevel; - ColorEnhanceOptions(float stength, float skinProtect) : strengthLevel(stength), skinProtectLevel(skinProtect) {} + ColorEnhanceOptions(float stength, float skinProtect) + : strengthLevel(stength), skinProtectLevel(skinProtect) {} ColorEnhanceOptions() : strengthLevel(0), skinProtectLevel(1) {} }; /** - * The custom background image. + * @brief The custom background. */ struct VirtualBackgroundSource { - /** The type of the custom background source. + /** + * @brief The custom background. */ enum BACKGROUND_SOURCE_TYPE { /** - * 0: Enable segementation with the captured video frame without replacing the background. + * 0: Process the background as alpha data without replacement, only separating the portrait and the + * background. After setting this value, you can call `startLocalVideoTranscoder` to implement the + * picture-in-picture effect. */ BACKGROUND_NONE = 0, /** - * 1: (Default) The background source is a solid color. + * 1: (Default) The background image is a solid color. */ BACKGROUND_COLOR = 1, /** - * The background source is a file in PNG or JPG format. + * 2: The background is an image in PNG or JPG format. */ BACKGROUND_IMG = 2, - /** - * The background source is the blurred original video frame. - * */ + /** + * 3: The background is a blurred version of the original background. + */ BACKGROUND_BLUR = 3, - /** - * The background source is a file in MP4, AVI, MKV, FLV format. - * */ + /** + * 4: The background is a local video in MP4, AVI, MKV, FLV, or other supported formats. + */ BACKGROUND_VIDEO = 4, }; - /** The degree of blurring applied to the background source. + /** + * @brief The degree of blurring applied to the custom background image. */ enum BACKGROUND_BLUR_DEGREE { - /** 1: The degree of blurring applied to the custom background image is low. The user can almost see the background clearly. */ + /** + * 1: The degree of blurring applied to the custom background image is low. 
The user can almost see
+ * the background clearly.
+ */
 BLUR_DEGREE_LOW = 1,
- /** 2: The degree of blurring applied to the custom background image is medium. It is difficult for the user to recognize details in the background. */
+ /**
+ * 2: The degree of blurring applied to the custom background image is medium. It is difficult for
+ * the user to recognize details in the background.
+ */
 BLUR_DEGREE_MEDIUM = 2,
- /** 3: (Default) The degree of blurring applied to the custom background image is high. The user can barely see any distinguishing features in the background. */
+ /**
+ * 3: (Default) The degree of blurring applied to the custom background image is high. The user can
+ * barely see any distinguishing features in the background.
+ */
 BLUR_DEGREE_HIGH = 3,
 };
- /** The type of the custom background image. See #BACKGROUND_SOURCE_TYPE.
+ /**
+ * The custom background. See `BACKGROUND_SOURCE_TYPE`.
 */
 BACKGROUND_SOURCE_TYPE background_source_type;
 /**
- * The color of the custom background image. The format is a hexadecimal integer defined by RGB, without the # sign,
- * such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which signifies white. The value range
- * is [0x000000,0xFFFFFF]. If the value is invalid, the SDK replaces the original background image with a white
- * background image.
- *
- * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_COLOR`.
+ * The color of the custom background image. The format is
+ * a hexadecimal integer defined by RGB, without the # sign, such as 0xFFB6C1 for light pink. The
+ * default value is 0xFFFFFF, which signifies white. The value range is [0x000000, 0xffffff]. If the
+ * value is invalid, the SDK replaces the original background image with a white background image.
+ * @note + * This parameter is only applicable to custom backgrounds of the following types: + * - BACKGROUND_COLOR: The background image is a solid-colored image of the color passed in by the + * parameter. + * - BACKGROUND_IMG: If the image in `source` has a transparent background, the transparent + * background will be filled with the color passed in by the parameter. */ unsigned int color; /** - * The local absolute path of the custom background image. PNG and JPG formats are supported. If the path is invalid, - * the SDK replaces the original background image with a white background image. - * - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_IMG`. + * The local absolute path of the custom background image. Supports PNG, JPG, MP4, AVI, MKV, and FLV + * formats. If the path is invalid, the SDK will use either the original background image or the + * solid color image specified by `color`. + * @note This parameter takes effect only when the type of the custom background image is + * BACKGROUND_IMG or BACKGROUND_VIDEO. */ const char* source; - /** The degree of blurring applied to the custom background image. See BACKGROUND_BLUR_DEGREE. - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_BLUR`. + /** + * The degree of blurring applied to the custom background image. See `BACKGROUND_BLUR_DEGREE`. + * @note This parameter takes effect only when the type of the custom background image is + * BACKGROUND_BLUR. */ BACKGROUND_BLUR_DEGREE blur_degree; - VirtualBackgroundSource() : background_source_type(BACKGROUND_COLOR), color(0xffffff), source(OPTIONAL_NULLPTR), blur_degree(BLUR_DEGREE_HIGH) {} + VirtualBackgroundSource() + : background_source_type(BACKGROUND_COLOR), + color(0xffffff), + source(OPTIONAL_NULLPTR), + blur_degree(BLUR_DEGREE_HIGH) {} }; +/** + * @brief Processing properties for background images. 
+ */
+struct SegmentationProperty {
-
- enum SEG_MODEL_TYPE {
-
+ /**
+ * @brief The type of algorithms to use for background processing.
+ */
+ enum SEG_MODEL_TYPE {
+ /**
+ * 1: (Default) Use the algorithm suitable for all scenarios.
+ */
 SEG_MODEL_AI = 1,
+ /**
+ * 2: Use the algorithm designed specifically for scenarios with a green screen background.
+ */
 SEG_MODEL_GREEN = 2
 };
+ /**
+ * @brief Screen color type.
+ */
+ enum SCREEN_COLOR_TYPE {
+ /**
+ * (0): Automatically selects screen color.
+ */
+ SCREEN_COLOR_AUTO = 0,
+ /**
+ * (1): Green screen.
+ */
+ SCREEN_COLOR_GREEN = 1,
+ /**
+ * (2): Blue screen.
+ */
+ SCREEN_COLOR_BLUE = 2
+ };
+
+ /**
+ * The type of algorithms to use for background processing. See `SEG_MODEL_TYPE`.
+ */
 SEG_MODEL_TYPE modelType;
+ /**
+ * The accuracy range for recognizing background colors in the image. The value range is [0,1], and
+ * the default value is 0.5. The larger the value, the wider the range of identifiable shades of
+ * pure color. When the value of this parameter is too large, the edge of the portrait and the pure
+ * color in the portrait range are also detected. Agora recommends that you dynamically adjust the
+ * value of this parameter according to the actual effect.
+ * @note This parameter only takes effect when `modelType` is set to `SEG_MODEL_GREEN`.
+ */
 float greenCapacity;
+ /**
+ * The screen color. See `SCREEN_COLOR_TYPE`.
+ */
+ SCREEN_COLOR_TYPE screenColorType;
- SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5){}
+ SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5), screenColorType(SCREEN_COLOR_AUTO) {}
 };
-/** The type of custom audio track
-*/
+/**
+ * @brief The type of the audio track.
+ */ enum AUDIO_TRACK_TYPE { - /** + /** * -1: Invalid audio track */ AUDIO_TRACK_INVALID = -1, - /** - * 0: Mixable audio track - * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid), - * and SDK will mix these tracks into one audio track automatically. - * However, compare to direct audio track, mixable track might cause extra 30ms+ delay. + /** + * 0: Mixable audio tracks. This type of audio track supports mixing with other audio streams (such + * as audio streams captured by microphone) and playing locally or publishing to channels after + * mixing. The latency of mixable audio tracks is higher than that of direct audio tracks. */ AUDIO_TRACK_MIXABLE = 0, /** - * 1: Direct audio track - * You can only push one direct (non-mixable) audio track into one RTC connection(channel id + uid). - * Compare to mixable stream, you can have lower lantency using direct audio track. + * 1: Direct audio tracks. This type of audio track will replace the audio streams captured by the + * microphone and does not support mixing with other audio streams. The latency of direct audio + * tracks is lower than that of mixable audio tracks. + * @note If `AUDIO_TRACK_DIRECT` is specified for this parameter, you must set + * `publishMicrophoneTrack` to `false` in `ChannelMediaOptions` when calling `joinChannel(const + * char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)` to + * join the channel; otherwise, joining the channel fails and returns the error code -2. */ AUDIO_TRACK_DIRECT = 1, }; -/** The configuration of custom audio track -*/ +/** + * @brief The configuration of custom audio tracks. + */ struct AudioTrackConfig { /** - * Enable local playback, enabled by default - * true: (Default) Enable local playback - * false: Do not enable local playback + * Whether to enable the local audio-playback device: + * - `true`: (Default) Enable the local audio-playback device. 
+ * - `false`: Do not enable the local audio-playback device. */ bool enableLocalPlayback; + /** + * Whether to enable audio processing module: + * - `true`Enable the audio processing module to apply the Automatic Echo Cancellation (AEC), + * Automatic Noise Suppression (ANS), and Automatic Gain Control (AGC) effects. + * - `false`: (Default) Do not enable the audio processing module. + * @note This parameter only takes effect on AUDIO_TRACK_DIRECT in custom audio capturing. + */ + bool enableAudioProcessing; - AudioTrackConfig() - : enableLocalPlayback(true) {} + AudioTrackConfig() : enableLocalPlayback(true),enableAudioProcessing(false) {} }; /** @@ -4889,211 +6106,201 @@ struct AudioTrackConfig { * | |--------------------|-----------------------------| | | * | | 0x3: voice changer | 0x1: voice transform | | | */ -/** The options for SDK preset voice beautifier effects. +/** + * @brief The options for SDK preset voice beautifier effects. */ enum VOICE_BEAUTIFIER_PRESET { - /** Turn off voice beautifier effects and use the original voice. + /** + * Turn off voice beautifier effects and use the original voice. */ VOICE_BEAUTIFIER_OFF = 0x00000000, - /** A more magnetic voice. - * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you - * may experience vocal distortion. + /** + * A more magnetic voice. + * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may + * experience vocal distortion. */ CHAT_BEAUTIFIER_MAGNETIC = 0x01010100, - /** A fresher voice. - * + /** + * A fresher voice. * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you * may experience vocal distortion. */ CHAT_BEAUTIFIER_FRESH = 0x01010200, - /** A more vital voice. - * + /** + * A more vital voice. * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you * may experience vocal distortion. 
*/ CHAT_BEAUTIFIER_VITALITY = 0x01010300, /** * Singing beautifier effect. - * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding voice and add a reverberation effect - * that sounds like singing in a small room. Agora recommends not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process - * a female-sounding voice; otherwise, you may experience vocal distortion. - * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can beautify a male- or - * female-sounding voice and add a reverberation effect. + * - If you call `setVoiceBeautifierPreset` ( SINGING_BEAUTIFIER ), you can beautify a male-sounding + * voice and add a reverberation effect that sounds like singing in a small room. Agora recommends + * using this enumerator to process a male-sounding voice; otherwise, you might experience vocal + * distortion. + * - If you call `setVoiceBeautifierParameters` ( SINGING_BEAUTIFIER, param1, param2), you can + * beautify a male or female-sounding voice and add a reverberation effect. */ SINGING_BEAUTIFIER = 0x01020100, - /** A more vigorous voice. + /** + * A more vigorous voice. */ TIMBRE_TRANSFORMATION_VIGOROUS = 0x01030100, - /** A deeper voice. + /** + * A deep voice. */ TIMBRE_TRANSFORMATION_DEEP = 0x01030200, - /** A mellower voice. + /** + * A mellower voice. */ TIMBRE_TRANSFORMATION_MELLOW = 0x01030300, - /** A falsetto voice. + /** + * Falsetto. */ TIMBRE_TRANSFORMATION_FALSETTO = 0x01030400, - /** A fuller voice. + /** + * A fuller voice. */ TIMBRE_TRANSFORMATION_FULL = 0x01030500, - /** A clearer voice. + /** + * A clearer voice. */ TIMBRE_TRANSFORMATION_CLEAR = 0x01030600, - /** A more resounding voice. + /** + * A more resounding voice. */ TIMBRE_TRANSFORMATION_RESOUNDING = 0x01030700, - /** A more ringing voice. + /** + * A more ringing voice. 
*/ TIMBRE_TRANSFORMATION_RINGING = 0x01030800, /** * A ultra-high quality voice, which makes the audio clearer and restores more details. - * - To achieve better audio effect quality, Agora recommends that you call `setAudioProfile` - * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` - * and `scenario` to `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`. - * - If you have an audio capturing device that can already restore audio details to a high - * degree, Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may - * over-restore audio details, and you may not hear the anticipated voice effect. + * - To achieve better audio effect quality, Agora recommends that you set the `profile` of + * `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5) and `scenario` to `AUDIO_SCENARIO_GAME_STREAMING` + * (3) before calling `setVoiceBeautifierPreset`. + * - If you have an audio capturing device that can already restore audio details to a high degree, + * Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may over-restore + * audio details, and you may not hear the anticipated voice effect. */ ULTRA_HIGH_QUALITY_VOICE = 0x01040100 }; -/** Preset voice effects. +/** + * @brief Preset audio effects. * - * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using the following presets: + * @details + * To get better audio effects, Agora recommends calling `setAudioProfile(AUDIO_PROFILE_TYPE profile, AUDIO_SCENARIO_TYPE scenario)` and setting the `profile` parameter as recommended below before using the preset audio effects. 
+ * | Preset audio effects | `profile` | + * | ------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | + * | - ROOM_ACOUSTICS_VIRTUAL_STEREO - ROOM_ACOUSTICS_3D_VOICE - ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND | `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` or `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` | + * | Other preset audio effects (except for `AUDIO_EFFECT_OFF` ) | `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` | * - * - `ROOM_ACOUSTICS_KTV` - * - `ROOM_ACOUSTICS_VOCAL_CONCERT` - * - `ROOM_ACOUSTICS_STUDIO` - * - `ROOM_ACOUSTICS_PHONOGRAPH` - * - `ROOM_ACOUSTICS_SPACIAL` - * - `ROOM_ACOUSTICS_ETHEREAL` - * - `ROOM_ACOUSTICS_CHORUS` - * - `VOICE_CHANGER_EFFECT_UNCLE` - * - `VOICE_CHANGER_EFFECT_OLDMAN` - * - `VOICE_CHANGER_EFFECT_BOY` - * - `VOICE_CHANGER_EFFECT_SISTER` - * - `VOICE_CHANGER_EFFECT_GIRL` - * - `VOICE_CHANGER_EFFECT_PIGKING` - * - `VOICE_CHANGER_EFFECT_HULK` - * - `PITCH_CORRECTION` */ enum AUDIO_EFFECT_PRESET { - /** Turn off voice effects, that is, use the original voice. + /** + * Turn off voice effects, that is, use the original voice. */ AUDIO_EFFECT_OFF = 0x00000000, - /** The voice effect typical of a KTV venue. + /** + * The voice effect typical of a KTV venue. */ ROOM_ACOUSTICS_KTV = 0x02010100, - /** The voice effect typical of a concert hall. + /** + * The voice effect typical of a concert hall. */ ROOM_ACOUSTICS_VOCAL_CONCERT = 0x02010200, - /** The voice effect typical of a recording studio. + /** + * The voice effect typical of a recording studio. */ ROOM_ACOUSTICS_STUDIO = 0x02010300, - /** The voice effect typical of a vintage phonograph. + /** + * The voice effect typical of a vintage phonograph. */ ROOM_ACOUSTICS_PHONOGRAPH = 0x02010400, - /** The virtual stereo effect, which renders monophonic audio as stereo audio. 
- * - * @note Before using this preset, set the `profile` parameter of `setAudioProfile` - * to `AUDIO_PROFILE_MUSIC_STANDARD_STEREO(3)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)`; - * otherwise, the preset setting is invalid. + /** + * The virtual stereo effect, which renders monophonic audio as stereo audio. */ ROOM_ACOUSTICS_VIRTUAL_STEREO = 0x02010500, - /** A more spatial voice effect. + /** + * A more spatial voice effect. */ ROOM_ACOUSTICS_SPACIAL = 0x02010600, - /** A more ethereal voice effect. + /** + * A more ethereal voice effect. */ ROOM_ACOUSTICS_ETHEREAL = 0x02010700, - /** A 3D voice effect that makes the voice appear to be moving around the user. The default cycle - * period of the 3D voice effect is 10 seconds. To change the cycle period, call `setAudioEffectParameters` - * after this method. - * - * @note - * - Before using this preset, set the `profile` parameter of `setAudioProfile` to - * `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise, - * the preset setting is invalid. - * - If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear + /** + * A 3D voice effect that makes the voice appear to be moving around the user. The default cycle + * period is 10 seconds. After setting this effect, you can call `setAudioEffectParameters` to + * modify the movement period. + * @note If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear * the anticipated voice effect. */ ROOM_ACOUSTICS_3D_VOICE = 0x02010800, - /** virtual suround sound. - * - * @note - * - Agora recommends using this enumerator to process virtual suround sound; otherwise, you may - * not hear the anticipated voice effect. 
- * - To achieve better audio effect quality, Agora recommends calling \ref - * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to - * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before - * setting this enumerator. + /** + * Virtual surround sound, that is, the SDK generates a simulated surround sound field on the basis + * of stereo channels, thereby creating a surround sound effect. + * @note If the virtual surround sound is enabled, users need to use stereo audio playback devices + * to hear the anticipated audio effect. */ ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND = 0x02010900, - /** The voice effect for chorus. - * - * @note: To achieve better audio effect quality, Agora recommends calling \ref - * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to - * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before - * setting this enumerator. - */ + /** + * The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the + * sense of depth and dimension in the vocals. + */ ROOM_ACOUSTICS_CHORUS = 0x02010D00, - /** A middle-aged man's voice. - * - * @note - * Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + /** + * A middle-aged man's voice. + * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not + * hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_UNCLE = 0x02020100, - /** A senior man's voice. - * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + /** + * An older man's voice. + * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not + * hear the anticipated voice effect. 
*/ VOICE_CHANGER_EFFECT_OLDMAN = 0x02020200, - /** A boy's voice. - * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + /** + * A boy's voice. + * @note Agora recommends using this preset to process a male-sounding voice; otherwise, you may not + * hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_BOY = 0x02020300, - /** A young woman's voice. - * - * @note - * - Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may + /** + * A young woman's voice. + * @note Agora recommends using this preset to process a female-sounding voice; otherwise, you may * not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_SISTER = 0x02020400, - /** A girl's voice. - * - * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may + /** + * A girl's voice. + * @note Agora recommends using this preset to process a female-sounding voice; otherwise, you may * not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_GIRL = 0x02020500, - /** The voice of Pig King, a character in Journey to the West who has a voice like a growling - * bear. + /** + * The voice of Pig King, a character in Journey to the West who has a voice like a growling bear. */ VOICE_CHANGER_EFFECT_PIGKING = 0x02020600, - /** The Hulk's voice. + /** + * The Hulk's voice. */ VOICE_CHANGER_EFFECT_HULK = 0x02020700, - /** An audio effect typical of R&B music. - * - * @note Before using this preset, set the `profile` parameter of `setAudioProfile` to - - `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise, - * the preset setting is invalid. + /** + * The voice effect typical of R&B music. */ STYLE_TRANSFORMATION_RNB = 0x02030100, - /** The voice effect typical of popular music. 
- * - * @note Before using this preset, set the `profile` parameter of `setAudioProfile` to - - `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO`; otherwise, - * the preset setting is invalid. + /** + * The voice effect typical of popular music. */ STYLE_TRANSFORMATION_POPULAR = 0x02030200, - /** A pitch correction effect that corrects the user's pitch based on the pitch of the natural C - * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust - * the basic mode of tuning and the pitch of the main tone. + /** + * A pitch correction effect that corrects the user's pitch based on the pitch of the natural C + * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust + * the basic mode of tuning and the pitch of the main tone. */ PITCH_CORRECTION = 0x02040100, @@ -5102,22 +6309,32 @@ enum AUDIO_EFFECT_PRESET { */ }; -/** The options for SDK preset voice conversion. +/** + * @brief The options for SDK preset voice conversion effects. */ enum VOICE_CONVERSION_PRESET { - /** Turn off voice conversion and use the original voice. + /** + * Turn off voice conversion effects and use the original voice. */ VOICE_CONVERSION_OFF = 0x00000000, - /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** + * A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to process + * a female-sounding voice. */ VOICE_CHANGER_NEUTRAL = 0x03010100, - /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** + * A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a + * female-sounding voice. */ VOICE_CHANGER_SWEET = 0x03010200, - /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** + * A steady voice. 
To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. */ VOICE_CHANGER_SOLID = 0x03010300, - /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** + * A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. */ VOICE_CHANGER_BASS = 0x03010400, /** A voice like a cartoon character. @@ -5156,163 +6373,314 @@ enum VOICE_CONVERSION_PRESET { }; -/** The options for SDK preset headphone equalizer. +/** + * @brief Preset headphone equalizer types. */ enum HEADPHONE_EQUALIZER_PRESET { - /** Turn off headphone EQ and use the original voice. + /** + * The headphone equalizer is disabled, and the original audio is heard. */ HEADPHONE_EQUALIZER_OFF = 0x00000000, - /** For over-ear headphones. + /** + * An equalizer is used for headphones. */ HEADPHONE_EQUALIZER_OVEREAR = 0x04000001, - /** For in-ear headphones. + /** + * An equalizer is used for in-ear headphones. */ HEADPHONE_EQUALIZER_INEAR = 0x04000002 }; -/** The options for SDK voice AI tuner. +/** + * @brief Voice AI tuner sound types. */ enum VOICE_AI_TUNER_TYPE { - /** Uncle, deep and magnetic male voice. + /** + * 0: Mature male voice. A deep and magnetic male voice. */ VOICE_AI_TUNER_MATURE_MALE, - /** Fresh male, refreshing and sweet male voice. + /** + * 1: Fresh male voice. A fresh and slightly sweet male voice. */ VOICE_AI_TUNER_FRESH_MALE, - /** Big sister, deep and charming female voice. + /** + * 2: Elegant female voice. A deep and charming female voice. */ VOICE_AI_TUNER_ELEGANT_FEMALE, - /** Lolita, high-pitched and cute female voice. + /** + * 3: Sweet female voice. A high-pitched and cute female voice. */ VOICE_AI_TUNER_SWEET_FEMALE, - /** Warm man singing, warm and melodic male voice that is suitable for male lyrical songs. + /** + * 4: Warm male singing. A warm and melodious male voice. 
*/ VOICE_AI_TUNER_WARM_MALE_SINGING, - /** Gentle female singing, soft and delicate female voice that is suitable for female lyrical songs. + /** + * 5: Gentle female singing. A soft and delicate female voice. */ VOICE_AI_TUNER_GENTLE_FEMALE_SINGING, - /** Smoky uncle singing, unique husky male voice that is suitable for rock or blues songs. + /** + * 6: Husky male singing. A unique husky male voice. */ VOICE_AI_TUNER_HUSKY_MALE_SINGING, - /** Warm big sister singing, warm and mature female voice that is suitable for emotionally powerful songs. + /** + * 7: Warm elegant female singing. A warm and mature female voice. */ VOICE_AI_TUNER_WARM_ELEGANT_FEMALE_SINGING, - /** Forceful male singing, strong and powerful male voice that is suitable for passionate songs. + /** + * 8: Powerful male singing. A strong and powerful male voice. */ VOICE_AI_TUNER_POWERFUL_MALE_SINGING, - /** Dreamy female singing, dreamlike and soft female voice that is suitable for airy and dream-like songs. + /** + * 9: Dreamy female singing. A dreamy and soft female voice. */ VOICE_AI_TUNER_DREAMY_FEMALE_SINGING, }; /** - * Screen sharing configurations. + * @brief The audio configuration for the shared screen stream. + * + * @details + * Only available where `captureAudio` is `true`. + * + */ +struct ScreenAudioParameters { + /** + * Audio sample rate (Hz). + */ + int sampleRate; + /** + * The number of audio channels. The default value is 2, which means stereo. + */ + int channels; + /** + * The volume of the captured system audio. The value range is [0, 100]. The default value is 100. + */ + int captureSignalVolume; + +#if defined(__APPLE__) && !TARGET_OS_IOS + /** + * @technical preview + */ + bool excludeCurrentProcessAudio = true; + ScreenAudioParameters(): sampleRate(48000), channels(2), captureSignalVolume(100) {} +#else + ScreenAudioParameters(): sampleRate(16000), channels(2), captureSignalVolume(100) {} +#endif +}; + +/** + * @brief Screen sharing configurations. 
*/ struct ScreenCaptureParameters { + /** - * On Windows and macOS, it represents the video encoding resolution of the shared screen stream. - * See `VideoDimensions`. The default value is 1920 x 1080, that is, 2,073,600 pixels. Agora uses - * the value of this parameter to calculate the charges. + * Determines whether to capture system audio during screen sharing: + * - `true`: Capture. + * - `false`: (Default) Do not capture. * - * If the aspect ratio is different between the encoding dimensions and screen dimensions, Agora - * applies the following algorithms for encoding. Suppose dimensions are 1920 x 1080: - * - If the value of the screen dimensions is lower than that of dimensions, for example, - * 1000 x 1000 pixels, the SDK uses 1000 x 1000 pixels for encoding. - * - If the value of the screen dimensions is higher than that of dimensions, for example, - * 2000 x 1500, the SDK uses the maximum value under dimensions with the aspect ratio of - * the screen dimension (4:3) for encoding, that is, 1440 x 1080. + * @note + * Due to system limitations, capturing system audio is only available for Android API level 29 + * and later (that is, Android 10 and later). + */ + bool captureAudio; + /** + * The audio configuration for the shared screen stream. + * @note This parameter only takes effect when `captureAudio` is `true`. + * See `ScreenAudioParameters`. + */ + ScreenAudioParameters audioParams; + + /** + * The video encoding resolution of the screen sharing stream. See `VideoDimensions`. The default + * value is 1920 × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to + * calculate the charges. + * If the screen dimensions are different from the value of this parameter, Agora applies the + * following strategies for encoding. 
Suppose `dimensions` is set to 1920 × 1080: + * - If the value of the screen dimensions is lower than that of `dimensions`, for example, 1000 × + * 1000 pixels, the SDK uses the screen dimensions, that is, 1000 × 1000 pixels, for encoding. + * - If the value of the screen dimensions is higher than that of `dimensions`, for example, 2000 × + * 1500, the SDK uses the maximum value under `dimensions` with the aspect ratio of the screen + * dimension (4:3) for encoding, that is, 1440 × 1080. + * @note + * When setting the encoding resolution in the scenario of sharing documents ( + * SCREEN_SCENARIO_DOCUMENT ), choose one of the following two methods: + * - If you require the best image quality, it is recommended to set the encoding resolution to be + * the same as the capture resolution. + * - If you wish to achieve a relative balance between image quality, bandwidth, and system + * performance, then: + * - When the capture resolution is greater than 1920 × 1080, it is recommended that the encoding + * resolution is not less than 1920 × 1080. + * - When the capture resolution is less than 1920 × 1080, it is recommended that the encoding + * resolution is not less than 1280 × 720. */ VideoDimensions dimensions; /** - * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen stream. - * The frame rate (fps) of the shared region. The default value is 5. We do not recommend setting - * this to a value greater than 15. + * On Windows and macOS, this represents the video encoding frame rate (fps) of the screen sharing + * stream. The frame rate (fps) of the shared region. The default value is 5. Agora does not + * recommend setting this to a value greater than 15. */ int frameRate; /** - * On Windows and macOS, it represents the video encoding bitrate of the shared screen stream. + * On Windows and macOS, this represents the video encoding bitrate of the screen sharing stream. * The bitrate (Kbps) of the shared region. 
The default value is 0 (the SDK works out a bitrate * according to the dimensions of the current screen). */ int bitrate; - /** Whether to capture the mouse in screen sharing: + /** + * Whether to capture the mouse in screen sharing: * - `true`: (Default) Capture the mouse. * - `false`: Do not capture the mouse. + * @note Due to macOS system restrictions, setting this parameter to `false` is ineffective during + * screen sharing (it has no impact when sharing a window). */ bool captureMouseCursor; /** - * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method to share it: + * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method + * to share it: * - `true`: Bring the window to the front. * - `false`: (Default) Do not bring the window to the front. - */ + * @note Due to macOS system limitations, when setting this member to bring the window to the front, + * if the current app has multiple windows, only the main window will be brought to the front. + */ bool windowFocus; /** - * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start screen sharing, - * you can use this parameter to block a specified window. When calling `updateScreenCaptureParameters` to update - * screen sharing configurations, you can use this parameter to dynamically block the specified windows during - * screen sharing. + * The ID list of the windows to be blocked. When calling `startScreenCaptureByDisplayId` to start + * screen sharing, you can use this parameter to block a specified window. When calling + * `updateScreenCaptureParameters` to update screen sharing configurations, you can use this + * parameter to dynamically block a specified window. */ - view_t *excludeWindowList; + view_t* excludeWindowList; /** - * The number of windows to be blocked. + * The number of windows to be excluded. 
+ * @note On the Windows platform, the maximum value of this parameter is 24; if this value is + * exceeded, excluding the window fails. */ int excludeWindowCount; - /** The width (px) of the border. Defaults to 0, and the value range is [0,50]. - * - */ + /** + * (For macOS and Windows only) The width (px) of the border. The default value is 5, and the value + * range is (0, 50]. + * @note This parameter only takes effect when `highLighted` is set to `true`. + */ int highLightWidth; - /** The color of the border in RGBA format. The default value is 0xFF8CBF26. - * - */ + /** + * (For macOS and Windows only) + * - On Windows platforms, the color of the border in ARGB format. The default value is 0xFF8CBF26. + * - On macOS, `COLOR_CLASS` refers to `NSColor`. + */ unsigned int highLightColor; - /** Whether to place a border around the shared window or screen: - * - true: Place a border. - * - false: (Default) Do not place a border. - * - * @note When you share a part of a window or screen, the SDK places a border around the entire window or screen if you set `enableHighLight` as true. - * - */ + /** + * (For macOS and Windows only) Whether to place a border around the shared window or screen: + * - `true`: Place a border. + * - `false`: (Default) Do not place a border. + * @note When you share a part of a window or screen, the SDK places a border around the entire + * window or screen if you set this parameter to `true`. 
+ */ bool enableHighLight; ScreenCaptureParameters() - : dimensions(1920, 1080), frameRate(5), bitrate(STANDARD_BITRATE), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : captureAudio(false), + dimensions(1920, 1080), + frameRate(5), + bitrate(STANDARD_BITRATE), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(const VideoDimensions& d, int f, int b) - : dimensions(d), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : captureAudio(false),dimensions(d), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false){} + : captureAudio(false), + dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int 
width, int height, int f, int b, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} -}; - -/** - * Audio recording quality. + : captureAudio(false), + dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, view_t* ex, int cnt) + : captureAudio(false), + dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t* ex, + int cnt) + : captureAudio(false), + dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} +}; + +/** + * @brief Recording quality. */ enum AUDIO_RECORDING_QUALITY_TYPE { /** - * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of recording. + * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of + * recording. */ AUDIO_RECORDING_QUALITY_LOW = 0, /** - * 1: Medium quality. 
The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes of recording. + * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_MEDIUM = 1, /** - * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes of recording. + * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_HIGH = 2, /** - * 3: Ultra high audio recording quality. + * 3: Ultra high quality. The sample rate is 32 kHz, and the file size is around 7.5 MB after 10 + * minutes of recording. */ AUDIO_RECORDING_QUALITY_ULTRA_HIGH = 3, }; /** - * Recording content. Set in `startAudioRecording`. + * @brief Recording content. Set in `startAudioRecording [3/3]`. */ enum AUDIO_FILE_RECORDING_TYPE { /** @@ -5330,29 +6698,30 @@ enum AUDIO_FILE_RECORDING_TYPE { }; /** - * Audio encoded frame observer position. + * @brief Audio profile. */ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION { /** - * 1: Only records the audio of the local user. - */ + * 1: Only records the audio of the local user. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD = 1, /** - * 2: Only records the audio of all remote users. - */ + * 2: Only records the audio of all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK = 2, /** - * 3: Records the mixed audio of the local and all remote users. - */ + * 3: Records the mixed audio of the local and all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED = 3, }; /** - * Recording configuration. + * @brief Recording configurations. */ struct AudioRecordingConfiguration { /** - * The absolute path (including the filename extensions) of the recording file. For example: `C:\music\audio.mp4`. + * The absolute path (including the filename extensions) of the recording file. For example: + * `C:\music\audio.aac`. 
* @note Ensure that the directory for the log files exists and is writable. */ const char* filePath; @@ -5368,8 +6737,9 @@ struct AudioRecordingConfiguration { * - (Default) 32000 * - 44100 * - 48000 - * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC files with quality - * to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for better recording quality. + * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC + * files with `quality` set as AUDIO_RECORDING_QUALITY_MEDIUM or AUDIO_RECORDING_QUALITY_HIGH for + * better recording quality. */ int sampleRate; /** @@ -5383,138 +6753,162 @@ struct AudioRecordingConfiguration { AUDIO_RECORDING_QUALITY_TYPE quality; /** - * Recording channel. The following values are supported: - * - (Default) 1 - * - 2 + * The audio channel of recording: The parameter supports the following values: + * - 1: (Default) Mono. + * - 2: Stereo. + * @note + * The actual recorded audio channel is related to the audio channel that you capture. + * - If the captured audio is mono and `recordingChannel` is `2`, the recorded audio is the + * dual-channel data that is copied from mono data, not stereo. + * - If the captured audio is dual channel and `recordingChannel` is `1`, the recorded audio is the + * mono data that is mixed by dual-channel data. + * The integration scheme also affects the final recorded audio channel. If you need to record in + * stereo, contact `technical support`. 
*/ int recordingChannel; AudioRecordingConfiguration() - : filePath(OPTIONAL_NULLPTR), - encode(false), - sampleRate(32000), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(AUDIO_RECORDING_QUALITY_LOW), - recordingChannel(1) {} - - AudioRecordingConfiguration(const char* file_path, int sample_rate, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(false), - sampleRate(sample_rate), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, AUDIO_FILE_RECORDING_TYPE type, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(enc), - sampleRate(sample_rate), - fileRecordingType(type), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const AudioRecordingConfiguration &rhs) - : filePath(rhs.filePath), - encode(rhs.encode), - sampleRate(rhs.sampleRate), - fileRecordingType(rhs.fileRecordingType), - quality(rhs.quality), - recordingChannel(rhs.recordingChannel) {} -}; - -/** - * Observer settings for the encoded audio. 
+ : filePath(OPTIONAL_NULLPTR), + encode(false), + sampleRate(32000), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(AUDIO_RECORDING_QUALITY_LOW), + recordingChannel(1) {} + + AudioRecordingConfiguration(const char* file_path, int sample_rate, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(false), + sampleRate(sample_rate), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, + AUDIO_FILE_RECORDING_TYPE type, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(enc), + sampleRate(sample_rate), + fileRecordingType(type), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const AudioRecordingConfiguration& rhs) + : filePath(rhs.filePath), + encode(rhs.encode), + sampleRate(rhs.sampleRate), + fileRecordingType(rhs.fileRecordingType), + quality(rhs.quality), + recordingChannel(rhs.recordingChannel) {} +}; + +/** + * @brief Observer settings for the encoded audio. */ struct AudioEncodedFrameObserverConfig { - /** - * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. - */ - AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; - /** - * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`. - */ - AUDIO_ENCODING_TYPE encodingType; - - AudioEncodedFrameObserverConfig() - : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), - encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM){} + /** + * Audio profile. See `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. + */ + AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; + /** + * Audio encoding type. See `AUDIO_ENCODING_TYPE`. 
+ */ + AUDIO_ENCODING_TYPE encodingType; + AudioEncodedFrameObserverConfig() + : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), + encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM) {} }; /** * The encoded audio observer. */ class IAudioEncodedFrameObserver { -public: -/** -* Gets the encoded audio data of the local user. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, -* you can get the encoded audio data of the local user from this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; - -/** -* Gets the encoded audio data of all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, -* you can get encoded audio data of all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; - -/** -* Gets the mixed and encoded audio data of the local and all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the audio profile as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, -* you can get the mixed and encoded audio data of the local and all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. 
-* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + public: + /** + * @brief Gets the encoded audio data of the local user. + * + * @details + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD, you can get the encoded audio data of the local + * user from this callback. + * + * @param frameBuffer The audio buffer. + * @param length The data length (byte). + * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`. + * + */ + virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -virtual ~IAudioEncodedFrameObserver () {} -}; + /** + * @brief Gets the encoded audio data of all remote users. + * + * @details + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK, you can get encoded audio data of all remote + * users through this callback. + * + * @param frameBuffer The audio buffer. + * @param length The data length (byte). + * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`. + * + */ + virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -/** The region for connection, which is the region where the server the SDK connects to is located. - */ -enum AREA_CODE { - /** - * Mainland China. - */ - AREA_CODE_CN = 0x00000001, - /** - * North America. - */ - AREA_CODE_NA = 0x00000002, - /** - * Europe. - */ - AREA_CODE_EU = 0x00000004, - /** - * Asia, excluding Mainland China. 
- */ - AREA_CODE_AS = 0x00000008, - /** - * Japan. - */ - AREA_CODE_JP = 0x00000010, - /** - * India. - */ - AREA_CODE_IN = 0x00000020, - /** - * (Default) Global. - */ - AREA_CODE_GLOB = (0xFFFFFFFF) + /** + * @brief Gets the mixed and encoded audio data of the local and all remote users. + * + * @details + * After calling `registerAudioEncodedFrameObserver` and setting the audio profile as + * AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED, you can get the mixed and encoded audio data of the + * local and all remote users through this callback. + * + * @param frameBuffer The audio buffer. + * @param length The data length (byte). + * @param audioEncodedFrameInfo Audio information after encoding. See `EncodedAudioFrameInfo`. + * + */ + virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + + virtual ~IAudioEncodedFrameObserver() {} +}; + +/** + * @brief The region for connection, which is the region where the server the SDK connects to is + * located. + */ +enum AREA_CODE { + /** + * Mainland China. + */ + AREA_CODE_CN = 0x00000001, + /** + * North America. + */ + AREA_CODE_NA = 0x00000002, + /** + * Europe. + */ + AREA_CODE_EU = 0x00000004, + /** + * Asia, excluding Mainland China. + */ + AREA_CODE_AS = 0x00000008, + /** + * Japan. + */ + AREA_CODE_JP = 0x00000010, + /** + * India. + */ + AREA_CODE_IN = 0x00000020, + /** + * Global. + */ + AREA_CODE_GLOB = (0xFFFFFFFF) }; /** @@ -5557,131 +6951,157 @@ enum AREA_CODE_EX { }; /** - * The error code of the channel media replay. + * @brief The error code of the channel media relay. */ enum CHANNEL_MEDIA_RELAY_ERROR { - /** 0: No error. + /** + * 0: No error. */ RELAY_OK = 0, - /** 1: An error occurs in the server response. + /** + * 1: An error occurs in the server response. */ RELAY_ERROR_SERVER_ERROR_RESPONSE = 1, - /** 2: No server response. You can call the `leaveChannel` method to leave the channel. 
- * - * This error can also occur if your project has not enabled co-host token authentication. You can contact technical - * support to enable the service for cohosting across channels before starting a channel media relay. + /** + * 2: No server response. + * This error may be caused by poor network connections. If this error occurs when initiating a + * channel media relay, you can try again later; if this error occurs during channel media relay, + * you can call `leaveChannel(const LeaveChannelOptions& options)` to leave the channel. + * This error can also occur if the channel media relay service is not enabled in the project. You + * can contact `technical support` to enable the service. */ RELAY_ERROR_SERVER_NO_RESPONSE = 2, - /** 3: The SDK fails to access the service, probably due to limited resources of the server. + /** + * 3: The SDK fails to access the service, probably due to limited resources of the server. */ RELAY_ERROR_NO_RESOURCE_AVAILABLE = 3, - /** 4: Fails to send the relay request. + /** + * 4: Fails to send the relay request. */ RELAY_ERROR_FAILED_JOIN_SRC = 4, - /** 5: Fails to accept the relay request. + /** + * 5: Fails to accept the relay request. */ RELAY_ERROR_FAILED_JOIN_DEST = 5, - /** 6: The server fails to receive the media stream. + /** + * 6: The server fails to receive the media stream. */ RELAY_ERROR_FAILED_PACKET_RECEIVED_FROM_SRC = 6, - /** 7: The server fails to send the media stream. + /** + * 7: The server fails to send the media stream. */ RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST = 7, - /** 8: The SDK disconnects from the server due to poor network connections. You can call the `leaveChannel` method to - * leave the channel. + /** + * 8: The SDK disconnects from the server due to poor network connections. You can call + * `leaveChannel(const LeaveChannelOptions& options)` to leave the channel. */ RELAY_ERROR_SERVER_CONNECTION_LOST = 8, - /** 9: An internal error occurs in the server. 
+ /** + * 9: An internal error occurs in the server. */ RELAY_ERROR_INTERNAL_ERROR = 9, - /** 10: The token of the source channel has expired. + /** + * 10: The token of the source channel has expired. */ RELAY_ERROR_SRC_TOKEN_EXPIRED = 10, - /** 11: The token of the destination channel has expired. + /** + * 11: The token of the destination channel has expired. */ RELAY_ERROR_DEST_TOKEN_EXPIRED = 11, }; /** - * The state code of the channel media relay. + * @brief The state code of the channel media relay. */ enum CHANNEL_MEDIA_RELAY_STATE { - /** 0: The initial state. After you successfully stop the channel media relay by calling `stopChannelMediaRelay`, - * the `onChannelMediaRelayStateChanged` callback returns this state. + /** + * 0: The initial state. After you successfully stop the channel media relay by calling + * `stopChannelMediaRelay`, the `onChannelMediaRelayStateChanged` callback returns this state. */ RELAY_STATE_IDLE = 0, - /** 1: The SDK tries to relay the media stream to the destination channel. + /** + * 1: The SDK tries to relay the media stream to the destination channel. */ RELAY_STATE_CONNECTING = 1, - /** 2: The SDK successfully relays the media stream to the destination channel. + /** + * 2: The SDK successfully relays the media stream to the destination channel. */ RELAY_STATE_RUNNING = 2, - /** 3: An error occurs. See `code` in `onChannelMediaRelayStateChanged` for the error code. + /** + * 3: An error occurs. See `code` in `onChannelMediaRelayStateChanged` for the error code. */ RELAY_STATE_FAILURE = 3, }; -/** The definition of ChannelMediaInfo. +/** + * @brief Channel media information. */ struct ChannelMediaInfo { - /** The user ID. - */ + /** + * The user ID. + */ uid_t uid; - /** The channel name. The default value is NULL, which means that the SDK - * applies the current channel name. - */ + /** + * The channel name. + */ const char* channelName; - /** The token that enables the user to join the channel. 
The default value - * is NULL, which means that the SDK applies the current token. - */ + /** + * The token that enables the user to join the channel. + */ const char* token; ChannelMediaInfo() : uid(0), channelName(NULL), token(NULL) {} ChannelMediaInfo(const char* c, const char* t, uid_t u) : uid(u), channelName(c), token(t) {} }; -/** The definition of ChannelMediaRelayConfiguration. +/** + * @brief Configuration of cross channel media relay. */ struct ChannelMediaRelayConfiguration { - /** The information of the source channel `ChannelMediaInfo`. It contains the following members: - * - `channelName`: The name of the source channel. The default value is `NULL`, which means the SDK applies the name - * of the current channel. - * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is 0, which means the - * SDK generates a random UID. You must set it as 0. - * - `token`: The token for joining the source channel. It is generated with the `channelName` and `uid` you set in - * `srcInfo`. - * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`, which means the - * SDK applies the App ID. - * - If you have enabled the App Certificate, you must use the token generated with the `channelName` and `uid`, and - * the `uid` must be set as 0. + /** + * The information of the source channel. See `ChannelMediaInfo`. It contains the following members: + * - `channelName`: The name of the source channel. The default value is `NULL`, which means the SDK + * applies the name of the current channel. + * - `token`: The `token` for joining the source channel. This token is generated with the + * `channelName` and `uid` you set in `srcInfo`. + * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`, + * which means the SDK applies the App ID. 
+ * - If you have enabled the App Certificate, you must use the `token` generated with the + * `channelName` and `uid`, and the `uid` must be set as 0. + * - `uid`: The unique user ID to identify the relay stream in the source channel. Agora recommends + * leaving the default value of 0 unchanged. */ ChannelMediaInfo* srcInfo; - /** The information of the destination channel `ChannelMediaInfo`. It contains the following members: - * - `channelName`: The name of the destination channel. - * - `uid`: The unique ID to identify the relay stream in the destination channel. The value - * ranges from 0 to (2^32-1). To avoid UID conflicts, this `UID` must be different from any - * other `UID` in the destination channel. The default value is 0, which means the SDK generates - * a random `UID`. Do not set this parameter as the `UID` of the host in the destination channel, - * and ensure that this `UID` is different from any other `UID` in the channel. - * - `token`: The token for joining the destination channel. It is generated with the `channelName` - * and `uid` you set in `destInfos`. - * - If you have not enabled the App Certificate, set this parameter as the default value NULL, + /** + * The information of the target channel `ChannelMediaInfo`. It contains the following members: + * - `channelName`: The name of the target channel. + * - `token`: The `token` for joining the target channel. It is generated with the `channelName` and + * `uid` you set in `destInfos`. + * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`, * which means the SDK applies the App ID. - * If you have enabled the App Certificate, you must use the token generated with the `channelName` - * and `uid`. + * - If you have enabled the App Certificate, you must use the `token` generated with the + * `channelName` and `uid`. + * - `uid`: The unique user ID to identify the relay stream in the target channel. The value ranges + * from 0 to (2^32-1). 
To avoid user ID conflicts, this user ID must be different from any other + * user ID in the target channel. The default value is 0, which means the SDK generates a random + * UID. + * @note If the token of any target channel expires, the whole media relay stops; hence Agora + * recommends that you specify the same expiration time for the tokens of all the target channels. */ ChannelMediaInfo* destInfos; - /** The number of destination channels. The default value is 0, and the value range is from 0 to - * 6. Ensure that the value of this parameter corresponds to the number of `ChannelMediaInfo` - * structs you define in `destInfo`. + /** + * The number of target channels. The default value is 0, and the value range is from 0 to 6. Ensure + * that the value of this parameter corresponds to the number of `ChannelMediaInfo` structs you + * define in `destInfo`. */ int destCount; - ChannelMediaRelayConfiguration() : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} + ChannelMediaRelayConfiguration() + : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} }; /** - * The uplink network information. + * @brief The uplink network information. 
*/ struct UplinkNetworkInfo { /** @@ -5722,11 +7142,11 @@ struct DownlinkNetworkInfo { expected_bitrate_bps(-1) {} PeerDownlinkInfo(const PeerDownlinkInfo& rhs) - : stream_type(rhs.stream_type), + : stream_type(rhs.stream_type), current_downscale_level(rhs.current_downscale_level), expected_bitrate_bps(rhs.expected_bitrate_bps) { if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5741,7 +7161,7 @@ struct DownlinkNetworkInfo { current_downscale_level = rhs.current_downscale_level; expected_bitrate_bps = rhs.expected_bitrate_bps; if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5775,18 +7195,18 @@ struct DownlinkNetworkInfo { int total_received_video_count; DownlinkNetworkInfo() - : lastmile_buffer_delay_time_ms(-1), - bandwidth_estimation_bps(-1), - total_downscale_level_count(-1), - peer_downlink_info(OPTIONAL_NULLPTR), - total_received_video_count(-1) {} + : lastmile_buffer_delay_time_ms(-1), + bandwidth_estimation_bps(-1), + total_downscale_level_count(-1), + peer_downlink_info(OPTIONAL_NULLPTR), + total_received_video_count(-1) {} DownlinkNetworkInfo(const DownlinkNetworkInfo& info) - : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), - bandwidth_estimation_bps(info.bandwidth_estimation_bps), - total_downscale_level_count(info.total_downscale_level_count), - peer_downlink_info(OPTIONAL_NULLPTR), - total_received_video_count(info.total_received_video_count) { + : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), + bandwidth_estimation_bps(info.bandwidth_estimation_bps), + total_downscale_level_count(info.total_downscale_level_count), + peer_downlink_info(OPTIONAL_NULLPTR), + 
total_received_video_count(info.total_received_video_count) { if (total_received_video_count <= 0) return; peer_downlink_info = new PeerDownlinkInfo[total_received_video_count]; for (int i = 0; i < total_received_video_count; ++i) @@ -5812,76 +7232,94 @@ struct DownlinkNetworkInfo { }; /** - * The built-in encryption mode. + * @brief The built-in encryption mode. * + * @details * Agora recommends using AES_128_GCM2 or AES_256_GCM2 encrypted mode. These two modes support the * use of salt for higher security. + * */ enum ENCRYPTION_MODE { - /** 1: 128-bit AES encryption, XTS mode. + /** + * 1: 128-bit AES encryption, XTS mode. */ AES_128_XTS = 1, - /** 2: 128-bit AES encryption, ECB mode. + /** + * 2: 128-bit AES encryption, ECB mode. */ AES_128_ECB = 2, - /** 3: 256-bit AES encryption, XTS mode. + /** + * 3: 256-bit AES encryption, XTS mode. */ AES_256_XTS = 3, - /** 4: 128-bit SM4 encryption, ECB mode. + /** + * 4: 128-bit SM4 encryption, ECB mode. */ SM4_128_ECB = 4, - /** 5: 128-bit AES encryption, GCM mode. + /** + * 5: 128-bit AES encryption, GCM mode. */ AES_128_GCM = 5, - /** 6: 256-bit AES encryption, GCM mode. + /** + * 6: 256-bit AES encryption, GCM mode. */ AES_256_GCM = 6, - /** 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires the setting of - * salt (`encryptionKdfSalt`). + /** + * 7: (Default) 128-bit AES encryption, GCM mode. This encryption mode requires the setting of salt + * (`encryptionKdfSalt`). */ AES_128_GCM2 = 7, - /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt (`encryptionKdfSalt`). + /** + * 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt + * (`encryptionKdfSalt`). */ AES_256_GCM2 = 8, - /** Enumerator boundary. + /** + * Enumerator boundary. */ MODE_END, }; -/** Built-in encryption configurations. */ +/** + * @brief Built-in encryption configurations. + */ struct EncryptionConfig { /** - * The built-in encryption mode. 
See #ENCRYPTION_MODE. Agora recommends using `AES_128_GCM2` - * or `AES_256_GCM2` encrypted mode. These two modes support the use of salt for higher security. + * The built-in encryption mode. See `ENCRYPTION_MODE`. Agora recommends using `AES_128_GCM2` or + * `AES_256_GCM2` encrypted mode. These two modes support the use of salt for higher security. */ ENCRYPTION_MODE encryptionMode; /** * Encryption key in string type with unlimited length. Agora recommends using a 32-byte key. - * - * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2). + * @note If you do not set an encryption key or set it as `NULL`, you cannot use the built-in + * encryption, and the SDK returns `-2`. */ const char* encryptionKey; /** - * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server side. - * - * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode. - * In this case, ensure that this parameter is not 0. + * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server + * side. See Media Stream Encryption for details. + * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode. In + * this case, ensure that this parameter is not `0`. */ uint8_t encryptionKdfSalt[32]; - + + /** + * Whether to enable data stream encryption: + * - `true`: Enable data stream encryption. + * - `false`: (Default) Disable data stream encryption. 
+ */ bool datastreamEncryptionEnabled; EncryptionConfig() - : encryptionMode(AES_128_GCM2), - encryptionKey(OPTIONAL_NULLPTR), - datastreamEncryptionEnabled(false) - { + : encryptionMode(AES_128_GCM2), + encryptionKey(OPTIONAL_NULLPTR), + datastreamEncryptionEnabled(false) { memset(encryptionKdfSalt, 0, sizeof(encryptionKdfSalt)); } /// @cond const char* getEncryptionString() const { - switch(encryptionMode) { + switch (encryptionMode) { case AES_128_XTS: return "aes-128-xts"; case AES_128_ECB: @@ -5906,39 +7344,88 @@ struct EncryptionConfig { /// @endcond }; -/** Encryption error type. +/** + * @brief Encryption error type. */ enum ENCRYPTION_ERROR_TYPE { - /** - * 0: Internal reason. - */ - ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, - /** - * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, - /** - * 2: MediaStream encryption errors. - */ - ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, - /** - * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, - /** - * 4: DataStream encryption errors. - */ - ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, + /** + * 0: Internal reason. + */ + ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, + /** + * 1: Media stream decryption error. Ensure that the receiver and the sender use the same encryption + * mode and key. + */ + ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, + /** + * 2: Media stream encryption error. + */ + ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, + /** + * 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption + * mode and key. + */ + ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, + /** + * 4: Data stream encryption error. 
+ */ + ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, }; -enum UPLOAD_ERROR_REASON -{ +enum UPLOAD_ERROR_REASON { UPLOAD_SUCCESS = 0, UPLOAD_NET_ERROR = 1, UPLOAD_SERVER_ERROR = 2, }; -/** The type of the device permission. +/** + * @brief Represents the error codes after calling `renewToken`. + * + * @since 4.6.0 + */ +enum RENEW_TOKEN_ERROR_CODE { + /** + * (0): Token updated successfully. + */ + RENEW_TOKEN_SUCCESS = 0, + /** + * (1): Token update failed due to an unknown server error. It is recommended to check the + * parameters used to generate the Token, regenerate the Token, and retry `renewToken`. + */ + RENEW_TOKEN_FAILURE = 1, + /** + * (2): Token update failed because the provided Token has expired. It is recommended to generate a + * new Token with a longer expiration time and retry `renewToken`. + */ + RENEW_TOKEN_TOKEN_EXPIRED = 2, + /** + * (3): Token update failed because the provided Token is invalid. Common reasons include: the + * project has enabled App Certificate in the Agora Console but did not use a Token when joining the + * channel; the uid specified in `joinChannel` is inconsistent with the uid used when generating the + * Token; the channel name specified in `joinChannel` is inconsistent with the one used when + * generating the Token. It is recommended to check the Token generation process, generate a new + * Token, and retry `renewToken`. + */ + RENEW_TOKEN_INVALID_TOKEN = 3, + /** + * (4): Token update failed because the channel name in the Token does not match the current + * channel. It is recommended to check the channel name, generate a new Token, and retry + * `renewToken`. + */ + RENEW_TOKEN_INVALID_CHANNEL_NAME = 4, + /** + * (5): Token update failed because the App ID in the Token does not match the current App ID. It is + * recommended to check the App ID, generate a new Token, and retry `renewToken`. 
+ */ + RENEW_TOKEN_INCONSISTENT_APPID = 5, + /** + * (6): The previous Token update request was canceled due to a new request being initiated. + */ + RENEW_TOKEN_CANCELED_BY_NEW_REQUEST = 6, +}; + +/** + * @brief The type of the device permission. */ enum PERMISSION_TYPE { /** @@ -5950,28 +7437,34 @@ enum PERMISSION_TYPE { */ CAMERA = 1, + /** + * (For Android only) 2: Permission for screen sharing. + */ SCREEN_CAPTURE = 2, }; /** - * The subscribing state. + * @brief The subscribing state. */ enum STREAM_SUBSCRIBE_STATE { /** - * 0: The initial subscribing state after joining the channel. + * 0: The initial subscribing state after joining the channel. */ SUB_STATE_IDLE = 0, /** * 1: Fails to subscribe to the remote stream. Possible reasons: * - The remote user: - * - Calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending local - * media stream. - * - Calls `disableAudio` or `disableVideo `to disable the local audio or video module. - * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. + * - Calls `muteLocalAudioStream` (`true`) or `muteLocalVideoStream` (`true`) to stop sending + * local media stream. + * - Calls `disableAudio` or `disableVideo` to disable the local audio or video module. + * - Calls `enableLocalAudio` ( false ) or `enableLocalVideo` ( false ) to disable local audio or + * video capture. * - The role of the remote user is audience. * - The local user calls the following methods to stop receiving remote streams: - * - Calls `muteRemoteAudioStream(true)`, `muteAllRemoteAudioStreams(true)` to stop receiving the remote audio streams. - * - Calls `muteRemoteVideoStream(true)`, `muteAllRemoteVideoStreams(true)` to stop receiving the remote video streams. + * - Call `muteRemoteAudioStream` ( true ) or `muteAllRemoteAudioStreams` ( true ) to stop + * receiving the remote audio stream. 
+ * - Call `muteRemoteVideoStream` ( true ) or `muteAllRemoteVideoStreams` ( true ) to stop + * receiving the remote video stream. */ SUB_STATE_NO_SUBSCRIBED = 1, /** @@ -5979,13 +7472,13 @@ enum STREAM_SUBSCRIBE_STATE { */ SUB_STATE_SUBSCRIBING = 2, /** - * 3: Subscribes to and receives the remote stream successfully. + * 3: The remote stream is received, and the subscription is successful. */ SUB_STATE_SUBSCRIBED = 3 }; /** - * The publishing state. + * @brief The publishing state. */ enum STREAM_PUBLISH_STATE { /** @@ -5994,9 +7487,12 @@ enum STREAM_PUBLISH_STATE { PUB_STATE_IDLE = 0, /** * 1: Fails to publish the local stream. Possible reasons: - * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending the local media stream. - * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video module. - * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. + * - The local user calls `muteLocalAudioStream` (`true`) or `muteLocalVideoStream` (`true`) to stop + * sending local media streams. + * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video + * module. + * - The local user calls `enableLocalAudio` (`false`) or `enableLocalVideo` (`false`) to disable + * the local audio or video capture. * - The role of the local user is audience. */ PUB_STATE_NO_PUBLISHED = 1, @@ -6011,25 +7507,65 @@ enum STREAM_PUBLISH_STATE { }; /** - * The EchoTestConfiguration struct. + * @brief The configuration of the audio and video call loop test. */ struct EchoTestConfiguration { + /** + * The view used to render the local user's video. This parameter is only applicable to scenarios + * testing video devices, that is, when `enableVideo` is true. + */ view_t view; + /** + * Whether to enable the audio device for the loop test: + * - `true`: (Default) Enable the audio device. 
To test the audio device, set this parameter as + * true. + * - `false`: Disable the audio device. + */ bool enableAudio; + /** + * Whether to enable the video device for the loop test: + * - `true`: (Default) Enable the video device. To test the video device, set this parameter as + * true. + * - `false`: Disable the video device. + */ bool enableVideo; + /** + * The token used to secure the audio and video call loop test. If you do not enable App Certificate + * in Agora Console, you do not need to pass a value in this parameter; if you have enabled App + * Certificate in Agora Console, you must pass a token in this parameter; the `uid` used when you + * generate the token must be 0xFFFFFFFF, and the channel name used must be the channel name that + * identifies each audio and video call loop tested. For server-side token generation, see the token + * generation documentation. + */ const char* token; + /** + * The channel name that identifies each audio and video call loop. To ensure proper loop test + * functionality, the channel name passed in to identify each loop test cannot be the same when + * users of the same project (App ID) perform audio and video call loop tests on different devices. + */ const char* channelId; + /** + * Set the time interval or delay for returning the results of the audio and video loop test. The + * value range is [2,10], in seconds, with the default value being 2 seconds. + * - For audio loop tests, the test results will be returned according to the time interval you set. + * - For video loop tests, the video will be displayed in a short time, after which the delay will + * gradually increase until it reaches the delay you set. 
+ */ int intervalInSeconds; EchoTestConfiguration(view_t v, bool ea, bool ev, const char* t, const char* c, const int is) - : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} + : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} EchoTestConfiguration() - : view(OPTIONAL_NULLPTR), enableAudio(true), enableVideo(true), token(OPTIONAL_NULLPTR), channelId(OPTIONAL_NULLPTR), intervalInSeconds(2) {} + : view(OPTIONAL_NULLPTR), + enableAudio(true), + enableVideo(true), + token(OPTIONAL_NULLPTR), + channelId(OPTIONAL_NULLPTR), + intervalInSeconds(2) {} }; /** - * The information of the user. + * @brief The information of the user. */ struct UserInfo { /** @@ -6037,37 +7573,36 @@ struct UserInfo { */ uid_t uid; /** - * The user account. The maximum data length is `MAX_USER_ACCOUNT_LENGTH_TYPE`. + * User account. The maximum data length is `MAX_USER_ACCOUNT_LENGTH_TYPE`. */ char userAccount[MAX_USER_ACCOUNT_LENGTH]; - UserInfo() : uid(0) { - userAccount[0] = '\0'; - } + UserInfo() : uid(0) { userAccount[0] = '\0'; } }; /** - * The audio filter of in-ear monitoring. + * @brief The audio filter types of in-ear monitoring. */ enum EAR_MONITORING_FILTER_TYPE { /** - * 1: Do not add an audio filter to the in-ear monitor. + * 1<<0: No audio filter added to in-ear monitoring. */ - EAR_MONITORING_FILTER_NONE = (1<<0), + EAR_MONITORING_FILTER_NONE = (1 << 0), /** - * 2: Enable audio filters to the in-ear monitor. If you implement functions such as voice - * beautifier and audio effect, users can hear the voice after adding these effects. + * 1<<1: Add vocal effects audio filter to in-ear monitoring. If you implement functions such as + * voice beautifier and audio effect, users can hear the voice after adding these effects. */ - EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1<<1), + EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1 << 1), /** - * 4: Enable noise suppression to the in-ear monitor. 
+ * 1<<2: Add noise suppression audio filter to in-ear monitoring. */ - EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1<<2), + EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1 << 2), /** - * 32768: Enable audio filters by reuse post-processing filter to the in-ear monitor. - * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other bits will be disregarded. + * 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring. + * This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable + * for latency-tolerant scenarios requiring low CPU consumption. */ - EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1<<15), + EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1 << 15), }; /** @@ -6100,52 +7635,26 @@ enum THREAD_PRIORITY_TYPE { CRITICAL = 5, }; -#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) +#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__) /** - * The video configuration for the shared screen stream. + * @brief The video configuration for the shared screen stream. */ struct ScreenVideoParameters { /** - * The dimensions of the video encoding resolution. The default value is `1280` x `720`. - * For recommended values, see [Recommended video - * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). - * If the aspect ratio is different between width and height and the screen, the SDK adjusts the - * video encoding resolution according to the following rules (using an example where `width` × - * `height` is 1280 × 720): - * - When the width and height of the screen are both lower than `width` and `height`, the SDK - * uses the resolution of the screen for video encoding. For example, if the screen is 640 × - * 360, The SDK uses 640 × 360 for video encoding. 
- * - When either the width or height of the screen is higher than `width` or `height`, the SDK - * uses the maximum values that do not exceed those of `width` and `height` while maintaining - * the aspect ratio of the screen for video encoding. For example, if the screen is 2000 × 1500, - * the SDK uses 960 × 720 for video encoding. - * - * @note - * - The billing of the screen sharing stream is based on the values of width and height. - * When you do not pass in these values, Agora bills you at 1280 × 720; - * when you pass in these values, Agora bills you at those values. - * For details, see [Pricing for Real-time - * Communication](https://docs.agora.io/en/Interactive%20Broadcast/billing_rtc). - * - This value does not indicate the orientation mode of the output ratio. - * For how to set the video orientation, see `ORIENTATION_MODE`. - * - Whether the SDK can support a resolution at 720P depends on the performance of the device. - * If you set 720P but the device cannot support it, the video frame rate can be lower. + * The video encoding dimension. The default value is 1280 × 720. */ VideoDimensions dimensions; /** - * The video encoding frame rate (fps). The default value is `15`. - * For recommended values, see [Recommended video - * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). + * The video encoding frame rate (fps). The default value is 15. */ int frameRate = 15; - /** - * The video encoding bitrate (Kbps). For recommended values, see [Recommended video - * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). + /** + * The video encoding bitrate (Kbps). */ int bitrate; - /* - * The content hint of the screen sharing: + /** + * The content hint for screen sharing. See `VIDEO_CONTENT_HINT`. 
*/ VIDEO_CONTENT_HINT contentHint = VIDEO_CONTENT_HINT::CONTENT_HINT_MOTION; @@ -6153,124 +7662,140 @@ struct ScreenVideoParameters { }; /** - * The audio configuration for the shared screen stream. - */ -struct ScreenAudioParameters { - /** - * The audio sample rate (Hz). The default value is `16000`. - */ - int sampleRate = 16000; - /** - * The number of audio channels. The default value is `2`, indicating dual channels. - */ - int channels = 2; - /** - * The volume of the captured system audio. The value range is [0,100]. The default value is - * `100`. - */ - int captureSignalVolume = 100; -}; - -/** - * The configuration of the screen sharing + * @brief Screen sharing configurations. */ struct ScreenCaptureParameters2 { /** * Determines whether to capture system audio during screen sharing: - * - `true`: Capture. - * - `false`: (Default) Do not capture. - * - * **Note** - * Due to system limitations, capturing system audio is only available for Android API level 29 + * - `true`: Capture system audio. + * - `false`: (Default) Do not capture system audio. + * @note + * - Due to system limitations, capturing system audio is only applicable to Android API level 29 * and later (that is, Android 10 and later). + * - To improve the success rate of capturing system audio during screen sharing, ensure that you + * have called the `setAudioScenario` method and set the audio scenario to + * `AUDIO_SCENARIO_GAME_STREAMING`. */ bool captureAudio = false; /** - * The audio configuration for the shared screen stream. + * The audio configuration for the shared screen stream. See `ScreenAudioParameters`. + * @note This parameter only takes effect when `captureAudio` is `true`. */ ScreenAudioParameters audioParams; /** - * Determines whether to capture the screen during screen sharing: - * - `true`: (Default) Capture. - * - `false`: Do not capture. 
- * - * **Note** - * Due to system limitations, screen capture is only available for Android API level 21 and later - * (that is, Android 5 and later). + * Whether to capture the screen when screen sharing: + * - `true`: (Default) Capture the screen. + * - `false`: Do not capture the screen. + * @note Due to system limitations, the capture screen is only applicable to Android API level 21 + * and above, that is, Android 5 and above. */ bool captureVideo = true; /** - * The video configuration for the shared screen stream. + * The video configuration for the shared screen stream. See `ScreenVideoParameters`. + * @note This parameter only takes effect when `captureVideo` is `true`. */ ScreenVideoParameters videoParams; }; #endif /** - * The tracing event of media rendering. + * @brief The rendering state of the media frame. */ enum MEDIA_TRACE_EVENT { /** - * 0: The media frame has been rendered. + * 0: The video frame has been rendered. */ MEDIA_TRACE_EVENT_VIDEO_RENDERED = 0, /** - * 1: The media frame has been decoded. + * 1: The video frame has been decoded. */ MEDIA_TRACE_EVENT_VIDEO_DECODED, }; /** - * The video rendering tracing result + * @brief Indicators during video frame rendering progress. */ struct VideoRenderingTracingInfo { /** - * Elapsed time from the start tracing time to the time when the tracing event occurred. + * The time interval (ms) from `startMediaRenderingTracing` to SDK triggering the + * `onVideoRenderingTracingResult` callback. Agora recommends you call `startMediaRenderingTracing` + * before joining a channel. */ int elapsedTime; /** - * Elapsed time from the start tracing time to the time when join channel. - * - * **Note** - * If the start tracing time is behind the time when join channel, this value will be negative. 
+ * The time interval (ms) from `startMediaRenderingTracing` to `joinChannel(const char* token, const + * char* channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)` + * . A negative number indicates that `startMediaRenderingTracing` is called after calling + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)`. */ int start2JoinChannel; /** - * Elapsed time from joining channel to finishing joining channel. + * The time interval (ms) from `joinChannel(const char* token, const char* channelId, const char* + * info, uid_t uid)` or `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to successfully joining + * the channel. */ int join2JoinSuccess; /** - * Elapsed time from finishing joining channel to remote user joined. - * - * **Note** - * If the start tracing time is after the time finishing join channel, this value will be - * the elapsed time from the start tracing time to remote user joined. The minimum value is 0. + * - If the local user calls `startMediaRenderingTracing` before successfully joining the channel, + * this value is the time interval (ms) from the local user successfully joining the channel to the + * remote user joining the channel. + * - If the local user calls `startMediaRenderingTracing` after successfully joining the channel, + * the value is the time interval (ms) from `startMediaRenderingTracing` to when the remote user + * joins the channel. + * @note + * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel, + * the value is 0 and meaningless. + * - In order to reduce the time of rendering the first frame for remote users, Agora recommends + * that the local user joins the channel when the remote user is in the channel to reduce this + * value. 
*/ int joinSuccess2RemoteJoined; /** - * Elapsed time from remote user joined to set the view. - * - * **Note** - * If the start tracing time is after the time when remote user joined, this value will be - * the elapsed time from the start tracing time to set the view. The minimum value is 0. + * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel, + * this value is the time interval (ms) from when the remote user joins the channel to when the + * local user sets the remote view. + * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel, + * this value is the time interval (ms) from calling `startMediaRenderingTracing` to setting the + * remote view. + * @note + * - If the local user calls `startMediaRenderingTracing` after setting the remote view, the value + * is 0 and has no effect. + * - In order to reduce the time of rendering the first frame for remote users, Agora recommends + * that the local user sets the remote view before the remote user joins the channel, or sets the + * remote view immediately after the remote user joins the channel to reduce this value. */ int remoteJoined2SetView; /** - * Elapsed time from remote user joined to the time subscribing remote video stream. - * - * **Note** - * If the start tracing time is after the time when remote user joined, this value will be - * the elapsed time from the start tracing time to the time subscribing remote video stream. - * The minimum value is 0. + * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel, + * this value is the time interval (ms) from the remote user joining the channel to subscribing to + * the remote video stream. + * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel, + * this value is the time interval (ms) from `startMediaRenderingTracing` to subscribing to the + * remote video stream. 
+ * @note + * - If the local user calls `startMediaRenderingTracing` after subscribing to the remote video + * stream, the value is 0 and has no effect. + * - In order to reduce the time of rendering the first frame for remote users, Agora recommends + * that after the remote user joins the channel, the local user immediately subscribes to the remote + * video stream to reduce this value. */ int remoteJoined2UnmuteVideo; /** - * Elapsed time from remote user joined to the remote video packet received. - * - * **Note** - * If the start tracing time is after the time when remote user joined, this value will be - * the elapsed time from the start tracing time to the time subscribing remote video stream. - * The minimum value is 0. + * - If the local user calls `startMediaRenderingTracing` before the remote user joins the channel, + * this value is the time interval (ms) from when the remote user joins the channel to when the + * local user receives the remote video stream. + * - If the local user calls `startMediaRenderingTracing` after the remote user joins the channel, + * this value is the time interval (ms) from `startMediaRenderingTracing` to receiving the remote + * video stream. + * @note + * - If the local user calls `startMediaRenderingTracing` after receiving the remote video stream, + * the value is 0 and has no effect. + * - In order to reduce the time of rendering the first frame for remote users, Agora recommends + * that the remote user publishes video streams immediately after joining the channel, and the local + * user immediately subscribes to remote video streams to reduce this value. */ int remoteJoined2PacketReceived; }; @@ -6286,65 +7811,99 @@ enum CONFIG_FETCH_TYPE { CONFIG_FETCH_TYPE_JOIN_CHANNEL = 2, }; - -/** The local proxy mode type. */ +/** + * @brief Connection mode with the Agora Private Media Server. + */ enum LOCAL_PROXY_MODE { - /** 0: Connect local proxy with high priority, if not connected to local proxy, fallback to sdrtn. 
+ /** + * 0: The SDK first tries to connect to the specified Agora Private Media Server; if it fails, it + * connects to the Agora SD-RTN™. */ ConnectivityFirst = 0, - /** 1: Only connect local proxy + /** + * 1: The SDK only tries to connect to the specified Agora Private Media Server. */ LocalOnly = 1, }; +/** + * @brief Configuration information for the log server. + */ struct LogUploadServerInfo { - /** Log upload server domain + /** + * Domain name of the log server. */ const char* serverDomain; - /** Log upload server path + /** + * Storage path for logs on the server. */ const char* serverPath; - /** Log upload server port + /** + * Port of the log server. */ int serverPort; - /** Whether to use HTTPS request: - - true: Use HTTPS request - - fasle: Use HTTP request + /** + * Whether the log server uses HTTPS protocol: + * - `true`: Uses HTTPS. + * - `false`: Uses HTTP. */ bool serverHttps; LogUploadServerInfo() : serverDomain(NULL), serverPath(NULL), serverPort(0), serverHttps(true) {} - LogUploadServerInfo(const char* domain, const char* path, int port, bool https) : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} + LogUploadServerInfo(const char* domain, const char* path, int port, bool https) + : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} }; +/** + * @brief Advanced options for the Local Access Point. + */ struct AdvancedConfigInfo { - /** Log upload server + /** + * Custom log upload server. By default, the SDK uploads logs to the Agora log server. You can use + * this parameter to change the log upload server. See `LogUploadServerInfo`. */ LogUploadServerInfo logUploadServer; }; +/** + * @brief Configuration for the Local Access Point. + */ struct LocalAccessPointConfiguration { - /** Local access point IP address list. + /** + * Internal IP address list of the Local Access Point. Either ipList or domainList must be + * specified. 
*/ const char** ipList; - /** The number of local access point IP address. + /** + * Number of internal IP addresses for the Local Access Point. This value must match the number of + * IP addresses you provide. */ int ipListSize; - /** Local access point domain list. + /** + * Domain name list of the Local Access Point. The SDK resolves the IP addresses of the Local Access + * Point from the provided domain names. The DNS resolution timeout is 10 seconds. Either ipList or + * domainList must be specified. If you specify both IP addresses and domain names, the SDK merges + * and deduplicates the resolved IP addresses and the specified IP addresses, then randomly selects + * one for load balancing. */ const char** domainList; - /** The number of local access point domain. + /** + * Number of domain names for the Local Access Point. This value must match the number of domain + * names you provide. */ int domainListSize; - /** Certificate domain name installed on specific local access point. pass "" means using sni domain on specific local access point - * SNI(Server Name Indication) is an extension to the TLS protocol. + /** + * Domain name for internal certificate verification. If left empty, the SDK uses the default domain + * name `secure-edge.local` for certificate verification. */ const char* verifyDomainName; - /** Local proxy connection mode, connectivity first or local only. + /** + * Connection mode. See `LOCAL_PROXY_MODE`. */ LOCAL_PROXY_MODE mode; - /** Local proxy connection, advanced Config info. + /** + * Advanced options for the Local Access Point. See `AdvancedConfigInfo`. 
   */
  AdvancedConfigInfo advancedConfig;
  /**
@@ -6353,23 +7912,108 @@ struct LocalAccessPointConfiguration {
    - false: not disable vos-aut
    */
   bool disableAut;
-  LocalAccessPointConfiguration() : ipList(NULL), ipListSize(0), domainList(NULL), domainListSize(0), verifyDomainName(NULL), mode(ConnectivityFirst), disableAut(true) {}
+  LocalAccessPointConfiguration()
+      : ipList(NULL),
+        ipListSize(0),
+        domainList(NULL),
+        domainListSize(0),
+        verifyDomainName(NULL),
+        mode(ConnectivityFirst),
+        disableAut(true) {}
+};
+
+/**
+ * @brief Type of video stream to be recorded.
+ */
+enum RecorderStreamType {
+  /**
+   * 0: (Default) Video stream in the channel.
+   */
+  RTC,
+  /**
+   * 1: Local preview video stream before joining the channel.
+   */
+  PREVIEW,
 };
 
 /**
- * The information about recorded media streams.
+ * @brief The information about the media streams to be recorded.
  */
 struct RecorderStreamInfo {
-  const char* channelId;
-  /**
-   * The user ID.
-   */
-  uid_t uid;
-  /**
-   * The channel ID of the audio/video stream needs to be recorded.
-   */
-  RecorderStreamInfo() : channelId(NULL), uid(0) {}
-  RecorderStreamInfo(const char* channelId, uid_t uid) : channelId(channelId), uid(uid) {}
+  /**
+   * The name of the channel in which the media streams publish.
+   */
+  const char* channelId;
+  /**
+   * The ID of the user whose media streams you want to record.
+   */
+  uid_t uid;
+  /**
+   * The Recorder stream type.
+   */
+  RecorderStreamType type;
+  RecorderStreamInfo() : channelId(NULL), uid(0), type(RTC) {}
+  RecorderStreamInfo(const char* channelId, uid_t uid)
+      : channelId(channelId), uid(uid), type(RTC) {}
+  RecorderStreamInfo(const char* channelId, uid_t uid, RecorderStreamType type)
+      : channelId(channelId), uid(uid), type(type) {}
+};
+
+/**
+ * @brief Reliable Data Transmission Tunnel message stream type
+ *
+ * @technical preview
+ */
+enum RdtStreamType {
+  /**
+   * Command stream type.
+   * Characterized by: reliability, high priority, and not affected by congestion control.
+ * Transmission limits: a maximum of 256 bytes per packet, and 100 packets per second. + */ + RDT_STREAM_CMD, + /** + * Data stream type. + * Characterized by: reliability, low priority, and affected by congestion control. + * Transmission limits: a maximum of 128 KBytes per packet, with a rate of 4 Mbps. + */ + RDT_STREAM_DATA, + /** + * Reliable Data Transmission stream type count + */ + RDT_STREAM_COUNT, +}; + +/** + * @brief Reliable Data Transmission tunnel state + * + * @technical preview + */ +enum RdtState { + /** + * The RDT tunnel is in the initial or is closed. + */ + RDT_STATE_CLOSED, + /** + * The RDT tunnel is open, and data can only be sent in this state. + */ + RDT_STATE_OPENED, + /** + * The send buffer of the RDT tunnel is full. RDT_STREAM_DATA cannot be sent, + * but RDT_STREAM_CMD can be sent, as the latter is not affected by congestion control. + */ + RDT_STATE_BLOCKED, + /** + * The RDT tunnel is in a suspended state because SDK has disconnected. + * It will automatically resume to the RDT_STATE_OPENED state after rejoining the channel. + */ + RDT_STATE_PENDING, + /** + * The RDT channel is broken, and the data being sent and received will be cleared. + * It will automatically resume to the RDT_STATE_OPENED state later. + * Reason for occurrence: The remote user actively called the API to leave the + * channel and then rejoined the channel, without being detected by this end. 
+ */ + RDT_STATE_BROKEN, }; } // namespace rtc @@ -6396,93 +8040,151 @@ class AParameter : public agora::util::AutoPtr { }; class LicenseCallback { - public: - virtual ~LicenseCallback() {} - virtual void onCertificateRequired() = 0; - virtual void onLicenseRequest() = 0; - virtual void onLicenseValidated() = 0; - virtual void onLicenseError(int result) = 0; + public: + virtual ~LicenseCallback() {} + virtual void onCertificateRequired() = 0; + virtual void onLicenseRequest() = 0; + virtual void onLicenseValidated() = 0; + virtual void onLicenseError(int result) = 0; }; } // namespace base /** - * Spatial audio parameters + * @brief The spatial audio parameters. */ struct SpatialAudioParams { /** - * Speaker azimuth in a spherical coordinate system centered on the listener. + * The azimuth angle of the remote user or media player relative to the local user. The value range + * is [0,360], and the unit is degrees, The values are as follows: + * - 0: (Default) 0 degrees, which means directly in front on the horizontal plane. + * - 90: 90 degrees, which means directly to the left on the horizontal plane. + * - 180: 180 degrees, which means directly behind on the horizontal plane. + * - 270: 270 degrees, which means directly to the right on the horizontal plane. + * - 360: 360 degrees, which means directly in front on the horizontal plane. */ Optional speaker_azimuth; /** - * Speaker elevation in a spherical coordinate system centered on the listener. + * The elevation angle of the remote user or media player relative to the local user. The value + * range is [-90,90], and the unit is degrees, The values are as follows: + * - 0: (Default) 0 degrees, which means that the horizontal plane is not rotated. + * - -90: -90 degrees, which means that the horizontal plane is rotated 90 degrees downwards. + * - 90: 90 degrees, which means that the horizontal plane is rotated 90 degrees upwards. */ Optional speaker_elevation; /** - * Distance between speaker and listener. 
+ * The distance of the remote user or media player relative to the local user. The value range is + * [1,50], and the unit is meters. The default value is 1 meter. */ Optional speaker_distance; /** - * Speaker orientation [0-180], 0 degree is the same with listener orientation. + * The orientation of the remote user or media player relative to the local user. The value range is + * [0,180], and the unit is degrees, The values are as follows: + * - 0: (Default) 0 degrees, which means that the sound source and listener face the same direction. + * - 180: 180 degrees, which means that the sound source and listener face each other. */ Optional speaker_orientation; /** - * Enable blur or not for the speaker. + * Whether to enable audio blurring: + * - `true`: Enable audio blurring. + * - `false`: (Default) Disable audio blurring. */ Optional enable_blur; /** - * Enable air absorb or not for the speaker. + * Whether to enable air absorption, that is, to simulate the sound attenuation effect of sound + * transmitting in the air; under a certain transmission distance, the attenuation speed of + * high-frequency sound is fast, and the attenuation speed of low-frequency sound is slow. + * - `true`: (Default) Enable air absorption. Make sure that the value of `speaker_attenuation` is + * not `0`; otherwise, this setting does not take effect. + * - `false`: Disable air absorption. */ Optional enable_air_absorb; /** - * Speaker attenuation factor. + * The sound attenuation coefficient of the remote user or media player. The value range is [0,1]. + * The values are as follows: + * - 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume + * and timbre heard by local users do not change regardless of distance. + * - (0,0.5): Weak attenuation mode, where the volume and timbre only have a weak attenuation during + * the propagation, and the sound can travel farther than that in a real environment. 
+ * `enable_air_absorb` needs to be enabled at the same time. + * - 0.5: (Default) Simulates the attenuation of the volume in the real environment; the effect is + * equivalent to not setting the `speaker_attenuation` parameter. + * - (0.5,1]: Strong attenuation mode, where volume and timbre attenuate rapidly during the + * propagation. `enable_air_absorb` needs to be enabled at the same time. */ Optional speaker_attenuation; /** - * Enable doppler factor. + * Whether to enable the Doppler effect: When there is a relative displacement between the sound + * source and the receiver of the sound source, the tone heard by the receiver changes. + * - `true`: Enable the Doppler effect. + * - `false`: (Default) Disable the Doppler effect. + * @note + * - This parameter is suitable for scenarios where the sound source is moving at high speed (for + * example, racing games). It is not recommended for common audio and video interactive scenarios + * (for example, voice chat, co-streaming, or online KTV). + * - When this parameter is enabled, Agora recommends that you set a regular period (such as 30 ms), + * and then call the `updatePlayerPositionInfo`, `updateSelfPosition`, and `updateRemotePosition` + * methods to continuously update the relative distance between the sound source and the receiver. + * The following factors can cause the Doppler effect to be unpredictable or the sound to be + * jittery: the period of updating the distance is too long, the updating period is irregular, or + * the distance information is lost due to network packet loss or delay. */ Optional enable_doppler; }; /** - * Layout info of video stream which compose a transcoder video stream. -*/ -struct VideoLayout -{ + * @brief Layout information of a specific sub-video stream within the mixed stream. + */ +struct VideoLayout { /** - * Channel Id from which this video stream come from. - */ + * The channel name to which the sub-video stream belongs. 
+   */
  const char* channelId;
  /**
-   * User id of video stream.
-   */
+   * User ID who published this sub-video stream.
+   */
  rtc::uid_t uid;
  /**
-   * User account of video stream.
-   */
+   * Reserved for future use.
+   */
  user_id_t strUid;
  /**
-   * x coordinate of video stream on a transcoded video stream canvas.
-   */
+   * X-coordinate (px) of the sub-video stream on the mixing canvas. The relative lateral displacement
+   * of the top left corner of the video for video mixing to the origin (the top left corner of the
+   * canvas).
+   */
  uint32_t x;
  /**
-   * y coordinate of video stream on a transcoded video stream canvas.
-   */
+   * Y-coordinate (px) of the sub-video stream on the mixing canvas. The relative longitudinal
+   * displacement of the top left corner of the captured video to the origin (the top left corner of
+   * the canvas).
+   */
  uint32_t y;
  /**
-   * width of video stream on a transcoded video stream canvas.
-   */
+   * Width (px) of the sub-video stream.
+   */
  uint32_t width;
  /**
-   * height of video stream on a transcoded video stream canvas.
-   */
+   * Height (px) of the sub-video stream.
+   */
  uint32_t height;
  /**
-   * video state of video stream on a transcoded video stream canvas.
-   * 0 for normal video , 1 for placeholder image showed , 2 for black image.
-   */
-  uint32_t videoState;
+   * Status of the sub-video stream on the video mixing canvas.
+   * - 0: Normal. The sub-video stream has been rendered onto the mixing canvas.
+   * - 1: Placeholder image. The sub-video stream has no video frames and is displayed as a
+   * placeholder on the mixing canvas.
+   * - 2: Black image. The sub-video stream is replaced by a black image.
+ */ + uint32_t videoState; - VideoLayout() : channelId(OPTIONAL_NULLPTR), uid(0), strUid(OPTIONAL_NULLPTR), x(0), y(0), width(0), height(0), videoState(0) {} + VideoLayout() + : channelId(OPTIONAL_NULLPTR), + uid(0), + strUid(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + videoState(0) {} }; } // namespace agora @@ -6509,7 +8211,7 @@ AGORA_API int AGORA_CALL setAgoraSdkExternalSymbolLoader(void* (*func)(const cha * @note For license only, everytime will generate a different credential. * So, just need to call once for a device, and then save the credential */ -AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential); +AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString& credential); /** * Verify given certificate and return the result @@ -6524,8 +8226,10 @@ AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential) * @return The description of the error code. * @note For license only. */ -AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_buf, int credential_len, - const char *certificate_buf, int certificate_len); +AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char* credential_buf, + int credential_len, + const char* certificate_buf, + int certificate_len); /** * @brief Implement the agora::base::LicenseCallback, @@ -6534,10 +8238,10 @@ AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_ * @param [in] callback The object of agora::LiceseCallback, * set the callback to null before delete it. */ -AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback *callback); +AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback* callback); /** - * @brief Get the LicenseCallback pointer if already setup, + * @brief Gets the LicenseCallback pointer if already setup, * otherwise, return null. 
* * @return a pointer of agora::base::LicenseCallback @@ -6550,18 +8254,15 @@ AGORA_API agora::base::LicenseCallback* getAgoraLicenseCallback(); * typical scenario is as follows: * * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - * | // custom audio/video base capture time, e.g. the first audio/video capture time. | - * | int64_t custom_capture_time_base; | - * | | - * | int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | - * | | - * | // offset is fixed once calculated in the begining. | - * | const int64_t offset = agora_monotonic_time - custom_capture_time_base; | - * | | - * | // realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| - * | // actual_audio/video_capture_time is the actual capture time transfered to sdk. | - * | int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | - * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | + * | // custom audio/video base capture time, e.g. the first audio/video capture time. | | int64_t + * custom_capture_time_base; | | | | + * int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | + * | | | // offset is fixed once calculated in the begining. | | const int64_t offset = + * agora_monotonic_time - custom_capture_time_base; | | | | // + * realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| | // + * actual_audio/video_capture_time is the actual capture time transfered to sdk. 
| | + * int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | + * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ * * @return diff --git a/include/AgoraMediaBase.h b/include/AgoraMediaBase.h index 26b2eb2..fe0e84f 100644 --- a/include/AgoraMediaBase.h +++ b/include/AgoraMediaBase.h @@ -36,26 +36,25 @@ static const unsigned int DUMMY_CONNECTION_ID = (std::numeric_limitsaudio_track_number_ = src.audio_track_number_; size_t length = src.samples_per_channel_ * src.num_channels_; if (length > kMaxDataSizeSamples) { @@ -462,6 +465,7 @@ struct AudioPcmFrame { samples_per_channel_(0), sample_rate_hz_(0), num_channels_(0), + audio_track_number_(0), bytes_per_sample(rtc::TWO_BYTES_PER_SAMPLE), is_stereo_(false) { memset(data_, 0, sizeof(data_)); @@ -472,6 +476,7 @@ struct AudioPcmFrame { samples_per_channel_(src.samples_per_channel_), sample_rate_hz_(src.sample_rate_hz_), num_channels_(src.num_channels_), + audio_track_number_(src.audio_track_number_), bytes_per_sample(src.bytes_per_sample), is_stereo_(src.is_stereo_) { size_t length = src.samples_per_channel_ * src.num_channels_; @@ -483,29 +488,41 @@ struct AudioPcmFrame { } }; -/** Audio dual-mono output mode +/** + * @brief The channel mode. */ enum AUDIO_DUAL_MONO_MODE { - /**< ChanLOut=ChanLin, ChanRout=ChanRin */ + /** + * 0: Original mode. + */ AUDIO_DUAL_MONO_STEREO = 0, - /**< ChanLOut=ChanRout=ChanLin */ + /** + * 1: Left channel mode. This mode replaces the audio of the right channel with the audio of the + * left channel, which means the user can only hear the audio of the left channel. + */ AUDIO_DUAL_MONO_L = 1, - /**< ChanLOut=ChanRout=ChanRin */ + /** + * 2: Right channel mode. This mode replaces the audio of the left channel with the audio of the + * right channel, which means the user can only hear the audio of the right channel. 
+ */ AUDIO_DUAL_MONO_R = 2, - /**< ChanLout=ChanRout=(ChanLin+ChanRin)/2 */ + /** + * 3: Mixed channel mode. This mode mixes the audio of the left channel and the right channel, which + * means the user can hear the audio of the left channel and the right channel at the same time. + */ AUDIO_DUAL_MONO_MIX = 3 }; /** - * Video pixel formats. + * @brief The video pixel format. */ enum VIDEO_PIXEL_FORMAT { /** - * 0: Default format. + * 0: Raw video pixel format. */ VIDEO_PIXEL_DEFAULT = 0, /** - * 1: I420. + * 1: The format is I420. */ VIDEO_PIXEL_I420 = 1, /** @@ -517,7 +534,7 @@ enum VIDEO_PIXEL_FORMAT { */ VIDEO_PIXEL_NV21 = 3, /** - * 4: RGBA. + * 4: The format is RGBA. */ VIDEO_PIXEL_RGBA = 4, /** @@ -549,11 +566,12 @@ enum VIDEO_PIXEL_FORMAT { */ VIDEO_CVPIXEL_P010 = 15, /** - * 16: I422. + * 16: The format is I422. */ VIDEO_PIXEL_I422 = 16, /** - * 17: ID3D11Texture2D, only support DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_NV12 texture format + * 17: The ID3D11TEXTURE2D format. Currently supported types are `DXGI_FORMAT_B8G8R8A8_UNORM`, + * `DXGI_FORMAT_B8G8R8A8_TYPELESS` and `DXGI_FORMAT_NV12`. */ VIDEO_TEXTURE_ID3D11TEXTURE2D = 17, /** @@ -564,23 +582,23 @@ enum VIDEO_PIXEL_FORMAT { }; /** - * The video display mode. + * @brief Video display modes. */ enum RENDER_MODE_TYPE { /** - * 1: Uniformly scale the video until it fills the visible boundaries - * (cropped). One dimension of the video may have clipped contents. + * 1: Hidden mode. The priority is to fill the window. Any excess video that does not match the + * window size will be cropped. */ RENDER_MODE_HIDDEN = 1, /** - * 2: Uniformly scale the video until one of its dimension fits the boundary - * (zoomed to fit). Areas that are not filled due to the disparity in the - * aspect ratio will be filled with black. + * 2: Fit mode. The priority is to ensure that all video content is displayed. 
Any areas of the + * window that are not filled due to the mismatch between video size and window size will be filled + * with black. */ RENDER_MODE_FIT = 2, /** + * 3: Adaptive mode. * @deprecated - * 3: This mode is deprecated. */ RENDER_MODE_ADAPTIVE __deprecated = 3, }; @@ -608,12 +626,12 @@ enum CAMERA_VIDEO_SOURCE_TYPE { * This interface provides access to metadata information. */ class IVideoFrameMetaInfo { - public: - enum META_INFO_KEY { - KEY_FACE_CAPTURE = 0, - }; - virtual ~IVideoFrameMetaInfo() {}; - virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; + public: + enum META_INFO_KEY { + KEY_FACE_CAPTURE = 0, + }; + virtual ~IVideoFrameMetaInfo(){}; + virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; }; struct ColorSpace { @@ -775,34 +793,34 @@ struct Hdr10MetadataInfo { }; /** - * The relative position between alphabuffer and the frame. + * @brief The relative position of `alphaBuffer` and video frames. */ enum ALPHA_STITCH_MODE { /** - * 0: Normal frame without alphabuffer stitched + * 0: (Default) Only video frame, that is, `alphaBuffer` is not stitched with the video frame. */ NO_ALPHA_STITCH = 0, /** - * 1: Alphabuffer is above the frame + * 1: `alphaBuffer` is above the video frame. */ ALPHA_STITCH_UP = 1, /** - * 2: Alphabuffer is below the frame + * 2: `alphaBuffer` is below the video frame. */ ALPHA_STITCH_BELOW = 2, /** - * 3: Alphabuffer is on the left of frame + * 3: `alphaBuffer` is to the left of the video frame. */ ALPHA_STITCH_LEFT = 3, /** - * 4: Alphabuffer is on the right of frame + * 4: `alphaBuffer` is to the right of the video frame. */ ALPHA_STITCH_RIGHT = 4, }; /** - * The definition of the ExternalVideoFrame struct. + * @brief The external video frame. */ struct ExternalVideoFrame { ExternalVideoFrame() @@ -829,7 +847,7 @@ struct ExternalVideoFrame { d3d11Texture2d(NULL), textureSliceIndex(0){} - /** + /** * The EGL context type. 
*/ enum EGL_CONTEXT_TYPE { @@ -844,89 +862,86 @@ struct ExternalVideoFrame { }; /** - * Video buffer types. + * @brief The video buffer type. */ enum VIDEO_BUFFER_TYPE { /** - * 1: Raw data. + * 1: The video buffer in the format of raw data. */ VIDEO_BUFFER_RAW_DATA = 1, /** - * 2: The same as VIDEO_BUFFER_RAW_DATA. + * 2: The video buffer in the format of raw data. */ VIDEO_BUFFER_ARRAY = 2, /** - * 3: The video buffer in the format of texture. + * 3: The video buffer in the format of `Texture`. */ VIDEO_BUFFER_TEXTURE = 3, }; /** - * The buffer type: #VIDEO_BUFFER_TYPE. + * The video type. See `VIDEO_BUFFER_TYPE`. */ VIDEO_BUFFER_TYPE type; /** - * The pixel format: #VIDEO_PIXEL_FORMAT + * The pixel format. See `VIDEO_PIXEL_FORMAT`. */ VIDEO_PIXEL_FORMAT format; + /** - * The video buffer. + * Video frame buffer. */ void* buffer; /** - * The line spacing of the incoming video frame (px). For - * texture, it is the width of the texture. + * Line spacing of the incoming video frame, which must be in pixels instead of bytes. For textures, + * it is the width of the texture. */ int stride; /** - * The height of the incoming video frame. + * Height of the incoming video frame. */ int height; /** - * [Raw data related parameter] The number of pixels trimmed from the left. The default value is - * 0. + * Raw data related parameter. The number of pixels trimmed from the left. The default value is 0. */ int cropLeft; /** - * [Raw data related parameter] The number of pixels trimmed from the top. The default value is - * 0. + * Raw data related parameter. The number of pixels trimmed from the top. The default value is 0. */ int cropTop; /** - * [Raw data related parameter] The number of pixels trimmed from the right. The default value is - * 0. + * Raw data related parameter. The number of pixels trimmed from the right. The default value is 0. */ int cropRight; /** - * [Raw data related parameter] The number of pixels trimmed from the bottom. The default value - * is 0. 
+ * Raw data related parameter. The number of pixels trimmed from the bottom. The default value is 0. */ int cropBottom; /** - * [Raw data related parameter] The clockwise rotation information of the video frame. You can set the - * rotation angle as 0, 90, 180, or 270. The default value is 0. + * Raw data related parameter. The clockwise rotation of the video frame. You can set the rotation + * angle as 0, 90, 180, or 270. The default value is 0. */ int rotation; /** - * The timestamp (ms) of the incoming video frame. An incorrect timestamp results in a frame loss or + * Timestamp (ms) of the incoming video frame. An incorrect timestamp results in frame loss or * unsynchronized audio and video. - * - * Please refer to getAgoraCurrentMonotonicTimeInMs or getCurrentMonotonicTimeInMs - * to determine how to fill this filed. */ long long timestamp; /** - * [Texture-related parameter] - * When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set EGLContext to this field. - * When using the OpenGL interface (android.opengl.*) defined by Android, set EGLContext to this field. + * This parameter only applies to video data in Texture format. + * - When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set + * eglContext to this field. + * - When using the OpenGL interface (android.opengl.*) defined by Android, set eglContext to this + * field. */ - void *eglContext; + void* eglContext; /** - * [Texture related parameter] Texture ID used by the video frame. + * This parameter only applies to video data in Texture format. Texture ID of the video frame. */ EGL_CONTEXT_TYPE eglType; /** - * [Texture related parameter] Incoming 4 × 4 transformational matrix. The typical value is a unit matrix. + * This parameter only applies to video data in Texture format. Incoming 4 × 4 transformational + * matrix. The typical value is a unit matrix. 
   */
  int textureId;
  /**
@@ -939,44 +954,50 @@ struct ExternalVideoFrame {
   */
  float matrix[16];
  /**
-   * [Texture related parameter] The MetaData buffer.
-   * The default value is NULL
+   * This parameter only applies to video data in Texture format. The MetaData buffer. The default
+   * value is `NULL`.
   */
  uint8_t* metadataBuffer;
  /**
-   * [Texture related parameter] The MetaData size.
-   * The default value is 0
+   * This parameter only applies to video data in Texture format. The MetaData size. The default value
+   * is `0`.
   */
  int metadataSize;
  /**
-   * Indicates the alpha channel of current frame, which is consistent with the dimension of the video frame.
-   * The value range of each pixel is [0,255], where 0 represents the background; 255 represents the foreground.
-   * The default value is NULL.
+   * The alpha channel data output by using portrait segmentation algorithm. This data matches the
+   * size of the video frame, with each pixel value ranging from [0,255], where 0 represents the
+   * background and 255 represents the foreground (portrait).
+   * By setting this parameter, you can render the video background into various effects, such as
+   * transparent, solid color, image, video, etc.
   */
  uint8_t* alphaBuffer;
  /**
-   * [For bgra or rgba only] Extract alphaBuffer from bgra or rgba data. Set it true if you do not explicitly specify the alphabuffer.
-   * The default value is false
+   * This parameter only applies to video data in BGRA or RGBA format. Whether to extract the alpha
+   * channel data from the video frame and automatically fill it into `alphaBuffer`:
+   * - `true`: Extract and fill the alpha channel data.
+   * - `false`: (Default) Do not extract and fill the Alpha channel data.
+   * @note
+   * For video data in BGRA or RGBA format, you can set the Alpha channel data in either of the
+   * following ways:
+   * - Automatically by setting this parameter to `true`.
+   * - Manually through the `alphaBuffer` parameter.
*/ bool fillAlphaBuffer; /** - * The relative position between alphabuffer and the frame. - * 0: Normal frame; - * 1: Alphabuffer is above the frame; - * 2: Alphabuffer is below the frame; - * 3: Alphabuffer is on the left of frame; - * 4: Alphabuffer is on the right of frame; - * The default value is 0. + * When the video frame contains alpha channel data, it represents the relative position of + * `alphaBuffer` and the video frame. See `ALPHA_STITCH_MODE`. */ ALPHA_STITCH_MODE alphaStitchMode; /** - * [For Windows only] The pointer of ID3D11Texture2D used by the video frame. + * This parameter only applies to video data in Windows Texture format. It represents a pointer to + * an object of type` ID3D11Texture2D`, which is used by a video frame. */ void *d3d11Texture2d; /** - * [For Windows only] The index of ID3D11Texture2D array used by the video frame. + * This parameter only applies to video data in Windows Texture format. It represents an index of an + * `ID3D11Texture2D` texture object used by the video frame in the `ID3D11Texture2D` array. */ int textureSliceIndex; @@ -985,14 +1006,20 @@ struct ExternalVideoFrame { */ Hdr10MetadataInfo hdr10MetadataInfo; - /** - * The ColorSpace of the video frame. + /** + * By default, the color space properties of video frames will apply the Full Range and BT.709 + * standard configurations. */ ColorSpace colorSpace; }; /** - * The definition of the VideoFrame struct. + * @brief Configurations of the video frame. + * + * @details + * Note that the buffer provides a pointer to a pointer. This interface cannot modify the pointer of + * the buffer, but it can modify the content of the buffer. + * */ struct VideoFrame { VideoFrame(): @@ -1020,96 +1047,102 @@ struct VideoFrame { memset(matrix, 0, sizeof(matrix)); } /** - * The video pixel format: #VIDEO_PIXEL_FORMAT. + * The pixel format. See `VIDEO_PIXEL_FORMAT`. */ VIDEO_PIXEL_FORMAT type; /** - * The width of the video frame. + * The width of the video, in pixels. 
*/ int width; /** - * The height of the video frame. + * The height of the video, in pixels. */ int height; /** - * The line span of Y buffer in the YUV data. + * For YUV data, the line span of the Y buffer; for RGBA data, the total data length. + * @note When dealing with video data, it is necessary to process the offset between each line of + * pixel data based on this parameter, otherwise it may result in image distortion. */ int yStride; /** - * The line span of U buffer in the YUV data. + * For YUV data, the line span of the U buffer; for RGBA data, the value is 0. + * @note When dealing with video data, it is necessary to process the offset between each line of + * pixel data based on this parameter, otherwise it may result in image distortion. */ int uStride; /** - * The line span of V buffer in the YUV data. + * For YUV data, the line span of the V buffer; for RGBA data, the value is 0. + * @note When dealing with video data, it is necessary to process the offset between each line of + * pixel data based on this parameter, otherwise it may result in image distortion. */ int vStride; /** - * The pointer to the Y buffer in the YUV data. + * For YUV data, the pointer to the Y buffer; for RGBA data, the data buffer. */ uint8_t* yBuffer; /** - * The pointer to the U buffer in the YUV data. + * For YUV data, the pointer to the U buffer; for RGBA data, the value is 0. */ uint8_t* uBuffer; /** - * The pointer to the V buffer in the YUV data. + * For YUV data, the pointer to the V buffer; for RGBA data, the value is 0. */ uint8_t* vBuffer; /** - * The clockwise rotation information of this frame. You can set it as 0, 90, 180 or 270. + * The clockwise rotation of the video frame before rendering. Supported values include 0, 90, 180, + * and 270 degrees. */ int rotation; /** - * The timestamp to render the video stream. Use this parameter for audio-video synchronization when - * rendering the video. 
- * - * @note This parameter is for rendering the video, not capturing the video. + * The Unix timestamp (ms) when the video frame is rendered. This timestamp can be used to guide the + * rendering of the video frame. This parameter is required. */ int64_t renderTimeMs; /** - * The type of audio-video synchronization. + * Reserved for future use. */ int avsync_type; /** - * [Texture related parameter] The MetaData buffer. - * The default value is NULL + * This parameter only applies to video data in Texture format. The MetaData buffer. The default + * value is `NULL`. */ uint8_t* metadata_buffer; /** - * [Texture related parameter] The MetaData size. - * The default value is 0 + * This parameter only applies to video data in Texture format. The MetaData size. The default value + * is `0`. */ int metadata_size; /** - * [Texture related parameter], egl context. + * This parameter only applies to video data in Texture format. EGL Context. */ void* sharedContext; /** - * [Texture related parameter], Texture ID used by the video frame. + * This parameter only applies to video data in Texture format. Texture ID. */ int textureId; /** - * [Texture related parameter] The pointer of ID3D11Texture2D used by the video frame,for Windows only. + * This parameter only applies to video data in Windows Texture format. It represents a pointer to + * an object of type` ID3D11Texture2D`, which is used by a video frame. */ void* d3d11Texture2d; /** - * [Texture related parameter], Incoming 4 × 4 transformational matrix. + * This parameter only applies to video data in Texture format. Incoming 4 × 4 transformational + * matrix. The typical value is a unit matrix. */ float matrix[16]; /** - * Indicates the alpha channel of current frame, which is consistent with the dimension of the video frame. - * The value range of each pixel is [0,255], where 0 represents the background; 255 represents the foreground. - * The default value is NULL. 
+ * The alpha channel data output by using portrait segmentation algorithm. This data matches the + * size of the video frame, with each pixel value ranging from [0,255], where 0 represents the + * background and 255 represents the foreground (portrait). + * By setting this parameter, you can render the video background into various effects, such as + * transparent, solid color, image, video, etc. + * @note Make sure that `alphaBuffer` is exactly the same size as the video frame (width × + * height), otherwise it may cause the app to crash. */ uint8_t* alphaBuffer; /** - * The relative position between alphabuffer and the frame. - * 0: Normal frame; - * 1: Alphabuffer is above the frame; - * 2: Alphabuffer is below the frame; - * 3: Alphabuffer is on the left of frame; - * 4: Alphabuffer is on the right of frame; - * The default value is 0. + * When the video frame contains alpha channel data, it represents the relative position of + * `alphaBuffer` and the video frame. See `ALPHA_STITCH_MODE`. */ ALPHA_STITCH_MODE alphaStitchMode; /** @@ -1117,7 +1150,7 @@ struct VideoFrame { */ void* pixelBuffer; /** - * The pointer to IVideoFrameMetaInfo, which is the interface to get metainfo contents from VideoFrame. + * The meta information in the video frame. To use this parameter, contact `technical support`. */ IVideoFrameMetaInfo* metaInfo; @@ -1127,7 +1160,8 @@ struct VideoFrame { Hdr10MetadataInfo hdr10MetadataInfo; /** - * The ColorSpace of the video frame + * By default, the color space properties of video frames will apply the Full Range and BT.709 + * standard configurations. */ ColorSpace colorSpace; }; @@ -1138,15 +1172,44 @@ struct VideoFrame { class IVideoFrameObserver { public: /** - * Occurs each time the player receives a video frame. + * @brief Occurs each time the player receives a video frame. 
+ * + * @details + * After registering the video frame observer, the callback occurs every time the player receives a + * video frame, reporting the detailed information of the video frame. + * + * @param frame The video frame information. See VideoFrame. * - * After registering the video frame observer, - * the callback occurs each time the player receives a video frame to report the detailed information of the video frame. - * @param frame The detailed information of the video frame. See {@link VideoFrame}. */ virtual void onFrame(const VideoFrame* frame) = 0; virtual ~IVideoFrameObserver() {} virtual bool isExternal() { return true; } + /** + * @brief Sets the format of the raw video data output by the SDK. + * + * @details + * You need to register the callback when calling the `registerVideoFrameObserver` method. After you + * successfully register the video frame observer, the SDK triggers this callback each time it + * receives a video frame. You need to set your preferred video data in the return value of this + * callback. + * + * @note + * The default types of pixel format ( VIDEO_PIXEL_DEFAULT ) for the raw video are as follows: + * - On the Android platform, the default video frame type may be I420Buffer or TextureBuffer. The + * texture format of TextureBuffer type may be OES or RGB. If the returned video frame type is + * VIDEO_PIXEL_DEFAULT when you call `getVideoFormatPreference`, you need to adapt to I420Buffer or + * TextureBuffer when processing video data. The cases where the video frame type is fixed as + * I420Buffer include but are not limited to: + * - Specific devices, such as: LG G5 SE (H848), Google Pixel 4a, Samsung Galaxy A7, or Xiaomi Mi + * Max. + * - Image enhancement extension has been integrated and video noise reduction or low-light + * enhancement function has been enabled. + * - On iOS and macOS platforms, the default video frame type may be I420 or CVPixelBufferRef. 
+ * - On Windows platforms, the default video frame type is YUV420. + * + * @return + * Sets the raw data format of the SDK output. See `VIDEO_PIXEL_FORMAT`. + */ virtual VIDEO_PIXEL_FORMAT getVideoFormatPreference() { return VIDEO_PIXEL_DEFAULT; } }; @@ -1170,27 +1233,176 @@ enum MEDIA_PLAYER_SOURCE_TYPE { MEDIA_PLAYER_SOURCE_SIMPLE, }; +/** + * @brief The frame position of the video observer. + */ enum VIDEO_MODULE_POSITION { + /** + * 1: The location of the locally collected video data after preprocessing corresponds to the + * `onCaptureVideoFrame` callback. The observed video here has the effect of video pre-processing, + * which can be verified by enabling image enhancement, virtual background, or watermark. + */ POSITION_POST_CAPTURER = 1 << 0, + /** + * 2: The pre-renderer position , which corresponds to the video data in the `onRenderVideoFrame` + * callback. + */ POSITION_PRE_RENDERER = 1 << 1, + /** + * 4: The pre-encoder position , which corresponds to the video data in the `onPreEncodeVideoFrame` + * callback. The observed video here has the effects of video pre-processing and encoding + * pre-processing. + * - To verify the pre-processing effects of the video, you can enable image enhancement, virtual + * background, or watermark. + * - To verify the pre-encoding processing effect, you can set a lower frame rate (for example, 5 + * fps). + */ POSITION_PRE_ENCODER = 1 << 2, + /** + * 8: The position after local video capture and before pre-processing. The observed video here does + * not have pre-processing effects, which can be verified by enabling image enhancement, virtual + * background, or watermarks. + */ POSITION_POST_CAPTURER_ORIGIN = 1 << 3, }; } // namespace base +/** Definition of contentinspect + */ +#define MAX_CONTENT_INSPECT_MODULE_COUNT 32 +enum CONTENT_INSPECT_RESULT { + CONTENT_INSPECT_NEUTRAL = 1, + CONTENT_INSPECT_SEXY = 2, + CONTENT_INSPECT_PORN = 3, +}; + +/** + * @brief The type of video content moderation module. 
+ */ +enum CONTENT_INSPECT_TYPE { + /** + * 0: (Default) This module has no actual function. Do not set `type` to this value. + */ + CONTENT_INSPECT_INVALID = 0, + /** + * @deprecated + * Content inspect type moderation + */ + CONTENT_INSPECT_MODERATION __deprecated = 1, + /** + * 2: Video screenshot and upload via Agora self-developed extension. SDK takes screenshots of the + * video stream in the channel and uploads them. + */ + CONTENT_INSPECT_SUPERVISION = 2, + /** + * 3: Video screenshot and upload via extensions from Agora Extensions Marketplace. SDK uses video + * moderation extensions from Agora Extensions Marketplace to take screenshots of the video stream + * in the channel and uploads them. + */ + CONTENT_INSPECT_IMAGE_MODERATION = 3 +}; + +/** + * @brief `ContentInspectModule` class, a structure used to configure the frequency of video + * screenshot and upload. + */ +struct ContentInspectModule { + /** + * Types of functional module. See `CONTENT_INSPECT_TYPE`. + */ + CONTENT_INSPECT_TYPE type; + /** + * The frequency (s) of video screenshot and upload. The value should be set as larger than 0. The + * default value is 0, the SDK does not take screenshots. Agora recommends that you set the value as + * 10; you can also adjust it according to your business needs. + */ + unsigned int interval; + /** + * The position of the video observer. See `VIDEO_MODULE_POSITION`. + */ + base::VIDEO_MODULE_POSITION position; + ContentInspectModule() { + type = CONTENT_INSPECT_INVALID; + interval = 0; + position = base::POSITION_PRE_ENCODER; + } +}; +/** + * @brief Screenshot and upload configuration. + */ +struct ContentInspectConfig { + /** + * Additional information on the video content (maximum length: 1024 Bytes). + * The SDK sends the screenshots and additional information on the video content to the Agora + * server. 
Once the video screenshot and upload process is completed, the Agora server sends the + * additional information and the callback notification to your server. + */ + const char* extraInfo; + /** + * (Optional) Server configuration related to uploading video screenshots via extensions from Agora + * Extensions Marketplace. This parameter only takes effect when `type` in `ContentInspectModule` is + * set to `CONTENT_INSPECT_IMAGE_MODERATION`. If you want to use it, contact `technical support`. + */ + const char* serverConfig; + /** + * Functional module. See `ContentInspectModule`. + * A maximum of 32 `ContentInspectModule` instances can be configured, and the value range of + * `MAX_CONTENT_INSPECT_MODULE_COUNT` is an integer in [1,32]. + * @note A function module can only be configured with one instance at most. Currently only the + * video screenshot and upload function is supported. + */ + ContentInspectModule modules[MAX_CONTENT_INSPECT_MODULE_COUNT]; + /** + * The number of functional modules, that is, the number of configured `ContentInspectModule` + * instances, must be the same as the number of instances configured in `modules`. The maximum + * number is 32. + */ + int moduleCount; + ContentInspectConfig& operator=(const ContentInspectConfig& rth) { + extraInfo = rth.extraInfo; + serverConfig = rth.serverConfig; + moduleCount = rth.moduleCount; + memcpy(&modules, &rth.modules, MAX_CONTENT_INSPECT_MODULE_COUNT * sizeof(ContentInspectModule)); + return *this; + } + ContentInspectConfig() : extraInfo(NULL), serverConfig(NULL), moduleCount(0) {} +}; +/** + * @brief The snapshot configuration. + */ +struct SnapshotConfig { + /** + * The local path (including filename extensions) of the snapshot.
For example: + * - Windows: `C:\Users\\AppData\Local\Agora\\example.jpg` + * - iOS:` /App Sandbox/Library/Caches/example.jpg` + * - macOS: `~/Library/Logs/example.jpg` + * - Android:` /storage/emulated/0/Android/data//files/example.jpg` + * @note Ensure that the path you specify exists and is writable. + */ + const char* filePath; + + /** + * The position of the snapshot video frame in the video pipeline. See `VIDEO_MODULE_POSITION`. + */ + media::base::VIDEO_MODULE_POSITION position; + SnapshotConfig() :filePath(NULL), position(media::base::POSITION_PRE_ENCODER) {} +}; + /** * The audio frame observer. */ class IAudioPcmFrameSink { public: /** - * Occurs when each time the player receives an audio frame. + * @brief Occurs each time the player receives an audio frame. + * + * @details + * After registering the audio frame observer, the callback occurs every time the player receives an + * audio frame, reporting the detailed information of the audio frame. + * + * @param frame The audio frame information. See AudioPcmFrame. * - * After registering the audio frame observer, - * the callback occurs when each time the player receives an audio frame, - * reporting the detailed information of the audio frame. - * @param frame The detailed information of the audio frame. See {@link AudioPcmFrame}. */ virtual void onFrame(agora::media::base::AudioPcmFrame* frame) = 0; virtual ~IAudioPcmFrameSink() {} @@ -1202,72 +1414,66 @@ class IAudioPcmFrameSink { class IAudioFrameObserverBase { public: /** - * Audio frame types. + * @brief Audio frame type. */ enum AUDIO_FRAME_TYPE { /** - * 0: 16-bit PCM. + * 0: PCM 16 */ FRAME_TYPE_PCM16 = 0, }; enum { MAX_HANDLE_TIME_CNT = 10 }; /** - * The definition of the AudioFrame struct. + * @brief Raw audio data. */ struct AudioFrame { /** - * The audio frame type: #AUDIO_FRAME_TYPE. + * The type of the audio frame. See `AUDIO_FRAME_TYPE`. */ AUDIO_FRAME_TYPE type; /** - * The number of samples per channel in this frame. 
+ * The number of samples per channel in the audio frame. */ int samplesPerChannel; /** - * The number of bytes per sample: #BYTES_PER_SAMPLE + * The number of bytes per sample. For PCM, this parameter is generally set to 16 bits (2 bytes). */ agora::rtc::BYTES_PER_SAMPLE bytesPerSample; /** - * The number of audio channels (data is interleaved, if stereo). + * The number of audio channels (the data are interleaved if it is stereo). * - 1: Mono. * - 2: Stereo. */ int channels; /** - * The sample rate + * The sample rate (Hz) of the audio frame. */ int samplesPerSec; /** - * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data - * buffer is interleaved. - * - * Buffer data size: buffer = samplesPerChannel × channels × bytesPerSample. + * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data buffer + * is interleaved. + * The size of the data buffer is as follows: `buffer` = `samples` × `channels` × `bytesPerSample`. */ void* buffer; /** - * The timestamp to render the audio data. - * - * You can use this timestamp to restore the order of the captured audio frame, and synchronize - * audio and video frames in video scenarios, including scenarios where external video sources - * are used. + * The timestamp (ms) of the external audio frame. + * You can use this timestamp to restore the order of the captured audio frame, and synchronize + * audio and video frames in video scenarios, including scenarios where external video sources are + * used. */ int64_t renderTimeMs; /** - * A reserved parameter. - * - * You can use this presentationMs parameter to indicate the presenation milisecond timestamp, - * this will then filled into audio4 extension part, the remote side could use this pts in av - * sync process with video frame. + * Reserved for future use. */ int avsync_type; /** * The pts timestamp of this audio frame.
* - * This timestamp is used to indicate the origin pts time of the frame, and sync with video frame by - * the pts time stamp + * This timestamp is used to indicate the origin pts time of the frame, and sync with video + * frame by the pts time stamp */ int64_t presentationMs; - /** + /** * The number of the audio track. */ int audioTrackNumber; @@ -1276,17 +1482,18 @@ class IAudioFrameObserverBase { */ uint32_t rtpTimestamp; - AudioFrame() : type(FRAME_TYPE_PCM16), - samplesPerChannel(0), - bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), - channels(0), - samplesPerSec(0), - buffer(NULL), - renderTimeMs(0), - avsync_type(0), - presentationMs(0), - audioTrackNumber(0), - rtpTimestamp(0) {} + AudioFrame() + : type(FRAME_TYPE_PCM16), + samplesPerChannel(0), + bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), + channels(0), + samplesPerSec(0), + buffer(NULL), + renderTimeMs(0), + avsync_type(0), + presentationMs(0), + audioTrackNumber(0), + rtpTimestamp(0) {} }; enum AUDIO_FRAME_POSITION { @@ -1308,73 +1515,173 @@ class IAudioFrameObserverBase { AUDIO_FRAME_POSITION_EAR_MONITORING = 0x0010, }; + /** + * @brief Audio data format. + * + * @details + * You can pass the `AudioParams` object in the following APIs to set the audio data format for the + * corresponding callback: + * - `getRecordAudioParams`: Sets the audio data format for the `onRecordAudioFrame` callback. + * - `getPlaybackAudioParams`: Sets the audio data format for the `onPlaybackAudioFrame` callback. + * - `getMixedAudioParams`: Sets the audio data format for the `onMixedAudioFrame` callback. + * - `getEarMonitoringAudioParams`: Sets the audio data format for the `onEarMonitoringAudioFrame` + * callback. 
+ * + * @note + * - The SDK calculates the sampling interval through the `samplesPerCall`, `sampleRate`, and + * `channel` parameters in `AudioParams`, and triggers the `onRecordAudioFrame`, + * `onPlaybackAudioFrame`, `onMixedAudioFrame`, and `onEarMonitoringAudioFrame` callbacks according + * to the sampling interval. + * - Sample interval (sec) = `samplePerCall` /( `sampleRate` × `channel` ). + * - Ensure that the sample interval ≥ 0.01 (s). + * + */ struct AudioParams { - /** The audio sample rate (Hz), which can be set as one of the following values: - - - `8000` - - `16000` (Default) - - `32000` - - `44100 ` - - `48000` + /** + * The audio sample rate (Hz), which can be set as one of the following values: + * - 8000. + * - (Default) 16000. + * - 32000. + * - 44100 + * - 48000 */ int sample_rate; - /* The number of audio channels, which can be set as either of the following values: - - - `1`: Mono (Default) - - `2`: Stereo + /** + * The number of audio channels, which can be set as either of the following values: + * - 1: (Default) Mono. + * - 2: Stereo. */ int channels; - /* The use mode of the audio data. See AgoraAudioRawFrameOperationMode. + /** + * The use mode of the audio data. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`. */ rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE mode; - /** The number of samples. For example, set it as 1024 for RTMP or RTMPS - streaming. + /** + * The number of samples, such as 1024 for the media push. 
*/ int samples_per_call; - AudioParams() : sample_rate(0), channels(0), mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), samples_per_call(0) {} - AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, int samplesPerCall) : sample_rate(samplerate), channels(channel), mode(type), samples_per_call(samplesPerCall) {} + AudioParams() + : sample_rate(0), + channels(0), + mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), + samples_per_call(0) {} + AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, + int samplesPerCall) + : sample_rate(samplerate), + channels(channel), + mode(type), + samples_per_call(samplesPerCall) {} }; public: virtual ~IAudioFrameObserverBase() {} /** - * Occurs when the recorded audio frame is received. - * @param channelId The channel name - * @param audioFrame The reference to the audio frame: AudioFrame. + * @brief Gets the captured audio frame. + * + * @details + * To ensure that the format of the captured audio frame is as expected, you can choose one of the + * following two methods to set the audio data format: + * - Method 1: After calling `setRecordingAudioFrameParameters` to set the audio data format and + * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the + * sampling interval according to the parameters set in the methods, and triggers the + * `onRecordAudioFrame` callback according to the sampling interval. + * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer + * object, set the audio data format in the return value of the `getObservedAudioFramePosition` + * callback. The SDK then calculates the sampling interval according to the return value of the + * `getRecordAudioParams` callback, and triggers the `onRecordAudioFrame` callback according to the + * sampling interval. + * + * @note The priority of method 1 is higher than that of method 2.
If method 1 is used to set the + * audio data format, the setting of method 2 is invalid. + * + * @param audioFrame The raw audio data. See `AudioFrame`. + * @param channelId The channel ID. + * * @return - * - true: The recorded audio frame is valid and is encoded and sent. - * - false: The recorded audio frame is invalid and is not encoded or sent. + * Without practical meaning. */ virtual bool onRecordAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0; /** - * Occurs when the playback audio frame is received. - * @param channelId The channel name - * @param audioFrame The reference to the audio frame: AudioFrame. + * @brief Gets the raw audio frame for playback. + * + * @details + * To ensure that the data format of audio frame for playback is as expected, Agora recommends that + * you choose one of the following two methods to set the audio data format: + * - Method 1: After calling `setPlaybackAudioFrameParameters` to set the audio data format and + * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the + * sampling interval according to the parameters set in the methods, and triggers the + * `onPlaybackAudioFrame` callback according to the sampling interval. + * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer + * object, set the audio data format in the return value of the `getObservedAudioFramePosition` + * callback. The SDK then calculates the sampling interval according to the return value of the + * `getPlaybackAudioParams` callback, and triggers the `onPlaybackAudioFrame` callback according to + * the sampling interval. + * + * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the + * audio data format, the setting of method 2 is invalid. + * + * @param audioFrame The raw audio data. See `AudioFrame`. + * @param channelId The channel ID. 
+ * * @return - * - true: The playback audio frame is valid and is encoded and sent. - * - false: The playback audio frame is invalid and is not encoded or sent. + * Without practical meaning. */ virtual bool onPlaybackAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0; /** - * Occurs when the mixed audio data is received. - * @param channelId The channel name - * @param audioFrame The reference to the audio frame: AudioFrame. + * @brief Retrieves the mixed captured and playback audio frame. + * + * @details + * To ensure that the data format of mixed captured and playback audio frame meets the expectations, + * Agora recommends that you choose one of the following two ways to set the data format: + * - Method 1: After calling `setMixedAudioFrameParameters` to set the audio data format and + * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the + * sampling interval according to the parameters set in the methods, and triggers the + * `onMixedAudioFrame` callback according to the sampling interval. + * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer + * object, set the audio data format in the return value of the `getObservedAudioFramePosition` + * callback. The SDK then calculates the sampling interval according to the return value of the + * `getMixedAudioParams` callback, and triggers the `onMixedAudioFrame` callback according to the + * sampling interval. + * + * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the + * audio data format, the setting of method 2 is invalid. + * + * @param audioFrame The raw audio data. See `AudioFrame`. + * @param channelId The channel ID. + * * @return - * - true: The mixed audio data is valid and is encoded and sent. - * - false: The mixed audio data is invalid and is not encoded or sent. + * Without practical meaning. 
*/ virtual bool onMixedAudioFrame(const char* channelId, AudioFrame& audioFrame) = 0; /** - * Occurs when the ear monitoring audio frame is received. - * @param audioFrame The reference to the audio frame: AudioFrame. + * @brief Gets the in-ear monitoring audio frame. + * + * @details + * In order to ensure that the obtained in-ear audio data meets the expectations, Agora recommends + * that you choose one of the following two methods to set the in-ear monitoring-ear audio data + * format: + * - Method 1: After calling `setEarMonitoringAudioFrameParameters` to set the audio data format and + * `registerAudioFrameObserver` to register the audio frame observer object, the SDK calculates the + * sampling interval according to the parameters set in the methods, and triggers the + * `onEarMonitoringAudioFrame` callback according to the sampling interval. + * - Method 2: After calling `registerAudioFrameObserver` to register the audio frame observer + * object, set the audio data format in the return value of the `getObservedAudioFramePosition` + * callback. The SDK then calculates the sampling interval according to the return value of the + * `getEarMonitoringAudioParams` callback, and triggers the `onEarMonitoringAudioFrame` callback + * according to the sampling interval. + * + * @note The priority of method 1 is higher than that of method 2. If method 1 is used to set the + * audio data format, the setting of method 2 is invalid. + * + * @param audioFrame The raw audio data. See `AudioFrame`. + * * @return - * - true: The ear monitoring audio data is valid and is encoded and sent. - * - false: The ear monitoring audio data is invalid and is not encoded or sent. + * Without practical meaning. */ virtual bool onEarMonitoringAudioFrame(AudioFrame& audioFrame) = 0; /** @@ -1386,76 +1693,118 @@ class IAudioFrameObserverBase { * - true: The before-mixing playback audio frame is valid and is encoded and sent. 
* - false: The before-mixing playback audio frame is invalid and is not encoded or sent. */ - virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, AudioFrame& audioFrame) { - (void) channelId; - (void) userId; - (void) audioFrame; + virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, + AudioFrame& audioFrame) { + (void)channelId; + (void)userId; + (void)audioFrame; return true; } /** - * Sets the frame position for the audio observer. - * @return A bit mask that controls the frame position of the audio observer. - * @note - Use '|' (the OR operator) to observe multiple frame positions. - *

- * After you successfully register the audio observer, the SDK triggers this callback each time it receives a audio frame. You can determine which position to observe by setting the return value. - * The SDK provides 4 positions for observer. Each position corresponds to a callback function: - * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received, which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback. - * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received, which corresponds to the \ref onRecordFrame "onRecordFrame" callback. - * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which corresponds to the \ref onMixedFrame "onMixedFrame" callback. - * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing "onPlaybackFrameBeforeMixing" callback. - * @return The bit mask that controls the audio observation positions. - * See AUDIO_FRAME_POSITION. + * @brief Sets the frame position for the audio observer. + * + * @details + * After successfully registering the audio data observer, the SDK uses this callback for each + * specific audio frame processing node to determine whether to trigger the following callbacks: + * - `onRecordAudioFrame` + * - `onPlaybackAudioFrame` + * - `onPlaybackAudioFrameBeforeMixing` + * - `onMixedAudioFrame` + * - `onEarMonitoringAudioFrame` + * You can set one or more positions you need to observe by modifying the return value of + * `getObservedAudioFramePosition` based on your scenario requirements: + * When observing multiple positions, use | (the OR operator). To conserve + * system resources, you can reduce the number of frame positions that you want to observe.
+ * + * @return + * a bitmask that sets the observation position, with the following values: + * - AUDIO_FRAME_POSITION_PLAYBACK (0x0001): This position can observe the playback audio mixed by + * all remote users, corresponding to the `onPlaybackAudioFrame` callback. + * - AUDIO_FRAME_POSITION_RECORD (0x0002): This position can observe the collected local user's + * audio, corresponding to the `onRecordAudioFrame` callback. + * - AUDIO_FRAME_POSITION_MIXED (0x0004): This position can observe the playback audio mixed by the + * local user and all remote users, corresponding to the `onMixedAudioFrame` callback. + * - AUDIO_FRAME_POSITION_BEFORE_MIXING (0x0008): This position can observe the audio of a single + * remote user before mixing, corresponding to the `onPlaybackAudioFrameBeforeMixing` callback. + * - AUDIO_FRAME_POSITION_EAR_MONITORING (0x0010): This position can observe the in-ear monitoring + * audio of the local user, corresponding to the `onEarMonitoringAudioFrame` callback. */ - virtual int getObservedAudioFramePosition() = 0; + virtual int getObservedAudioFramePosition() = 0; - /** Sets the audio playback format - **Note**: - - - The SDK calculates the sample interval according to the `AudioParams` - you set in the return value of this callback and triggers the - `onPlaybackAudioFrame` callback at the calculated sample interval. - Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`). - Ensure that the value of sample interval is equal to or greater than 0.01. - - @return Sets the audio format. See AgoraAudioParams. + /** + * @brief Sets the audio format for the `onPlaybackAudioFrame` callback. + * + * @details + * You need to register the callback when calling the `registerAudioFrameObserver` method. After you + * successfully register the audio observer, the SDK triggers this callback, and you can set the + * audio format in the return value of this callback.
+   *
+   * @note
+   * The SDK triggers the `onPlaybackAudioFrame` callback with the `AudioParams` calculated sampling
+   * interval you set in the return value. The calculation formula is Sample interval (sec) =
+   * `samplesPerCall` /( `sampleRate` × `channel` ).
+   * Ensure that the sample interval ≥ 0.01 (s).
+   *
+   * @return
+   * The audio data for playback, see `AudioParams`.
    */
   virtual AudioParams getPlaybackAudioParams() = 0;
-  /** Sets the audio recording format
-  **Note**:
-  - The SDK calculates the sample interval according to the `AudioParams`
-  you set in the return value of this callback and triggers the
-  `onRecordAudioFrame` callback at the calculated sample interval.
-  Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
-  Ensure that the value of sample interval is equal to or greater than 0.01.
-
-  @return Sets the audio format. See AgoraAudioParams.
+  /**
+   * @brief Sets the audio format for the `onRecordAudioFrame` callback.
+   *
+   * @details
+   * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+   * successfully register the audio observer, the SDK triggers this callback, and you can set the
+   * audio format in the return value of this callback.
+   *
+   * @note
+   * The SDK triggers the `onRecordAudioFrame` callback with the `AudioParams` calculated sampling
+   * interval you set in the return value. The calculation formula is Sample interval (sec) =
+   * `samplesPerCall` /( `sampleRate` × `channel` ).
+   * Ensure that the sample interval ≥ 0.01 (s).
+   *
+   * @return
+   * The captured audio data, see `AudioParams`.
    */
   virtual AudioParams getRecordAudioParams() = 0;
-  /** Sets the audio mixing format
-  **Note**:
-  - The SDK calculates the sample interval according to the `AudioParams`
-  you set in the return value of this callback and triggers the
-  `onMixedAudioFrame` callback at the calculated sample interval.
-  Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
-  Ensure that the value of sample interval is equal to or greater than 0.01.
-
-  @return Sets the audio format. See AgoraAudioParams.
+  /**
+   * @brief Sets the audio format for the `onMixedAudioFrame` callback.
+   *
+   * @details
+   * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+   * successfully register the audio observer, the SDK triggers this callback, and you can set the
+   * audio format in the return value of this callback.
+   *
+   * @note
+   * The SDK triggers the `onMixedAudioFrame` callback with the `AudioParams` calculated sampling
+   * interval you set in the return value. The calculation formula is Sample interval (sec) =
+   * `samplesPerCall` /( `sampleRate` × `channel` ).
+   * Ensure that the sample interval ≥ 0.01 (s).
+   *
+   * @return
+   * The mixed captured and playback audio data. See `AudioParams`.
    */
   virtual AudioParams getMixedAudioParams() = 0;
-  /** Sets the ear monitoring audio format
-  **Note**:
-  - The SDK calculates the sample interval according to the `AudioParams`
-  you set in the return value of this callback and triggers the
-  `onEarMonitoringAudioFrame` callback at the calculated sample interval.
-  Sample interval (seconds) = `samplesPerCall`/(`sampleRate` × `channel`).
-  Ensure that the value of sample interval is equal to or greater than 0.01.
-
-  @return Sets the audio format. See AgoraAudioParams.
+  /**
+   * @brief Sets the audio format for the `onEarMonitoringAudioFrame` callback.
+   *
+   * @details
+   * You need to register the callback when calling the `registerAudioFrameObserver` method. After you
+   * successfully register the audio observer, the SDK triggers this callback, and you can set the
+   * audio format in the return value of this callback.
+   *
+   * @note
+   * The SDK triggers the `onEarMonitoringAudioFrame` callback with the `AudioParams` calculated
+   * sampling interval you set in the return value. 
The calculation formula is Sample interval (
+   * sec) = `samplesPerCall` /( `sampleRate` × `channel` ).
+   * Ensure that the sample interval ≥ 0.01 (s).
+   *
+   * @return
+   * The audio data of in-ear monitoring, see `AudioParams`.
    */
   virtual AudioParams getEarMonitoringAudioParams() = 0;
 };
@@ -1467,85 +1816,102 @@ class IAudioFrameObserver : public IAudioFrameObserverBase {
  public:
   using IAudioFrameObserverBase::onPlaybackAudioFrameBeforeMixing;
   /**
-   * Occurs when the before-mixing playback audio frame is received.
-   * @param channelId The channel name
-   * @param uid ID of the remote user.
-   * @param audioFrame The reference to the audio frame: AudioFrame.
+   * @brief Retrieves the audio frame before mixing of subscribed remote users.
+   *
+   * @param channelId The channel ID.
+   * @param uid The ID of subscribed remote users.
+   * @param audioFrame The raw audio data. See `AudioFrame`.
+   *
    * @return
-   * - true: The before-mixing playback audio frame is valid and is encoded and sent.
-   * - false: The before-mixing playback audio frame is invalid and is not encoded or sent.
    */
-  virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid, AudioFrame& audioFrame) = 0;
+   * Without practical meaning.
+  virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid,
+                                                AudioFrame& audioFrame) = 0;
 };
 
+/**
+ * @brief The audio spectrum data.
+ */
 struct AudioSpectrumData {
   /**
-   * The audio spectrum data of audio.
+   * The audio spectrum data. Agora divides the audio frequency into 256 frequency domains, and
+   * reports the energy value of each frequency domain through this parameter. The value range of each
+   * energy value is [-300, 1] and the unit is dBFS.
    */
-  const float *audioSpectrumData;
+  const float* audioSpectrumData;
   /**
-   * The data length of audio spectrum data.
+   * The audio spectrum data length is 256. 
*/ int dataLength; AudioSpectrumData() : audioSpectrumData(NULL), dataLength(0) {} - AudioSpectrumData(const float *data, int length) : - audioSpectrumData(data), dataLength(length) {} + AudioSpectrumData(const float* data, int length) : audioSpectrumData(data), dataLength(length) {} }; -struct UserAudioSpectrumInfo { +/** + * @brief Audio spectrum information of the remote user. + */ +struct UserAudioSpectrumInfo { /** - * User ID of the speaker. + * The user ID of the remote user. */ agora::rtc::uid_t uid; /** - * The audio spectrum data of audio. + * Audio spectrum information of the remote user. See `AudioSpectrumData`. */ struct AudioSpectrumData spectrumData; UserAudioSpectrumInfo() : uid(0) {} - UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) : uid(uid), spectrumData(data, length) {} + UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) + : uid(uid), spectrumData(data, length) {} }; /** * The IAudioSpectrumObserver class. */ class IAudioSpectrumObserver { -public: + public: virtual ~IAudioSpectrumObserver() {} /** - * Reports the audio spectrum of local audio. + * @brief Gets the statistics of a local audio spectrum. * - * This callback reports the audio spectrum data of the local audio at the moment - * in the channel. + * @details + * After successfully calling `registerAudioSpectrumObserver` to implement the + * `onLocalAudioSpectrum` callback in `IAudioSpectrumObserver` and calling + * `enableAudioSpectrumMonitor` to enable audio spectrum monitoring, the SDK triggers this callback + * as the time interval you set to report the received remote audio data spectrum before encoding. * - * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". + * @param data The audio spectrum data of the local user. See `AudioSpectrumData`. * - * @param data The audio spectrum data of local audio. - * - true: Processed. - * - false: Not processed. 
+   * @return
+   * Whether the spectrum data is received:
+   * - `true`: Spectrum data is received.
+   * - `false`: No spectrum data is received.
    */
   virtual bool onLocalAudioSpectrum(const AudioSpectrumData& data) = 0;
   /**
-   * Reports the audio spectrum of remote user.
+   * @brief Gets the remote audio spectrum.
    *
-   * This callback reports the IDs and audio spectrum data of the loudest speakers at the moment
-   * in the channel.
+   * @details
+   * After successfully calling `registerAudioSpectrumObserver` to implement the
+   * `onRemoteAudioSpectrum` callback in the `IAudioSpectrumObserver` and calling
+   * `enableAudioSpectrumMonitor` to enable audio spectrum monitoring, the SDK will trigger the
+   * callback at the time interval you set to report the received remote audio data spectrum.
    *
-   * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor".
+   * @param spectrums The audio spectrum information of the remote user. See `UserAudioSpectrumInfo`.
+   * The number of arrays is the number of remote users monitored by the SDK. If the array is null, it
+   * means that no audio spectrum of remote users is detected.
+   * @param spectrumNumber The number of remote users.
    *
-   * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo "UserAudioSpectrumInfo", which is an array containing
-   * the user ID and audio spectrum data for each speaker.
-   * - This array contains the following members:
-   * - `uid`, which is the UID of each remote speaker
-   * - `spectrumData`, which reports the audio spectrum of each remote speaker.
-   * @param spectrumNumber The array length of the spectrums.
-   * - true: Processed.
-   * - false: Not processed.
+   * @return
+   * Whether the spectrum data is received:
+   * - `true`: Spectrum data is received.
+   * - `false`: No spectrum data is received. 
*/ - virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, unsigned int spectrumNumber) = 0; + virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, + unsigned int spectrumNumber) = 0; }; /** @@ -1554,17 +1920,27 @@ class IAudioSpectrumObserver { class IVideoEncodedFrameObserver { public: /** - * Occurs each time the SDK receives an encoded video image. - * @param uid The user id of remote user. - * @param imageBuffer The pointer to the video image buffer. + * @brief Reports that the receiver has received the to-be-decoded video frame sent by the remote + * end. + * + * @details + * If you call the `setRemoteVideoSubscriptionOptions` method and set `encodedFrameOnly` to `true`, + * the SDK triggers this callback locally to report the received encoded video frame information. + * + * @since 4.6.0 + * @param channelId The channel name. + * @param uid The user ID of the remote user. + * @param imageBuffer The encoded video image buffer. * @param length The data length of the video image. - * @param videoEncodedFrameInfo The information of the encoded video frame: EncodedVideoFrameInfo. - * @return Determines whether to accept encoded video image. - * - true: Accept. - * - false: Do not accept. + * @param videoEncodedFrameInfo For the information of the encoded video frame, see + * `EncodedVideoFrameInfo`. + * + * @return + * Without practical meaning. 
*/ - virtual bool onEncodedVideoFrameReceived(rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, - const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; + virtual bool onEncodedVideoFrameReceived( + const char* channelId, rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, + const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; virtual ~IVideoEncodedFrameObserver() {} }; @@ -1576,70 +1952,111 @@ class IVideoFrameObserver { public: typedef media::base::VideoFrame VideoFrame; /** - * The process mode of the video frame: + * @brief The process mode of the video frame: */ enum VIDEO_FRAME_PROCESS_MODE { /** * Read-only mode. - * * In this mode, you do not modify the video frame. The video frame observer is a renderer. */ - PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original frame. + PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original + // frame. /** * Read and write mode. - * * In this mode, you modify the video frame. The video frame observer is a video filter. */ - PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and affect the following frame processing in SDK. + PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and + // affect the following frame processing in SDK. }; public: virtual ~IVideoFrameObserver() {} /** - * Occurs each time the SDK receives a video frame captured by the local camera. + * @brief Occurs each time the SDK receives a video frame captured by local devices. * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * a video frame is received. In this callback, you can get the video data captured by the local - * camera. You can then pre-process the data according to your scenarios. 
- * - * After pre-processing, you can send the processed video data back to the SDK by setting the - * `videoFrame` parameter in this callback. + * @details + * You can get raw video data collected by the local device through this callback and preprocess it + * as needed. Once the preprocessing is complete, you can directly modify `videoFrame` in this + * callback, and set the return value to `true` to send the modified video data to the SDK. + * If you need to send the preprocessed data to the SDK, you need to call `getVideoFrameProcessMode` + * first to set the video processing mode to read and write mode ( PROCESS_MODE_READ_WRITE ). + * Applicable scenarios: - Preprocess the locally collected video data before it is processed by the + * SDK. For example, get video data through this callback and process it with filters, watermarks, + * cropping, rotation, etc. + * - Get information about the locally collected video data before it is processed by the SDK. For + * example, the original width, height, frame rate of the video frame, etc. + * Call timing: After the successful registration of the video data observer, each time the SDK + * captures a video frame. * * @note - * - If you get the video data in RGBA color encoding format, Agora does not support using this callback to send the processed data in RGBA color encoding format back to the SDK. - * - The video data that this callback gets has not been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. + * - If the video data type you get is RGBA, the SDK does not support processing the data of the + * alpha channel. + * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with + * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause + * unexpected rotation, distortion, and other issues in the local preview and remote video display. + * The default video format that you get from this callback is YUV420. 
If you need other formats, + * you can set the expected data format in the getVideoFormatPreference callback. * - * @param videoFrame A pointer to the video frame: VideoFrame - * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE. - * @return Determines whether to ignore the current video frame if the pre-processing fails: - * - true: Do not ignore. - * - false: Ignore, in which case this method does not sent the current video frame to the SDK. - */ - virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + * @param sourceType Video source types, including cameras, screens, or media player. See + * `VIDEO_SOURCE_TYPE`. + * @param videoFrame The video frame. See `VideoFrame`.Note: The default value of the video frame + * data format obtained through this callback is as follows: + * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D) + * - iOS: I420 or CVPixelBufferRef + * - macOS: I420 or CVPixelBufferRef + * - Windows: YUV420 + * + * @return + * - When the video processing mode is `PROCESS_MODE_READ_ONLY`: + * - `true`: Reserved for future use. + * - `false`: Reserved for future use. + * - When the video processing mode is `PROCESS_MODE_READ_WRITE`: + * - `true`: Sets the SDK to receive the video frame. + * - `false`: Sets the SDK to discard the video frame. + */ + virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** - * Occurs each time the SDK receives a video frame before encoding. - * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * when it receives a video frame. In this callback, you can get the video data before encoding. You can then - * process the data according to your particular scenarios. + * @brief Occurs each time the SDK receives a video frame before encoding. 
* - * After processing, you can send the processed video data back to the SDK by setting the - * `videoFrame` parameter in this callback. + * @details + * After you successfully register the video frame observer, the SDK triggers this callback each + * time it receives a video frame. In this callback, you can get the video data before encoding and + * then process the data according to your particular scenarios. + * After processing, you can send the processed video data back to the SDK in this callback. * * @note - * - To get the video data captured from the second screen before encoding, you need to set (1 << 2) as a frame position through `getObservedFramePosition`. - * - The video data that this callback gets has been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. - * - This callback does not support sending processed RGBA video data back to the SDK. + * - If you need to send the preprocessed data to the SDK, you need to call + * `getVideoFrameProcessMode` first to set the video processing mode to read and write mode ( + * PROCESS_MODE_READ_WRITE ). + * - To get the video data captured from the second screen before encoding, you need to set + * `POSITION_PRE_ENCODER` (1 << 2) as a frame position through `getObservedFramePosition`. + * - The video data that this callback gets has been preprocessed, with its content cropped and + * rotated, and the image enhanced. + * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with + * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause + * unexpected rotation, distortion, and other issues in the local preview and remote video display. * - * @param videoFrame A pointer to the video frame: VideoFrame - * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE. - * @return Determines whether to ignore the current video frame if the pre-processing fails: - * - true: Do not ignore. 
- * - false: Ignore, in which case this method does not sent the current video frame to the SDK. + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. + * @param videoFrame The video frame. See `VideoFrame`.Note: The default value of the video frame + * data format obtained through this callback is as follows: + * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D) + * - iOS: I420 or CVPixelBufferRef + * - macOS: I420 or CVPixelBufferRef + * - Windows: YUV420 + * + * @return + * - When the video processing mode is `PROCESS_MODE_READ_ONLY`: + * - `true`: Reserved for future use. + * - `false`: Reserved for future use. + * - When the video processing mode is `PROCESS_MODE_READ_WRITE`: + * - `true`: Sets the SDK to receive the video frame. + * - `false`: Sets the SDK to discard the video frame. */ - virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** * Occurs each time the SDK receives a video frame decoded by the MediaPlayer. @@ -1650,10 +2067,13 @@ class IVideoFrameObserver { * * After pre-processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. - * + * * @note - * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". 
- * - On Android, this callback is not affected by the return value of \ref getVideoFormatPreference "getVideoFormatPreference" + * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode + * "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied + * "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". + * - On Android, this callback is not affected by the return value of \ref + * getVideoFormatPreference "getVideoFormatPreference" * * @param videoFrame A pointer to the video frame: VideoFrame * @param mediaPlayerId ID of the mediaPlayer. @@ -1664,103 +2084,163 @@ class IVideoFrameObserver { virtual bool onMediaPlayerVideoFrame(VideoFrame& videoFrame, int mediaPlayerId) = 0; /** - * Occurs each time the SDK receives a video frame sent by the remote user. + * @brief Occurs each time the SDK receives a video frame sent by the remote user. * - * After you successfully register the video frame observer, the SDK triggers this callback each time a - * video frame is received. In this callback, you can get the video data sent by the remote user. You - * can then post-process the data according to your scenarios. + * @details + * After you successfully register the video frame observer, the SDK triggers this callback each + * time it receives a video frame. In this callback, you can get the video data sent from the remote + * end before rendering, and then process it according to the particular scenarios. + * The default video format that you get from this callback is YUV420. If you need other formats, + * you can set the expected data format in the `getVideoFormatPreference` callback. * - * After post-processing, you can send the processed data back to the SDK by setting the `videoFrame` - * parameter in this callback. - * - * @note This callback does not support sending processed RGBA video data back to the SDK. 
+ * @note + * - If you need to send the preprocessed data to the SDK, you need to call + * `getVideoFrameProcessMode` first to set the video processing mode to read and write mode ( + * PROCESS_MODE_READ_WRITE ). + * - If the video data type you get is RGBA, the SDK does not support processing the data of the + * alpha channel. + * - It is recommended that you ensure the modified parameters in `videoFrame` are consistent with + * the actual situation of the video frames in the video frame buffer. Otherwise, it may cause + * unexpected rotation, distortion, and other issues in the local preview and remote video display. * - * @param channelId The channel name - * @param remoteUid ID of the remote user who sends the current video frame. - * @param videoFrame A pointer to the video frame: VideoFrame - * @return Determines whether to ignore the current video frame if the post-processing fails: - * - true: Do not ignore. - * - false: Ignore, in which case this method does not sent the current video frame to the SDK. + * @param remoteUid The user ID of the remote user who sends the current video frame. + * @param videoFrame The video frame. See `VideoFrame`.Note: The default value of the video frame + * data format obtained through this callback is as follows: + * - Android: I420 or RGB (GLES20.GL_TEXTURE_2D) + * - iOS: I420 or CVPixelBufferRef + * - macOS: I420 or CVPixelBufferRef + * - Windows: YUV420 + * @param channelId The channel ID. + * + * @return + * - When the video processing mode is `PROCESS_MODE_READ_ONLY`: + * - `true`: Reserved for future use. + * - `false`: Reserved for future use. + * - When the video processing mode is `PROCESS_MODE_READ_WRITE`: + * - `true`: Sets the SDK to receive the video frame. + * - `false`: Sets the SDK to discard the video frame. 
*/ - virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, VideoFrame& videoFrame) = 0; + virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, + VideoFrame& videoFrame) = 0; virtual bool onTranscodedVideoFrame(VideoFrame& videoFrame) = 0; /** - * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the video frame. - * - * After you successfully register the video frame observer, the SDK triggers this callback each time it receives - * a video frame. You need to set your preferred process mode in the return value of this callback. - * @return VIDEO_FRAME_PROCESS_MODE. + * @brief Occurs each time the SDK receives a video frame and prompts you to set the process mode of + * the video frame. + * + * @details + * After you successfully register the video frame observer, the SDK triggers this callback each + * time it receives a video frame. You need to set your preferred process mode in the return value + * of this callback. + * + * @return + * See `VIDEO_FRAME_PROCESS_MODE`. */ - virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { - return PROCESS_MODE_READ_ONLY; - } + virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { return PROCESS_MODE_READ_ONLY; } /** * Sets the format of the raw video data output by the SDK. * - * If you want to get raw video data in a color encoding format other than YUV 420, register this callback when - * calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK triggers - * this callback each time it receives a video frame. You need to set your preferred video data in the return value - * of this callback. - * - * @note If you want the video captured by the sender to be the original format, set the original video data format - * to VIDEO_PIXEL_DEFAULT in the return value. 
On different platforms, the original video pixel format is also - * different, for the actual video pixel format, see `VideoFrame`. - * + * If you want to get raw video data in a color encoding format other than YUV 420, register this + * callback when calling `registerVideoFrameObserver`. After you successfully register the video + * frame observer, the SDK triggers this callback each time it receives a video frame. You need to + * set your preferred video data in the return value of this callback. + * + * @note If you want the video captured by the sender to be the original format, set the original + * video data format to VIDEO_PIXEL_DEFAULT in the return value. On different platforms, the + * original video pixel format is also different, for the actual video pixel format, see + * `VideoFrame`. + * * @return Sets the video format. See VIDEO_PIXEL_FORMAT. */ virtual base::VIDEO_PIXEL_FORMAT getVideoFormatPreference() { return base::VIDEO_PIXEL_DEFAULT; } /** - * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured video. - * - * If you want to rotate the captured video according to the rotation member in the `VideoFrame` class, register this - * callback by calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the - * SDK triggers this callback each time it receives a video frame. You need to set whether to rotate the video frame - * in the return value of this callback. - * - * @note This function only supports video data in RGBA or YUV420. + * @brief Occurs each time the SDK receives a video frame, and prompts you whether to rotate the + * captured video. + * + * @details + * If you want to rotate the captured video according to the `rotation` member in the `VideoFrame` + * class, ensure that you register this callback when calling `registerVideoFrameObserver`. 
After + * you successfully register the video frame observer, the SDK triggers this callback each time it + * receives a video frame. You need to set whether to rotate the video frame in the return value of + * this callback. * - * @return Determines whether to rotate. + * @note + * - On the Android platform, the supported video data formats for this callback are: I420, RGBA, + * and Texture. + * - On the Windows platform, the supported video data formats for this callback are: I420, RGBA, + * and TextureBuffer. + * - On the iOS platform, the supported video data formats for this callback are: I420, RGBA, and + * CVPixelBuffer. + * - On the macOS platform, the supported video data formats for this callback are: I420 and RGBA. + * + * @return + * Sets whether to rotate the captured video: * - `true`: Rotate the captured video. * - `false`: (Default) Do not rotate the captured video. */ virtual bool getRotationApplied() { return false; } /** - * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the captured video. - * - * If the video data you want to obtain is a mirror image of the original video, you need to register this callback - * when calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK - * triggers this callback each time it receives a video frame. You need to set whether or not to mirror the video - * frame in the return value of this callback. - * - * @note This function only supports video data in RGBA and YUV420 formats. + * @brief Occurs each time the SDK receives a video frame and prompts you whether or not to mirror + * the captured video. + * + * @details + * If the video data you want to obtain is a mirror image of the original video, you need to + * register this callback when calling `registerVideoFrameObserver`. After you successfully register + * the video frame observer, the SDK triggers this callback each time it receives a video frame. 
You + * need to set whether or not to mirror the video frame in the return value of this callback. + * + * @note + * - On the Android platform, the supported video data formats for this callback are: I420, RGBA, + * and Texture. + * - On the Windows platform, the supported video data formats for this callback are: I420, RGBA, + * and TextureBuffer. + * - On the iOS platform, the supported video data formats for this callback are: I420, RGBA, and + * CVPixelBuffer. + * - On the macOS platform, the supported video data formats for this callback are: I420 and RGBA. + * - Both this method and the `setVideoEncoderConfiguration` method support setting the mirroring + * effect. Agora recommends that you only choose one method to set it up. Using both methods at the + * same time causes the mirroring effect to overlap, and the mirroring settings fail. * - * @return Determines whether to mirror. + * @return + * Sets whether or not to mirror the captured video: * - `true`: Mirror the captured video. * - `false`: (Default) Do not mirror the captured video. */ virtual bool getMirrorApplied() { return false; } /** - * Sets the frame position for the video observer. - * - * After you successfully register the video observer, the SDK triggers this callback each time it receives - * a video frame. You can determine which position to observe by setting the return value. The SDK provides - * 3 positions for observer. Each position corresponds to a callback function: + * @brief Sets the frame position for the video observer. * - * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds to the onCaptureVideoFrame callback. - * POSITION_PRE_RENDERER(1 << 1): The position before receiving the remote video data, which corresponds to the onRenderVideoFrame callback. - * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to the onPreEncodeVideoFrame callback. 
+ * @details + * After successfully registering the video data observer, the SDK uses this callback to determine + * whether to trigger `onCaptureVideoFrame`, `onRenderVideoFrame` and `onPreEncodeVideoFrame` + * callback at each specific video frame processing position, so that you can observe the locally + * collected video data, the video data sent by the remote end, and the video data before encoding. + * You can set one or more positions you need to observe by modifying the return value according to + * your scenario: + * - `POSITION_POST_CAPTURER` (1 << 0): The position after capturing the video data, which + * corresponds to the `onCaptureVideoFrame` callback. + * - `POSITION_PRE_RENDERER` (1 << 1): The position of the received remote video data before + * rendering, which corresponds to the `onRenderVideoFrame` callback. + * - `POSITION_PRE_ENCODER` (1 << 2): The position before encoding the video data, which corresponds + * to the `onPreEncodeVideoFrame` callback. * - * To observe multiple frame positions, use '|' (the OR operator). - * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by default. - * To conserve the system consumption, you can reduce the number of frame positions that you want to observe. + * @note + * - Use '|' (the OR operator) to observe multiple frame positions. + * - This callback observes `POSITION_POST_CAPTURER` (1 << 0) and `POSITION_PRE_RENDERER` (1 << 1) + * by default. + * - To conserve system resources, you can reduce the number of frame positions that you want to + * observe. + * - When the video processing mode is `PROCESS_MODE_READ_WRITE` and the observation position is set + * to `POSITION_PRE_ENCODER` | `POSITION_POST_CAPTURER`, the `getMirrorApplied` does not take + * effect; you need to modify the video processing mode or the position of the observer. * - * @return A bit mask that controls the frame position of the video observer: VIDEO_OBSERVER_POSITION. 
+ * @return + * A bit mask that controls the frame position of the video observer. See `VIDEO_MODULE_POSITION`. */ virtual uint32_t getObservedFramePosition() { return base::POSITION_POST_CAPTURER | base::POSITION_PRE_RENDERER; @@ -1777,21 +2257,21 @@ class IVideoFrameObserver { }; /** - * The external video source type. + * @brief The external video frame encoding type. */ enum EXTERNAL_VIDEO_SOURCE_TYPE { /** - * 0: non-encoded video frame. + * 0: The video frame is not encoded. */ VIDEO_FRAME = 0, /** - * 1: encoded video frame. + * 1: The video frame is encoded. */ ENCODED_VIDEO_FRAME, }; /** - * The format of the recording file. + * @brief Format of the recording file. * * @since v3.5.2 */ @@ -1802,7 +2282,7 @@ enum MediaRecorderContainerFormat { FORMAT_MP4 = 1, }; /** - * The recording content. + * @brief The recording content. * * @since v3.5.2 */ @@ -1821,32 +2301,32 @@ enum MediaRecorderStreamType { STREAM_TYPE_BOTH = STREAM_TYPE_AUDIO | STREAM_TYPE_VIDEO, }; /** - * The current recording state. + * @brief The current recording state. * * @since v3.5.2 */ enum RecorderState { /** - * -1: An error occurs during the recording. See RecorderReasonCode for the reason. + * -1: An error occurs during the recording. See `RecorderReasonCode` for the reason. */ RECORDER_STATE_ERROR = -1, /** - * 2: The audio and video recording is started. + * 2: The audio and video recording starts. */ RECORDER_STATE_START = 2, /** - * 3: The audio and video recording is stopped. + * 3: The audio and video recording stops. */ RECORDER_STATE_STOP = 3, }; /** - * The reason for the state change + * @brief The reason for the state change. * * @since v3.5.2 */ enum RecorderReasonCode { /** - * 0: No error occurs. + * 0: No error. 
*/ RECORDER_REASON_NONE = 0, /** @@ -1854,7 +2334,8 @@ enum RecorderReasonCode { */ RECORDER_REASON_WRITE_FAILED = 1, /** - * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams are interrupted for more than five seconds during recording. + * 2: The SDK does not detect any audio and video streams, or audio and video streams are + * interrupted for more than five seconds during recording. */ RECORDER_REASON_NO_STREAM = 2, /** @@ -1867,112 +2348,217 @@ enum RecorderReasonCode { RECORDER_REASON_CONFIG_CHANGED = 4, }; /** - * Configurations for the local audio and video recording. + * @brief Configuration for audio and video stream recording. * * @since v3.5.2 */ struct MediaRecorderConfiguration { /** - * The absolute path (including the filename extensions) of the recording file. - * For example, `C:\Users\\AppData\Local\Agora\\example.mp4` on Windows, - * `/App Sandbox/Library/Caches/example.mp4` on iOS, `/Library/Logs/example.mp4` on macOS, and - * `/storage/emulated/0/Android/data//files/example.mp4` on Android. - * - * @note Ensure that the specified path exists and is writable. + * The absolute path where the recording file will be saved locally, including the file name and + * format. For example: + * - Windows: `C:\Users\\AppData\Local\Agora\\example.mp4` + * - iOS: `/App Sandbox/Library/Caches/example.mp4` + * - macOS: `/Library/Logs/example.mp4` + * - Android: `/storage/emulated/0/Android/data//files/example.mp4` + * @note Make sure the specified path exists and is writable. */ const char* storagePath; /** - * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat "MediaRecorderContainerFormat". + * The format of the recording file. See `MediaRecorderContainerFormat`. */ MediaRecorderContainerFormat containerFormat; /** - * The recording content. See \ref agora::rtc::MediaRecorderStreamType "MediaRecorderStreamType". + * The content to record. See `MediaRecorderStreamType`. 
*/ MediaRecorderStreamType streamType; /** - * The maximum recording duration, in milliseconds. The default value is 120000. + * Maximum recording duration in milliseconds. Default is 120000. */ int maxDurationMs; /** - * The interval (ms) of updating the recording information. The value range is - * [1000,10000]. Based on the set value of `recorderInfoUpdateInterval`, the - * SDK triggers the \ref IMediaRecorderObserver::onRecorderInfoUpdated "onRecorderInfoUpdated" - * callback to report the updated recording information. + * Interval for recording information updates, in milliseconds. The valid range is [1000,10000]. The + * SDK triggers the `onRecorderInfoUpdated` callback based on this value to report updated recording + * information. */ int recorderInfoUpdateInterval; - - MediaRecorderConfiguration() : storagePath(NULL), containerFormat(FORMAT_MP4), streamType(STREAM_TYPE_BOTH), maxDurationMs(120000), recorderInfoUpdateInterval(0) {} - MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, MediaRecorderStreamType type, int duration, int interval) : storagePath(path), containerFormat(format), streamType(type), maxDurationMs(duration), recorderInfoUpdateInterval(interval) {} + /** + * Width (px) of the recorded video. The maximum value for width × height must not exceed 3840 × + * 2160. + * This parameter is required only when calling `createMediaRecorder` and setting `type` in + * `RecorderStreamInfo` to PREVIEW. + */ + int width; + /** + * Height (px) of the recorded video. The maximum value for width × height must not exceed 3840 × + * 2160. + * This parameter is required only when calling `createMediaRecorder` and setting `type` in + * `RecorderStreamInfo` to PREVIEW. + */ + int height; + /** + * Frame rate of the recorded video. The maximum is 30. For example: 5, 10, 15, 24, 30. + * This parameter is required only when calling `createMediaRecorder` and setting `type` in + * `RecorderStreamInfo` to PREVIEW. 
+ */ + int fps; + /** + * Sample rate (Hz) of the recorded audio. Supported values: 16000, 32000, 44100, or 48000. + * This parameter is required only when calling `createMediaRecorder` and setting `type` in + * `RecorderStreamInfo` to PREVIEW. + */ + int sample_rate; + /** + * Number of audio channels to record: + * - 1: Mono + * - 2: Stereo + * This parameter is required only when calling `createMediaRecorder` and setting `type` in + * `RecorderStreamInfo` to PREVIEW. + */ + int channel_num; + /** + * Type of video source to record. See `VIDEO_SOURCE_TYPE`. + * This parameter is required only when calling `createMediaRecorder` and setting `type` in + * `RecorderStreamInfo` to PREVIEW. + */ + agora::rtc::VIDEO_SOURCE_TYPE videoSourceType; + + MediaRecorderConfiguration() + : storagePath(NULL), + containerFormat(FORMAT_MP4), + streamType(STREAM_TYPE_BOTH), + maxDurationMs(120000), + recorderInfoUpdateInterval(0), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} + MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, + MediaRecorderStreamType type, int duration, int interval) + : storagePath(path), + containerFormat(format), + streamType(type), + maxDurationMs(duration), + recorderInfoUpdateInterval(interval), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} }; class IFaceInfoObserver { -public: - /** - * Occurs when the face info is received. - * @param outFaceInfo The output face info. - * @return - * - true: The face info is valid. - * - false: The face info is invalid. + public: + /** + * @brief Occurs when the facial information processed by speech driven extension is received. 
+ * + * @param outFaceInfo Output parameter, the JSON string of the facial information processed by the + * voice driver plugin, including the following fields: + * - faces: Object sequence. The collection of facial information, with each face corresponding to + * an object. + * - blendshapes: Object. The collection of face capture coefficients, named according to ARKit + * standards, with each key-value pair representing a blendshape coefficient. The blendshape + * coefficient is a floating point number with a range of [0.0, 1.0]. + * - rotation: Object sequence. The rotation of the head, which includes the following three + * key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0: + * - pitch: Head pitch angle. A positive value means looking down, while a negative value means + * looking up. + * - yaw: Head yaw angle. A positive value means turning left, while a negative value means turning + * right. + * - roll: Head roll angle. A positive value means tilting to the right, while a negative value + * means tilting to the left. + * - timestamp: String. The timestamp of the output result, in milliseconds. 
+ * Here is an example of JSON: + * ```json + * { "faces":[{ "blendshapes":{ "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, + * "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0, "eyeSquintLeft":0.0, "eyeWideLeft":0.0, + * "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0, "eyeLookOutRight":0.0, + * "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0, "jawLeft":0.0, + * "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0, + * "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, + * "mouthFrownLeft":0.0, "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, + * "mouthStretchLeft":0.0, "mouthStretchRight":0.0, "mouthRollLower":0.0, "mouthRollUpper":0.0, + * "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0, "mouthPressRight":0.0, + * "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, + * "mouthUpperUpRight":0.0, "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, + * "browOuterUpLeft":0.0, "browOuterUpRight":0.0, "cheekPuff":0.0, "cheekSquintLeft":0.0, + * "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0, "tongueOut":0.0 }, + * "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5}, + * }], "timestamp":"654879876546" } + * ``` + * + * @return + * - `true`: Facial information JSON parsing successful. + * - `false`: Facial information JSON parsing failed. */ - virtual bool onFaceInfo(const char* outFaceInfo) = 0; - - virtual ~IFaceInfoObserver() {} + virtual bool onFaceInfo(const char* outFaceInfo) = 0; + + virtual ~IFaceInfoObserver() {} }; /** - * Information for the recording file. + * @brief Information about the recording file. * * @since v3.5.2 */ struct RecorderInfo { /** - * The absolute path of the recording file. + * Absolute storage path of the recording file. */ const char* fileName; /** - * The recording duration, in milliseconds. 
+ * Duration of the recording file in milliseconds. */ unsigned int durationMs; /** - * The size in bytes of the recording file. + * Size of the recording file in bytes. */ unsigned int fileSize; RecorderInfo() : fileName(NULL), durationMs(0), fileSize(0) {} - RecorderInfo(const char* name, unsigned int dur, unsigned int size) : fileName(name), durationMs(dur), fileSize(size) {} + RecorderInfo(const char* name, unsigned int dur, unsigned int size) + : fileName(name), durationMs(dur), fileSize(size) {} }; class IMediaRecorderObserver { public: /** - * Occurs when the recording state changes. + * @brief Callback when the recording state changes. * * @since v4.0.0 * - * When the local audio and video recording state changes, the SDK triggers this callback to report the current - * recording state and the reason for the change. + * @details + * When the recording state of the audio and video stream changes, the SDK triggers this callback to + * report the current recording state and the reason for the change. + * + * @param channelId Channel name. + * @param uid User ID. + * @param state Current recording state. See `RecorderState`. + * @param reason Reason for the recording state change. See `RecorderReasonCode`. * - * @param channelId The channel name. - * @param uid ID of the user. - * @param state The current recording state. See \ref agora::media::RecorderState "RecorderState". - * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode "RecorderReasonCode". */ - virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, RecorderReasonCode reason) = 0; + virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, + RecorderReasonCode reason) = 0; /** - * Occurs when the recording information is updated. + * @brief Callback for recording information updates. 
* * @since v4.0.0 * - * After you successfully register this callback and enable the local audio and video recording, the SDK periodically triggers - * the `onRecorderInfoUpdated` callback based on the set value of `recorderInfoUpdateInterval`. This callback reports the - * filename, duration, and size of the current recording file. + * @details + * After successfully registering this callback and starting audio and video stream recording, the + * SDK periodically triggers this callback based on the value of `recorderInfoUpdateInterval` set in + * `MediaRecorderConfiguration`, + * reporting the current recording file's name, duration, and size. * - * @param channelId The channel name. - * @param uid ID of the user. - * @param info Information about the recording file. See \ref agora::media::RecorderInfo "RecorderInfo". + * @param channelId Channel name. + * @param uid User ID. + * @param info Recording file information. See `RecorderInfo`. * */ - virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, const RecorderInfo& info) = 0; + virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, + const RecorderInfo& info) = 0; virtual ~IMediaRecorderObserver() {} }; diff --git a/include/AgoraMediaPlayerTypes.h b/include/AgoraMediaPlayerTypes.h index 2df2a43..0129fa9 100644 --- a/include/AgoraMediaPlayerTypes.h +++ b/include/AgoraMediaPlayerTypes.h @@ -49,31 +49,39 @@ namespace base { static const uint8_t kMaxCharBufferLength = 50; /** * @brief The playback state. - * */ enum MEDIA_PLAYER_STATE { - /** Default state. + /** + * 0: The default state. The media player returns this state code before you open the media resource + * or after you stop the playback. */ PLAYER_STATE_IDLE = 0, - /** Opening the media file. + /** + * 1: Opening the media resource. */ PLAYER_STATE_OPENING, - /** The media file is opened successfully. + /** + * 2: Opens the media resource successfully. */ PLAYER_STATE_OPEN_COMPLETED, - /** Playing the media file. 
+ /** + * 3: The media resource is playing. */ PLAYER_STATE_PLAYING, - /** The playback is paused. + /** + * 4: Pauses the playback. */ PLAYER_STATE_PAUSED, - /** The playback is completed. + /** + * 5: The playback is complete. */ PLAYER_STATE_PLAYBACK_COMPLETED, - /** All loops are completed. + /** + * 6: The loop is complete. */ PLAYER_STATE_PLAYBACK_ALL_LOOPS_COMPLETED, - /** The playback is stopped. + /** + * 7: The playback stops. */ PLAYER_STATE_STOPPED, /** Player pausing (internal) @@ -97,58 +105,73 @@ enum MEDIA_PLAYER_STATE { /** Player set track state (internal) */ PLAYER_STATE_SET_TRACK_INTERNAL, - /** The playback fails. + /** + * 100: The media player fails to play the media resource. */ PLAYER_STATE_FAILED = 100, }; /** - * @brief Player error code - * + * @brief Reasons for the changes in the media player status. */ enum MEDIA_PLAYER_REASON { - /** No error. + /** + * 0: No error. */ PLAYER_REASON_NONE = 0, - /** The parameter is invalid. + /** + * -1: Invalid arguments. */ PLAYER_REASON_INVALID_ARGUMENTS = -1, - /** Internel error. + /** + * -2: Internal error. */ PLAYER_REASON_INTERNAL = -2, - /** No resource. + /** + * -3: No resource. */ PLAYER_REASON_NO_RESOURCE = -3, - /** Invalid media source. + /** + * -4: Invalid media resource. */ PLAYER_REASON_INVALID_MEDIA_SOURCE = -4, - /** The type of the media stream is unknown. + /** + * -5: The media stream type is unknown. */ PLAYER_REASON_UNKNOWN_STREAM_TYPE = -5, - /** The object is not initialized. + /** + * -6: The object is not initialized. */ PLAYER_REASON_OBJ_NOT_INITIALIZED = -6, - /** The codec is not supported. + /** + * -7: The codec is not supported. */ PLAYER_REASON_CODEC_NOT_SUPPORTED = -7, - /** Invalid renderer. + /** + * -8: Invalid renderer. */ PLAYER_REASON_VIDEO_RENDER_FAILED = -8, - /** An error occurs in the internal state of the player. + /** + * -9: An error with the internal state of the player occurs. 
*/ PLAYER_REASON_INVALID_STATE = -9, - /** The URL of the media file cannot be found. + /** + * -10: The URL of the media resource cannot be found. */ PLAYER_REASON_URL_NOT_FOUND = -10, - /** Invalid connection between the player and the Agora server. + /** + * -11: Invalid connection between the player and the Agora Server. */ PLAYER_REASON_INVALID_CONNECTION_STATE = -11, - /** The playback buffer is insufficient. + /** + * -12: The playback buffer is insufficient. */ PLAYER_REASON_SRC_BUFFER_UNDERFLOW = -12, - /** The audio mixing file playback is interrupted. + /** + * -13: The playback is interrupted. */ PLAYER_REASON_INTERRUPTED = -13, - /** The SDK does not support this function. + /** + * -14: The SDK does not support the method being called. */ PLAYER_REASON_NOT_SUPPORTED = -14, /** The token has expired. @@ -157,75 +180,92 @@ enum MEDIA_PLAYER_REASON { /** The ip has expired. */ PLAYER_REASON_IP_EXPIRED = -16, - /** An unknown error occurs. + /** + * -17: An unknown error. */ PLAYER_REASON_UNKNOWN = -17, }; /** * @brief The type of the media stream. - * */ enum MEDIA_STREAM_TYPE { - /** The type is unknown. + /** + * 0: The type is unknown. */ STREAM_TYPE_UNKNOWN = 0, - /** The video stream. + /** + * 1: The video stream. */ STREAM_TYPE_VIDEO = 1, - /** The audio stream. + /** + * 2: The audio stream. */ STREAM_TYPE_AUDIO = 2, - /** The subtitle stream. + /** + * 3: The subtitle stream. */ STREAM_TYPE_SUBTITLE = 3, }; /** - * @brief The playback event. - * + * @brief Media player events. */ enum MEDIA_PLAYER_EVENT { - /** The player begins to seek to the new playback position. + /** + * 0: The player begins to seek to a new playback position. */ PLAYER_EVENT_SEEK_BEGIN = 0, - /** The seek operation completes. + /** + * 1: The player finishes seeking to a new playback position. */ PLAYER_EVENT_SEEK_COMPLETE = 1, - /** An error occurs during the seek operation. + /** + * 2: An error occurs when seeking to a new playback position. 
*/ PLAYER_EVENT_SEEK_ERROR = 2, - /** The player changes the audio track for playback. + /** + * 5: The audio track used by the player has been changed. */ PLAYER_EVENT_AUDIO_TRACK_CHANGED = 5, - /** player buffer low + /** + * 6: The currently buffered data is not enough to support playback. */ PLAYER_EVENT_BUFFER_LOW = 6, - /** player buffer recover + /** + * 7: The currently buffered data is just enough to support playback. */ PLAYER_EVENT_BUFFER_RECOVER = 7, - /** The video or audio is interrupted + /** + * 8: The audio or video playback freezes. */ PLAYER_EVENT_FREEZE_START = 8, - /** Interrupt at the end of the video or audio + /** + * 9: The audio or video playback resumes without freezing. */ PLAYER_EVENT_FREEZE_STOP = 9, - /** switch source begin - */ + /** + * 10: The player starts switching the media resource. + */ PLAYER_EVENT_SWITCH_BEGIN = 10, - /** switch source complete - */ + /** + * 11: Media resource switching is complete. + */ PLAYER_EVENT_SWITCH_COMPLETE = 11, - /** switch source error - */ + /** + * 12: Media resource switching error. + */ PLAYER_EVENT_SWITCH_ERROR = 12, - /** An application can render the video to less than a second + /** + * 13: The first video frame is rendered. */ PLAYER_EVENT_FIRST_DISPLAYED = 13, - /** cache resources exceed the maximum file count + /** + * 14: The cached media files reach the limit in number. */ PLAYER_EVENT_REACH_CACHE_FILE_MAX_COUNT = 14, - /** cache resources exceed the maximum file size + /** + * 15: The cached media files reach the limit in aggregate storage space. */ PLAYER_EVENT_REACH_CACHE_FILE_MAX_SIZE = 15, /** Triggered when a retry is required to open the media @@ -237,66 +277,98 @@ enum MEDIA_PLAYER_EVENT { /** Triggered when retrying to open media fails */ PLAYER_EVENT_TRY_OPEN_FAILED = 18, + /** Triggered when an http redirect occurs + * @technical preview + */ + PLAYER_EVENT_HTTP_REDIRECT = 19, }; /** - * @brief The play preload another source event. 
- * + * @brief Events that occur when media resources are preloaded. */ enum PLAYER_PRELOAD_EVENT { - /** preload source begin - */ + /** + * 0: Starts preloading media resources. + */ PLAYER_PRELOAD_EVENT_BEGIN = 0, - /** preload source complete - */ + /** + * 1: Preloading media resources is complete. + */ PLAYER_PRELOAD_EVENT_COMPLETE = 1, - /** preload source error - */ + /** + * 2: An error occurs when preloading media resources. + */ PLAYER_PRELOAD_EVENT_ERROR = 2, }; /** - * @brief The information of the media stream object. - * + * @brief The detailed information of the media stream. */ struct PlayerStreamInfo { - /** The index of the media stream. */ + /** + * The index of the media stream. + */ int streamIndex; - /** The type of the media stream. See {@link MEDIA_STREAM_TYPE}. */ + /** + * The type of the media stream. See `MEDIA_STREAM_TYPE`. + */ MEDIA_STREAM_TYPE streamType; - /** The codec of the media stream. */ + /** + * The codec of the media stream. + */ char codecName[kMaxCharBufferLength]; - /** The language of the media stream. */ + /** + * The language of the media stream. + */ char language[kMaxCharBufferLength]; - /** The frame rate (fps) if the stream is video. */ + /** + * This parameter only takes effect for video streams, and indicates the video frame rate (fps). + */ int videoFrameRate; - /** The video bitrate (bps) if the stream is video. */ + /** + * This parameter only takes effect for video streams, and indicates the video bitrate (bps). + */ int videoBitRate; - /** The video width (pixel) if the stream is video. */ + /** + * This parameter only takes effect for video streams, and indicates the video width (pixel). + */ int videoWidth; - /** The video height (pixel) if the stream is video. */ + /** + * This parameter only takes effect for video streams, and indicates the video height (pixel). + */ int videoHeight; - /** The rotation angle if the steam is video. 
*/ + /** + * This parameter only takes effect for video streams, and indicates the video rotation angle. + */ int videoRotation; - /** The sample rate if the stream is audio. */ + /** + * This parameter only takes effect for audio streams, and indicates the audio sample rate (Hz). + */ int audioSampleRate; - /** The number of audio channels if the stream is audio. */ + /** + * This parameter only takes effect for audio streams, and indicates the audio channel number. + */ int audioChannels; - /** The number of bits per sample if the stream is audio. */ + /** + * This parameter only takes effect for audio streams, and indicates the bit number of each audio + * sample. + */ int audioBitsPerSample; - /** The total duration (millisecond) of the media stream. */ + /** + * The total duration (ms) of the media stream. + */ int64_t duration; PlayerStreamInfo() : streamIndex(0), @@ -316,90 +388,104 @@ struct PlayerStreamInfo { }; /** - * @brief The information of the media stream object. - * + * @brief Information about the video bitrate of the media resource being played. */ struct SrcInfo { - /** The bitrate of the media stream. The unit of the number is kbps. - * + /** + * The video bitrate (Kbps) of the media resource being played. */ int bitrateInKbps; - /** The name of the media stream. - * - */ + /** + * The name of the media resource. + */ const char* name; }; /** - * @brief The type of the media metadata. - * + * @brief The type of media metadata. */ enum MEDIA_PLAYER_METADATA_TYPE { - /** The type is unknown. + /** + * 0: The type is unknown. */ PLAYER_METADATA_TYPE_UNKNOWN = 0, - /** The type is SEI. + /** + * 1: The type is SEI. */ PLAYER_METADATA_TYPE_SEI = 1, }; +/** + * @brief Statistics about the media files being cached. + */ struct CacheStatistics { - /** total data size of uri + /** + * The size (bytes) of the media file being played. */ int64_t fileSize; - /** data of uri has cached + /** + * The size (bytes) of the media file that you want to cache. 
 */ int64_t cacheSize; - /** data of uri has downloaded + /** + * The size (bytes) of the media file that has been downloaded. */ int64_t downloadSize; }; /** - * @brief The real time statistics of the media stream being played. - * + * @brief The information of the media file being played. */ struct PlayerPlaybackStats { - /** Video fps. + /** + * The frame rate (fps) of the video. */ int videoFps; - /** Video bitrate (Kbps). + /** + * The bitrate (kbps) of the video. */ int videoBitrateInKbps; - /** Audio bitrate (Kbps). + /** + * The bitrate (kbps) of the audio. */ int audioBitrateInKbps; - /** Total bitrate (Kbps). + /** + * The total bitrate (kbps) of the media stream. */ int totalBitrateInKbps; }; /** - * @brief The updated information of media player. - * + * @brief Information related to the media player. */ struct PlayerUpdatedInfo { /** @technical preview */ const char* internalPlayerUuid; - /** The device ID of the playback device. + /** + * The ID of a device. */ const char* deviceId; - /** Video height. + /** + * Height (pixel) of the video. */ int videoHeight; - /** Video width. + /** + * Width (pixel) of the video. */ int videoWidth; - /** Audio sample rate. + /** + * Audio sample rate (Hz). */ int audioSampleRate; - /** The audio channel number. + /** + * The number of audio channels. */ int audioChannels; - /** The bit number of each audio sample. + /** + * The number of bits per audio sample point. */ int audioBitsPerSample; @@ -420,89 +506,132 @@ class IMediaPlayerCustomDataProvider { public: /** - * @brief The player requests to read the data callback, you need to fill the specified length of data into the buffer - * @param buffer the buffer pointer that you need to fill data. - * @param bufferSize the bufferSize need to fill of the buffer pointer. - * @return you need return offset value if succeed. return 0 if failed. + * @brief Occurs when the SDK reads the media resource data. 
+ * + * @details + * When you call the `openWithMediaSource` method to open a media resource, the SDK triggers this + * callback and requests you to pass in the buffer of the media resource data. + * + * @param buffer An input parameter. Data buffer (bytes). Write the `bufferSize` data reported by + * the SDK into this parameter. + * @param bufferSize The length of the data buffer (bytes). + * + * @return + * - If the data is read successfully, pass in the length of the data (bytes) you actually read in + * the return value. + * - If reading the data fails, pass in 0 in the return value. */ virtual int onReadData(unsigned char *buffer, int bufferSize) = 0; /** - * @brief The Player seek event callback, you need to operate the corresponding stream seek operation, You can refer to the definition of lseek() at https://man7.org/linux/man-pages/man2/lseek.2.html - * @param offset the value of seek offset. - * @param whence the postion of start seeking, the directive whence as follows: - * 0 - SEEK_SET : The file offset is set to offset bytes. - * 1 - SEEK_CUR : The file offset is set to its current location plus offset bytes. - * 2 - SEEK_END : The file offset is set to the size of the file plus offset bytes. - * 65536 - AVSEEK_SIZE : Optional. Passing this as the "whence" parameter to a seek function causes it to return the filesize without seeking anywhere. + * @brief Occurs when the SDK seeks the media resource data. + * + * @details + * When you call the `openWithMediaSource` or `open` method to open a custom media resource, the SDK + * triggers this callback to request the specified location in the media resource. + * + * @param offset An input parameter. The offset of the target position relative to the starting + * point, in bytes. The value can be positive or negative. + * @param whence An input parameter. The starting point. 
You can set it as one of the following + * values: + * - 0: The starting point is the head of the data, and the actual data offset after seeking is + * `offset`. + * - 1: The starting point is the current position, and the actual data offset after seeking is the + * current position plus `offset`. + * - 2: The starting point is the end of the data, and the actual data offset after seeking is the + * whole data length plus `offset`. + * - 65536: Do not perform position seeking, return the file size. Agora recommends that you use + * this parameter value when playing pure audio files such as MP3 and WAV. + * * @return - * whence == 65536, return filesize if you need. - * whence >= 0 && whence < 3 , return offset value if succeed. return -1 if failed. + * - When `whence` is `65536`, the media file size is returned. + * - When `whence` is `0`, `1`, or `2`, the actual data offset after the seeking is returned. + * - -1: Seeking failed. */ virtual int64_t onSeek(int64_t offset, int whence) = 0; virtual ~IMediaPlayerCustomDataProvider() {} }; +/** + * @brief Information related to the media file to be played and the playback scenario + * configurations. + */ struct MediaSource { /** - * The URL of the media file that you want to play. + * The URL of the media file to be played. + * @note If you open a common media resource, pass in the value to `url`. If you open a custom media + * resource, pass in the value to `provider`. Agora recommends that you do not pass in values to + * both parameters in one call; otherwise, this call may fail. */ const char* url; /** - * The URI of the media file - * - * When caching is enabled, if the url cannot distinguish the cache file name, - * the uri must be able to ensure that the cache file name corresponding to the url is unique. + * The URI (Uniform Resource Identifier) of the media file. */ const char* uri; /** - * Set the starting position for playback, in ms. + * The starting position (ms) for playback. The default value is 0. 
 */ int64_t startPos; /** - * Determines whether to autoplay after opening a media resource. - * - true: (Default) Autoplay after opening a media resource. - * - false: Do not autoplay after opening a media resource. + * Whether to enable autoplay once the media file is opened: + * - `true`: (Default) Yes. + * - `false`: No. + * @note If autoplay is disabled, you need to call the `play` method to play a media file after it + * is opened. */ bool autoPlay; /** - * Determines whether to enable cache streaming to local files. If enable cached, the media player will - * use the url or uri as the cache index. - * + * Whether to cache the media file when it is being played: + * - `true`: Enables caching. + * - `false`: (Default) Disables caching. * @note - * The local cache function only supports on-demand video/audio streams and does not support live streams. - * Caching video and audio files based on the HLS protocol (m3u8) to your local device is not supported. - * - * - true: Enable cache. - * - false: (Default) Disable cache. + * - Agora only supports caching on-demand audio and video streams that are not transmitted in HLS + * protocol. + * - If you need to enable caching, pass in a value to `uri`; otherwise, caching is based on the + * `url` of the media file. + * - If you enable this function, the Media Player caches part of the media file being played on + * your local device, and you can play the cached media file without internet connection. The + * statistics about the media file being cached are updated every second after the media file is + * played. See `CacheStatistics`. */ bool enableCache; /** - * Determines whether to enable multi-track audio stream decoding. - * Then you can select multi audio track of the media file for playback or publish to channel - * - * @note - * If you use the selectMultiAudioTrack API, you must set enableMultiAudioTrack to true. - * - * - true: Enable MultiAudioTrack;. - * - false: (Default) Disable MultiAudioTrack;. 
+ * Whether to allow the selection of different audio tracks when playing this media file:
+ * - `true`: Allows the selection of different audio tracks.
+ * - `false`: (Default) Does not allow the selection of different audio tracks.
+ * If you need to set different audio tracks for local playback and publishing to the channel, you
+ * need to set this parameter to `true`, and then call the `selectMultiAudioTrack` method to select
+ * the audio track.
  */
 bool enableMultiAudioTrack;
 /**
- * Determines whether the opened media resource is a stream through the Agora Broadcast Streaming Network(CDN).
- * - true: It is a stream through the Agora Broadcast Streaming Network.
- * - false: (Default) It is not a stream through the Agora Broadcast Streaming Network.
+ * Whether the media resource to be opened is a live stream or on-demand video distributed through
+ * Media Broadcast service:
+ * - `true`: The media resource to be played is a live or on-demand video distributed through Media
+ * Broadcast service.
+ * - `false`: (Default) The media resource is not a live stream or on-demand video distributed
+ * through Media Broadcast service.
+ * @note If you need to open a live stream or on-demand video distributed through Broadcast
+ * Streaming service, pass in the URL of the media resource to `url`, and set `isAgoraSource` as
+ * `true`; otherwise, you don't need to set the `isAgoraSource` parameter.
  */
 Optional<bool> isAgoraSource;
 /**
- * Determines whether the opened media resource is a live stream. If is a live stream, it can speed up the opening of media resources.
- * - true: It is a live stream.
- * - false: (Default) It is not is a live stream.
+ * Whether the media resource to be opened is a live stream:
+ * - `true`: The media resource is a live stream.
+ * - `false`: (Default) The media resource is not a live stream.
+ * If the media resource you want to open is a live stream, Agora recommends that you set this
+ * parameter as `true` so that the live stream can be loaded more quickly.
+ * @note If the media resource you open is not a live stream, but you set `isLiveSource` as `true`,
+ * the media resource will not be loaded more quickly.
  */
 Optional<bool> isLiveSource;
 /**
- * External custom data source object
+ * The callback for custom media resource files. See `IMediaPlayerCustomDataProvider`.
+ * @note If you open a custom media resource, pass in the value to `provider`. If you open a common
+ * media resource, pass in the value to `url`. Agora recommends that you do not pass in values to
+ * both `url` and `provider` in one call; otherwise, this call may fail.
  */
 IMediaPlayerCustomDataProvider* provider;
diff --git a/include/IAgoraLog.h b/include/IAgoraLog.h
index 876a75c..9201206 100644
--- a/include/IAgoraLog.h
+++ b/include/IAgoraLog.h
@@ -28,13 +28,29 @@
 namespace agora {
 namespace commons {
 /**
- * Supported logging severities of SDK
+ * @brief The output log level of the SDK.
 */
 OPTIONAL_ENUM_CLASS LOG_LEVEL {
+  /**
+   * 0: Do not output any log information.
+   */
  LOG_LEVEL_NONE = 0x0000,
+  /**
+   * 0x0001: (Default) Output `FATAL`, `ERROR`, `WARN`, and `INFO` level log information. We recommend
+   * setting your log filter to this level.
+   */
  LOG_LEVEL_INFO = 0x0001,
+  /**
+   * 0x0002: Output `FATAL`, `ERROR`, and `WARN` level log information.
+   */
  LOG_LEVEL_WARN = 0x0002,
+  /**
+   * 0x0004: Output `FATAL` and `ERROR` level log information.
+   */
  LOG_LEVEL_ERROR = 0x0004,
+  /**
+   * 0x0008: Output `FATAL` level log information.
+   */
  LOG_LEVEL_FATAL = 0x0008,
  LOG_LEVEL_API_CALL = 0x0010,
  LOG_LEVEL_DEBUG = 0x0020,
@@ -62,12 +78,36 @@ class ILogWriter {
  virtual ~ILogWriter() {}
 };
+/**
+ * @brief The output log level of the SDK.
+ */
 enum LOG_FILTER_TYPE {
+  /**
+   * 0: Do not output any log information.
+   */
  LOG_FILTER_OFF = 0,
+  /**
+   * 0x080f: Output all log information.
Set your log filter to this level if you want to get the most + * complete log file. + */ LOG_FILTER_DEBUG = 0x080f, + /** + * 0x000f: Output `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR`, `LOG_FILTER_WARN`, and + * `LOG_FILTER_INFO` level log information. We recommend setting your log filter to this level. + */ LOG_FILTER_INFO = 0x000f, + /** + * 0x000e: Output `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR`, and `LOG_FILTER_WARN` level log + * information. + */ LOG_FILTER_WARN = 0x000e, + /** + * 0x000c: Output `LOG_FILTER_CRITICAL` and `LOG_FILTER_ERROR` level log information. + */ LOG_FILTER_ERROR = 0x000c, + /** + * 0x0008: Output `LOG_FILTER_CRITICAL` level log information. + */ LOG_FILTER_CRITICAL = 0x0008, LOG_FILTER_MASK = 0x80f, }; @@ -78,16 +118,34 @@ const uint32_t MIN_LOG_SIZE = 128 * 1024; // 128KB */ const uint32_t DEFAULT_LOG_SIZE_IN_KB = 2048; -/** Definition of LogConfiguration +/** + * @brief Configuration of Agora SDK log files. */ struct LogConfig { - /**The log file path, default is NULL for default log path + /** + * The complete path of the log files. Agora recommends using the default log directory. If you need + * to modify the default directory, ensure that the directory you specify exists and is writable. + * The default log directory is: + * - Android: /storage/emulated/0/Android/data//files/agorasdk.log. + * - iOS: App Sandbox/Library/caches/agorasdk.log. + * - macOS: + * - If Sandbox is enabled: App Sandbox/Library/Logs/agorasdk.log. For example, + * /Users//Library/Containers//Data/Library/Logs/agorasdk.log. + * - If Sandbox is disabled: ~/Library/Logs/agorasdk.log + * - Windows: C:\Users\\AppData\Local\Agora\\agorasdk.log. */ const char* filePath; - /** The log file size, KB , set 2048KB to use default log size + /** + * The size (KB) of an `agorasdk.log` file. The value range is [128,20480]. The default value is + * 2,048 KB. 
If you set `fileSizeInKByte` smaller than 128 KB, the SDK automatically adjusts it to + * 128 KB; if you set `fileSizeInKByte` greater than 20,480 KB, the SDK automatically adjusts it to + * 20,480 KB. */ uint32_t fileSizeInKB; - /** The log level, set LOG_LEVEL_INFO to use default log level + /** + * The output level of the SDK log file. See `LOG_LEVEL`. + * For example, if you set the log level to WARN, the SDK outputs the logs within levels FATAL, + * ERROR, and WARN. */ LOG_LEVEL level; diff --git a/include/IAgoraMediaEngine.h b/include/IAgoraMediaEngine.h index 2ad93ee..77c1411 100644 --- a/include/IAgoraMediaEngine.h +++ b/include/IAgoraMediaEngine.h @@ -12,16 +12,28 @@ namespace agora { namespace media { -/** dual-mono music output mode +/** + * @brief The channel mode. */ enum AUDIO_MIXING_DUAL_MONO_MODE { - /* 0: Original mode */ + /** + * 0: Original mode. + */ AUDIO_MIXING_DUAL_MONO_AUTO = 0, - /* 1: Left channel mode */ + /** + * 1: Left channel mode. This mode replaces the audio of the right channel with the audio of the + * left channel, which means the user can only hear the audio of the left channel. + */ AUDIO_MIXING_DUAL_MONO_L = 1, - /* 2: Right channel mode */ + /** + * 2: Right channel mode. This mode replaces the audio of the left channel with the audio of the + * right channel, which means the user can only hear the audio of the right channel. + */ AUDIO_MIXING_DUAL_MONO_R = 2, - /* 3: Mixed channel mode */ + /** + * 3: Mixed channel mode. This mode mixes the audio of the left channel and the right channel, which + * means the user can hear the audio of the left channel and the right channel at the same time. + */ AUDIO_MIXING_DUAL_MONO_MIX = 3 }; @@ -32,40 +44,64 @@ enum AUDIO_MIXING_DUAL_MONO_MODE { class IMediaEngine { public: /** - * Registers an audio frame observer object. + * @brief Registers an audio frame observer object. * - * @note - * Ensure that you call this method before \ref IRtcEngine::joinChannel "joinChannel". 
+ * @details
+ * Call this method to register an audio frame observer object (register a callback). When you need
+ * the SDK to trigger the `onMixedAudioFrame`, `onRecordAudioFrame`, `onPlaybackAudioFrame`,
+ * `onPlaybackAudioFrameBeforeMixing` or `onEarMonitoringAudioFrame` callback, you need to use this
+ * method to register the callbacks.
+ * Call timing: Call this method before joining a channel.
+ *
+ * @param observer The observer instance. See `IAudioFrameObserver`. Set the value as NULL to
+ * release the instance. Agora recommends calling this method after receiving `onLeaveChannel` to
+ * release the audio observer object.
  *
- * @param observer A pointer to the audio frame observer object: IAudioFrameObserver,
- * nullptr means unregistering observer instead.
  * @return
  * - 0: Success.
  * - < 0: Failure.
  */
 virtual int registerAudioFrameObserver(IAudioFrameObserver* observer) = 0;
 /**
- * Registers a video frame observer object.
+ * @brief Registers a raw video frame observer object.
+ *
+ * @details
+ * If you want to observe raw video frames (such as YUV or RGBA format), Agora recommends that you
+ * implement one `IVideoFrameObserver` class with this method.
+ * When calling this method to register a video observer, you can register callbacks in the
+ * `IVideoFrameObserver` class as needed. After you successfully register the video frame observer,
+ * the SDK triggers the registered callbacks each time a video frame is received.
+ * Applicable scenarios: After registering the raw video observer, you can use the obtained raw
+ * video data in various video pre-processing scenarios, such as virtual backgrounds and image
+ * enhancement by yourself.
+ * Call timing: Call this method before joining a channel.
  *
  * @note
- * - Ensure that you call this method before joining the channel.
- * - If you register an observer for video raw video data, you cannot register an IVideoEncodedFrameObserver
- * object.
+ * When handling the video data returned in the callbacks, pay attention to the changes in the + * `width` and `height` parameters, which may be adapted under the following circumstances: + * - When network conditions deteriorate, the video resolution decreases incrementally. + * - If the user adjusts the video profile, the resolution of the video returned in the callbacks + * also changes. + * + * @param observer The observer instance. See `IVideoFrameObserver`. To release the instance, set + * the value as NULL. * - * @param observer A pointer to the video frame observer: IVideoFrameObserver. * @return * - 0: Success. * - < 0: Failure. */ virtual int registerVideoFrameObserver(IVideoFrameObserver* observer) = 0; /** - * Registers a receiver object for the encoded video image. + * @brief Registers a receiver object for the encoded video image. * - * @note - * - Ensure that you call this method before joining the channel. + * @details + * If you only want to observe encoded video frames (such as H.264 format) without decoding and + * rendering the video, Agora recommends that you implement one `IVideoEncodedFrameObserver` class + * through this method. + * + * @note Call this method before joining a channel. * - * @param observer A pointer to the observer of the encoded video image: \ref IVideoEncodedFrameObserver - * "IVideoEncodedFrameObserver". + * @param observer The video frame observer object. See `IVideoEncodedFrameObserver`. * * @return * - 0: Success. @@ -74,12 +110,26 @@ class IMediaEngine { virtual int registerVideoEncodedFrameObserver(IVideoEncodedFrameObserver* observer) = 0; /** - * Registers a face info observer object. + * @brief Registers or unregisters a facial information observer. + * + * @details + * You can call this method to register the `onFaceInfo` callback to receive the facial information + * processed by Agora speech driven extension. 
When calling this method to register a facial
+ * information observer, you can register callbacks in the `IFaceInfoObserver` class as needed.
+ * After successfully registering the facial information observer, the SDK triggers the callback you
+ * have registered when it captures the facial information converted by the speech driven extension.
+ * Applicable scenarios: Facial information processed by the Agora speech driven extension is BS
+ * (Blend Shape) data that complies with ARKit standards. You can further process the BS data using
+ * third-party 3D rendering engines, such as driving an avatar to make mouth movements corresponding to
+ * speech.
  *
  * @note
- * Ensure that you call this method before \ref IRtcEngine::joinChannel "joinChannel".
+ * - Call this method before joining a channel.
+ * - Before calling this method, you need to make sure that the speech driven extension has been
+ * enabled by calling `enableExtension`.
  *
- * @param observer A pointer to the face info observer object: IFaceInfoObserver.
+ * @param observer Facial information observer, see `IFaceInfoObserver`. If you need to unregister a
+ * facial information observer, pass in NULL.
  *
  * @return
  * - 0: Success.
@@ -88,29 +138,55 @@ class IMediaEngine {
 virtual int registerFaceInfoObserver(IFaceInfoObserver* observer) = 0;
 /**
- * Pushes the external audio data to the app.
+ * @brief Pushes the external audio frame.
+ *
+ * @details
+ * Call this method to push external audio frames through the audio track.
+ * Call timing: Before calling this method to push external audio data, perform the following
+ * steps:1. Call `createCustomAudioTrack` to create a custom audio track and get the audio track ID.
+ * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set
+ * `publishCustomAudioTrackId` to the audio track ID that you want to publish, and set
+ * `publishCustomAudioTrack` to `true`.
+ *
+ * @param frame The external audio frame. See `AudioFrame`.
+ * @param trackId The audio track ID. If you want to publish a custom external audio source, set
+ * this parameter to the ID of the corresponding custom audio track you want to publish.
  *
- * @param frame The audio buffer data.
- * @param trackId The audio track ID.
  * @return
  * - 0: Success.
  * - < 0: Failure.
  */
 virtual int pushAudioFrame(IAudioFrameObserverBase::AudioFrame* frame, rtc::track_id_t trackId = 0) = 0;
 /**
- * Pulls the remote audio data.
+ * @brief Pulls the remote audio data.
  *
- * After a successful method call, the app pulls the decoded and mixed audio data for playback.
+ * @details
+ * After a successful call of this method, the app pulls the decoded and mixed audio data for
+ * playback.
+ * Call timing: Call this method after joining a channel.
+ * Before calling this method, call `setExternalAudioSink` `(enabled: true)` to notify the app to
+ * enable and set the external audio rendering.
  *
- * The difference between this method and the \ref onPlaybackAudioFrame "onPlaybackAudioFrame" is as follows:
- * - `onPlaybackAudioFrame`: The SDK sends the audio data to the app once every 10 ms. Any delay in processing
- * the audio frames may result in audio jitter.
- * - `pullAudioFrame`: The app pulls the remote audio data. After setting the audio data parameters, the
- * SDK adjusts the frame buffer and avoids problems caused by jitter in the external audio playback.
+ * @note
+ * Both this method and the `onPlaybackAudioFrame` callback can be used to get audio data after
+ * remote mixing. After calling `setExternalAudioSink` to enable external audio rendering, the app
+ * will no longer be able to obtain data from the `onPlaybackAudioFrame` callback.
Therefore, you + * should choose between this method and the `onPlaybackAudioFrame` callback based on your actual + * business requirements. The specific distinctions between them are as follows: + * - After calling this method, the app automatically pulls the audio data from the SDK. By setting + * the audio data parameters, the SDK adjusts the frame buffer to help the app handle latency, + * effectively avoiding audio playback jitter. + * - After registering the `onPlaybackAudioFrame` callback, the SDK sends the audio data to the app + * through the callback. Any delay in processing the audio frames may result in audio jitter. + * This method is only used for retrieving audio data after remote mixing. If you need to get audio + * data from different audio processing stages such as capture and playback, you can register the + * corresponding callbacks by calling `registerAudioFrameObserver`. + * + * @param frame Pointers to `AudioFrame`. * - * @param frame The pointer to the audio frame: AudioFrame. * @return * - 0: Success. * - < 0: Failure. @@ -118,21 +193,27 @@ class IMediaEngine { virtual int pullAudioFrame(IAudioFrameObserverBase::AudioFrame* frame) = 0; /** - * Sets the external video source. - * - * Once the external video source is enabled, the SDK prepares to accept the external video frame. - * - * @param enabled Determines whether to enable the external video source. - * - true: Enable the external video source. Once set, the SDK creates the external source and prepares - * video data from `pushVideoFrame` or `pushEncodedVideoImage`. - * - false: Disable the external video source. - * @param useTexture Determines whether to use textured video data. - * - true: Use texture, which is not supported now. - * - False: Do not use texture. - * @param sourceType Determines the type of external video source frame. - * - ENCODED_VIDEO_FRAME: The external video source is encoded. - * - VIDEO_FRAME: The external video source is not encoded. 
- * @param encodedVideoOption Video encoded track option, which is only used for ENCODED_VIDEO_FRAME. + * @brief Configures the external video source. + * + * @details + * After calling this method to enable an external video source, you can call `pushVideoFrame` to + * push external video data to the SDK. + * Call timing: Call this method before joining a channel. + * + * @note Dynamic switching of video sources is not supported within the channel. To switch from an + * external video source to an internal video source, you must first leave the channel, call this + * method to disable the external video source, and then rejoin the channel. + * + * @param enabled Whether to use the external video source: + * - `true`: Use the external video source. The SDK prepares to accept the external video frame. + * - `false`: (Default) Do not use the external video source. + * @param useTexture Whether to use the external video frame in the Texture format. + * - `true`: Use the external video frame in the Texture format. + * - `false`: (Default) Do not use the external video frame in the Texture format. + * @param sourceType Whether the external video frame is encoded. See `EXTERNAL_VIDEO_SOURCE_TYPE`. + * @param encodedVideoOption Video encoding options. This parameter needs to be set if `sourceType` + * is `ENCODED_VIDEO_FRAME`. To set this parameter, contact `technical support`. + * * @return * - 0: Success. * - < 0: Failure. @@ -141,55 +222,91 @@ class IMediaEngine { bool enabled, bool useTexture, EXTERNAL_VIDEO_SOURCE_TYPE sourceType = VIDEO_FRAME, rtc::SenderOptions encodedVideoOption = rtc::SenderOptions()) = 0; +#if defined(__ANDROID__) /** - * Sets the external audio source. + * @brief Sets the EGL context for rendering remote video streams. * - * @note - * Ensure that you call this method before joining the channel. + * @details + * This method can replace the default remote EGL context within the SDK, making it easier to manage + * the EGL context. 
+ * When the engine is destroyed, the SDK will automatically release the EGL context. + * Applicable scenarios: This method is suitable for using a custom video rendering method instead + * of the default SDK rendering method to render remote video frames in Texture format. + * Call timing: Call this method before joining a channel. * - * @deprecated This method is deprecated. Use createCustomAudioTrack(rtc::AUDIO_TRACK_TYPE trackType, const rtc::AudioTrackConfig& config) instead. + * @note This method is for Android only. + * + * @param eglContext The EGL context for rendering remote video streams. * - * @param enabled Determines whether to enable the external audio source: - * - true: Enable the external audio source. - * - false: (default) Disable the external audio source. - * @param sampleRate The Sample rate (Hz) of the external audio source, which can set be as - * 8000, 16000, 32000, 44100, or 48000. - * @param channels The number of channels of the external audio source, which can be set as 1 or 2: - * - 1: Mono. - * - 2: Stereo. - * @param localPlayback Enable/Disables the local playback of external audio track: - * - true: Enable local playback - * - false: (Default) Do not enable local playback - * @param publish Determines whether to publish the external audio track: - * - true: (Default) Publish the external audio track. - * - false: Don`t publish the external audio track. * @return * - 0: Success. * - < 0: Failure. */ - virtual int setExternalAudioSource(bool enabled, int sampleRate, int channels, bool localPlayback = false, bool publish = true) __deprecated = 0; + virtual int setExternalRemoteEglContext(void* eglContext) = 0; +#endif /** - * Create a custom audio track and get the audio track id. - * - * @note Ensure that you call this method before calling `joinChannel`. - * - * @param trackType The type of custom audio track - * See AUDIO_TRACK_TYPE. + * @brief Sets the external audio source parameters. 
+ * + * @deprecated This method is deprecated. Use createCustomAudioTrack(rtc::AUDIO_TRACK_TYPE + * trackType, const rtc::AudioTrackConfig& config) instead. + * + * @details + * Call timing: Call this method before joining a channel. + * + * @param enabled Whether to enable the external audio source: + * - `true`: Enable the external audio source. + * - `false`: (Default) Disable the external audio source. + * @param sampleRate The sample rate (Hz) of the external audio source which can be set as `8000`, + * `16000`, `32000`, `44100`, or `48000`. + * @param channels The number of channels of the external audio source, which can be set as `1` + * (Mono) or `2` (Stereo). + * @param localPlayback Whether to play the external audio source: + * - `true`: Play the external audio source. + * - `false`: (Default) Do not play the external source. + * @param publish Whether to publish audio to the remote users: + * - `true`: (Default) Publish audio to the remote users. + * - `false`: Do not publish audio to the remote users. * - * @param config The config of custom audio track - * See AudioTrackConfig. + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setExternalAudioSource(bool enabled, int sampleRate, int channels, bool localPlayback = false, bool publish = true) __deprecated = 0; + + /** + * @brief Creates a custom audio track. + * + * @details + * To publish a custom audio source, see the following steps:1. Call this method to create a custom + * audio track and get the audio track ID. + * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set + * `publishCustomAudioTrackId` to the audio track ID that you want to publish, and set + * `publishCustomAudioTrack` to `true`. + * 3. Call `pushAudioFrame` and specify `trackId` as the audio track ID set in step 2. You can then + * publish the corresponding custom audio source in the channel. 
+ * + * @note Call this method before joining a channel. + * + * @param trackType The type of the custom audio track. See `AUDIO_TRACK_TYPE`.Attention: If + * `AUDIO_TRACK_DIRECT` is specified for this parameter, you must set `publishMicrophoneTrack` to + * `false` in `ChannelMediaOptions` when calling `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)` to join the channel; otherwise, + * joining the channel fails and returns the error code -2. + * @param config The configuration of the custom audio track. See `AudioTrackConfig`. * * @return - * - If the call is successful, SDK returns audio track id. - * - If the call fails, SDK returns 0xffffffff. + * - If the method call is successful, the audio track ID is returned as the unique identifier of + * the audio track. + * - If the method call fails, 0xffffffff is returned. */ virtual rtc::track_id_t createCustomAudioTrack(rtc::AUDIO_TRACK_TYPE trackType, const rtc::AudioTrackConfig& config) = 0; /** - * Destroy custom audio track by trackId + * @brief Destroys the specified audio track. * - * @param trackId The custom audio track id. + * @param trackId The custom audio track ID returned in `createCustomAudioTrack`. * * @return * - 0: Success. @@ -198,25 +315,24 @@ class IMediaEngine { virtual int destroyCustomAudioTrack(rtc::track_id_t trackId) = 0; /** - * Sets the external audio sink. - * - * This method applies to scenarios where you want to use external audio - * data for playback. After calling the \ref IRtcEngine::initialize "initialize" - * method and pass value of false in the `enableAudioDevice` member in the RtcEngineContext struct, you can call - * the \ref agora::media::IMediaEngine::pullAudioFrame "pullAudioFrame" method to pull the remote audio data, process - * it, and play it with the audio effects that you want. 
- * - * @note - * Once you call the \ref IRtcEngine::initialize "initialize" method and pass value of false in the `enableAudioDevice` - * member in the RtcEngineContext struct, the app will not retrieve any audio data from the - * \ref agora::media::IAudioFrameObserver::onPlaybackAudioFrame "onPlaybackAudioFrame" callback. - * - * @param enabled Sets whether or not to the external audio sink - * - true: Enables the external audio sink. - * - false: Disables the external audio sink. - * @param sampleRate Sets the sample rate (Hz) of the external audio sink, which can be set as 16000, 32000, 44100 or 48000. - * @param channels Sets the number of audio channels of the external - * audio sink: + * @brief Sets the external audio sink. + * + * @details + * After enabling the external audio sink, you can call `pullAudioFrame` to pull remote audio + * frames. The app can process the remote audio and play it with the audio effects that you want. + * Applicable scenarios: This method applies to scenarios where you want to use external audio data + * for playback. + * Call timing: Call this method before joining a channel. + * + * @note Once you enable the external audio sink, the app will not retrieve any audio data from the + * `onPlaybackAudioFrame` callback. + * + * @param enabled Whether to enable or disable the external audio sink: + * - `true`: Enables the external audio sink. + * - `false`: (Default) Disables the external audio sink. + * @param sampleRate The sample rate (Hz) of the external audio sink, which can be set as 16000, + * 32000, 44100, or 48000. + * @param channels The number of audio channels of the external audio sink: * - 1: Mono. * - 2: Stereo. * @@ -243,10 +359,42 @@ class IMediaEngine { virtual int enableCustomAudioLocalPlayback(rtc::track_id_t trackId, bool enabled) = 0; /** - * Pushes the external video frame to the app. + * @brief Pushes the external raw video frame to the SDK through video tracks. 
+ * + * @details + * To publish a custom video source, see the following steps:1. Call `createCustomVideoTrack` to + * create a video track and get the video track ID. + * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set + * `customVideoTrackId` to the video track ID that you want to publish, and set + * `publishCustomVideoTrack` to `true`. + * 3. Call this method and specify `videoTrackId` as the video track ID set in step 2. You can then + * publish the corresponding custom video source in the channel. + * Applicable scenarios: The SDK supports the ID3D11Texture2D video format since v4.2.3, which is + * widely used in game scenarios. When you need to push this type of video frame to the SDK, call + * this method and set the `format` in the `frame` to `VIDEO_TEXTURE_ID3D11TEXTURE2D`, set the + * `d3d11_texture_2d` and `texture_slice_index` members, and set the format of the video frame to + * ID3D11Texture2D. * - * @param frame The external video frame: ExternalVideoFrame. - * @param videoTrackId The id of the video track. + * @note + * If you only need to push one custom video source to the channel, you can directly call the + * `setExternalVideoSource` method and the SDK will automatically create a video track with the + * `videoTrackId` set to 0. + * DANGER: After calling this method, even if you stop pushing external video frames to the SDK, the + * custom video stream will still be counted as the video duration usage and incur charges. Agora + * recommends that you take appropriate measures based on the actual situation to avoid such video + * billing. + * - If you no longer need to capture external video data, you can call `destroyCustomVideoTrack` to + * destroy the custom video track. 
+ * - If you only want to use the external video data for local preview and not publish it in the + * channel, you can call `muteLocalVideoStream` to cancel sending video stream or call + * `updateChannelMediaOptions` to set `publishCustomVideoTrack` to `false`. + * + * @param frame The external raw video frame to be pushed. See `ExternalVideoFrame`. + * @param videoTrackId The video track ID returned by calling the `createCustomVideoTrack` + * method.Note: If you only need to push one custom video source, set `videoTrackId` to 0. + * + * @return * - 0: Success. * - < 0: Failure. */ diff --git a/include/IAgoraMediaPlayer.h b/include/IAgoraMediaPlayer.h index eab4d3f..96a71ff 100644 --- a/include/IAgoraMediaPlayer.h +++ b/include/IAgoraMediaPlayer.h @@ -33,16 +33,28 @@ class IMediaPlayer : public RefCountInterface { virtual int initialize(base::IAgoraService* agora_service) = 0; /** - * Get unique media player id of the media player entity. + * @brief Gets the ID of the media player. + * * @return - * - >= 0: The source id of this media player entity. + * - Success. The ID of the media player. * - < 0: Failure. */ virtual int getMediaPlayerId() const = 0; /** - * Opens a media file with a specified URL. - * @param url The URL of the media file that you want to play. + * @brief Opens the media resource. + * + * @details + * Call timing: This method can be called either before or after joining the channel. + * Related callbacks: After calling this method, the SDK triggers the `onPlayerSourceStateChanged` + * callback. After receiving the report of the playback status as `PLAYER_STATE_OPEN_COMPLETED`, you + * can call the `play` method to play the media file. + * + * @note This method is called asynchronously. + * + * @param url The path of the media file. Both local path and online path are supported. + * @param startPos The starting position (ms) for playback. Default value is 0. + * * @return * - 0: Success. * - < 0: Failure. 
@@ -50,8 +62,19 @@ class IMediaPlayer : public RefCountInterface { virtual int open(const char* url, int64_t startPos) = 0; /** - * @brief Open a media file with a media file source. - * @param source Media file source that you want to play, see `MediaSource` + * @brief Opens a media file and configures the playback scenarios. + * + * @details + * This method supports opening media files of different sources, including a custom media source, + * and allows you to configure the playback scenarios. + * Call timing: You can call this method either before or after joining a channel. + * + * @note This method is called asynchronously. If you need to play a media file, make sure you + * receive the `onPlayerSourceStateChanged` callback reporting `PLAYER_STATE_OPEN_COMPLETED` before + * calling the `play` method to play the file. + * + * @param source Media resources. See `MediaSource`. + * * @return * - 0: Success. * - < 0: Failure. @@ -59,7 +82,14 @@ class IMediaPlayer : public RefCountInterface { virtual int openWithMediaSource(const media::base::MediaSource &source) = 0; /** - * Plays the media file. + * @brief Plays the media file. + * + * @details + * Call timing: - Call this method after calling `open` or `openWithMediaSource` opening a media + * file and receiving a `onPlayerSourceStateChanged` callback reporting the status as + * PLAYER_STATE_OPEN_COMPLETED. + * - Call the method after calling `seek`. + * * @return * - 0: Success. * - < 0: Failure. @@ -67,34 +97,72 @@ class IMediaPlayer : public RefCountInterface { virtual int play() = 0; /** - * Pauses playing the media file. + * @brief Pauses the playback. + * + * @details + * Call timing: You can call this method either before or after joining a channel. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int pause() = 0; /** - * Stops playing the current media file. + * @brief Stops playing the media track. 
+ * + * @details + * After calling this method to stop playback, if you want to play again, you need to call `open` or + * `openWithMediaSource` to open the media resource. + * Call timing: Call this method after play. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int stop() = 0; /** - * Resumes playing the media file. + * @brief Resumes playing the media file. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int resume() = 0; /** - * Sets the current playback position of the media file. + * @brief Seeks to a new playback position. + * + * @details + * - If you call `seek` after the playback has completed (upon receiving callback + * `onPlayerSourceStateChanged` reporting playback status as PLAYER_STATE_PLAYBACK_COMPLETED or + * PLAYER_STATE_PLAYBACK_ALL_LOOPS_COMPLETED ), the SDK will play the media file from the specified + * position. At this point, you will receive callback `onPlayerSourceStateChanged` reporting + * playback status as PLAYER_STATE_PLAYING. + * - If you call `seek` while the playback is paused, upon successful call of this method, the SDK + * will seek to the specified position. To resume playback, call `resume` or `play` . + * Call timing: You can call this method either before or after joining a channel. + * Related callbacks: After successfully calling this method, you will receive the `onPlayerEvent` + * callback, reporting the result of the seek operation to the new playback position. + * * @param newPos The new playback position (ms). + * * @return * - 0: Success. * - < 0: Failure. */ virtual int seek(int64_t newPos) = 0; - /** Sets the pitch of the current media file. - * @param pitch Sets the pitch of the local music file by chromatic scale. The default value is 0, - * which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value between - * consecutive values is a chromatic value. 
The greater the absolute value of this parameter, the - * higher or lower the pitch of the local music file. + /** + * @brief Sets the pitch of the current media resource. + * + * @note Call this method after calling `open`. + * + * @param pitch Sets the pitch of the local music file by the chromatic scale. The default value is + * 0, which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value + * between consecutive values is a chromatic value. The greater the absolute value of this + * parameter, the higher or lower the pitch of the local music file. * * @return * - 0: Success. @@ -103,8 +171,10 @@ class IMediaPlayer : public RefCountInterface { virtual int setAudioPitch(int pitch) = 0; /** - * Gets the duration of the media file. - * @param duration A reference to the duration of the media file. + * @brief Gets the duration of the media resource. + * + * @param duration An output parameter. The total duration (ms) of the media file. + * * @return * - 0: Success. * - < 0: Failure. @@ -112,24 +182,60 @@ class IMediaPlayer : public RefCountInterface { virtual int getDuration(int64_t& duration) = 0; /** - * Gets the current playback position of the media file. - * @param currentPosition A reference to the current playback position (ms). + * @brief Gets current local playback progress. + * + * @param pos The playback position (ms) of the audio effect file. + * * @return - * - 0: Success. - * - < 0: Failure. + * - Returns the current playback progress (ms) if the call succeeds. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int getPlayPosition(int64_t& pos) = 0; + /** + * @brief Gets the number of the media streams in the media resource. + * + * @note Call this method after you call `open` and receive the `onPlayerSourceStateChanged` + * callback reporting the state `PLAYER_STATE_OPEN_COMPLETED`. + * + * @param count An output parameter. The number of the media streams in the media resource. + * + * @return + * - 0: Success. 
+ * - < 0: Failure. See `MEDIA_PLAYER_REASON`. + */ virtual int getStreamCount(int64_t& count) = 0; + /** + * @brief Gets the detailed information of the media stream. + * + * @details + * Call timing: Call this method after calling `getStreamCount`. + * + * @param index The index of the media stream. This parameter needs to be less than the `count` + * parameter of `getStreamCount`. + * @param info An output parameter. The detailed information of the media stream. See + * `PlayerStreamInfo`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int getStreamInfo(int64_t index, media::base::PlayerStreamInfo* info) = 0; /** - * Sets whether to loop the media file for playback. - * @param loopCount the number of times looping the media file. - * - 0: Play the audio effect once. - * - 1: Play the audio effect twice. - * - -1: Play the audio effect in a loop indefinitely, until stopEffect() or stop() is called. + * @brief Sets the loop playback. + * + * @details + * If you want to loop, call this method and set the number of the loops. + * When the loop finishes, the SDK triggers `onPlayerSourceStateChanged` and reports the playback + * state as PLAYER_STATE_PLAYBACK_ALL_LOOPS_COMPLETED. + * + * @param loopCount The number of times the audio effect loops: + * - ≥0: Number of times for playing. For example, setting it to 0 means no loop playback, playing + * only once; setting it to 1 means loop playback once, playing a total of twice. + * - -1: Play the audio file in an infinite loop. + * * @return * - 0: Success. * - < 0: Failure. @@ -137,8 +243,17 @@ class IMediaPlayer : public RefCountInterface { virtual int setLoopCount(int loopCount) = 0; /** - * Change playback speed - * @param speed the value of playback speed ref [50-400] + * @brief Sets the playback speed. + * + * @details + * Call this method after calling `open`. + * + * @param speed The playback speed. 
Agora recommends that you set this to a value between 30 and + * 400, defined as follows: + * - 30: 0.3 times the original speed. + * - 100: The original speed. + * - 400: 4 times the original speed. + * * @return * - 0: Success. * - < 0: Failure. @@ -146,8 +261,18 @@ class IMediaPlayer : public RefCountInterface { virtual int setPlaybackSpeed(int speed) = 0; /** - * Slect playback audio track of the media file - * @param index the index of the audio track in media file + * @brief Selects the audio track used during playback. + * + * @details + * After getting the track index of the audio file, you can call this method to specify any track to + * play. For example, if different tracks of a multi-track file store songs in different languages, + * you can call this method to set the playback language. + * + * @note You need to call this method after calling `getStreamInfo` to get the audio stream index + * value. + * + * @param index The index of the audio track. + * * @return * - 0: Success. * - < 0: Failure. @@ -155,26 +280,46 @@ class IMediaPlayer : public RefCountInterface { virtual int selectAudioTrack(int index) = 0; /** - * Selects multi audio track of the media file for playback or publish to channel. - * @param playoutTrackIndex The index of the audio track in media file for local playback. - * @param publishTrackIndex The index of the audio track in the media file published to the remote. - * - * @note - * You can obtain the streamIndex of the audio track by calling getStreamInfo.. - * If you want to use selectMultiAudioTrack, you need to open the media file with openWithMediaSource and set enableMultiAudioTrack to true. + * @brief Selects the audio tracks that you want to play on your local device and publish to the + * channel respectively. + * + * @details + * You can call this method to determine the audio track to be played on your local device and + * published to the channel. 
+ * Before calling this method, you need to open the media file with the `openWithMediaSource` method + * and set `enableMultiAudioTrack` in `MediaSource` as `true`. + * Applicable scenarios: For example, in KTV scenarios, the host can choose to play the original + * sound locally and publish the accompaniment track to the channel. + * + * @param playoutTrackIndex The index of audio tracks for local playback. You can obtain the index + * through `getStreamInfo`. + * @param publishTrackIndex The index of audio tracks to be published in the channel. You can obtain + * the index through `getStreamInfo`. * * @return * - 0: Success. - * - < 0: Failure. See {@link media::base::MEDIA_PLAYER_REASON MEDIA_PLAYER_REASON}. - * - -2: Invalid argument. Argument must be greater than or equal to zero. - * - -8: Invalid State.You must open the media file with openWithMediaSource and set enableMultiAudioTrack to true + * - < 0: Failure. */ virtual int selectMultiAudioTrack(int playoutTrackIndex, int publishTrackIndex) = 0; /** - * change player option before play a file - * @param key the key of the option param - * @param value the value of option param + * @brief Sets media player options. + * + * @details + * The media player supports setting options through `key` and `value`. + * The difference between this method and `setPlayerOption(const char* key, const char* value)` is + * that the `value` parameter of + * this method is of type Int, while the `value` of `setPlayerOption(const char* key, const char* + * value)` is of type String. These + * two methods cannot be used together. + * Applicable scenarios: Scenarios that require technical previews or special customization + * features. In general, you do not need to call this method; you can simply use the default options + * provided by the media player. + * Call timing: Call this method before the `open` or `openWithMediaSource` method. + * + * @param key The key of the option. + * @param value The value of the key. 
+ * + * @return * - 0: Success. * - < 0: Failure. @@ -182,9 +327,23 @@ class IMediaPlayer : public RefCountInterface { virtual int setPlayerOption(const char* key, int value) = 0; /** - * change player option before play a file - * @param key the key of the option param - * @param value the value of option param + * @brief Sets media player options. + * + * @details + * The media player supports setting options through `key` and `value`. + * The difference between this method and `setPlayerOption(const char* key, int value)` is that the + * `value` parameter of + * this method is of type String, while the `value` of `setPlayerOption(const char* key, int value)` + * is of type Int. + * These two methods cannot be used together. + * Applicable scenarios: Scenarios that require technical previews or special customization + * features. In general, you do not need to call this method; you can simply use the default options + * provided by the media player. + * Call timing: Call this method before the `open` or `openWithMediaSource` method. + * + * @param key The key of the option. + * @param value The value of the key. + * + * @return * - 0: Success. * - < 0: Failure. @@ -217,79 +376,136 @@ class IMediaPlayer : public RefCountInterface { */ virtual int setExternalSubtitle(const char* url) = 0; + /** + * @brief Gets current playback state. + * + * @return + * The current playback state. See `MEDIA_PLAYER_STATE`. + */ virtual media::base::MEDIA_PLAYER_STATE getState() = 0; /** - * @brief Turn mute on or off + * @brief Sets whether to mute the media file. + * + * @details + * Call timing: You can call this method either before or after joining a channel. * - * @param muted Whether to mute on - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @param muted Whether to mute the media file: + * - `true`: Mute the media file. + * - `false`: (Default) Unmute the media file. + * + * @return + * - 0: Success. + * - < 0: Failure. 
*/ virtual int mute(bool muted) = 0; /** - * @brief Get mute state + * @brief Reports whether the media resource is muted. * - * @param[out] muted Whether is mute on - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @param muted An output parameter. Whether the media file is muted: + * - `true`: The media file is muted. + * - `false`: The media file is not muted. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int getMute(bool& muted) = 0; /** - * @brief Adjust playback volume + * @brief Adjusts the local playback volume. + * + * @details + * Call timing: This method can be called either before or after joining the channel. * - * @param volume The volume value to be adjusted - * The volume can be adjusted from 0 to 400: - * 0: mute; - * 100: original volume; - * 400: Up to 4 times the original volume (with built-in overflow protection). - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @param volume The local playback volume, which ranges from 0 to 100: + * - 0: Mute. + * - 100: (Default) The original volume. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int adjustPlayoutVolume(int volume) = 0; /** - * @brief Get the current playback volume + * @brief Gets the local playback volume. + * + * @param volume An output parameter. The local playback volume, which ranges from 0 to 100: + * - 0: Mute. + * - 100: (Default) The original volume. * - * @param[out] volume - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int getPlayoutVolume(int& volume) = 0; /** - * @brief adjust publish signal volume + * @brief Adjusts the volume of the media file for publishing. + * + * @details + * After connected to the Agora server, you can call this method to adjust the volume of the media + * file heard by the remote user. 
+ * Call timing: This method can be called either before or after joining the channel. * - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @param volume The volume, which ranges from 0 to 400: + * - 0: Mute. + * - 100: (Default) The original volume. + * - 400: Four times the original volume (amplifying the audio signals by four times). + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int adjustPublishSignalVolume(int volume) = 0; /** - * @brief get publish signal volume + * @brief Gets the volume of the media file for publishing. + * + * @param volume An output parameter. The remote playback volume. * - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int getPublishSignalVolume(int& volume) = 0; /** - * @brief Set video rendering view + * @brief Sets the view. + * + * @details + * Call timing: You can call this method either before or after joining a channel. + * + * @param view The render view. On Windows, this parameter sets the window handle (HWND). * - * @param view view object, windows platform is HWND - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setView(media::base::view_t view) = 0; /** - * @brief Set video display mode + * @brief Sets the render mode of the media player. + * + * @param renderMode Sets the render mode of the view. See `RENDER_MODE_TYPE`. * - * @param renderMode Video display mode - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setRenderMode(media::base::RENDER_MODE_TYPE renderMode) = 0; /** - * Registers a media player source observer. + * @brief Registers a media player observer. 
+ * + * @details + * Call timing: This method can be called either before or after joining the channel. + * + * @param observer The player observer, listening for events during the playback. See + * `IMediaPlayerSourceObserver`. * - * Once the media player source observer is registered, you can use the observer to monitor the state change of the media player. - * @param observer The pointer to the IMediaPlayerSourceObserver object. * @return * - 0: Success. * - < 0: Failure. @@ -297,8 +513,11 @@ class IMediaPlayer : public RefCountInterface { virtual int registerPlayerSourceObserver(IMediaPlayerSourceObserver* observer) = 0; /** - * Releases the media player source observer. - * @param observer The pointer to the IMediaPlayerSourceObserver object. + * @brief Releases a media player observer. + * + * @param observer The player observer, listening for events during the playback. See + * `IMediaPlayerSourceObserver`. + * * @return * - 0: Success. * - < 0: Failure. @@ -306,9 +525,16 @@ class IMediaPlayer : public RefCountInterface { virtual int unregisterPlayerSourceObserver(IMediaPlayerSourceObserver* observer) = 0; /** - * Register the audio frame observer. + * @brief Registers a PCM audio frame observer object. + * + * @details + * You need to implement the `IAudioPcmFrameSink` class in this method and register callbacks + * according to your scenarios. After you successfully register the audio frame observer, the SDK + * triggers the registered callbacks each time an audio frame is received. + * + * @param observer The audio frame observer, reporting the reception of each audio frame. See + * `IAudioPcmFrameSink`. * - * @param observer The pointer to the IAudioFrameObserver object. * @return * - 0: Success. * - < 0: Failure. @@ -316,13 +542,12 @@ class IMediaPlayer : public RefCountInterface { virtual int registerAudioFrameObserver(media::IAudioPcmFrameSink* observer) = 0; /** - * Registers an audio observer. + * @brief Registers an audio frame observer object. 
+ * + * @param observer The audio frame observer, reporting the reception of each audio frame. See + * `IAudioPcmFrameSink`. + * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`. * - * @param observer The audio observer, reporting the reception of each audio - * frame. See - * \ref media::IAudioPcmFrameSink "IAudioFrameObserver" for - * details. - * @param mode Use mode of the audio frame. See #RAW_AUDIO_FRAME_OP_MODE_TYPE. * @return * - 0: Success. * - < 0: Failure. @@ -331,8 +556,10 @@ class IMediaPlayer : public RefCountInterface { RAW_AUDIO_FRAME_OP_MODE_TYPE mode) = 0; /** - * Releases the audio frame observer. - * @param observer The pointer to the IAudioFrameObserver object. + * @brief Unregisters an audio frame observer. + * + * @param observer The audio observer. See `IAudioPcmFrameSink`. + * * @return * - 0: Success. * - < 0: Failure. @@ -340,18 +567,31 @@ class IMediaPlayer : public RefCountInterface { virtual int unregisterAudioFrameObserver(media::IAudioPcmFrameSink* observer) = 0; /** - * @brief Register the player video observer + * @brief Registers a video frame observer object. + * + * @details + * You need to implement the `IVideoFrameObserver` class in this method and register callbacks + * according to your scenarios. After you successfully register the video frame observer, the SDK + * triggers the registered callbacks each time a video frame is received. * - * @param observer observer object - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @param observer The video observer, reporting the reception of each video frame. See + * `IVideoFrameObserver`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int registerVideoFrameObserver(media::base::IVideoFrameObserver* observer) = 0; /** - * @brief UnRegister the player video observer + * @brief Unregisters the video frame observer. 
+ * + * @param observer The video observer, reporting the reception of each video frame. See + * `IVideoFrameObserver`. * - * @param observer observer object - * @return int < 0 on behalf of an error, the value corresponds to one of MEDIA_PLAYER_REASON + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int unregisterVideoFrameObserver(agora::media::base::IVideoFrameObserver* observer) = 0; @@ -378,9 +618,26 @@ class IMediaPlayer : public RefCountInterface { virtual int unregisterMediaPlayerAudioSpectrumObserver(media::IAudioSpectrumObserver* observer) = 0; /** - * @brief Set dual-mono output mode of the music file. - * - * @param mode dual mono mode. See #agora::media::AUDIO_DUAL_MONO_MODE + * @brief Sets the channel mode of the current audio file. + * + * @details + * In a stereo music file, the left and right channels can store different audio data. According to + * your needs, you can set the channel mode to original mode, left channel mode, right channel mode, + * or mixed channel mode. For example, in the KTV scenario, the left channel of the music file + * stores the musical accompaniment, and the right channel stores the singing voice. If you only + * need to listen to the accompaniment, call this method to set the channel mode of the music file + * to left channel mode; if you need to listen to the accompaniment and the singing voice at the + * same time, call this method to set the channel mode to mixed channel mode. + * + * @note + * - Call this method after calling `open`. + * - This method only applies to stereo audio files. + * + * @param mode The channel mode. See `AUDIO_DUAL_MONO_MODE`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setAudioDualMonoMode(agora::media::base::AUDIO_DUAL_MONO_MODE mode) = 0; @@ -393,15 +650,19 @@ class IMediaPlayer : public RefCountInterface { virtual const char* getPlayerSdkVersion() = 0; /** - * Get the current play src. + * @brief Gets the path of the media resource being played. 
+ * * @return - * - current play src of raw bytes. + * The path of the media resource being played. */ virtual const char* getPlaySrc() = 0; /** * Open the Agora CDN media source. + * + * @deprecated 4.6.0 + * * @param src The src of the media file that you want to play. * @param startPos The playback position (ms). * @return @@ -412,6 +673,9 @@ class IMediaPlayer : public RefCountInterface { /** * Gets the number of Agora CDN lines. + * + * @deprecated 4.6.0 + * * @return * - > 0: number of CDN. * - <= 0: Failure. @@ -420,6 +684,9 @@ class IMediaPlayer : public RefCountInterface { /** * Switch Agora CDN lines. + * + * @deprecated 4.6.0 + * * @param index Specific CDN line index. * @return * - 0: Success. @@ -429,6 +696,9 @@ class IMediaPlayer : public RefCountInterface { /** * Gets the line of the current CDN. + * + * @deprecated 4.6.0 + * * @return * - >= 0: Specific line. * - < 0: Failure. @@ -437,6 +707,9 @@ class IMediaPlayer : public RefCountInterface { /** * Enable automatic CDN line switching. + * + * @deprecated 4.6.0 + * * @param enable Whether enable. * @return * - 0: Success. @@ -446,6 +719,9 @@ class IMediaPlayer : public RefCountInterface { /** * Update the CDN source token and timestamp. + * + * @deprecated 4.6.0 + * * @param token token. * @param ts ts. * @return @@ -456,6 +732,9 @@ class IMediaPlayer : public RefCountInterface { /** * Switch the CDN source when open a media through "openWithAgoraCDNSrc" API + * + * @deprecated 4.6.0 + * * @param src Specific src. * @param syncPts Live streaming must be set to false. * @return @@ -465,9 +744,34 @@ class IMediaPlayer : public RefCountInterface { virtual int switchAgoraCDNSrc(const char* src, bool syncPts = false) = 0; /** - * Switch the media source when open a media through "open" API - * @param src Specific src. - * @param syncPts Live streaming must be set to false. + * @brief Switches the media resource being played. 
+ * + * @details + * You can call this method to switch the media resource to be played according to the current + * network status. For example: + * - When the network is poor, the media resource to be played is switched to a media resource + * address with a lower bitrate. + * - When the network is good, the media resource to be played is switched to a media resource + * address with a higher bitrate. + * After calling this method, if you receive the `onPlayerEvent` callback report the + * `PLAYER_EVENT_SWITCH_COMPLETE` event, the switching is successful. If the switching fails, the + * SDK will automatically retry 3 times. If it still fails, you will receive the `onPlayerEvent` + * callback reporting the `PLAYER_EVENT_SWITCH_ERROR` event indicating an error occurred during + * media resource switching. + * + * @note + * - Ensure that you call this method after `open`. + * - To ensure normal playback, pay attention to the following when calling this method: + * - Do not call this method when playback is paused. + * - Do not call the `seek` method during switching. + * - Before switching the media resource, make sure that the playback position does not exceed the + * total duration of the media resource to be switched. + * + * @param src The URL of the media resource. + * @param syncPts Whether to synchronize the playback position (ms) before and after the switch: + * - `true`: Synchronize the playback position before and after the switch. + * - `false`: (Default) Do not synchronize the playback position before and after the switch. + * * @return * - 0: Success. * - < 0: Failure. @@ -475,9 +779,27 @@ class IMediaPlayer : public RefCountInterface { virtual int switchSrc(const char* src, bool syncPts = true) = 0; /** - * Preload a media source - * @param src Specific src. - * @param startPos The starting position (ms) for playback. Default value is 0. + * @brief Preloads a media resource. 
+ * + * @details + * You can call this method to preload a media resource into the playlist. If you need to preload + * multiple media resources, you can call this method multiple times. + * After calling this method, if you receive the `PLAYER_PRELOAD_EVENT_COMPLETE` event in the + * `onPreloadEvent` callback, the preload is successful; If you receive the + * `PLAYER_PRELOAD_EVENT_ERROR` event in the `onPreloadEvent` callback, the preload fails. + * If the preload is successful and you want to play the media resource, call `playPreloadedSrc`; if + * you want to clear the playlist, call `stop`. + * + * @note + * - Before calling this method, ensure that you have called `open` or `openWithMediaSource` to open + * the media resource successfully. + * - Agora does not support preloading duplicate media resources to the playlist. However, you can + * preload the media resources that are being played to the playlist again. + * + * @param src The URL of the media resource. + * @param startPos The starting position (ms) for playing after the media resource is preloaded to + * the playlist. When preloading a live stream, set this parameter to 0. + * * @return * - 0: Success. * - < 0: Failure. @@ -485,8 +807,24 @@ class IMediaPlayer : public RefCountInterface { virtual int preloadSrc(const char* src, int64_t startPos) = 0; /** - * Play a pre-loaded media source - * @param src Specific src. + * @brief Plays preloaded media resources. + * + * @details + * After calling the `preloadSrc` method to preload the media resource into the playlist, you can + * call this method to play the preloaded media resource. After calling this method, if you receive + * the `onPlayerSourceStateChanged` callback which reports the `PLAYER_STATE_PLAYING` state, the + * playback is successful. + * If you want to change the preloaded media resource to be played, you can call this method again + * and specify the URL of the new media resource that you want to preload. 
If you want to replay the + * media resource, you need to call `preloadSrc` to preload the media resource to the playlist again + * before playing. If you want to clear the playlist, call the `stop` method. + * + * @note If you call this method when playback is paused, this method does not take effect until + * playback is resumed. + * + * @param src The URL of the media resource in the playlist must be consistent with the `src` set by + * the `preloadSrc` method; otherwise, the media resource cannot be played. + * * @return * - 0: Success. * - < 0: Failure. @@ -494,8 +832,12 @@ class IMediaPlayer : public RefCountInterface { virtual int playPreloadedSrc(const char* src) = 0; /** - * Unload a preloaded media source - * @param src Specific src. + * @brief Unloads media resources that are preloaded. + * + * @note This method cannot release the media resource being played. + * + * @param src The URL of the media resource. + * * @return * - 0: Success. * - < 0: Failure. @@ -503,11 +845,17 @@ class IMediaPlayer : public RefCountInterface { virtual int unloadSrc(const char* src) = 0; /** - * Set spatial audio params for the music file. It can be called after the media player - * was created. + * @brief Enables or disables the spatial audio effect for the media player. + * + * @details + * After successfully setting the spatial audio effect parameters of the media player, the SDK + * enables the spatial audio effect for the media player, and the local user can hear the media + * resources with a sense of space. + * If you need to disable the spatial audio effect for the media player, set the `params` parameter + * to null. + * + * @param params The spatial audio effect parameters of the media player. See `SpatialAudioParams`. * - * @param params See #agora::SpatialAudioParams. If it's - * not set, then the spatial audio will be disabled; or it will be enabled. * @return * - 0: Success. * - < 0: Failure. 
@@ -529,6 +877,15 @@ class IMediaPlayer : public RefCountInterface { */ virtual int setSoundPositionParams(float pan, float gain) = 0; + /** + * @brief Gets the audio buffer delay when playing the media file. + * @param[out] delayMs The audio buffer delay, in milliseconds. + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int getAudioBufferDelay(int32_t& delayMs) = 0; + }; /** @@ -538,89 +895,139 @@ class IMediaPlayer : public RefCountInterface { class IMediaPlayerCacheManager { public: /** - * Delete the longest used cache file in order to release some of the cache file disk usage. - * (usually used when the cache quota notification is received) - * + * @brief Deletes all cached media files in the media player. + * + * @note The cached media file currently being played will not be deleted. + * + * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int removeAllCaches() = 0; /** - * Remove the latest media resource cache file. + * @brief Deletes a cached media file that is the least recently used. + * + * @details + * You can call this method to delete a cached media file when the storage space for the cached + * files is about to reach its limit. After you call this method, the SDK deletes the cached media + * file that is least used. + * + * @note The cached media file currently being played will not be deleted. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int removeOldCache() = 0; /** - * Remove the cache file by uri, setting by MediaSource. - * @param uri URI,identify the uniqueness of the property, Set from `MeidaSource` + * @brief Deletes a cached media file. + * + * @note The cached media file currently being played will not be deleted. + * + * @param uri The URI (Uniform Resource Identifier) of the media file to be deleted. + * + * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`.
*/ virtual int removeCacheByUri(const char *uri) = 0; /** - * Set cache file path that files will be saved to. - * @param path file path. + * @brief Sets the storage path for the media files that you want to cache. + * + * @note Make sure `IRtcEngine` is initialized before you call this method. + * + * @param path The absolute path of the media files to be cached. Ensure that the directory for the + * media files exists and is writable. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int setCacheDir(const char *path) = 0; /** - * Set the maximum number of cached files. - * @param count maximum number of cached files. + * @brief Sets the maximum number of media files that can be cached. + * + * @param count The maximum number of media files that can be cached. The default value is 1,000. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int setMaxCacheFileCount(int count) = 0; /** - * Set the maximum size of cache file disk usage. - * @param cacheSize total size of the largest cache file. + * @brief Sets the maximum size of the aggregate storage space for cached media files. + * + * @param cacheSize The maximum size (bytes) of the aggregate storage space for cached media files. + * The default value is 1 GB. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int setMaxCacheFileSize(int64_t cacheSize) = 0; /** - * Whether to automatically delete old cache files when the cache file usage reaches the limit. - * @param enable enable the player to automatically clear the cache. + * @brief Sets whether to delete cached media files automatically. + * + * @details + * If you enable this function to remove cached media files automatically, when the cached media + * files exceed either the number or size limit you set, the SDK automatically deletes the least + * recently used cache file. 
+ * + * @param enable Whether to enable the SDK to delete cached media files automatically: + * - `true`: Delete cached media files automatically. + * - `false`: (Default) Do not delete cached media files automatically. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int enableAutoRemoveCache(bool enable) = 0; /** - * Get the cache directory. - * @param path cache path, recieve a pointer to be copied to. - * @param length the length to be copied. + * @brief Gets the storage path of the cached media files. + * + * @details + * If you have not called the `setCacheDir` method to set the storage path for the media files to be + * cached before calling this method, you get the default storage path used by the SDK. + * + * @param path An output parameter; the storage path for the media file to be cached. + * @param length An input parameter; the maximum length of the cache file storage path string. Set + * it according to the length of the cache file storage path string you obtained from `path`. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int getCacheDir(char* path, int length) = 0; /** - * Get the maximum number of cached files. + * @brief Gets the maximum number of media files that can be cached. + * + * @details + * By default, the maximum number of media files that can be cached is 1,000. + * * @return - * > 0: file count. - * - < 0: Failure. + * - > 0: The call succeeds and returns the maximum number of media files that can be cached. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int getMaxCacheFileCount() = 0; /** - * Get the total size of the largest cache file + * @brief Gets the maximum size of the aggregate storage space for cached media files. + * + * @details + * By default, the maximum size of the aggregate storage space for cached media files is 1 GB.
You + * can call the `setMaxCacheFileSize` method to set the limit according to your scenarios. + * * @return - * > 0: file size. - * - < 0: Failure. + * - > 0: The call succeeds and returns the maximum size (in bytes) of the aggregate storage space + * for cached media files. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int64_t getMaxCacheFileSize() = 0; /** - * Get the number of all cache files. + * @brief Gets the number of media files that are cached. + * * @return - * > 0: file count. - * - < 0: Failure. + * - ≥ 0: The call succeeds and returns the number of media files that are cached. + * - < 0: Failure. See `MEDIA_PLAYER_REASON`. */ virtual int getCacheFileCount() = 0; @@ -630,4 +1037,18 @@ class IMediaPlayerCacheManager { } //namespace rtc } // namespace agora +/** + * @brief Gets one `IMediaPlayerCacheManager` instance. + * + * @details + * Before calling any APIs in the `IMediaPlayerCacheManager` class, you need to call this method to + * get a cache manager instance of a media player. + * Call timing: Make sure the `IRtcEngine` is initialized before you call this method. + * + * @note The cache manager is a singleton pattern. Therefore, multiple calls to this method returns + * the same instance. + * + * @return + * The `IMediaPlayerCacheManager` instance. + */ AGORA_API agora::rtc::IMediaPlayerCacheManager* AGORA_CALL getMediaPlayerCacheManager(); diff --git a/include/IAgoraMediaPlayerSource.h b/include/IAgoraMediaPlayerSource.h index 964eefd..ea40bab 100644 --- a/include/IAgoraMediaPlayerSource.h +++ b/include/IAgoraMediaPlayerSource.h @@ -273,6 +273,9 @@ class IMediaPlayerSource : public RefCountInterface { * Open the Agora CDN media source. * @param src The src of the media file that you want to play. * @param startPos The playback position (ms). + * + * @deprecated 4.6.0 + * * @return * - 0: Success. * - < 0: Failure. @@ -281,6 +284,9 @@ class IMediaPlayerSource : public RefCountInterface { /** * Gets the number of Agora CDN lines. 
+ * + * @deprecated 4.6.0 + * * @return * - > 0: number of CDN. * - <= 0: Failure. @@ -290,6 +296,9 @@ class IMediaPlayerSource : public RefCountInterface { /** * Switch Agora CDN lines. + * + * @deprecated 4.6.0 + * * @param index Specific CDN line index. * @return * - 0: Success. @@ -299,6 +308,9 @@ class IMediaPlayerSource : public RefCountInterface { /** * Gets the line of the current CDN. + * + * @deprecated 4.6.0 + * * @return * - >= 0: Specific line. * - < 0: Failure. @@ -307,6 +319,9 @@ class IMediaPlayerSource : public RefCountInterface { /** * Enable automatic CDN line switching. + * + * @deprecated 4.6.0 + * * @param enable Whether enable. * @return * - 0: Success. @@ -316,6 +331,9 @@ class IMediaPlayerSource : public RefCountInterface { /** * Update the CDN source token and timestamp. + * + * @deprecated 4.6.0 + * * @param token token. * @param ts ts. * @return @@ -326,6 +344,9 @@ class IMediaPlayerSource : public RefCountInterface { /** * Switch the CDN source when open a media through "openWithAgoraCDNSrc" API + * + * @deprecated 4.6.0 + * * @param src Specific src. * @param syncPts Live streaming must be set to false. * @return @@ -382,58 +403,83 @@ class IMediaPlayerSourceObserver { virtual ~IMediaPlayerSourceObserver() {} /** - * @brief Reports the playback state change. + * @brief Reports the changes of playback state. + * + * @details + * When the state of the media player changes, the SDK triggers this callback to report the current + * playback state. + * + * @param state The playback state. See `MEDIA_PLAYER_STATE`. + * @param reason The reason for the changes in the media player status. See `MEDIA_PLAYER_REASON`. * - * When the state of the playback changes, the SDK triggers this callback to report the new playback state and the reason or error for the change. - * @param state The new playback state after change. See {@link media::base::MEDIA_PLAYER_STATE MEDIA_PLAYER_STATE}. - * @param reason The player's error code. 
See {@link media::base::MEDIA_PLAYER_REASON MEDIA_PLAYER_REASON}. */ virtual void onPlayerSourceStateChanged(media::base::MEDIA_PLAYER_STATE state, media::base::MEDIA_PLAYER_REASON reason) = 0; /** - * @brief Reports current playback progress. + * @brief Reports the playback progress of the media file. + * + * @details + * When playing media files, the SDK triggers this callback every two seconds to report current + * playback progress. + * + * @param positionMs The playback position (ms) of media files. + * @param timestampMs The NTP timestamp (ms) of the current playback progress. * - * The callback occurs once every one second during the playback and reports the current playback progress. - * @param positionMs Current playback progress (milisecond). - * @param timestampMs Current NTP(Network Time Protocol) time (milisecond). */ virtual void onPositionChanged(int64_t positionMs, int64_t timestampMs) = 0; /** - * @brief Reports the playback event. + * @brief Reports the player events. + * + * @details + * - After calling the `seek` method, the SDK triggers the callback to report the results of the + * seek operation. * - * - After calling the `seek` method, the SDK triggers the callback to report the results of the seek operation. - * - After calling the `selectAudioTrack` method, the SDK triggers the callback to report that the audio track changes. + * @param eventCode The player event. See `MEDIA_PLAYER_EVENT`. + * @param elapsedTime The time (ms) when the event occurs. + * @param message Information about the event. * - * @param eventCode The playback event. See {@link media::base::MEDIA_PLAYER_EVENT MEDIA_PLAYER_EVENT}. - * @param elapsedTime The playback elapsed time. - * @param message The playback message. */ virtual void onPlayerEvent(media::base::MEDIA_PLAYER_EVENT eventCode, int64_t elapsedTime, const char* message) = 0; /** - * @brief Occurs when the metadata is received. + * @brief Occurs when the media metadata is received.
+ * + * @details + * The callback occurs when the player receives the media metadata and reports the detailed + * information of the media metadata. * - * The callback occurs when the player receives the media metadata and reports the detailed information of the media metadata. * @param data The detailed data of the media metadata. * @param length The data length (bytes). + * */ virtual void onMetaData(const void* data, int length) = 0; /** - * @brief Triggered when play buffer updated, once every 1 second + * @brief Reports the playback duration that the buffered data can support. + * + * @details + * When playing online media resources, the SDK triggers this callback every two seconds to report + * the playback duration that the currently buffered data can support. + * - When the playback duration supported by the buffered data is less than the threshold (0 by + * default), the SDK returns `PLAYER_EVENT_BUFFER_LOW` (6). + * - When the playback duration supported by the buffered data is greater than the threshold (0 by + * default), the SDK returns `PLAYER_EVENT_BUFFER_RECOVER` (7). + * + * @param playCachedBuffer The playback duration (ms) that the buffered data can support. * - * @param int cached buffer during playing, in milliseconds */ virtual void onPlayBufferUpdated(int64_t playCachedBuffer) = 0; /** - * @brief Triggered when the player preloadSrc + * @brief Reports the events of preloaded media resources. + * + * @param src The URL of the media resource. + * @param event Events that occur when media resources are preloaded. See `PLAYER_PRELOAD_EVENT`. * - * @param event */ virtual void onPreloadEvent(const char* src, media::base::PLAYER_PRELOAD_EVENT event) = 0; @@ -444,47 +490,72 @@ class IMediaPlayerSourceObserver { /** * @brief AgoraCDN Token has expired and needs to be set up with renewAgoraCDNSrcToken(const char* src). 
+ * + * @deprecated 4.6.0 + * */ virtual void onAgoraCDNTokenWillExpire() = 0; /** - * @brief Reports current playback source bitrate changed. - * @brief Reports current playback source info changed. + * @brief Occurs when the video bitrate of the media resource changes. + * + * @param from Information about the video bitrate of the media resource being played. See + * `SrcInfo`. + * @param to Information about the changed video bitrate of media resource being played. See + * `SrcInfo`. * - * @param from Streaming media information before the change. - * @param to Streaming media information after the change. */ virtual void onPlayerSrcInfoChanged(const media::base::SrcInfo& from, const media::base::SrcInfo& to) = 0; - /** - * @brief Triggered when media player information updated. + /** + * @brief Occurs when information related to the media player changes. + * + * @details + * When the information about the media player changes, the SDK triggers this callback. You can use + * this callback for troubleshooting. + * + * @param info Information related to the media player. See `PlayerUpdatedInfo`. * - * @param info Include information of media player. */ virtual void onPlayerInfoUpdated(const media::base::PlayerUpdatedInfo& info) = 0; - /** - * @brief Triggered every 1 second, reports the statistics of the files being cached. - * - * @param stats Cached file statistics. + /** + * @brief Reports the statistics of the media file being cached. + * + * @details + * After you call the `openWithMediaSource` method and set `enableCache` as `true`, the SDK triggers + * this callback once per second to report the statistics of the media file being cached. + * + * @param stats The statistics of the media file being cached. See `CacheStatistics`. + * */ virtual void onPlayerCacheStats(const media::base::CacheStatistics& stats) { (void)stats; } - /** - * @brief Triggered every 1 second, reports the statistics of the media stream being played. 
- * - * @param stats The statistics of the media stream. + /** + * @brief Reports the statistics of the media file being played. + * + * @details + * The SDK triggers this callback once per second to report the statistics of the media file being + * played. + * + * @param stats The statistics of the media file. See `PlayerPlaybackStats`. + * */ virtual void onPlayerPlaybackStats(const media::base::PlayerPlaybackStats& stats) { (void)stats; } /** - * @brief Triggered every 200 millisecond ,update player current volume range [0,255] + * @brief Reports the volume of the media player. + * + * @details + * The SDK triggers this callback every 200 milliseconds to report the current volume of the media + * player. + * + * @param volume The volume of the media player. The value ranges from 0 to 255. * - * @param volume volume of current player. */ virtual void onAudioVolumeIndication(int volume) = 0; }; diff --git a/include/IAgoraMediaRecorder.h b/include/IAgoraMediaRecorder.h index 33f5a30..3cd8322 100644 --- a/include/IAgoraMediaRecorder.h +++ b/include/IAgoraMediaRecorder.h @@ -7,7 +7,6 @@ #include "AgoraBase.h" #include "AgoraMediaBase.h" -#include "IAgoraRtcEngineEx.h" namespace agora { namespace rtc { @@ -18,70 +17,82 @@ class IMediaRecorder : public RefCountInterface { public: /** - * Registers the IMediaRecorderObserver object. + * @brief Registers the `IMediaRecorderObserver` observer. * * @since v4.0.0 * - * @note Call this method before the startRecording method. + * @details + * This method sets the callback for audio and video recording, so the app can be notified of + * recording status and information during the recording process. + * Before calling this method, make sure that: + * - The `IRtcEngine` object has been created and initialized. + * - The media recorder object has been created using `createMediaRecorder`. * - * @param callback The callbacks for recording audio and video streams. See \ref IMediaRecorderObserver.
+ * @param callback Callback for audio and video stream recording. See `IMediaRecorderObserver`. * * @return - * - 0(ERR_OK): Success. - * - < 0: Failure: + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int setMediaRecorderObserver(media::IMediaRecorderObserver* callback) = 0; /** - * Starts recording the local or remote audio and video. + * @brief Starts audio and video stream recording. * * @since v4.0.0 * - * After successfully calling \ref IRtcEngine::createMediaRecorder "createMediaRecorder" to get the media recorder object - * , you can call this method to enable the recording of the local audio and video. - * - * This method can record the following content: - * - The audio captured by the local microphone and encoded in AAC format. - * - The video captured by the local camera and encoded by the SDK. - * - The audio received from remote users and encoded in AAC format. - * - The video received from remote users. - * - * The SDK can generate a recording file only when it detects the recordable audio and video streams; when there are - * no audio and video streams to be recorded or the audio and video streams are interrupted for more than five - * seconds, the SDK stops recording and triggers the - * \ref IMediaRecorderObserver::onRecorderStateChanged "onRecorderStateChanged" (RECORDER_STATE_ERROR, RECORDER_ERROR_NO_STREAM) - * callback. + * @details + * This method starts recording audio and video streams. The Agora SDK supports recording both local + * and remote users' audio and video streams simultaneously. + * Before starting the recording, make sure that: + * - You have created the media recorder object using `createMediaRecorder`. + * - You have registered a recorder observer using `setMediaRecorderObserver` to listen for + * recording callbacks. + * - You have joined a channel. 
+ * This method supports recording the following data: + * - Audio captured from the microphone in AAC encoding format. + * - Video captured from the camera in H.264 or H.265 encoding format. + * After recording starts, if the video resolution changes during recording, the SDK stops the + * recording. If the audio sample rate or number of channels changes, the SDK continues recording + * and generates a single MP4 file. + * A recording file is only successfully generated when a recordable audio or video stream is + * detected. If there is no recordable stream, or if the stream is interrupted for more than 5 + * seconds during recording, the SDK stops the recording and triggers the + * `onRecorderStateChanged` (`RECORDER_STATE_ERROR, RECORDER_REASON_NO_STREAM`) callback. * - * @note Call this method after joining the channel. + * @note + * - If you want to record local audio and video streams, make sure the local user role is set to + * broadcaster before starting recording. + * - If you want to record remote audio and video streams, make sure you have subscribed to the + * remote user's streams before starting recording. * - * @param config The recording configurations. See MediaRecorderConfiguration. + * @param config Audio and video stream recording configuration. See `MediaRecorderConfiguration`. * * @return - * - 0(ERR_OK): Success. - * - < 0: Failure: - * - `-1(ERR_FAILED)`: IRtcEngine does not support the request because the remote user did not subscribe to the target channel or the media streams published by the local user during remote recording. - * - `-2(ERR_INVALID_ARGUMENT)`: The parameter is invalid. Ensure the following: - * - The specified path of the recording file exists and is writable. - * - The specified format of the recording file is supported. - * - The maximum recording duration is correctly set. - * - During remote recording, ensure the user whose media streams you want record did join the channel. 
- * - `-4(ERR_NOT_SUPPORTED)`: IRtcEngine does not support the request due to one of the following reasons: - * - The recording is ongoing. - * - The recording stops because an error occurs. - * - No \ref IMediaRecorderObserver object is registered. + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + * - -2: Invalid parameter. Please ensure that: + * - The specified recording file path is correct and writable. + * - The specified recording file format is correct. + * - The maximum recording duration is set correctly. + * - -4: `IRtcEngine` is in a state that does not support this operation. This may be because a + * recording is already in progress or has stopped due to an error. + * - -7: `IRtcEngine` is not initialized when this method is called. Please make sure the + * `IMediaRecorder` object has been created before calling this method. */ virtual int startRecording(const media::MediaRecorderConfiguration& config) = 0; /** - * Stops recording the audio and video. + * @brief Stops audio and video stream recording. * * @since v4.0.0 * - * @note After calling \ref IMediaRecorder::startRecording "startRecording", if you want to stop the recording, - * you must call `stopRecording`; otherwise, the generated recording files might not be playable. - * + * @note After calling `startRecording`, you must call this method to stop the recording; otherwise, + * the generated recording file may not play properly. * * @return - * - 0(ERR_OK): Success. + * - 0: Success. * - < 0: Failure: + * - -7: `IRtcEngine` is not initialized when this method is called. Please make sure the + * `IMediaRecorder` object has been created before calling this method. 
*/ virtual int stopRecording() = 0; }; diff --git a/include/IAgoraMediaStreamingSource.h b/include/IAgoraMediaStreamingSource.h index 7a0dbdf..244c559 100644 --- a/include/IAgoraMediaStreamingSource.h +++ b/include/IAgoraMediaStreamingSource.h @@ -23,6 +23,8 @@ class IMediaStreamingSourceObserver; /** * @brief The error code of streaming source + * + * @deprecated Since version 4.6.0, this feature has been turned off by default. * */ enum STREAMING_SRC_ERR { @@ -54,6 +56,8 @@ enum STREAMING_SRC_ERR { /** * @brief The state machine of Streaming Source + * + * @deprecated Since version 4.6.0, this feature has been turned off by default. * */ enum STREAMING_SRC_STATE { @@ -69,6 +73,8 @@ enum STREAMING_SRC_STATE { /** * @brief The input SEI data + * + * @deprecated Since version 4.6.0, this feature has been turned off by default. * */ struct InputSeiData { @@ -85,6 +91,8 @@ struct InputSeiData { * @brief The IMediaStreamingSource class provides access to a media streaming source demuxer. * To playout multiple stream sources simultaneously, * create multiple media stream source objects. + * + * @deprecated Since version 4.6.0, this feature has been turned off by default. */ class IMediaStreamingSource : public RefCountInterface { public: @@ -270,6 +278,8 @@ class IMediaStreamingSource : public RefCountInterface { /** * @brief This observer interface of media streaming source + * + * @deprecated Since version 4.6.0, this feature has been turned off by default. */ class IMediaStreamingSourceObserver { public: diff --git a/include/IAgoraMusicContentCenter.h b/include/IAgoraMusicContentCenter.h index 76002d6..6e03973 100644 --- a/include/IAgoraMusicContentCenter.h +++ b/include/IAgoraMusicContentCenter.h @@ -14,128 +14,159 @@ namespace agora { namespace rtc { /** - * Modes for playing songs. + * @brief Playback mode of a music resource. */ typedef enum { /** - * 0: The music player is in the origin mode, which means playing the original song. + * 0: Original vocals. 
*/ kMusicPlayModeOriginal = 0, /** - * 1: The music player is in the accompany mode, which means playing the accompaniment only. + * 1: Accompaniment. */ kMusicPlayModeAccompany = 1, /** - * 2: The music player is in the lead sing mode, which means playing the lead vocals. + * 2: Vocal guide. */ kMusicPlayModeLeadSing = 2, } MusicPlayMode; +/** + * @brief Loading state of a music resource. + */ typedef enum { /** - * 0: No error occurs and preload succeeds. + * 0: Music resource loading completed. */ kPreloadStateCompleted = 0, /** - * 1: A general error occurs. + * 1: Music resource loading failed. */ kPreloadStateFailed = 1, /** - * 2: The media file is preloading. + * 2: Music resource is currently loading. */ kPreloadStatePreloading = 2, - /** - * 3: The media file is removed. + /** + * 3: Cached music resource has been removed. */ kPreloadStateRemoved = 3, } PreloadState; +/** + * @brief Request status codes for the Music Content Center. + */ typedef enum { /** - * 0: No error occurs and request succeeds. + * 0: Request succeeded. */ kMusicContentCenterReasonOk = 0, /** - * 1: A general error occurs. + * 1: General error with no specific cause. */ kMusicContentCenterReasonError = 1, /** - * 2: The gateway error. There are several possible reasons: - * - Token is expired. Check if your token is expired. - * - Token is invalid. Check the type of token you passed in. - * - Network error. Check your network. + * 2: Gateway error. Possible reasons include: + * - The current token has expired. Please regenerate the token. + * - The token provided is invalid. Make sure you are using an RTM token. + * - Network error. Please check your connection. */ kMusicContentCenterReasonGateway = 2, /** - * 3: Permission and resource error. There are several possible reasons: - * - Your appid may not have the mcc permission. Please contact technical support - * - The resource may not exist. Please contact technical support + * 3: Permission error or music resource does not exist. 
Make sure your project has Music Content + * Center enabled. Please `contact technical support`. */ kMusicContentCenterReasonPermissionAndResource = 3, /** - * 4: Internal data parse error. Please contact technical support + * 4: Internal data parsing error. Please `contact technical support`. */ kMusicContentCenterReasonInternalDataParse = 4, /** - * 5: Music loading error. Please contact technical support + * 5: Error occurred while loading the music resource. Please `contact technical support`. */ kMusicContentCenterReasonMusicLoading = 5, /** - * 6: Music decryption error. Please contact technical support + * 6: Error occurred while decrypting the music resource. Please `contact technical support`. */ kMusicContentCenterReasonMusicDecryption = 6, /** - * 7: Http internal error. Please retry later. + * 7: Internal HTTP error. Please try again later. */ kMusicContentCenterReasonHttpInternalError = 7, } MusicContentCenterStateReason; +/** + * @brief Detailed information about a music chart. + */ typedef struct { /** - * Name of the music chart + * Name of the chart. */ const char* chartName; /** - * Id of the music chart, which is used to get music list + * ID of the music chart. */ int32_t id; } MusicChartInfo; +/** + * @brief Cache status of a music resource. + */ enum MUSIC_CACHE_STATUS_TYPE { /** - * 0: Music is already cached. + * 0: The music resource is cached. */ MUSIC_CACHE_STATUS_TYPE_CACHED = 0, /** - * 1: Music is being cached. + * 1: The music resource is being cached. */ MUSIC_CACHE_STATUS_TYPE_CACHING = 1 }; +/** + * @brief Information about a cached music resource. + */ struct MusicCacheInfo { /** - * The songCode of music. + * The ID of the music resource, used to identify the resource. */ int64_t songCode; /** - * The cache status of the music. + * Cache status of the music resource. See `MUSIC_CACHE_STATUS_TYPE`. 
*/ MUSIC_CACHE_STATUS_TYPE status; MusicCacheInfo():songCode(0), status(MUSIC_CACHE_STATUS_TYPE_CACHED) {} }; +/** + * @brief Detailed information about music charts. + */ class MusicChartCollection : public RefCountInterface { public: + /** + * @brief Gets the number of music charts in this request. + * + * @return + * The number of music charts in this request. + */ virtual int getCount() = 0; + /** + * @brief Gets the detailed information of a music chart. + * + * @param index Index of the `MusicChartInfo` array. + * + * @return + * `MusicChartInfo`, containing the detailed information of the music chart. + */ virtual MusicChartInfo* get(int index) = 0; protected: virtual ~MusicChartCollection() = default; @@ -153,77 +184,81 @@ struct MvProperty const char* bandwidth; }; +/** + * @brief The climax parts of the music. + */ struct ClimaxSegment { /** - * The start time of climax segment + * The time (ms) when the climax part begins. */ int32_t startTimeMs; /** - * The end time of climax segment + * The time (ms) when the climax part ends. */ int32_t endTimeMs; }; +/** + * @brief Detailed information of a music resource. + */ struct Music { /** - * The songCode of music + * The ID of the music resource, used to identify a music item. */ int64_t songCode; /** - * The name of music + * Name of the music resource. */ const char* name; /** - * The singer of music + * Name of the singer. */ const char* singer; /** - * The poster url of music + * Download URL of the music poster. */ const char* poster; /** - * The release time of music + * Release time of the music resource. */ const char* releaseTime; /** - * The duration (in seconds) of music + * Total duration of the music resource (in seconds). 
*/ int32_t durationS; /** - * The type of music - * 1, mp3 with instrumental accompaniment and original - * 2, mp3 only with instrumental accompaniment - * 3, mp3 only with original - * 4, mp4 with instrumental accompaniment and original - * 5, mv only - * 6, new type mp4 with instrumental accompaniment and original - * detail at document of music media center + * Type of the music resource: + * - 1: Single-track with accompaniment on the left channel and original vocals on the right + * channel. + * - 2: Single-track with accompaniment only. + * - 3: Single-track with original vocals only. + * - 4: Multi-track audio. */ int32_t type; /** - * The pitch type of music. - * 1, xml lyric has pitch - * 2, lyric has no pitch + * Whether the song supports pitch scoring: + * - 1: The song supports pitch scoring. + * - 2: The song does not support pitch scoring. */ int32_t pitchType; /** - * The number of lyrics available for the music + * Number of lyrics available for the song. */ int32_t lyricCount; /** - * The lyric list of music - * 0, xml - * 1, lrc + * Supported lyric formats: + * - 0: XML format. + * - 1: LRC format. */ int32_t* lyricList; /** - * The number of climax segments of the music + * Number of climax segments. */ int32_t climaxSegmentCount; /** - * The climax segment list of music + * List of climax segments. See `ClimaxSegment`. */ ClimaxSegment* climaxSegmentList; /** @@ -237,12 +272,47 @@ struct Music MvProperty* mvPropertyList; }; +/** + * @brief Detailed information about the music resource list. + */ class MusicCollection : public RefCountInterface { public: + /** + * @brief Gets the number of music items in this request. + * + * @return + * The number of music items in this request. + */ virtual int getCount() = 0; + /** + * @brief Gets the total number of music resources in the list. + * + * @return + * The total number of music resources in the list. 
+ */ virtual int getTotal() = 0; + /** + * @brief Gets the current page number of the music resource list. + * + * @return + * The current page number. + */ virtual int getPage() = 0; + /** + * @brief Gets the actual number of music resources returned by the SDK. + * + * @return + * The actual number of music resources returned by the SDK. + */ virtual int getPageSize() = 0; + /** + * @brief Gets the detailed information of a music resource in the current page list. + * + * @param index Index of the `Music` array. + * + * @return + * A `Music` instance. + */ virtual Music* getMusic(int32_t index) = 0; protected: virtual ~MusicCollection() = default; @@ -252,73 +322,118 @@ class MusicCollection : public RefCountInterface { class IMusicContentCenterEventHandler { public: /** - * The music chart result callback; occurs when getMusicCharts method is called. - * - * @param requestId The request id is same as that returned by getMusicCharts. - * @param result The result of music chart collection - * @param reason The status of the request. See MusicContentCenterStateReason + * @brief Callback for retrieving music charts. + * + * @details + * After you call the `getMusicCharts` method to retrieve all music charts, the SDK triggers this + * callback. + * + * @param requestId Request ID. A unique identifier for this request. + * @param reason The request status code from the Music Content Center. See + * `MusicContentCenterStateReason`. + * @param result The list of currently playable music charts. See `MusicChartCollection`. + * */ virtual void onMusicChartsResult(const char* requestId, agora_refptr result, MusicContentCenterStateReason reason) = 0; /** - * Music collection, occurs when getMusicCollectionByMusicChartId or searchMusic method is called. - * - * @param requestId The request id is same as that returned by getMusicCollectionByMusicChartId or searchMusic - * @param result The result of music collection - * @param reason The status of the request. 
See MusicContentCenterStateReason
+ * @brief Callback for retrieving the music resource list.
+ *
+ * @details
+ * When you call the `getMusicCollectionByMusicChartId` method to get the music resource list of a
+ * specific chart or call `searchMusic` to search for music resources, the SDK
+ * triggers this callback to report the detailed information of the music resource list.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param reason The request status code from the Music Content Center. See
+ * `MusicContentCenterStateReason`.
+ * @param result Detailed information of the music resource list. See `MusicCollection`.
+ *
  */
 virtual void onMusicCollectionResult(const char* requestId, agora_refptr result, MusicContentCenterStateReason reason) = 0;

 /**
- * Lyric url callback of getLyric, occurs when getLyric is called
- *
- * @param requestId The request id is same as that returned by getLyric
- * @param songCode Song code
- * @param lyricUrl The lyric url of this music
- * @param reason The status of the request. See MusicContentCenterStateReason
+ * @brief Callback for the lyrics download URL.
+ *
+ * @details
+ * After you call `getLyric` to get the lyrics download URL for a specific song, the SDK triggers
+ * this callback.
+ *
+ * @param requestId Request ID. A unique identifier for this request.
+ * @param songCode The ID of the music resource, used to identify the music.
+ * @param lyricUrl The download URL of the lyrics.
+ * @param reason The request status code from the Music Content Center. See
+ * `MusicContentCenterStateReason`.
+ *
  */
 virtual void onLyricResult(const char* requestId, int64_t songCode, const char* lyricUrl, MusicContentCenterStateReason reason) = 0;

 /**
- * Simple info callback of getSongSimpleInfo, occurs when getSongSimpleInfo is called
- *
- * @param requestId The request id is same as that returned by getSongSimpleInfo.
- * @param songCode Song code
- * @param simpleInfo The metadata of the music.
- * @param reason The status of the request. See MusicContentCenterStateReason + * @brief Callback for detailed information of a music resource. + * + * @details + * After you call `getSongSimpleInfo` to get detailed information of a music resource, the SDK + * triggers this callback. + * + * @param requestId Request ID. A unique identifier for this request. + * @param songCode The ID of the music resource, used to identify the music. + * @param simpleInfo Information about the music resource, including the following: + * - Start and end time of the chorus segment (ms) + * - Download URL of the chorus lyrics + * - Duration of the chorus segment (ms) + * - Song name + * - Artist name + * @param reason Request status code from the Music Content Center. See + * `MusicContentCenterStateReason`. + * */ virtual void onSongSimpleInfoResult(const char* requestId, int64_t songCode, const char* simpleInfo, MusicContentCenterStateReason reason) = 0; /** - * Preload process callback, occurs when preload is called - * - * @param requestId The request id is same as that returned by preload. - * @param songCode Song code - * @param percent Preload progress (0 ~ 100) - * @param lyricUrl The lyric url of this music - * @param state Preload state; see PreloadState. - * @param reason The status of the request. See MusicContentCenterStateReason + * @brief Reports events related to preloading music resources. + * + * @details + * After you call `preload(int64_t songCode, const char* jsonOption = nullptr)` or + * `preload(agora::util::AString& requestId, int64_t songCode)` to preload a music resource, the SDK + * triggers this callback. + * + * @param requestId Request ID. A unique identifier for this request. + * @param songCode The ID of the music resource, used to identify a music item. + * @param percent Current loading progress of the music resource, ranging from [0, 100]. + * @param lyricUrl Download URL of the lyrics. + * @param state Current loading state of the music resource. 
See `PreloadState`. + * @param reason Request status code from the Music Content Center. See + * `MusicContentCenterStateReason`. + * */ virtual void onPreLoadEvent(const char* requestId, int64_t songCode, int percent, const char* lyricUrl, PreloadState state, MusicContentCenterStateReason reason) = 0; virtual ~IMusicContentCenterEventHandler() {}; }; +/** + * @brief Configuration for the Music Content Center. + */ struct MusicContentCenterConfiguration { /** - * The app ID of the project that has enabled the music content center + * App ID of the project with Music Content Center enabled. */ const char *appId; /** - * Music content center need token to connect with server + * RTM Token used for authentication when using the Music Content Center. + * @note + * - Agora recommends using AccessToken2 for authentication. See `Deploy Token Server`. When + * generating the token, pass a `String` type `mccUid` to `uid`. + * - When your token is about to expire, you can call `renewToken` to pass in a new token. */ const char *token; /** - * The user ID when using music content center. It can be different from that of the rtc product. + * User ID for using the Music Content Center. This ID can be the same as the `uid` used when + * joining an RTC channel, but it cannot be 0. */ int64_t mccUid; /** - * The max number which the music content center caches cannot exceed 50. + * Number of music resources that can be cached. The maximum is 50. */ int32_t maxCacheSize; /** @@ -326,7 +441,7 @@ struct MusicContentCenterConfiguration { */ const char* mccDomain; /** - * Event handler to get callback result. + * Event handler to receive callbacks. See `IMusicContentCenterEventHandler`. 
*/ IMusicContentCenterEventHandler* eventHandler; MusicContentCenterConfiguration():appId(nullptr),token(nullptr),eventHandler(nullptr),mccUid(0),maxCacheSize(10), mccDomain(nullptr){} @@ -342,26 +457,53 @@ class IMusicPlayer : public IMediaPlayer { IMusicPlayer() {}; using IMediaPlayer::open; /** - * Open a media file with specified parameters. - * - * @param songCode The identifier of the media file that you want to play. - * @param startPos The playback position (ms) of the music file. - * @return - * - 0: Success. - * - < 0: Failure. - */ + * @brief Opens a music resource by its song code. + * + * @details + * Before calling this method, make sure the music resource to be played has been loaded. You can + * call `isPreloaded` to check whether the resource has been preloaded, or listen for the + * `onPreLoadEvent` callback. + * After calling this method, the `onPlayerSourceStateChanged` callback is triggered. Once you + * receive a playback state of `PLAYER_STATE_OPEN_COMPLETED`, you can call the `play` method to play + * the media file. + * + * @note Note: If the music resource you want to open is protected by digital rights management + * (DRM), you must use this method to open it. For non-DRM-protected resources, you can choose to + * open them using this method or the `open` method under the `IMediaPlayer` class. + * + * @param songCode The song code of the music resource, used to identify the music. + * @param startPos The start playback position in milliseconds. Default is 0. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + */ virtual int open(int64_t songCode, int64_t startPos = 0) = 0; /** - * Set the mode for playing songs. - * You can call this method to switch from original to accompaniment or lead vocals. - * If you do not call this method to set the mode, the SDK plays the accompaniment by default. - * - * @param model The playing mode. - * @return - * - 0: Success. - * - < 0: Failure. 
- */ + * @brief Sets the playback mode of a music resource. + * + * @details + * You can call this method to enable original vocals, accompaniment, or vocal guide. If you do not + * call this method, accompaniment is played by default; if the music resource has no accompaniment, + * the original vocals are played. + * Applicable scenarios: In entertainment scenarios such as online karaoke or talent shows, if you + * need to play copyrighted music provided by Agora's content center, you can call this method to + * set the playback mode. + * Call timing: This method must be called after `createMusicPlayer`. + * + * @note + * You can get detailed information about the music resource from the `onMusicCollectionResult` + * callback, and determine the supported playback types of the copyrighted music from the `result` + * parameter. + * + * @param mode Playback mode. See `MusicPlayMode`. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + * - -2: Invalid parameter. Please reset the parameter. + */ virtual int setPlayMode(MusicPlayMode mode) = 0; }; @@ -373,206 +515,340 @@ class IMusicContentCenter IMusicContentCenter() {}; /** - * Initializes the IMusicContentCenter - * Set token of music content center and other params + * @brief Initializes the `IMusicContentCenter`. + * + * @details + * You must call this method to initialize `IMusicContentCenter` before using any other methods + * under the `IMusicContentCenter` class. + * + * @param configuration Configuration for `IMusicContentCenter`. See + * `MusicContentCenterConfiguration`. * - * @param configuration * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int initialize(const MusicContentCenterConfiguration & configuration) = 0; /** - * Renew token of music content center - * + * @brief Renews the token. 
+ * + * @details + * When the token used for authentication is about to expire or has already expired, you can call + * this method to pass in a newly generated token. + * * @param token The new token. - * @return + * + * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int renewToken(const char* token) = 0; /** - * release music content center resource. - * + * @brief Releases all resources used by the Music Content Center. + * + * @details + * This method must be called before the `release` method of `IRtcEngine`. + * */ virtual void release() = 0; /** - * register event handler. - */ + * @brief Registers the Music Content Center event handler. + * + * @param eventHandler The event handler to register. See `IMusicContentCenterEventHandler`. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + */ virtual int registerEventHandler(IMusicContentCenterEventHandler* eventHandler) = 0; /** - * unregister event handler. - */ + * @brief Unregisters the Music Content Center event callback. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + */ virtual int unregisterEventHandler() = 0; /** - * Creates a music player source object and return its pointer. + * @brief Creates a music player. + * + * @details + * If you need to play music resources from the Music Content Center, you must first call this + * method to create a music player. + * * @return - * - The pointer to \ref rtc::IMusicPlayer "IMusicPlayer", - * if the method call succeeds. - * - The empty pointer NULL, if the method call fails. + * - If the method call succeeds: Returns an `IMusicPlayer` object. + * - If the method call fails: Returns a null pointer. */ virtual agora_refptr createMusicPlayer() = 0; /** - * Destroy a music player source object and return result. 
- * @param music_player The pointer to \ref rtc::IMusicPlayer "IMusicPlayer". + * @brief Destroys the music player object. + * + * @details + * When you no longer need to use the music player, you can call this method to destroy the music + * player object. If you need to use the music player again after destruction, call + * `createMusicPlayer` to recreate a music player object. + * Call timing: This method can be called before or after joining a channel, but make sure to call + * it before the `release` method of `IRtcEngine`. + * + * @param music_player Pointer to the `IMusicPlayer` object. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int destroyMusicPlayer(agora_refptr music_player) = 0; /** - * Get music chart collection of music. - * If the method call succeeds, get result from the - * \ref agora::rtc::IMusicContentCenterEventHandler::onMusicChartsResult - * "onMusicChartsResult" callback - * @param requestId The request id you will get of this query, format is uuid. + * @brief Gets all music charts. + * + * @details + * After you call this method, the SDK triggers the `onMusicChartsResult` callback to report + * detailed information about the music charts. + * + * @param requestId Request ID. A unique identifier for this request. + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int getMusicCharts(agora::util::AString& requestId) = 0; /** - * Get music collection of the music chart by musicChartId and page info. - * If the method call success, get result from the - * \ref agora::rtc::IMusicContentCenterEventHandler::onMusicCollectionResult - * "onMusicCollectionResult" callback - * @param requestId The request id you will get of this query, format is uuid. - * @param musicChartId The music chart id obtained from getMusicCharts. 
- * @param page The page of the music chart, starting from 1 - * @param pageSize The page size, max is 50. - * @param jsonOption The ext param, default is null. + * @brief Gets the list of music resources from a specified chart by its music chart ID. + * + * @details + * After successfully calling this method, the SDK triggers the `onMusicCollectionResult` callback + * to report detailed information about the music resources in the chart. + * + * @param requestId Request ID. A unique identifier for this request. + * @param musicChartId The ID of the music chart. You can obtain it from the `onMusicChartsResult` + * callback. You can also use RESTful APIs to `get the full music library list` or + * `get incremental music list`. + * @param page Current page number, starting from 1 by default. + * @param pageSize Total number of items per page in the music resource list. The maximum value is + * 50. + * @param jsonOption Extended JSON field, default is NULL. You can use this field to filter the + * music resources you need. Currently supports filtering by scoreable music and chorus segments: + * | Key | Value | Example | + * | ------------- | ---------------------------------------------------------------------- | ------------------------ | + * | pitchType | Whether scoring is supported: - 1: Scoreable music. - 2: Non-scoreable music. | {"pitchType":1} | + * | needHighPart | Whether chorus segment is needed: - `true`: Chorus segment needed. - `false`: Not needed. | {"needHighPart":true} | + * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int getMusicCollectionByMusicChartId(agora::util::AString& requestId, int32_t musicChartId, int32_t page, int32_t pageSize, const char* jsonOption = nullptr) = 0; /** - * Search music by keyword and page info. 
- If the method call success, get result from the
- * \ref agora::rtc::IMusicContentCenterEventHandler::onMusicCollectionResult
- * "onMusicCollectionResult" callback
- * @param requestId The request id you will get of this query, format is uuid.
- * @param keyWord The key word to search.
- * @param page The page of music search result , start from 1.
- * @param pageSize The page size, max is 50.
- * @param jsonOption The ext param, default is null.
+ * @brief Searches for music resources.
+ *
+ * @details
+ * After successfully calling this method, the SDK triggers the `onMusicCollectionResult` callback
+ * to report the list of retrieved music resources.
+ *
+ * @param keyWord Search keyword. Supports searching by song name or artist.
+ * @param page The target page number of the music resource list to retrieve.
+ * @param pageSize Maximum number of music resources displayed per page. The maximum value is 50.
+ * @param jsonOption Extended JSON field. Default is NULL. You can use this field to filter the
+ * music resources you need. Currently supports filtering by scoreable music and chorus segments:
+ * | Key | Value | Example |
+ * | ------------- | ---------------------------------------------------------------------- | ------------------------ |
+ * | pitchType | Whether scoring is supported: - 1: Scoreable music. - 2: Non-scoreable music. | {"pitchType":1} |
+ * | needHighPart | Whether chorus segment is needed: - `true`: Chorus segment needed. - `false`: Not needed. | {"needHighPart":true} |
+ * @param requestId Request ID. A unique identifier for this request.
+ *
  * @return
  * - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
  */
 virtual int searchMusic(agora::util::AString& requestId, const char* keyWord, int32_t page, int32_t pageSize, const char* jsonOption = nullptr) = 0;

 /**
- * Preload a media file with specified parameters.
+ * @brief Preloads a music resource.
 *
 * @deprecated This method is deprecated. Use preload(int64_t songCode) instead.
- *
- * @param songCode The identifier of the media file that you want to play.
- * @param jsonOption The ext param, default is null.
+ *
+ * @details
+ * You can call this method to preload the music resource you want to play. After successfully
+ * calling this method, the SDK triggers the `onPreLoadEvent` callback to report the preload event.
+ * Before calling this method to preload a music resource, you need to call
+ * `getMusicCollectionByMusicChartId` or `searchMusic`
+ * to get the music resource you want to play, and obtain the song code (`songCode`) from the
+ * `onMusicCollectionResult` callback.
+ *
+ * @note To destroy the `IRtcEngine` object, make sure to call the `release` method only after
+ * receiving the `onPreLoadEvent` callback.
+ *
+ * @param songCode The song code of the music resource, used to identify the music.
+ * @param jsonOption Extended JSON field.
+ * Agora charges based on the application scenario you pass in the `sceneType` field. Different
+ * scenarios have different rates. Refer to the `Billing Description` for details.
+ * - 1: Live scene: Karaoke and background music playback.
+ * - 2: Live scene: Background music playback.
+ * - 3: (Default) Voice chat scene: Karaoke.
+ * - 4: Voice chat scene: Background music playback.
+ * - 5: VR scene: Karaoke and background music playback.
+ * If you need to switch to a different scenario, call this method again and pass
+ * the new `sceneType` value in this field.
+ * Example: `{"sceneType":1}`
+ *
  * @return
  * - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
  */
 virtual int preload(int64_t songCode, const char* jsonOption) __deprecated = 0;

 /**
- * Preload a media file with specified parameters.
- *
- * @param requestId The request id you will get of this query, format is uuid.
- * @param songCode The identifier of the media file that you want to play.
+ * @brief Preloads a music resource.
+ *
+ * @details
+ * You can call this method to preload the music resource you want to play. After successfully
+ * calling this method, the SDK triggers the `onPreLoadEvent` callback to report the preload event.
+ * Before calling this method to preload a music resource, you need to call
+ * `getMusicCollectionByMusicChartId` or `searchMusic`
+ * to get the music resource you want to play, and obtain the song code (`songCode`) from the
+ * `onMusicCollectionResult` callback.
+ *
+ * @note To destroy the `IRtcEngine` object, make sure to call the `release` method only after
+ * receiving the `onPreLoadEvent` callback.
+ *
+ * @param songCode The song code of the music resource, used to identify the music.
+ * @param requestId Output parameter. Request ID. A unique identifier for this request.
+ *
  * @return
  * - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
  */
 virtual int preload(agora::util::AString& requestId, int64_t songCode) = 0;

 /**
- * Remove a media file cache
+ * @brief Deletes a cached music resource.
+ *
+ * @details
+ * You can call this method to delete a specific cached music resource. To delete multiple
+ * resources, call this method multiple times.
+ *
+ * @note Note: This method does not delete cached music resources that are currently being played.
+ *
+ * @param songCode The ID of the music resource to be deleted.
  *
- * @param songCode The identifier of the media file that you want to play.
  * @return
- * - 0: Success; the cached media file is removed.
- * - < 0: Failure.
+ * - 0: Success. The music resource has been deleted.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
  */
 virtual int removeCache(int64_t songCode) = 0;

 /**
- * Get cached media files.
- * Before calling this API, you should allocate a memory buffer that stores the cached media file information, and pass the pointer of the buffer as the input parameter cacheInfo, and set the size of the memory buffer to cacheInfoSize. - * The sample code below illustrates how to request the cached media file information: + * @brief Gets information about cached music resources. * - * cacheInfoSize = 10 // Allocate a memory buffer of 10 MusicCacheInfo size - * agora::rtc::MusicCacheInfo *infos = new agora::rtc::MusicCacheInfo[cacheInfoSize]; - * int ret = self.imcc->getCaches(infos, cacheInfoSize); - * if (ret < 0) { // error occurred! - * return; - * } - * std::cout << "the cache size:" << cacheInfoSize << std::endl; // The cache size: 5 + * @details + * Before calling this method, you need to pre-allocate a certain amount of memory to store + * information about cached music resources. If you want to set the number of music resources that + * can be cached, you can configure it through the `configuration` parameter in `initialize`. + * When you no longer need the cached music resources, you should release the memory in time to + * prevent memory leaks. * + * @param cacheInfo Output parameter. A pointer to the memory buffer used to store cached music + * resource information. + * @param cacheInfoSize Input and output parameter. + * - Input: The length of the `cacheInfo` array, i.e., the number of `MusicCacheInfo` structures you + * allocated. + * - Output: The number of `MusicCacheInfo` structures returned after the method execution. * - * @param cacheInfo An output parameter; A pointer to the memory buffer that stores the cached media file information. The memory buffer pointed to by cacheInfo should be allocated by yourself before calling this API. - * @param cacheInfoSize - * - Input: The number of MusicCacheInfo's size that you get from the memory. - * - Output: The actual number of MusicCacheInfo struct that is returned. * @return * - 0: Success. 
- * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int getCaches(MusicCacheInfo *cacheInfo, int32_t* cacheInfoSize) = 0; /** - * Check if the media file is preloaded + * @brief Checks whether a music resource has been preloaded. + * + * @details + * This method is synchronous. To preload a new music resource, call `preload(agora::util::AString& + * requestId, int64_t songCode)`. + * + * @param songCode The ID of the music resource, used to identify a music item. * - * @param songCode The identifier of the media file that you want to play. * @return - * - 0: Success, file is preloaded. - * - < 0: Failure. + * - 0: Success. The music resource has been preloaded. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int isPreloaded(int64_t songCode) = 0; /** - * Get lyric of the music. + * @brief Gets the download URL of the lyrics for a music resource. + * + * @details + * After successfully calling this method, the SDK triggers the `onLyricResult` callback to report + * the lyrics download URL. + * + * @param songCode The ID of the music resource, used to identify the music. + * @param lyricType Type of lyrics: + * - 0: XML format. + * - 1: LRC format. + * @param requestId Request ID. A unique identifier for this request. * - * @param requestId The request id you will get of this query, format is uuid. - * @param songCode The identifier of the media file that you want to play. - * @param lyricType The type of the lyric file. 0:xml or 1:lrc. * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int getLyric(agora::util::AString& requestId, int64_t songCode, int32_t lyricType = 0) = 0; /** - * Gets the metadata of a specific music. Once this method is called, the SDK triggers the onSongSimpleInfoResult callback to report the metadata of the music. 
+ * @brief Gets detailed information of a specific music resource.
+ *
+ * @details
+ * Before calling this method, you need to obtain the song code of the corresponding music resource.
+ * You can get it by calling `getMusicCollectionByMusicChartId` or `searchMusic`,
+ * and retrieve the song code from the `onMusicCollectionResult` callback triggered by those
+ * methods.
+ * After you call this method, the SDK triggers the `onSongSimpleInfoResult` callback to report the
+ * detailed information of the music resource.
+ *
+ * @param songCode The song code of the music resource, used to identify the music.
+ * @param requestId Request ID. A unique identifier for this request.
 *
- * @param requestId The request id you will get of this query, format is uuid.
- * @param songCode The identifier of the media file.
 * @return
 * - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
 */
 virtual int getSongSimpleInfo(agora::util::AString& requestId, int64_t songCode) = 0;

 /**
- * Get internal songCodeKey from songCode and jsonOption
+ * @brief Creates an internal song code for the chorus segment of a music resource.
+ *
+ * @details
+ * Applicable scenarios: Before playing the chorus segment of a music resource, you need to call
+ * this method to create an internal song code for the chorus segment using the `jsonOption`
+ * parameter and the music resource's `songCode`.
+ * This internal song code serves as the unique identifier for the resource. Once you obtain this
+ * code, use it as the `songCode` parameter when calling methods to open, preload, or remove the
+ * resource.
+ *
+ * @param songCode The song code of the music resource, used to identify the resource. You can
+ * obtain it by calling `getMusicCollectionByMusicChartId` or `searchMusic`,
+ * and retrieve the song code from the `onMusicCollectionResult` callback triggered by those
+ * methods.
+ * @param jsonOption Extended JSON field, default is NULL. Currently supports the following values:
+ * | Key | Value | Example |
+ * | ---------- | -------------------------------- | ----------------------------- |
+ * | sceneType | Scene type: - 1: Live scene: Karaoke and background music playback. - 2: Live scene: Background music playback. - 3: (Default) Voice chat scene: Karaoke. - 4: Voice chat scene: Background music playback. - 5: VR scene: Karaoke and background music playback. Note: Agora charges based on the scene type you pass in `sceneType`. Different scenes have different rates. See `Billing Description` for details. To switch scenes, you need to call this method again with a new `sceneType`. | {"sceneType":1} |
+ * | highPart | Index of the chorus segment. You can get the index from the `onMusicCollectionResult` callback and pass it here. The index starts from 0. | {"format": {"highpart": 0}} |
+ * @param internalSongCode Output parameter, the internal song code of the music resource.
 *
- * @param songCode The identifier of the media file.
- * @param jsonOption An extention parameter. The default value is null. it’s a json-format string and the `key` and `value` can be customized according to your scenarios.
- * @param internalSongCode The identifier of internal
 * @return
 * - 0: Success.
- * - < 0: Failure.
+ * - < 0: Failure. See `Error Codes` for details and resolution suggestions.
 */
 virtual int getInternalSongCode(int64_t songCode, const char* jsonOption, int64_t& internalSongCode) = 0;
};
+ */ class IAgoraParameter : public RefCountInterface { public: /** @@ -291,10 +295,17 @@ class IAgoraParameter : public RefCountInterface { virtual int getArray(const char* key, const char* args, agora::util::AString& value) = 0; /** - * set parameters of the sdk or engine - * @param [in] parameters - * the parameters - * @return return 0 if success or an error code + * @brief Provides the technical preview functionalities or special customizations by configuring + * the SDK with JSON options. + * + * @details + * Contact `technical support` to get the JSON configuration method. + * + * @param parameters Pointer to the set parameters in a JSON string. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setParameters(const char* parameters) = 0; diff --git a/include/IAgoraRhythmPlayer.h b/include/IAgoraRhythmPlayer.h index f657b1c..5ae25bc 100644 --- a/include/IAgoraRhythmPlayer.h +++ b/include/IAgoraRhythmPlayer.h @@ -19,52 +19,70 @@ class ILocalAudioTrack; class IRtcEngineEventHandler; /** - The states of the rhythm player. + * @brief Virtual metronome state. */ enum RHYTHM_PLAYER_STATE_TYPE { - /** 810: The rhythm player is idle. */ + /** + * 810: The virtual metronome is not enabled or disabled already. + */ RHYTHM_PLAYER_STATE_IDLE = 810, - /** 811: The rhythm player is opening files. */ + /** + * 811: Opening the beat files. + */ RHYTHM_PLAYER_STATE_OPENING, - /** 812: Files opened successfully, the rhythm player starts decoding files. */ + /** + * 812: Decoding the beat files. + */ RHYTHM_PLAYER_STATE_DECODING, - /** 813: Files decoded successfully, the rhythm player starts mixing the two files and playing back them locally. */ + /** + * 813: The beat files are playing. + */ RHYTHM_PLAYER_STATE_PLAYING, - /** 814: The rhythm player is starting to fail, and you need to check the error code for detailed failure reasons. */ + /** + * 814: Failed to start virtual metronome. 
You can use the reported `errorCode` to troubleshoot the + * cause of the error, or you can try to start the virtual metronome again. + */ RHYTHM_PLAYER_STATE_FAILED, }; /** - The reason codes of the rhythm player. + * @brief Virtual Metronome error message. */ enum RHYTHM_PLAYER_REASON { - /** 0: The rhythm player works well. */ + /** + * 0: The beat files are played normally without errors. + */ RHYTHM_PLAYER_REASON_OK = 0, - /** 1: The rhythm player occurs a internal error. */ + /** + * 1: A general error; no specific reason. + */ RHYTHM_PLAYER_REASON_FAILED = 1, - /** 801: The rhythm player can not open the file. */ + /** + * 801: There is an error when opening the beat files. + */ RHYTHM_PLAYER_REASON_CAN_NOT_OPEN = 801, - /** 802: The rhythm player can not play the file. */ + /** + * 802: There is an error when playing the beat files. + */ RHYTHM_PLAYER_REASON_CAN_NOT_PLAY, - /** 803: The file duration over the limit. The file duration limit is 1.2 seconds */ + /** + * 803: The duration of the beat file exceeds the limit. The maximum duration is 1.2 seconds. + */ RHYTHM_PLAYER_REASON_FILE_OVER_DURATION_LIMIT, }; /** - * The configuration of rhythm player, - * which is set in startRhythmPlayer or configRhythmPlayer. + * @brief The metronome configuration. */ struct AgoraRhythmPlayerConfig { /** - * The number of beats per measure. The range is 1 to 9. - * The default value is 4, - * which means that each measure contains one downbeat and three upbeats. + * The number of beats per measure, which ranges from 1 to 9. The default value is 4, which means + * that each measure contains one downbeat and three upbeats. */ int beatsPerMeasure; - /* - * The range is 60 to 360. - * The default value is 60, - * which means that the rhythm player plays 60 beats in one minute. + /** + * The beat speed (beats/minute), which ranges from 60 to 360. The default value is 60, which means + * that the metronome plays 60 beats in one minute. 
*/ int beatsPerMinute; diff --git a/include/IAgoraRtcEngine.h b/include/IAgoraRtcEngine.h index a065c2b..6f0fddc 100644 --- a/include/IAgoraRtcEngine.h +++ b/include/IAgoraRtcEngine.h @@ -35,7 +35,7 @@ static void ReplaceBy(Optional* s, const Optional& o) { //class IAudioDeviceManager; /** - * The media device types. + * @brief Media device types. */ enum MEDIA_DEVICE_TYPE { /** @@ -43,67 +43,100 @@ enum MEDIA_DEVICE_TYPE { */ UNKNOWN_AUDIO_DEVICE = -1, /** - * 0: The audio playback device. + * 0: Audio playback device. */ AUDIO_PLAYOUT_DEVICE = 0, /** - * 1: The audio recording device. + * 1: Audio capturing device. */ AUDIO_RECORDING_DEVICE = 1, /** - * 2: The video renderer. + * 2: Video rendering device (graphics card). */ VIDEO_RENDER_DEVICE = 2, /** - * 3: The video capturer. + * 3: Video capturing device. */ VIDEO_CAPTURE_DEVICE = 3, /** - * 4: The audio playback device of the app. + * 4: Audio playback device for an app. */ AUDIO_APPLICATION_PLAYOUT_DEVICE = 4, /** - * 5: The virtual audio playback device. + * (For macOS only) 5: Virtual audio playback device (virtual sound card). */ AUDIO_VIRTUAL_PLAYOUT_DEVICE = 5, /** - * 6: The virtual audio recording device. + * (For macOS only) 6: Virtual audio capturing device (virtual sound card). */ AUDIO_VIRTUAL_RECORDING_DEVICE = 6, }; /** - The playback state of the music file. + * @brief The playback state of the music file. */ enum AUDIO_MIXING_STATE_TYPE { - /** 710: The music file is playing. */ + /** + * 710: The music file is playing. + */ AUDIO_MIXING_STATE_PLAYING = 710, - /** 711: The music file pauses playing. */ + /** + * 711: The music file pauses playing. + */ AUDIO_MIXING_STATE_PAUSED = 711, - /** 713: The music file stops playing. */ + /** + * 713: The music file stops playing. 
+ * The possible reasons include: + * - AUDIO_MIXING_REASON_ALL_LOOPS_COMPLETED (723) + * - AUDIO_MIXING_REASON_STOPPED_BY_USER (724) + */ AUDIO_MIXING_STATE_STOPPED = 713, - /** 714: An error occurs during the playback of the audio mixing file. + /** + * 714: An error occurs during the playback of the audio mixing file. + * The possible reasons include: + * - AUDIO_MIXING_REASON_CAN_NOT_OPEN (701) + * - AUDIO_MIXING_REASON_TOO_FREQUENT_CALL (702) + * - AUDIO_MIXING_REASON_INTERRUPTED_EOF (703) */ AUDIO_MIXING_STATE_FAILED = 714, }; /** - The reson codes of the local user's audio mixing file. + * @brief The reason why the playback state of the music file changes. Reported in the + * `onAudioMixingStateChanged` callback. */ enum AUDIO_MIXING_REASON_TYPE { - /** 701: The SDK cannot open the audio mixing file. */ + /** + * 701: The SDK cannot open the music file. For example, the local music file does not exist, the + * SDK does not support the file format, or the SDK cannot access the music file URL. + */ AUDIO_MIXING_REASON_CAN_NOT_OPEN = 701, - /** 702: The SDK opens the audio mixing file too frequently. */ + /** + * 702: The SDK opens the music file too frequently. If you need to call `startAudioMixing` multiple + * times, ensure that the call interval is more than 500 ms. + */ AUDIO_MIXING_REASON_TOO_FREQUENT_CALL = 702, - /** 703: The audio mixing file playback is interrupted. */ + /** + * 703: The music file playback is interrupted. + */ AUDIO_MIXING_REASON_INTERRUPTED_EOF = 703, - /** 715: The audio mixing file is played once. */ + /** + * 721: The music file completes a loop playback. + */ AUDIO_MIXING_REASON_ONE_LOOP_COMPLETED = 721, - /** 716: The audio mixing file is all played out. */ + /** + * 723: The music file completes all loop playback. + */ AUDIO_MIXING_REASON_ALL_LOOPS_COMPLETED = 723, - /** 716: The audio mixing file stopped by user */ + /** + * 724: Successfully call `stopAudioMixing` to stop playing the music file. 
+ */ AUDIO_MIXING_REASON_STOPPED_BY_USER = 724, - /** 0: The SDK can open the audio mixing file. */ + /** 726: The audio mixing playback has been resumed by the user */ + AUDIO_MIXING_REASON_RESUMED_BY_USER = 726, + /** + * 0: The SDK opens the music file successfully. + */ AUDIO_MIXING_REASON_OK = 0, }; @@ -158,91 +191,95 @@ enum INJECT_STREAM_STATUS { }; /** - * The audio equalization band frequency. + * @brief The midrange frequency for audio equalization. */ enum AUDIO_EQUALIZATION_BAND_FREQUENCY { /** - * 0: 31 Hz. + * 0: 31 Hz */ AUDIO_EQUALIZATION_BAND_31 = 0, /** - * 1: 62 Hz. + * 1: 62 Hz */ AUDIO_EQUALIZATION_BAND_62 = 1, /** - * 2: 125 Hz. + * 2: 125 Hz */ AUDIO_EQUALIZATION_BAND_125 = 2, /** - * 3: 250 Hz. + * 3: 250 Hz */ AUDIO_EQUALIZATION_BAND_250 = 3, /** - * 4: 500 Hz. + * 4: 500 Hz */ AUDIO_EQUALIZATION_BAND_500 = 4, /** - * 5: 1 KHz. + * 5: 1 kHz */ AUDIO_EQUALIZATION_BAND_1K = 5, /** - * 6: 2 KHz. + * 6: 2 kHz */ AUDIO_EQUALIZATION_BAND_2K = 6, /** - * 7: 4 KHz. + * 7: 4 kHz */ AUDIO_EQUALIZATION_BAND_4K = 7, /** - * 8: 8 KHz. + * 8: 8 kHz */ AUDIO_EQUALIZATION_BAND_8K = 8, /** - * 9: 16 KHz. + * 9: 16 kHz */ AUDIO_EQUALIZATION_BAND_16K = 9, }; /** - * The audio reverberation type. + * @brief Audio reverberation types. */ enum AUDIO_REVERB_TYPE { /** - * 0: (-20 to 10 dB), the level of the dry signal. + * 0: The level of the dry signal (dB). The value is between -20 and 10. */ AUDIO_REVERB_DRY_LEVEL = 0, /** - * 1: (-20 to 10 dB), the level of the early reflection signal (wet signal). + * 1: The level of the early reflection signal (wet signal) (dB). The value is between -20 and 10. */ AUDIO_REVERB_WET_LEVEL = 1, /** - * 2: (0 to 100 dB), the room size of the reflection. + * 2: The room size of the reflection. The value is between 0 and 100. */ AUDIO_REVERB_ROOM_SIZE = 2, /** - * 3: (0 to 200 ms), the length of the initial delay of the wet signal in ms. + * 3: The length of the initial delay of the wet signal (ms). The value is between 0 and 200. 
*/ AUDIO_REVERB_WET_DELAY = 3, /** - * 4: (0 to 100), the strength of the late reverberation. + * 4: The reverberation strength. The value is between 0 and 100. */ AUDIO_REVERB_STRENGTH = 4, }; +/** + * @brief Options for handling audio and video stream fallback when network conditions are weak. + */ enum STREAM_FALLBACK_OPTIONS { - /** 0: No fallback operation to a lower resolution stream when the network - condition is poor. Fallback to Scalable Video Coding (e.g. SVC) - is still possible, but the resolution remains in high stream. - The stream quality cannot be guaranteed. */ + /** + * 0: No fallback processing is performed on audio and video streams, the quality of the audio and + * video streams cannot be guaranteed. + */ STREAM_FALLBACK_OPTION_DISABLED = 0, - /** 1: (Default) Under poor network conditions, the receiver SDK will receive - agora::rtc::VIDEO_STREAM_LOW. You can only set this option in - RtcEngineParameters::setRemoteSubscribeFallbackOption. Nothing happens when - you set this in RtcEngineParameters::setLocalPublishFallbackOption. */ + /** + * 1: Only receive low-quality (low resolution, low bitrate) video stream. + */ STREAM_FALLBACK_OPTION_VIDEO_STREAM_LOW = 1, - /** 2: Under poor network conditions, the SDK may receive agora::rtc::VIDEO_STREAM_LOW first, - then agora::rtc::VIDEO_STREAM_LAYER_1 to agora::rtc::VIDEO_STREAM_LAYER_6 if the related layer exists. - If the network still does not allow displaying the video, the SDK will receive audio only. */ + /** + * 2: When the network conditions are weak, try to receive the low-quality video stream first. If + * the video cannot be displayed due to extremely weak network environment, then fall back to + * receiving audio-only stream. 
+ */ STREAM_FALLBACK_OPTION_AUDIO_ONLY = 2, /** 3~8: If the receiver SDK uses RtcEngineParameters::setRemoteSubscribeFallbackOption,it will receive one of the streams from agora::rtc::VIDEO_STREAM_LAYER_1 to agora::rtc::VIDEO_STREAM_LAYER_6 @@ -267,107 +304,141 @@ enum PRIORITY_TYPE { struct RtcConnection; -/** Statistics of the local video stream. +/** + * @brief The statistics of the local video stream. */ struct LocalVideoStats { /** - * ID of the local user. - */ + * The ID of the local user. + */ uid_t uid; - /** The actual bitrate (Kbps) while sending the local video stream. - * @note This value does not include the bitrate for resending the video after packet loss. - */ + /** + * The actual bitrate (Kbps) while sending the local video stream. + * @note This value does not include the bitrate for resending the video after packet loss. + */ int sentBitrate; - /** The actual frame rate (fps) while sending the local video stream. - * @note This value does not include the frame rate for resending the video after packet loss. - */ + /** + * The actual frame rate (fps) while sending the local video stream. + * @note This value does not include the frame rate for resending the video after packet loss. + */ int sentFrameRate; - /** The capture frame rate (fps) of the local video. - */ + /** + * The frame rate (fps) for capturing the local video stream. + */ int captureFrameRate; - /** The width of the capture frame (px). - */ + /** + * The width (px) for capturing the local video stream. + */ int captureFrameWidth; - /** The height of the capture frame (px). - */ + /** + * The height (px) for capturing the local video stream. + */ int captureFrameHeight; /** - * The regulated frame rate of capture frame rate according to video encoder configuration. - */ + * The frame rate (fps) adjusted by the built-in video capture adapter (regulator) of the SDK for + * capturing the local video stream. 
The regulator adjusts the frame rate of the video captured by + * the camera according to the video encoding configuration. + */ int regulatedCaptureFrameRate; /** - * The regulated frame width (pixel) of capture frame width according to video encoder configuration. - */ + * The width (px) adjusted by the built-in video capture adapter (regulator) of the SDK for + * capturing the local video stream. The regulator adjusts the height and width of the video + * captured by the camera according to the video encoding configuration. + */ int regulatedCaptureFrameWidth; /** - * The regulated frame height (pixel) of capture frame height according to video encoder configuration. - */ + * The height (px) adjusted by the built-in video capture adapter (regulator) of the SDK for + * capturing the local video stream. The regulator adjusts the height and width of the video + * captured by the camera according to the video encoding configuration. + */ int regulatedCaptureFrameHeight; - /** The output frame rate (fps) of the local video encoder. - */ + /** + * The output frame rate (fps) of the local video encoder. + */ int encoderOutputFrameRate; - /** The width of the encoding frame (px). - */ + /** + * The width of the encoded video (px). + */ int encodedFrameWidth; - /** The height of the encoding frame (px). - */ + /** + * The height of the encoded video (px). + */ int encodedFrameHeight; - /** The output frame rate (fps) of the local video renderer. - */ + /** + * The output frame rate (fps) of the local video renderer. + */ int rendererOutputFrameRate; - /** The target bitrate (Kbps) of the current encoder. This is an estimate made by the SDK based on the current network conditions. - */ + /** + * The target bitrate (Kbps) of the current encoder. This is an estimate made by the SDK based on + * the current network conditions. + */ int targetBitrate; - /** The target frame rate (fps) of the current encoder. - */ + /** + * The target frame rate (fps) of the current encoder. 
+ */ int targetFrameRate; - /** Quality adaption of the local video stream in the reported interval (based on the target frame - * rate and target bitrate). See #QUALITY_ADAPT_INDICATION. - */ + /** + * The quality adaptation of the local video stream in the reported interval (based on the target + * frame rate and target bitrate). See `QUALITY_ADAPT_INDICATION`. + */ QUALITY_ADAPT_INDICATION qualityAdaptIndication; - /** The bitrate (Kbps) while encoding the local video stream. - * @note This value does not include the bitrate for resending the video after packet loss. - */ + /** + * The bitrate (Kbps) while encoding the local video stream. + * @note This value does not include the bitrate for resending the video after packet loss. + */ int encodedBitrate; - /** The number of the sent video frames, represented by an aggregate value. - */ + /** + * The number of the sent video frames, represented by an aggregate value. + */ int encodedFrameCount; - /** The codec type of the local video. See #VIDEO_CODEC_TYPE. - */ + /** + * The codec type of the local video. See `VIDEO_CODEC_TYPE`. + */ VIDEO_CODEC_TYPE codecType; /** - * The video packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies. - */ + * The video packet loss rate (%) from the local client to the Agora server before applying the + * anti-packet loss strategies. + */ unsigned short txPacketLossRate; - /** The brightness level of the video image captured by the local camera. See #CAPTURE_BRIGHTNESS_LEVEL_TYPE. - */ + /** + * The brightness level of the video image captured by the local camera. See + * `CAPTURE_BRIGHTNESS_LEVEL_TYPE`. + */ CAPTURE_BRIGHTNESS_LEVEL_TYPE captureBrightnessLevel; /** * Whether we send dual stream now. */ bool dualStreamEnabled; - /** The hwEncoderAccelerating of the local video: - * - software = 0. - * - hardware = 1. - */ + /** + * The local video encoding acceleration type. 
+ * - 0: Software encoding is applied without acceleration. + * - 1: Hardware encoding is applied for acceleration. + */ int hwEncoderAccelerating; /** The dimensions of the simulcast streams's encoding frame. */ VideoDimensions simulcastDimensions[SimulcastConfig::STREAM_LAYER_COUNT_MAX]; + /** + * @technical preview + * The encodedFrameDepth of the local video: + * - SDR = 8. + * - HDR = 10. + */ + int encodedFrameDepth; }; /** - * Audio statistics of the remote user. + * @brief Audio statistics of the remote user. */ struct RemoteAudioStats { /** - * User ID of the remote user sending the audio stream. + * The user ID of the remote user. */ uid_t uid; /** - * The quality of the remote audio: #QUALITY_TYPE. + * The quality of the audio stream sent by the user. See `QUALITY_TYPE`. */ int quality; /** @@ -375,55 +446,52 @@ struct RemoteAudioStats */ int networkTransportDelay; /** - * The network delay (ms) from the receiver to the jitter buffer. - * @note When the receiving end is an audience member and `audienceLatencyLevel` of `ClientRoleOptions` - * is 1, this parameter does not take effect. + * The network delay (ms) from the audio receiver to the jitter buffer. + * @note When the receiving end is an audience member and `audienceLatencyLevel` of + * `ClientRoleOptions` is 1, this parameter does not take effect. */ int jitterBufferDelay; /** - * The audio frame loss rate in the reported interval. + * The frame loss rate (%) of the remote audio stream in the reported interval. */ int audioLossRate; /** - * The number of channels. + * The number of audio channels. */ int numChannels; /** - * The sample rate (Hz) of the remote audio stream in the reported interval. + * The sampling rate of the received audio stream in the reported interval. */ int receivedSampleRate; /** - * The average bitrate (Kbps) of the remote audio stream in the reported - * interval. + * The average bitrate (Kbps) of the received audio stream in the reported interval. 
*/ int receivedBitrate; /** - * The total freeze time (ms) of the remote audio stream after the remote - * user joins the channel. - * - * In a session, audio freeze occurs when the audio frame loss rate reaches 4%. + * The total freeze time (ms) of the remote audio stream after the remote user joins the channel. In + * a session, audio freeze occurs when the audio frame loss rate reaches 4%. */ int totalFrozenTime; /** - * The total audio freeze time as a percentage (%) of the total time when the - * audio is available. + * The total audio freeze time as a percentage (%) of the total time when the audio is available. + * The audio is considered available when the remote user neither stops sending the audio stream nor + * disables the audio module after joining the channel. */ int frozenRate; /** - * The quality of the remote audio stream as determined by the Agora - * real-time audio MOS (Mean Opinion Score) measurement method in the - * reported interval. The return value ranges from 0 to 500. Dividing the - * return value by 100 gets the MOS score, which ranges from 0 to 5. The - * higher the score, the better the audio quality. - * - * | MOS score | Perception of audio quality | - * |-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| - * | Greater than 4 | Excellent. The audio sounds clear and smooth. | - * | From 3.5 to 4 | Good. The audio has some perceptible impairment, but still sounds clear. | - * | From 3 to 3.5 | Fair. The audio freezes occasionally and requires attentive listening. | - * | From 2.5 to 3 | Poor. The audio sounds choppy and requires considerable effort to understand. | - * | From 2 to 2.5 | Bad. The audio has occasional noise. Consecutive audio dropouts occur, resulting in some information loss. The users can communicate only with difficulty. | - * | Less than 2 | Very bad. The audio has persistent noise. 
Consecutive audio dropouts are frequent, resulting in severe information loss. Communication is nearly impossible. | + * The quality of the remote audio stream in the reported interval. The quality is determined by the + * Agora real-time audio MOS (Mean Opinion Score) measurement method. The return value range is [0, + * 500]. Dividing the return value by 100 gets the MOS score, which ranges from 0 to 5. The higher + * the score, the better the audio quality. + * The subjective perception of audio quality corresponding to the Agora real-time audio MOS scores is as follows: + * | MOS score | Perception of audio quality | + * | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | + * | Greater than 4 | Excellent. The audio sounds clear and smooth. | + * | From 3.5 to 4 | Good. The audio has some perceptible impairment but still sounds clear. | + * | From 3 to 3.5 | Fair. The audio freezes occasionally and requires attentive listening. | + * | From 2.5 to 3 | Poor. The audio sounds choppy and requires considerable effort to understand. | + * | From 2 to 2.5 | Bad. The audio has occasional noise. Consecutive audio dropouts occur, resulting in some information loss. The users can communicate only with difficulty. | + * | Less than 2 | Very bad. The audio has persistent noise. Consecutive audio dropouts are frequent, resulting in severe information loss. Communication is nearly impossible. | */ int mosValue; /** @@ -437,20 +505,35 @@ struct RemoteAudioStats uint32_t plcCount; /** - * The total time (ms) when the remote user neither stops sending the audio - * stream nor disables the audio module after joining the channel. + * @technical preview + * The number of times the remote audio stream has experienced freezing. 
+ */ + uint32_t frozenCntByCustom; + + /** + * @technical preview + * The total duration (ms) that the remote audio stream has been in a frozen state. + */ + uint32_t frozenTimeByCustom; + + /** + * The total active time (ms) between the start of the audio call and the callback of the remote + * user. + * The active time refers to the total duration of the remote user without the mute state. */ int totalActiveTime; /** - * The total publish duration (ms) of the remote audio stream. + * The total duration (ms) of the remote audio stream. */ int publishDuration; /** - * Quality of experience (QoE) of the local user when receiving a remote audio stream. See #EXPERIENCE_QUALITY_TYPE. + * The Quality of Experience (QoE) of the local user when receiving a remote audio stream. See + * `EXPERIENCE_QUALITY_TYPE`. */ int qoeQuality; /** - * The reason for poor QoE of the local user when receiving a remote audio stream. See #EXPERIENCE_POOR_REASON. + * Reasons why the QoE of the local user when receiving a remote audio stream is poor. See + * `EXPERIENCE_POOR_REASON`. */ int qualityChangedReason; /** @@ -458,7 +541,8 @@ struct RemoteAudioStats */ unsigned int rxAudioBytes; /** - * The end-to-end delay (ms) from the sender to the receiver. + * End-to-end audio delay (in milliseconds), which refers to the time from when the audio is + * captured by the remote user to when it is played by the local user. */ int e2eDelay; @@ -476,6 +560,8 @@ struct RemoteAudioStats mosValue(0), frozenRateByCustomPlcCount(0), plcCount(0), + frozenCntByCustom(0), + frozenTimeByCustom(0), totalActiveTime(0), publishDuration(0), qoeQuality(0), @@ -485,84 +571,92 @@ struct RemoteAudioStats }; /** - * The statistics of the remote video stream. + * @brief Statistics of the remote video stream. */ struct RemoteVideoStats { /** - * ID of the remote user sending the video stream. + * The user ID of the remote user sending the video stream. 
*/ uid_t uid; /** + * Deprecated: + * In scenarios where audio and video are synchronized, you can get the video delay data from + * `networkTransportDelay` and `jitterBufferDelay` in `RemoteAudioStats`. + * The video delay (ms). * @deprecated Time delay (ms). - * - * In scenarios where audio and video is synchronized, you can use the - * value of `networkTransportDelay` and `jitterBufferDelay` in `RemoteAudioStats` - * to know the delay statistics of the remote video. */ int delay __deprecated; /** - * End-to-end delay from video capturer to video renderer. Hardware capture or render delay is excluded. + * End-to-end video latency (ms). That is, the time elapsed from the video capturing on the remote + * user's end to the receiving and rendering of the video on the local user's end. */ int e2eDelay; /** - * The width (pixels) of the video stream. + * The width (pixels) of the video. */ int width; /** - * The height (pixels) of the video stream. + * The height (pixels) of the video. */ int height; /** - * Bitrate (Kbps) received since the last count. + * The bitrate (Kbps) of the remote video received since the last count. */ int receivedBitrate; /** The decoder input frame rate (fps) of the remote video. */ int decoderInputFrameRate; - /** The decoder output frame rate (fps) of the remote video. + /** + * The frame rate (fps) of decoding the remote video. */ int decoderOutputFrameRate; - /** The render output frame rate (fps) of the remote video. + /** + * The frame rate (fps) of rendering the remote video. */ int rendererOutputFrameRate; - /** The video frame loss rate (%) of the remote video stream in the reported interval. + /** + * The packet loss rate (%) of the remote video. */ int frameLossRate; - /** Packet loss rate (%) of the remote video stream after using the anti-packet-loss method. + /** + * The packet loss rate (%) of the remote video after using the anti-packet-loss technology. 
*/ int packetLossRate; /** - * The type of the remote video stream: #VIDEO_STREAM_TYPE. + * The type of the video stream. See `VIDEO_STREAM_TYPE`. */ VIDEO_STREAM_TYPE rxStreamType; /** - The total freeze time (ms) of the remote video stream after the remote user joins the channel. - In a video session where the frame rate is set to no less than 5 fps, video freeze occurs when - the time interval between two adjacent renderable video frames is more than 500 ms. - */ + * The total freeze time (ms) of the remote video stream after the remote user joins the channel. In + * a video session where the frame rate is set to no less than 5 fps, video freeze occurs when the + * time interval between two adjacent renderable video frames is more than 500 ms. + */ int totalFrozenTime; /** - The total video freeze time as a percentage (%) of the total time when the video is available. + * The total video freeze time as a percentage (%) of the total time the video is available. The + * video is considered available as long as that the remote user neither stops sending the video + * stream nor disables the video module after joining the channel. */ int frozenRate; /** - The offset (ms) between audio and video stream. A positive value indicates the audio leads the - video, and a negative value indicates the audio lags the video. + * The amount of time (ms) that the audio is ahead of the video. + * @note If this value is negative, the audio is lagging behind the video. */ int avSyncTimeMs; /** - * The total time (ms) when the remote user neither stops sending the audio - * stream nor disables the audio module after joining the channel. + * The total active time (ms) of the video. + * As long as the remote user or host neither stops sending the video stream nor disables the video + * module after joining the channel, the video is available. */ int totalActiveTime; /** - * The total publish duration (ms) of the remote audio stream. 
+ * The total duration (ms) of the remote video stream. */ int publishDuration; /** - * The quality of the remote video stream in the reported interval. - * The quality is determined by the Agora real-time video MOS (Mean Opinion Score) measurement method. - * The return value range is [0, 500]. + * The quality of the remote video stream in the reported interval. + * The quality is determined by the Agora real-time video MOS (Mean Opinion Score) measurement method. + * The return value range is [0, 500]. * Dividing the return value by 100 gets the MOS score, which ranges from 0 to 5. The higher the score, the better the video quality. * @note For textured video data, this parameter always returns 0. */ @@ -698,14 +792,19 @@ struct InjectStreamConfig { audioChannels(1) {} }; -/** The video stream lifecycle of CDN Live. +/** + * @brief Lifecycle of the CDN live video stream. */ enum RTMP_STREAM_LIFE_CYCLE_TYPE { - /** Bound to the channel lifecycle. - */ + /** + * Bind to the channel lifecycle. If all hosts leave the channel, the CDN live streaming stops after + * 30 seconds. + */ RTMP_STREAM_LIFE_CYCLE_BIND2CHANNEL = 1, - /** Bound to the owner identity of the RTMP stream. - */ + /** + * Bind to the owner of the RTMP stream. If the owner leaves the channel, the CDN live streaming + * stops immediately. + */ RTMP_STREAM_LIFE_CYCLE_BIND2OWNER = 2, }; @@ -780,69 +879,116 @@ struct PublisherConfiguration { }; /** - * The camera direction. + * @brief The camera direction. */ enum CAMERA_DIRECTION { - /** The rear camera. */ + /** + * 0: The rear camera. + */ CAMERA_REAR = 0, - /** The front camera. */ + /** + * 1: (Default) The front camera. + */ CAMERA_FRONT = 1, }; -/** The cloud proxy type. +/** + * @brief The cloud proxy type. * * @since v3.3.0 */ enum CLOUD_PROXY_TYPE { - /** 0: Do not use the cloud proxy. + /** + * 0: The automatic mode. The SDK has this mode enabled by default. 
In this mode, the SDK attempts a + * direct connection to SD-RTN™ and automatically switches to TCP/TLS 443 if the attempt fails. */ NONE_PROXY = 0, - /** 1: The cloud proxy for the UDP protocol. + /** + * 1: The cloud proxy for the UDP protocol, that is, the Force UDP cloud proxy mode. In this mode, + * the SDK always transmits data over UDP. */ UDP_PROXY = 1, /// @cond - /** 2: The cloud proxy for the TCP (encrypted) protocol. + /** + * 2: The cloud proxy for the TCP (encryption) protocol, that is, the Force TCP cloud proxy mode. In + * this mode, the SDK always transmits data over TCP/TLS 443. */ TCP_PROXY = 2, /// @endcond }; -/** Camera capturer configuration.*/ +/** + * @brief The camera capturer preference. + */ struct CameraCapturerConfiguration { /** Camera direction settings (for Android/iOS only). See: #CAMERA_DIRECTION. */ -#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) +#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__) /** - * The camera direction. + * (Optional) The camera direction. See `CAMERA_DIRECTION`. + * @note This parameter is for Android and iOS only. */ Optional cameraDirection; - /*- CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_DEFAULT: - For iOS, if iPhone/iPad has 3 or 2 back camera, it means combination of triple (wide + ultra wide + telephoto) camera - or dual wide(wide + ultra wide) camera.In this situation, you can apply for ultra wide len by set smaller zoom fator - and bigger zoom fator for telephoto len.Otherwise, it always means wide back/front camera. - - - CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_WIDE_ANGLE:wide camera - - CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_ULTRA_WIDE:ultra wide camera - - CAMERA_FOCAL_LENGTH_TYPE.CAMERA_FOCAL_LENGTH_TELEPHOTO:telephoto camera*/ + /** + * (Optional) The camera focal length type. See `CAMERA_FOCAL_LENGTH_TYPE`. + * @note + * - This parameter is for Android and iOS only. 
+ * - To set the focal length type of the camera, it is only supported to specify the camera through + * `cameraDirection`, and not supported to specify it through `cameraId`. + * - For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera + * (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto), + * you can use one of the following methods to capture video with an ultra-wide-angle perspective: + * - Method one: Set this parameter to `CAMERA_FOCAL_LENGTH_ULTRA_WIDE` (2) (ultra-wide lens). + * - Method two: Set this parameter to `CAMERA_FOCAL_LENGTH_DEFAULT` (0) (standard lens), then + * call `setCameraZoomFactor` to set the camera's zoom factor to a value less than 1.0, with the + * minimum setting being 0.5. + * The difference is that the size of the ultra-wide angle in method one is not adjustable, whereas + * method two supports adjusting the camera's zoom factor freely. + */ Optional cameraFocalLengthType; #else - /** For windows. The device ID of the playback device. */ + /** + * The camera ID. The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`. + * @note This parameter is for Windows and macOS only. + */ Optional deviceId; #endif #if defined(__ANDROID__) /** - * The camera id. + * (Optional) The camera ID. The default value is the camera ID of the front camera. You can get the + * camera ID through the Android native system API, see `Camera.open()` and + * `CameraManager.getCameraIdList()` for details. + * @note + * - This parameter is for Android only. + * - This parameter and `cameraDirection` are mutually exclusive in specifying the camera; you can + * choose one based on your needs. The differences are as follows: + * - Specifying the camera via `cameraDirection` is more straightforward. 
You only need to + * indicate the camera direction (front or rear), without specifying a specific camera ID; the SDK + * will retrieve and confirm the actual camera ID through Android native system APIs. + * - Specifying via `cameraId` allows for more precise identification of a particular camera. For + * devices with multiple cameras, where `cameraDirection` cannot recognize or access all available + * cameras, it is recommended to use `cameraId` to specify the desired camera ID directly. */ Optional cameraId; #endif + /** + * (Optional) Whether to follow the video aspect ratio set in `setVideoEncoderConfiguration`: + * - `true`: (Default) Follow the set video aspect ratio. The SDK crops the captured video according + * to the set video aspect ratio and synchronously changes the local preview screen and the video + * frame in `onCaptureVideoFrame` and `onPreEncodeVideoFrame`. + * - `false`: Do not follow the set video aspect ratio. The SDK does not change the + * aspect ratio of the captured video frame. + */ Optional followEncodeDimensionRatio; - /** The video format. See VideoFormat. */ + /** + * (Optional) The format of the video frame. See `VideoFormat`. + */ VideoFormat format; CameraCapturerConfiguration() : format(VideoFormat(0, 0, 0)) {} }; /** - * The configuration of the captured screen. + * @brief The configuration of the captured screen. */ struct ScreenCaptureConfiguration { /** @@ -853,35 +999,35 @@ struct ScreenCaptureConfiguration { bool isCaptureWindow; // true - capture window, false - capture display /** * (macOS only) The display ID of the screen. + * @note This parameter takes effect only when you want to capture the screen on macOS. */ - uint32_t displayId; + int64_t displayId; /** * (Windows only) The relative position of the shared screen to the virtual screen. * @note This parameter takes effect only when you want to capture the screen on Windows. 
*/ Rectangle screenRect; //Windows only /** - * (For Windows and macOS only) The window ID. + * (For Windows and macOS only) Window ID. * @note This parameter takes effect only when you want to capture the window. */ - view_t windowId; + int64_t windowId; /** - * (For Windows and macOS only) The screen capture configuration. For details, see ScreenCaptureParameters. + * (For Windows and macOS only) The screen capture configuration. See `ScreenCaptureParameters`. */ ScreenCaptureParameters params; /** - * (For Windows and macOS only) The relative position of the shared region to the whole screen. For details, see Rectangle. - * - * If you do not set this parameter, the SDK shares the whole screen. If the region you set exceeds the boundary of the - * screen, only the region within in the screen is shared. If you set width or height in Rectangle as 0, the whole - * screen is shared. + * (For Windows and macOS only) The relative position of the shared region to the whole screen. See + * `Rectangle`. If you do not set this parameter, the SDK shares the whole screen. If the region you + * set exceeds the boundary of the screen, only the region within the screen is shared. If you + * set width or height in `Rectangle` as 0, the whole screen is shared. */ Rectangle regionRect; ScreenCaptureConfiguration() : isCaptureWindow(false), displayId(0), windowId(0) {} }; -#if (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) +#if (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE)|| (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__)) /** The size of the screen shot to the screen or window. */ struct SIZE { @@ -897,15 +1043,17 @@ struct SIZE { }; #endif -#if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) +#if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) || (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__)) /** - * The image content of the thumbnail or icon. 
- * @note The default image is in the RGBA format. If you need to use another format, you need to convert the image on - * your own. + * @brief The image content of the thumbnail or icon. Set in `ScreenCaptureSourceInfo`. + * + * @note The default image is in the ARGB format. If you need to use another format, you need to + * convert the image on your own. + * */ struct ThumbImageBuffer { /** - * The buffer of the thumbnail ot icon. + * The buffer of the thumbnail or icon. */ const char* buffer; /** @@ -923,38 +1071,49 @@ struct ThumbImageBuffer { ThumbImageBuffer() : buffer(nullptr), length(0), width(0), height(0) {} }; /** - * The type of the shared target. Set in ScreenCaptureSourceInfo. + * @brief The type of the shared target. Set in `ScreenCaptureSourceInfo`. */ enum ScreenCaptureSourceType { - /** -1: Unknown type. */ + /** + * -1: Unknown type. + */ ScreenCaptureSourceType_Unknown = -1, - /** 0: The shared target is a window.*/ + /** + * 0: The shared target is a window. + */ ScreenCaptureSourceType_Window = 0, - /** 1: The shared target is a screen of a particular monitor.*/ + /** + * 1: The shared target is a screen of a particular monitor. + */ ScreenCaptureSourceType_Screen = 1, - /** 2: Reserved parameter.*/ + /** + * 2: Reserved parameter. + */ ScreenCaptureSourceType_Custom = 2, }; -/** The information about the specified shareable window or screen. It is returned in IScreenCaptureSourceList. */ +/** + * @brief The information about the specified shareable window or screen. The information is + * returned in `IScreenCaptureSourceList`. + */ struct ScreenCaptureSourceInfo { /** - * The type of the shared target. See \ref agora::rtc::ScreenCaptureSourceType "ScreenCaptureSourceType". + * The type of the shared target. See `ScreenCaptureSourceType`. */ ScreenCaptureSourceType type; /** * The window ID for a window or the display ID for a screen. */ - view_t sourceId; + int64_t sourceId; /** * The name of the window or screen. UTF-8 encoding. 
*/ const char* sourceName; /** - * The image content of the thumbnail. See ThumbImageBuffer. + * The image content of the thumbnail. See `ThumbImageBuffer`. */ ThumbImageBuffer thumbImage; /** - * The image content of the icon. See ThumbImageBuffer. + * The image content of the icon. See `ThumbImageBuffer`. */ ThumbImageBuffer iconImage; /** @@ -967,31 +1126,34 @@ struct ScreenCaptureSourceInfo { const char* sourceTitle; /** * Determines whether the screen is the primary display: - * - true: The screen is the primary display. - * - false: The screen is not the primary display. + * - `true`: The screen is the primary display. + * - `false`: The screen is not the primary display. */ bool primaryMonitor; bool isOccluded; /** - * The relative position of the shared region to the screen space (A virtual space include all the screens). See Rectangle. + * The position of a window relative to the entire screen space (including all shareable screens). + * See `Rectangle`. */ Rectangle position; #if defined(_WIN32) /** - * Determines whether the window is minimized. + * (For Windows only) Whether the window is minimized: + * - `true`: The window is minimized. + * - `false`: The window is not minimized. */ bool minimizeWindow; /** - * The display ID to the window of interest. - * If the window intersects one or more display monitor rectangles, the return value is an valid - * ID to the display monitor that has the largest area of intersection with the window, Otherwise - * the return value is -2. + * (For Windows only) Screen ID where the window is located. If the window is displayed across + * multiple screens, this parameter indicates the ID of the screen with which the window has the + * largest intersection area. If the window is located outside of the visible screens, the value of + * this member is -2. 
*/ - view_t sourceDisplayId; - ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(nullptr), sourceName(nullptr), - processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false), minimizeWindow(false), sourceDisplayId((view_t)-2) {} + int64_t sourceDisplayId; + ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(0), sourceName(nullptr), + processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false), minimizeWindow(false), sourceDisplayId(-2) {} #else - ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(nullptr), sourceName(nullptr), processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false) {} + ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(0), sourceName(nullptr), processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false) {} #endif }; /** @@ -1003,226 +1165,275 @@ class IScreenCaptureSourceList { public: /** - * Gets the number of shareable cpp and screens. + * @brief Gets the number of shareable windows and screens. * - * @return The number of shareable cpp and screens. + * @note This method applies to macOS and Windows only. + * + * @return + * The number of shareable windows and screens. */ virtual unsigned int getCount() = 0; /** - * Gets information about the specified shareable window or screen. + * @brief Gets information about the specified shareable window or screen. * - * After you get IScreenCaptureSourceList, you can pass in the index value of the specified shareable window or - * screen to get information about that window or screen from ScreenCaptureSourceInfo. + * @details + * After you get `IScreenCaptureSourceList`, you can pass in the index value of the specified + * shareable window or screen to get information about that window or screen from + * `ScreenCaptureSourceInfo`. * - * @param index The index of the specified shareable window or screen. 
The value range is [0, getCount()). - * @return ScreenCaptureSourceInfo The information of the specified window or screen. + * @note This method applies to macOS and Windows only. + * + * @param index The index of the specified shareable window or screen. The value range is [0, + * `getCount` `()`). + * + * @return + * `ScreenCaptureSourceInfo` */ virtual ScreenCaptureSourceInfo getSourceInfo(unsigned int index) = 0; /** - * Releases IScreenCaptureSourceList. + * @brief Releases `IScreenCaptureSourceList`. + * + * @details + * After you get the list of shareable windows and screens, to avoid memory leaks, call this method + * to release `IScreenCaptureSourceList` instead of deleting `IScreenCaptureSourceList` directly. + * + * @note This method applies to macOS and Windows only. * - * After you get the list of shareable cpp and screens, to avoid memory leaks, call this method to release - * IScreenCaptureSourceList instead of deleting IScreenCaptureSourceList directly. */ virtual void release() = 0; }; #endif // _WIN32 || (__APPLE__ && !TARGET_OS_IPHONE && TARGET_OS_MAC) /** - * The advanced options for audio. + * @brief The advanced options for audio. */ struct AdvancedAudioOptions { - /** - * Audio processing channels, only support 1 or 2. - */ + /** + * The number of channels for audio preprocessing: + * - 1: Mono. + * - 2: Stereo. + */ Optional audioProcessingChannels; AdvancedAudioOptions() {} ~AdvancedAudioOptions() {} }; +/** + * @brief Image configurations. + */ struct ImageTrackOptions { + /** + * The image URL. Supported formats of images include JPEG, JPG, PNG and GIF. This method supports + * adding an image from the local absolute or relative file path. + * @note On the Android platform, adding images from `/assets/` is not supported. + */ const char* imageUrl; + /** + * The frame rate of the video streams being published. The value range is [1,30]. The default value + * is 1. 
+ */ int fps; VIDEO_MIRROR_MODE_TYPE mirrorMode; ImageTrackOptions() : imageUrl(NULL), fps(1), mirrorMode(VIDEO_MIRROR_MODE_DISABLED) {} }; /** - * The channel media options. + * @brief The channel media options. + * + * @details + * Agora supports publishing multiple audio streams and one video stream at the same time and in the + * same `RtcConnection`. For example, `publishMicrophoneTrack`, `publishCustomAudioTrack`, and + * `publishMediaPlayerAudioTrack` can be set as `true` at the same time, but only one of + * `publishCameraTrack`, `publishScreenCaptureVideo`, `publishScreenTrack`, + * `publishCustomVideoTrack`, or `publishEncodedVideoTrack` can be set as `true`. + * + * @note Agora recommends that you set member parameter values yourself according to your business + * scenario, otherwise the SDK will automatically assign values to member parameters. * - * Agora supports publishing multiple audio streams and one video stream at the same time and in the same RtcConnection. - * For example, `publishAudioTrack`, `publishCustomAudioTrack` and `publishMediaPlayerAudioTrack` can be true at the same time; - * but only one of `publishCameraTrack`, `publishScreenTrack`, `publishCustomVideoTrack`, and `publishEncodedVideoTrack` can be - * true at the same time. */ struct ChannelMediaOptions { /** - * Whether to publish the video of the camera track. - * - `true`: (Default) Publish the video track of the camera capturer. - * - `false`: Do not publish the video track of the camera capturer. + * Whether to publish the video captured by the camera: + * - `true`: Publish the video captured by the camera. + * - `false`: Do not publish the video captured by the camera. */ Optional publishCameraTrack; /** - * Whether to publish the video of the secondary camera track. - * - `true`: Publish the video track of the secondary camera capturer. - * - `false`: (Default) Do not publish the video track of the secondary camera capturer. 
+ * Whether to publish the video captured by the second camera: + * - `true`: Publish the video captured by the second camera. + * - `false`: Do not publish the video captured by the second camera. */ Optional publishSecondaryCameraTrack; - /** - * Whether to publish the video of the third camera track. - * - `true`: Publish the video track of the third camera capturer. - * - `false`: (Default) Do not publish the video track of the third camera capturer. + /** + * Whether to publish the video captured by the third camera: + * - `true`: Publish the video captured by the third camera. + * - `false`: Do not publish the video captured by the third camera. + * @note This parameter is for Android, Windows and macOS only. */ Optional publishThirdCameraTrack; /** - * Whether to publish the video of the fourth camera track. - * - `true`: Publish the video track of the fourth camera capturer. - * - `false`: (Default) Do not publish the video track of the fourth camera capturer. + * Whether to publish the video captured by the fourth camera: + * - `true`: Publish the video captured by the fourth camera. + * - `false`: Do not publish the video captured by the fourth camera. + * @note This parameter is for Android, Windows and macOS only. */ Optional publishFourthCameraTrack; /** - * Whether to publish the recorded audio. - * - `true`: (Default) Publish the recorded audio. - * - `false`: Do not publish the recorded audio. + * Whether to publish the audio captured by the microphone: + * - `true`: Publish the audio captured by the microphone. + * - `false`: Do not publish the audio captured by the microphone. */ Optional publishMicrophoneTrack; - - #if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) - /** - * Whether to publish the video track of the screen capturer: - * - `true`: Publish the video track of the screen capture. - * - `false`: (Default) Do not publish the video track of the screen capture. 
- */ - Optional publishScreenCaptureVideo; + #if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__) /** * Whether to publish the audio track of the screen capturer: * - `true`: Publish the video audio of the screen capturer. * - `false`: (Default) Do not publish the audio track of the screen capturer. */ - Optional publishScreenCaptureAudio; + Optional publishScreenCaptureAudio; + #endif + #if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__) + /** + * Whether to publish the video captured from the screen: + * - `true`: Publish the video captured from the screen. + * - `false`: Do not publish the video captured from the screen. + * @note This parameter is for Android and iOS only. + */ + Optional publishScreenCaptureVideo; + #else /** - * Whether to publish the captured video from the screen: - * - `true`: PPublish the captured video from the screen. - * - `false`: (Default) Do not publish the captured video from the screen. + * Whether to publish the video captured from the screen: + * - `true`: Publish the video captured from the screen. + * - `false`: Do not publish the video captured from the screen. + * @note This is for Windows and macOS only. */ Optional publishScreenTrack; /** - * Whether to publish the captured video from the secondary screen: - * - true: Publish the captured video from the secondary screen. - * - false: (Default) Do not publish the captured video from the secondary screen. + * Whether to publish the video captured from the second screen: + * - `true`: Publish the video captured from the second screen. + * - `false`: Do not publish the video captured from the second screen. */ Optional publishSecondaryScreenTrack; - /** - * Whether to publish the captured video from the third screen: - * - true: Publish the captured video from the third screen. - * - false: (Default) Do not publish the captured video from the third screen. 
+ /** + * Whether to publish the video captured from the third screen: + * - `true`: Publish the captured video from the third screen. + * - `false`: Do not publish the video captured from the third screen. + * @note This is for Windows and macOS only. */ Optional publishThirdScreenTrack; /** - * Whether to publish the captured video from the fourth screen: - * - true: Publish the captured video from the fourth screen. - * - false: (Default) Do not publish the captured video from the fourth screen. + * Whether to publish the video captured from the fourth screen: + * - `true`: Publish the captured video from the fourth screen. + * - `false`: Do not publish the video captured from the fourth screen. + * @note This is for Windows and macOS only. */ Optional publishFourthScreenTrack; #endif /** - * Whether to publish the captured audio from a custom source: - * - true: Publish the captured audio from a custom source. - * - false: (Default) Do not publish the captured audio from the custom source. + * Whether to publish the audio captured from a custom source: + * - `true`: Publish the audio captured from the custom source. + * - `false`: Do not publish the captured audio from a custom source. */ Optional publishCustomAudioTrack; /** - * The custom audio track id. The default value is 0. + * The ID of the custom audio track to be published. The default value is 0. You can obtain the + * custom audio track ID through the `createCustomAudioTrack` method. */ Optional publishCustomAudioTrackId; /** - * Whether to publish the captured video from a custom source: - * - `true`: Publish the captured video from a custom source. - * - `false`: (Default) Do not publish the captured video from the custom source. + * Whether to publish the video captured from a custom source: + * - `true`: Publish the video captured from the custom source. + * - `false`: Do not publish the captured video from a custom source. 
*/ Optional publishCustomVideoTrack; /** * Whether to publish the encoded video: * - `true`: Publish the encoded video. - * - `false`: (Default) Do not publish the encoded video. + * - `false`: Do not publish the encoded video. */ Optional publishEncodedVideoTrack; /** - * Whether to publish the audio from the media player: - * - `true`: Publish the audio from the media player. - * - `false`: (Default) Do not publish the audio from the media player. - */ + * Whether to publish the audio from the media player: + * - `true`: Publish the audio from the media player. + * - `false`: Do not publish the audio from the media player. + */ Optional publishMediaPlayerAudioTrack; /** - * Whether to publish the video from the media player: - * - `true`: Publish the video from the media player. - * - `false`: (Default) Do not publish the video from the media player. - */ + * Whether to publish the video from the media player: + * - `true`: Publish the video from the media player. + * - `false`: Do not publish the video from the media player. + */ Optional publishMediaPlayerVideoTrack; /** - * Whether to publish the local transcoded video track. - * - `true`: Publish the video track of local transcoded video track. - * - `false`: (Default) Do not publish the local transcoded video track. - */ + * Whether to publish the local transcoded video: + * - `true`: Publish the local transcoded video. + * - `false`: Do not publish the local transcoded video. + * @note As of v4.2.0, the parameter name is corrected from `publishTrancodedVideoTrack` to + * `publishTranscodedVideoTrack`. + */ Optional publishTranscodedVideoTrack; - /** - * Whether to publish the local mixed track. - * - `true`: Publish the audio track of local mixed track. - * - `false`: (Default) Do not publish the local mixed track. - */ + /** + * Whether to publish the mixed audio track: + * - `true`: Publish the mixed audio track. + * - `false`: Do not publish the mixed audio track. 
+ */ Optional publishMixedAudioTrack; /** - * Whether to publish the local lip sync video track. - * - `true`: Publish the video track of local lip sync video track. - * - `false`: (Default) Do not publish the local lip sync video track. + * Whether to publish the video track processed by the speech driven extension: + * - `true`: Publish the video track processed by the speech driven extension. + * - `false`: (Default) Do not publish the video track processed by the speech driven extension. */ Optional publishLipSyncTrack; /** * Whether to automatically subscribe to all remote audio streams when the user joins a channel: - * - `true`: (Default) Subscribe to all remote audio streams. - * - `false`: Do not subscribe to any remote audio stream. + * - `true`: Subscribe to all remote audio streams. + * - `false`: Do not automatically subscribe to any remote audio streams. */ Optional autoSubscribeAudio; /** - * Whether to subscribe to all remote video streams when the user joins the channel: - * - `true`: (Default) Subscribe to all remote video streams. - * - `false`: Do not subscribe to any remote video stream. + * Whether to automatically subscribe to all remote video streams when the user joins the channel: + * - `true`: Subscribe to all remote video streams. + * - `false`: Do not automatically subscribe to any remote video streams. */ Optional autoSubscribeVideo; /** - * Whether to enable audio capturing or playback. - * - `true`: (Default) Enable audio capturing and playback. + * Whether to enable audio capturing or playback: + * - `true`: Enable audio capturing or playback. * - `false`: Do not enable audio capturing or playback. + * @note If you need to publish the audio streams captured by your microphone, ensure this parameter + * is set as `true`. */ Optional enableAudioRecordingOrPlayout; /** - * The ID of the media player to be published. The default value is 0. - */ + * The ID of the media player to be published. The default value is 0. 
+ */ Optional publishMediaPlayerId; /** - * The client role type. See \ref CLIENT_ROLE_TYPE. - * Default is CLIENT_ROLE_AUDIENCE. + * The user role. See `CLIENT_ROLE_TYPE`. + * @note If you set the user role as an audience member, you cannot publish audio and video streams + * in the channel. If you want to publish media streams in a channel during live streaming, ensure + * you set the user role as broadcaster. */ Optional clientRoleType; /** - * The audience latency level type. See #AUDIENCE_LATENCY_LEVEL_TYPE. + * The latency level of an audience member in interactive live streaming. See + * `AUDIENCE_LATENCY_LEVEL_TYPE`. */ Optional audienceLatencyLevel; /** - * The default video stream type. See \ref VIDEO_STREAM_TYPE. - * Default is VIDEO_STREAM_HIGH. + * The default video-stream type. See `VIDEO_STREAM_TYPE`. */ Optional defaultVideoStreamType; /** - * The channel profile. See \ref CHANNEL_PROFILE_TYPE. - * Default is CHANNEL_PROFILE_LIVE_BROADCASTING. + * The channel profile. See `CHANNEL_PROFILE_TYPE`. */ Optional channelProfile; /** - * The delay in ms for sending audio frames. This is used for explicit control of A/V sync. - * To switch off the delay, set the value to zero. + * Delay (in milliseconds) for sending audio frames. You can use this parameter to set the delay of + * the audio frames that need to be sent, to ensure audio and video synchronization. + * To switch off the delay, set the value to 0. */ Optional audioDelayMs; /** @@ -1231,12 +1442,14 @@ struct ChannelMediaOptions { */ Optional mediaPlayerAudioDelayMs; /** - * (Optional) The token generated on your server for authentication. + * (Optional) The token generated on your server for authentication. See . * @note - * - This parameter takes effect only when calling `updateChannelMediaOptions` or `updateChannelMediaOptionsEx`. 
- * - Ensure that the App ID, channel name, and user name used for creating the token are the same ones as those - * used by the initialize method for initializing the RTC engine, and those used by the `joinChannel [2/2]` - * and `joinChannelEx` methods for joining the channel. + * - This parameter takes effect only when calling `updateChannelMediaOptions` or + * `updateChannelMediaOptionsEx`. + * - Ensure that the App ID, channel name, and user name used for creating the token are the same as + * those used by the `initialize` method for initializing the RTC engine, and those used by the + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` and `joinChannelEx` methods for joining the channel. */ Optional token; /** @@ -1248,37 +1461,77 @@ struct ChannelMediaOptions { */ Optional enableBuiltInMediaEncryption; /** - * Whether to publish the sound of the rhythm player to remote users: - * - `true`: (Default) Publish the sound of the rhythm player. - * - `false`: Do not publish the sound of the rhythm player. + * Whether to publish the sound of a metronome to remote users: + * - `true`: Publish processed audio frames. Both the local user and remote users can hear the + * metronome. + * - `false`: Do not publish the sound of the metronome. Only the local user can hear the metronome. */ Optional publishRhythmPlayerTrack; /** - * Whether the user is an interactive audience member in the channel. - * - `true`: Enable low lentancy and smooth video when joining as an audience. - * - `false`: (Default) Use default settings for audience role. - * @note This mode is only used for audience. In PK mode, client might join one channel as broadcaster, and join - * another channel as interactive audience to achieve low lentancy and smooth video from remote user. + * Whether to enable interactive mode: + * - `true`: Enable interactive mode. 
Once this mode is enabled and the user role is set as + * audience, the user can receive remote video streams with low latency. + * - `false`: Do not enable interactive mode. If this mode is disabled, the user receives the remote + * video streams in default settings. + * @note + * - This parameter only applies to co-streaming scenarios. The cohosts need to call the + * `joinChannelEx` method to join the other host's channel as an audience member, and set + * `isInteractiveAudience` to `true`. + * - This parameter takes effect only when the user role is `CLIENT_ROLE_AUDIENCE`. */ Optional isInteractiveAudience; /** - * The custom video track id which will used to publish or preview. - * You can get the VideoTrackId after calling createCustomVideoTrack() of IRtcEngine. + * The video track ID returned by calling the `createCustomVideoTrack` method. The default value is + * 0. */ Optional customVideoTrackId; /** - * Whether local audio stream can be filtered. - * - `true`: (Default) Can be filtered when audio level is low. - * - `false`: Do not Filter this audio stream. + * Whether the audio stream being published is filtered according to the volume algorithm: + * - `true`: The audio stream is filtered. If the audio stream filter is not enabled, this setting + * does not take effect. + * - `false`: The audio stream is not filtered. + * @note If you need to enable this function, contact `support@agora.io`. */ Optional isAudioFilterable; /** Provides the technical preview functionalities or special customizations by configuring the SDK with JSON options. Pointer to the set parameters in a JSON string. - * @technical preview + * @technical preview */ Optional parameters; + /** + * Whether to enable multiple transmission paths: + * - `true`: Enable multiple transmission paths. + * - `false`: Disable multiple transmission paths. 
+ * @note Permissions and system requirements: Android: Android 7.0 or higher (API level 24 or + * higher), and the ACCESS_NETWORK_STATE and CHANGE_NETWORK_STATE permissions are required. iOS: iOS + * 12.0 or later. macOS: 10.14 or later. Windows: Windows Vista or higher. + * @since 4.6.0 + */ + Optional enableMultipath; + + /** + * Uplink transmission mode. See `MultipathMode`. + * @note When using this parameter, make sure that `enableMultipath` is set to `true`. + * @since 4.6.0 + */ + Optional uplinkMultipathMode; + + /** + * Downlink transmission mode. See `MultipathMode`. + * @note When using this parameter, make sure that `enableMultipath` is set to `true`. + * @since 4.6.0 + */ + Optional downlinkMultipathMode; + + /** + * Preferred type of transmission path. See `MultipathType`. + * @note When using this parameter, make sure that `enableMultipath` is set to `true`. + * @since 4.6.0 + */ + Optional preferMultipathType; + ChannelMediaOptions() {} ~ChannelMediaOptions() {} @@ -1288,11 +1541,13 @@ struct ChannelMediaOptions { SET_FROM(publishCameraTrack); SET_FROM(publishSecondaryCameraTrack); SET_FROM(publishThirdCameraTrack); - SET_FROM(publishFourthCameraTrack); + SET_FROM(publishFourthCameraTrack); SET_FROM(publishMicrophoneTrack); -#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) - SET_FROM(publishScreenCaptureVideo); +#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__) SET_FROM(publishScreenCaptureAudio); +#endif +#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__) + SET_FROM(publishScreenCaptureVideo); #else SET_FROM(publishScreenTrack); SET_FROM(publishSecondaryScreenTrack); @@ -1325,6 +1580,10 @@ struct ChannelMediaOptions { SET_FROM(isAudioFilterable); SET_FROM(isInteractiveAudience); SET_FROM(parameters); + SET_FROM(enableMultipath); + SET_FROM(uplinkMultipathMode); + SET_FROM(downlinkMultipathMode); + 
SET_FROM(preferMultipathType); #undef SET_FROM } @@ -1339,9 +1598,11 @@ struct ChannelMediaOptions { ADD_COMPARE(publishThirdCameraTrack); ADD_COMPARE(publishFourthCameraTrack); ADD_COMPARE(publishMicrophoneTrack); -#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) - ADD_COMPARE(publishScreenCaptureVideo); +#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__) ADD_COMPARE(publishScreenCaptureAudio); +#endif +#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__) + ADD_COMPARE(publishScreenCaptureVideo); #else ADD_COMPARE(publishScreenTrack); ADD_COMPARE(publishSecondaryScreenTrack); @@ -1374,6 +1635,10 @@ struct ChannelMediaOptions { ADD_COMPARE(isAudioFilterable); ADD_COMPARE(isInteractiveAudience); ADD_COMPARE(parameters); + ADD_COMPARE(enableMultipath); + ADD_COMPARE(uplinkMultipathMode); + ADD_COMPARE(downlinkMultipathMode); + ADD_COMPARE(preferMultipathType); END_COMPARE(); #undef BEGIN_COMPARE @@ -1391,9 +1656,11 @@ struct ChannelMediaOptions { REPLACE_BY(publishThirdCameraTrack); REPLACE_BY(publishFourthCameraTrack); REPLACE_BY(publishMicrophoneTrack); -#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) - REPLACE_BY(publishScreenCaptureVideo); +#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(TARGET_OS_MAC) || defined(__OHOS__) REPLACE_BY(publishScreenCaptureAudio); +#endif +#if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(__OHOS__) + REPLACE_BY(publishScreenCaptureVideo); #else REPLACE_BY(publishScreenTrack); REPLACE_BY(publishSecondaryScreenTrack); @@ -1426,26 +1693,41 @@ struct ChannelMediaOptions { REPLACE_BY(isAudioFilterable); REPLACE_BY(isInteractiveAudience); REPLACE_BY(parameters); + REPLACE_BY(enableMultipath); + REPLACE_BY(uplinkMultipathMode); + REPLACE_BY(downlinkMultipathMode); + 
REPLACE_BY(preferMultipathType); #undef REPLACE_BY } return *this; } }; +/** + * @brief The cloud proxy type. + */ enum PROXY_TYPE { - /** 0: Do not use the cloud proxy. + /** + * 0: Reserved for future use. */ NONE_PROXY_TYPE = 0, - /** 1: The cloud proxy for the UDP protocol. + /** + * 1: The cloud proxy for the UDP protocol, that is, the Force UDP cloud proxy mode. In this mode, + * the SDK always transmits data over UDP. */ UDP_PROXY_TYPE = 1, - /** 2: The cloud proxy for the TCP (encrypted) protocol. + /** + * 2: The cloud proxy for the TCP (encryption) protocol, that is, the Force TCP cloud proxy mode. In + * this mode, the SDK always transmits data over TCP/TLS 443. */ TCP_PROXY_TYPE = 2, - /** 3: The local proxy. + /** + * 3: Reserved for future use. */ LOCAL_PROXY_TYPE = 3, - /** 4: auto fallback to tcp cloud proxy + /** + * 4: Automatic mode. In this mode, the SDK attempts a direct connection to SD-RTN™ and + * automatically switches to TCP/TLS 443 if the attempt fails. */ TCP_PROXY_AUTO_FALLBACK_TYPE = 4, /** 5: The http proxy. @@ -1456,13 +1738,22 @@ enum PROXY_TYPE { HTTPS_PROXY_TYPE = 6, }; +/** + * @brief The type of the advanced feature. + */ enum FeatureType { + /** + * 1: Virtual background. + */ VIDEO_VIRTUAL_BACKGROUND = 1, + /** + * 2: Image enhancement. + */ VIDEO_BEAUTY_EFFECT = 2, }; /** - * The options for leaving a channel. + * @brief The options for leaving a channel. */ struct LeaveChannelOptions { /** @@ -1504,13 +1795,24 @@ class IRtcEngineEventHandler { virtual const char* eventHandlerType() const { return "event_handler"; } /** - * Occurs when a user joins a channel. + * @brief Occurs when a user joins a channel. * + * @details * This callback notifies the application that a user joins a specified channel. 
+ * Call timing: The SDK triggers this callback when you call `joinChannel(const char* token, const + * char* channelId, const char* info, uid_t uid)`, `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)` + * , `joinChannelWithUserAccount(const char* token, const char* channelId, const char* + * userAccount)`, `joinChannelWithUserAccount(const char* token, const char* channelId, const char* + * userAccount, const ChannelMediaOptions& options)` , `joinChannelEx` + * or `joinChannelWithUserAccountEx` to join a channel. * * @param channel The channel name. * @param uid The ID of the user who joins the channel. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel until the SDK triggers this callback. + * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token, + * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the + * SDK triggers this callback. + * */ virtual void onJoinChannelSuccess(const char* channel, uid_t uid, int elapsed) { (void)channel; @@ -1519,14 +1821,18 @@ class IRtcEngineEventHandler { } /** - * Occurs when a user rejoins the channel. + * @brief Occurs when a user rejoins the channel. * - * When a user loses connection with the server because of network problems, the SDK automatically tries to reconnect - * and triggers this callback upon reconnection. + * @details + * Call timing: When a user loses connection with the server because of network problems, the SDK + * automatically tries to reconnect and triggers this callback upon reconnection. * * @param channel The channel name. * @param uid The ID of the user who rejoins the channel. - * @param elapsed Time elapsed (ms) from the local user calling the joinChannel method until this callback is triggered. 
+   * @param elapsed Time elapsed (ms) from the local user calling `joinChannel(const char* token,
+   * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the SDK
+   * triggers this callback.
+   *
   */
  virtual void onRejoinChannelSuccess(const char* channel, uid_t uid, int elapsed) {
    (void)channel;
@@ -1534,12 +1840,24 @@
    (void)elapsed;
  }
- /** Occurs when join success after calling \ref IRtcEngine::setLocalAccessPoint "setLocalAccessPoint" or \ref IRtcEngine::setCloudProxy "setCloudProxy"
- @param channel Channel name.
- @param uid User ID of the user joining the channel.
- @param proxyType type of proxy agora sdk connected, proxyType will be NONE_PROXY_TYPE if not connected to proxy(fallback).
- @param localProxyIp local proxy ip. if not join local proxy, it will be "".
- @param elapsed Time elapsed (ms) from the user calling the \ref IRtcEngine::joinChannel "joinChannel" method until the SDK triggers this callback.
+ /**
+  * @brief Reports the proxy connection state.
+  *
+  * @details
+  * You can use this callback to listen for the state of the SDK connecting to a proxy. For example,
+  * when a user calls `setCloudProxy` and joins a channel successfully, the SDK triggers this
+  * callback to report the user ID, the proxy type connected, and the time elapsed from the user
+  * calling `joinChannel(const char* token, const char* channelId, uid_t uid, const
+  * ChannelMediaOptions& options)` until this callback is triggered.
+  *
+  * @param channel The channel name.
+  * @param uid The user ID.
+  * @param proxyType The proxy type connected. See `PROXY_TYPE`.
+  * @param localProxyIp Reserved for future use.
+  * @param elapsed The time elapsed (ms) from the user calling `joinChannel(const char* token, const
+  * char* channelId, uid_t uid, const ChannelMediaOptions& options)` until this
+  * callback is triggered.
+ * */ virtual void onProxyConnected(const char* channel, uid_t uid, PROXY_TYPE proxyType, const char* localProxyIp, int elapsed) { (void)channel; @@ -1549,31 +1867,41 @@ class IRtcEngineEventHandler { (void)elapsed; } - /** An error occurs during the SDK runtime. - - @param err The error code: #ERROR_CODE_TYPE. - @param msg The detailed error message. - */ + /** + * @brief Reports an error during SDK runtime. + * + * @details + * This callback indicates that an error (concerning network or media) occurs during SDK runtime. In + * most cases, the SDK cannot fix the issue and resume running. The SDK requires the app to take + * action or informs the user about the issue. + * + * @param err Error code. See `ERROR_CODE_TYPE`. + * @param msg The error message. + * + */ virtual void onError(int err, const char* msg) { (void)err; (void)msg; } - /** Reports the statistics of the audio stream from each remote - user/broadcaster. - - @deprecated This callback is deprecated. Use onRemoteAudioStats instead. - - The SDK triggers this callback once every two seconds to report the audio - quality of each remote user/host sending an audio stream. If a channel has - multiple remote users/hosts sending audio streams, the SDK triggers this - callback as many times. - - @param uid The user ID of the remote user sending the audio stream. - @param quality The audio quality of the user: #QUALITY_TYPE - @param delay The network delay (ms) from the sender to the receiver, including the delay caused by audio sampling pre-processing, network transmission, and network jitter buffering. - @param lost The audio packet loss rate (%) from the sender to the receiver. - */ + /** + * @brief Reports the statistics of the audio stream sent by each remote user. + * + * @deprecated This callback is deprecated. Use onRemoteAudioStats instead. + * + * @details + * The SDK triggers this callback once every two seconds to report the audio quality of each remote + * user who is sending an audio stream. 
If a channel has multiple users sending audio streams, the + * SDK triggers this callback as many times. + * + * @param uid The user ID of the remote user sending the audio stream. + * @param quality Audio quality of the user. See `QUALITY_TYPE`. + * @param delay The network delay (ms) from the sender to the receiver, including the delay caused + * by audio sampling pre-processing, network transmission, and network jitter buffering. + * @param lost The packet loss rate (%) of the audio packet sent from the remote user to the + * receiver. + * + */ virtual void onAudioQuality(uid_t uid, int quality, unsigned short delay, unsigned short lost) __deprecated { (void)uid; (void)quality; @@ -1581,40 +1909,55 @@ class IRtcEngineEventHandler { (void)lost; } - /** Reports the result of the last-mile network probe result. + /** + * @brief Reports the last mile network probe result. + * + * @details + * The SDK triggers this callback within 30 seconds after the app calls `startLastmileProbeTest`. + * + * @param result The uplink and downlink last-mile network probe test result. See + * `LastmileProbeResult`. * - * The SDK triggers this callback within 30 seconds after the app calls the `startLastmileProbeTest` method. - * @param result The uplink and downlink last-mile network probe test result: LastmileProbeResult. */ virtual void onLastmileProbeResult(const LastmileProbeResult& result) { (void)result; } /** - * Reports the volume information of users. + * @brief Reports the volume information of users. * - * By default, this callback is disabled. You can enable it by calling `enableAudioVolumeIndication`. Once this - * callback is enabled and users send streams in the channel, the SDK triggers the `onAudioVolumeIndication` - * callback at the time interval set in `enableAudioVolumeIndication`. 
The SDK triggers two independent - * `onAudioVolumeIndication` callbacks simultaneously, which separately report the volume information of the - * local user who sends a stream and the remote users (up to three) whose instantaneous volume is the highest. + * @details + * By default, this callback is disabled. You can enable it by calling + * `enableAudioVolumeIndication`. Once this callback is enabled and users send streams in the + * channel, the SDK triggers the `onAudioVolumeIndication` callback according to the time interval + * set in `enableAudioVolumeIndication`. The SDK triggers two independent `onAudioVolumeIndication` + * callbacks simultaneously, which separately report the volume information of the local user who + * sends a stream and the remote users (up to three) whose instantaneous volume is the highest. * - * @note After you enable this callback, calling muteLocalAudioStream affects the SDK's behavior as follows: - * - If the local user stops publishing the audio stream, the SDK stops triggering the local user's callback. - * - 20 seconds after a remote user whose volume is one of the three highest stops publishing the audio stream, - * the callback excludes this user's information; 20 seconds after all remote users stop publishing audio streams, - * the SDK stops triggering the callback for remote users. + * @note + * Once this callback is enabled, if the local user calls the `muteLocalAudioStream` method to mute, + * the SDK continues to report the volume indication of the local user. + * If a remote user whose volume is one of the three highest in the channel stops publishing the + * audio stream for 20 seconds, the callback excludes this user's information; if all remote users + * stop publishing audio streams for 20 seconds, the SDK stops triggering the callback for remote + * users. + * + * @param speakers The volume information of the users. See `AudioVolumeInfo`. 
An empty `speakers` + * array in the callback indicates that no remote user is in the channel or is sending a stream. + * @param speakerNumber The total number of users. + * - In the callback for the local user, if the local user is sending streams, the value of + * `speakerNumber` is 1. + * - In the callback for remote users, the value range of `speakerNumber` is [0,3]. If the number of + * remote users who send streams is greater than or equal to three, the value of `speakerNumber` is + * 3. + * @param totalVolume The volume of the speaker. The value range is [0,255]. + * - In the callback for the local user, `totalVolume` is the volume of the local user who sends a + * stream. + * - In the callback for remote users, `totalVolume` is the sum of the volume of all remote users + * (up to three) whose instantaneous volume is the highest. If the user calls + * `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)` + * , then `totalVolume` is the volume after audio mixing. * - * @param speakers The volume information of the users, see AudioVolumeInfo. An empty `speakers` array in the - * callback indicates that no remote user is in the channel or sending a stream at the moment. - * @param speakerNumber The total number of speakers. - * - In the local user's callback, when the local user sends a stream, `speakerNumber` is 1. - * - In the callback for remote users, the value range of speakerNumber is [0,3]. If the number of remote users who - * send streams is greater than or equal to three, the value of `speakerNumber` is 3. - * @param totalVolume The volume of the speaker. The value ranges between 0 (lowest volume) and 255 (highest volume). - * - In the local user's callback, `totalVolume` is the volume of the local user who sends a stream. - * - In the remote users' callback, `totalVolume` is the sum of all remote users (up to three) whose instantaneous - * volume is the highest. 
If the user calls `startAudioMixing`, `totalVolume` is the volume after audio mixing. */ virtual void onAudioVolumeIndication(const AudioVolumeInfo* speakers, unsigned int speakerNumber, int totalVolume) { @@ -1624,36 +1967,46 @@ class IRtcEngineEventHandler { } /** - * Occurs when a user leaves a channel. + * @brief Occurs when a user leaves a channel. + * + * @details + * You can obtain information such as the total duration of a call, and the data traffic that the + * SDK transmits and receives. + * Call timing: The SDK triggers this callback after you call `leaveChannel()`, `leaveChannel(const + * LeaveChannelOptions& options)` + * , `leaveChannelEx(const RtcConnection& connection)`, or `leaveChannelEx(const RtcConnection& + * connection, const LeaveChannelOptions& options)` to leave a channel. * - * This callback notifies the app that the user leaves the channel by calling `leaveChannel`. From this callback, - * the app can get information such as the call duration and quality statistics. + * @param stats Call statistics. See `RtcStats`. * - * @param stats The statistics on the call: RtcStats. */ virtual void onLeaveChannel(const RtcStats& stats) { (void)stats; } /** - * Reports the statistics of the current call. + * @brief Reports the statistics about the current call. * - * The SDK triggers this callback once every two seconds after the user joins the channel. + * @details + * Call timing: The SDK triggers this callback once every two seconds after the user joins the + * channel. + * + * @param stats Statistics of the RTC engine. See `RtcStats`. * - * @param stats The statistics of the current call: RtcStats. */ virtual void onRtcStats(const RtcStats& stats) { (void)stats; } - /** Occurs when the audio device state changes. - - This callback notifies the application that the system's audio device state - is changed. For example, a headset is unplugged from the device. - - @param deviceId The device ID. 
- @param deviceType The device type: #MEDIA_DEVICE_TYPE. - @param deviceState The device state: - - On macOS: - - 0: The device is ready for use. - - 8: The device is not connected. - - On Windows: #MEDIA_DEVICE_STATE_TYPE. + /** + * @brief Occurs when the audio device state changes. + * + * @details + * This callback notifies the application that the system's audio device state is changed. For + * example, a headset is unplugged from the device. + * + * @note This method is for Windows and macOS only. + * + * @param deviceId The device ID. + * @param deviceType The device type. See `MEDIA_DEVICE_TYPE`. + * @param deviceState The device state. See `MEDIA_DEVICE_STATE_TYPE`. + * */ virtual void onAudioDeviceStateChanged(const char* deviceId, int deviceType, int deviceState) { (void)deviceId; @@ -1662,38 +2015,61 @@ class IRtcEngineEventHandler { } /** - * @brief Reports current AudioMixing progress. + * @brief Reports the playback progress of a music file. + * + * @details + * After you called the `startAudioMixing(const char* filePath, bool loopback, int cycle, int + * startPos)` method to play a music file, the SDK triggers this + * callback every two seconds to report the playback progress. * - * The callback occurs once every one second during the playback and reports the current playback progress. - * @param position Current AudioMixing progress (millisecond). + * @param position The playback progress (ms). + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual void onAudioMixingPositionChanged(int64_t position) {} - /** Occurs when the audio mixing file playback finishes. - @deprecated This method is deprecated, use onAudioMixingStateChanged instead. - - After you call startAudioMixing to play a local music file, this callback occurs when the playback finishes. - If the startAudioMixing method call fails, the SDK returns the error code 701. + /** + * @brief Occurs when the playback of the local music file finishes. 
+ * + * @deprecated This method is deprecated, use onAudioMixingStateChanged instead. + * + * @details + * After you call `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)` + * to play a local music file, this callback occurs when the + * playback finishes. If the call of `startAudioMixing(const char* filePath, bool loopback, int + * cycle, int startPos)` fails, the error code + * `WARN_AUDIO_MIXING_OPEN_ERROR` is returned. + * */ virtual void onAudioMixingFinished() __deprecated {} /** - * Occurs when the playback of the local audio effect file finishes. + * @brief Occurs when the playback of the local music file finishes. * + * @details * This callback occurs when the local audio effect file finishes playing. * - * @param soundId The audio effect ID. The ID of each audio effect file is unique. + * @param soundId The ID of the audio effect. The unique ID of each audio effect file. + * */ virtual void onAudioEffectFinished(int soundId) {} - /** Occurs when the video device state changes. - - This callback notifies the application that the system's video device state - is changed. - - @param deviceId Pointer to the device ID. - @param deviceType Device type: #MEDIA_DEVICE_TYPE. - @param deviceState Device state: #MEDIA_DEVICE_STATE_TYPE. + /** + * @brief Occurs when the video device state changes. + * + * @details + * This callback reports the change of system video devices, such as being unplugged or removed. On + * a Windows device with an external camera for video capturing, the video disables once the + * external camera is unplugged. + * + * @note This callback is for Windows and macOS only. + * + * @param deviceId The device ID. + * @param deviceType Media device types. See `MEDIA_DEVICE_TYPE`. + * @param deviceState Media device states. See `MEDIA_DEVICE_STATE_TYPE`. 
+ * */ virtual void onVideoDeviceStateChanged(const char* deviceId, int deviceType, int deviceState) { (void)deviceId; @@ -1702,26 +2078,34 @@ class IRtcEngineEventHandler { } /** - * Reports the last mile network quality of each user in the channel. + * @brief Reports the last mile network quality of each user in the channel. * - * This callback reports the last mile network conditions of each user in the channel. Last mile refers to the - * connection between the local device and Agora's edge server. + * @details + * This callback reports the last mile network conditions of each user in the channel. Last mile + * refers to the connection between the local device and Agora's edge server. + * The SDK triggers this callback once every two seconds. If a channel includes multiple users, the + * SDK triggers this callback as many times. + * This callback provides feedback on network quality through sending and receiving broadcast + * packets within the channel. Excessive broadcast packets can lead to broadcast storms. To prevent + * broadcast storms from causing a large amount of data transmission within the channel, this + * callback supports feedback on the network quality of up to 4 remote hosts simultaneously by + * default. * - * The SDK triggers this callback once every two seconds. If a channel includes multiple users, the SDK triggers - * this callback as many times. + * @note `txQuality` is `UNKNOWN` when the user is not sending a stream; `rxQuality` is `UNKNOWN` + * when the user is not receiving a stream. * - * @note `txQuality` is UNKNOWN when the user is not sending a stream; `rxQuality` is UNKNOWN when the user is not - * receiving a stream. + * @param uid The user ID. The network quality of the user with this user ID is reported. If the uid + * is 0, the local network quality is reported. 
+ * @param txQuality Uplink network quality rating of the user in terms of the transmission bit rate, + * packet loss rate, average RTT (Round-Trip Time) and jitter of the uplink network. This parameter + * is a quality rating helping you understand how well the current uplink network conditions can + * support the selected video encoder configuration. For example, a 1000 Kbps uplink network may be + * adequate for video frames with a resolution of 640 × 480 and a frame rate of 15 fps in the + * LIVE_BROADCASTING profile, but might be inadequate for resolutions higher than 1280 × 720. See + * `QUALITY_TYPE`. + * @param rxQuality Downlink network quality rating of the user in terms of packet loss rate, + * average RTT, and jitter of the downlink network. See `QUALITY_TYPE`. * - * @param uid The user ID. The network quality of the user with this user ID is reported. - * @param txQuality Uplink network quality rating of the user in terms of the transmission bit rate, packet loss rate, - * average RTT (Round-Trip Time) and jitter of the uplink network. This parameter is a quality rating helping you - * understand how well the current uplink network conditions can support the selected video encoder configuration. - * For example, a 1000 Kbps uplink network may be adequate for video frames with a resolution of 640 × 480 and a frame - * rate of 15 fps in the LIVE_BROADCASTING profile, but may be inadequate for resolutions higher than 1280 × 720. - * See #QUALITY_TYPE. - * @param rxQuality Downlink network quality rating of the user in terms of packet loss rate, average RTT, and jitter - * of the downlink network. See #QUALITY_TYPE. */ virtual void onNetworkQuality(uid_t uid, int txQuality, int rxQuality) { (void)uid; @@ -1738,53 +2122,56 @@ class IRtcEngineEventHandler { virtual void onIntraRequestReceived() {} /** - * Occurs when uplink network info is updated. + * @brief Occurs when the uplink network information changes. 
* + * @details * The SDK triggers this callback when the uplink network information changes. * - * @note This callback only applies to scenarios where you push externally encoded - * video data in H.264 format to the SDK. + * @note This callback only applies to scenarios where you push externally encoded video data in + * H.264 format to the SDK. * - * @param info The uplink network information. See UplinkNetworkInfo. - */ - virtual void onUplinkNetworkInfoUpdated(const UplinkNetworkInfo& info) { - (void)info; - } - - /** - * Occurs when downlink network info is updated. + * @param info The uplink network information. See `UplinkNetworkInfo`. * - * This callback is used for notifying user to switch major/minor stream if needed. - * - * @param info The downlink network info collections. */ - virtual void onDownlinkNetworkInfoUpdated(const DownlinkNetworkInfo& info) { + virtual void onUplinkNetworkInfoUpdated(const UplinkNetworkInfo& info) { (void)info; } /** - * Reports the last-mile network quality of the local user. + * @brief Reports the last-mile network quality of the local user. * + * @details * This callback reports the last-mile network conditions of the local user before the user joins * the channel. Last mile refers to the connection between the local device and Agora's edge server. + * Before the user joins the channel, this callback is triggered by the SDK once + * `startLastmileProbeTest` is called and reports the last-mile network conditions of the local + * user. * - * When the user is not in a channel and the last-mile network test is enabled - * (by calling `startLastmileProbeTest`), this callback function is triggered - * to update the app on the network connection quality of the local user. + * @param quality The last-mile network quality. See `QUALITY_TYPE`. * - * @param quality The last mile network quality. See #QUALITY_TYPE. 
*/ virtual void onLastmileQuality(int quality) { (void)quality; } - /** Occurs when the first local video frame is rendered on the local video view. + /** + * @brief Occurs when the first local video frame is displayed on the local video view. * - * @param source The video source: #VIDEO_SOURCE_TYPE. + * @details + * The SDK triggers this callback when the first local video frame is displayed on the local video + * view. + * + * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`. * @param width The width (px) of the first local video frame. * @param height The height (px) of the first local video frame. - * @param elapsed Time elapsed (ms) from the local user calling the `joinChannel` - * method until the SDK triggers this callback. If you call the `startPreview` method before calling - * the `joinChannel` method, then `elapsed` is the time elapsed from calling the - * `startPreview` method until the SDK triggers this callback. + * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token, + * const char* channelId, const char* info, uid_t uid)` or + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` to join the channel to when the SDK triggers this callback. If `startPreview()` + * / `startPreview(VIDEO_SOURCE_TYPE sourceType)` is called before joining the channel, this + * parameter indicates the + * time elapsed from calling `startPreview()` or `startPreview(VIDEO_SOURCE_TYPE sourceType)` to + * when this event + * occurred. + * */ virtual void onFirstLocalVideoFrame(VIDEO_SOURCE_TYPE source, int width, int height, int elapsed) { (void)source; @@ -1793,37 +2180,53 @@ class IRtcEngineEventHandler { (void)elapsed; } - /** Occurs when the first local video frame is published. + /** + * @brief Occurs when the first video frame is published. 
+ * + * @details * The SDK triggers this callback under one of the following circumstances: - * - The local client enables the video module and calls `joinChannel` successfully. - * - The local client calls `muteLocalVideoStream(true)` and muteLocalVideoStream(false) in sequence. + * - The local client enables the video module and calls `joinChannel(const char* token, const char* + * channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)` + * to join the channel successfully. + * - The local client calls `muteLocalVideoStream` (`true`) and `muteLocalVideoStream` (`false`) in + * sequence. * - The local client calls `disableVideo` and `enableVideo` in sequence. * - The local client calls `pushVideoFrame` to successfully push the video frame to the SDK. - * @param source The video source type. - * @param elapsed The time elapsed (ms) from the local user calling joinChannel` to the SDK triggers - * this callback. - */ + * + * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`. + * @param elapsed Time elapsed (ms) from the local user calling `joinChannel(const char* token, + * const char* channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const + * char* channelId, uid_t uid, const ChannelMediaOptions& options)` + * until this callback is triggered. + * + */ virtual void onFirstLocalVideoFramePublished(VIDEO_SOURCE_TYPE source, int elapsed) { (void)source; (void)elapsed; } - /** Occurs when the first remote video frame is received and decoded. - - The SDK triggers this callback under one of the following circumstances: - - The remote user joins the channel and sends the video stream. - - The remote user stops sending the video stream and re-sends it after 15 seconds. Reasons for such an interruption include: - - The remote user leaves the channel. - - The remote user drops offline. 
- - The remote user calls `muteLocalVideoStream` to stop sending the video stream. - - The remote user calls `disableVideo` to disable video. - - @param uid The user ID of the remote user sending the video stream. - @param width The width (pixels) of the video stream. - @param height The height (pixels) of the video stream. - @param elapsed The time elapsed (ms) from the local user calling `joinChannel` - until the SDK triggers this callback. - */ + /** + * @brief Occurs when the first remote video frame is received and decoded. + * + * @details + * The SDK triggers this callback under one of the following circumstances: + * - The remote user joins the channel and sends the video stream. + * - The remote user stops sending the video stream and re-sends it after 15 seconds. Reasons for + * such an interruption include: + * - The remote user leaves the channel. + * - The remote user drops offline. + * - The remote user calls `disableVideo` to disable video. + * + * @param uid The user ID of the remote user sending the video stream. + * @param width The width (px) of the video stream. + * @param height The height (px) of the video stream. + * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token, + * const char* channelId, const char* info, uid_t uid)` or + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` until the SDK triggers this callback. + * + */ virtual void onFirstRemoteVideoDecoded(uid_t uid, int width, int height, int elapsed) __deprecated { (void)uid; (void)width; @@ -1832,45 +2235,91 @@ class IRtcEngineEventHandler { } /** - * Occurs when the local or remote video size or rotation has changed. - * @param sourceType The video source type: #VIDEO_SOURCE_TYPE. - * @param uid The user ID. 0 indicates the local user. - * @param width The new width (pixels) of the video. - * @param height The new height (pixels) of the video. 
- * @param rotation The rotation information of the video. + * @brief Occurs when the video size or rotation of a specified user changes. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. + * @param uid The ID of the user whose video size or rotation changes. (The `uid` for the local user + * is 0. The video is the local user's video preview). + * @param width The width (pixels) of the video stream. + * @param height The height (pixels) of the video stream. + * @param rotation The rotation information. The value range is [0,360).Note: On the iOS platform, + * the parameter value is always 0. + * */ virtual void onVideoSizeChanged(VIDEO_SOURCE_TYPE sourceType, uid_t uid, int width, int height, int rotation) { (void)uid; (void)width; (void)height; (void)rotation; - } + } - /** Occurs when the local video stream state changes. + /** + * @brief Occurs when the local video event occurs. * - * When the state of the local video stream changes (including the state of the video capture and - * encoding), the SDK triggers this callback to report the current state. This callback indicates - * the state of the local video stream, including camera capturing and video encoding, and allows - * you to troubleshoot issues when exceptions occur. + * @since v4.6.1 * - * The SDK triggers the onLocalVideoStateChanged callback with the state code of `LOCAL_VIDEO_STREAM_STATE_FAILED` - * and error code of `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE` in the following situations: - * - The app switches to the background, and the system gets the camera resource. - * - The camera starts normally, but does not output video for four consecutive seconds. + * @details + * This callback is triggered when a video event occurs. You can use this callback to get the reason for such an event. + * + * @param source The video source type: #VIDEO_SOURCE_TYPE. + * @param event The local video event type: #LOCAL_VIDEO_EVENT_TYPE. 
* - When the camera outputs the captured video frames, if the video frames are the same for 15
- * consecutive frames, the SDK triggers the `onLocalVideoStateChanged` callback with the state code
- * of `LOCAL_VIDEO_STREAM_STATE_CAPTURING` and error code of `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE`.
- * Note that the video frame duplication detection is only available for video frames with a resolution
- * greater than 200 × 200, a frame rate greater than or equal to 10 fps, and a bitrate less than 20 Kbps.
+ */
+ virtual void onLocalVideoEvent(VIDEO_SOURCE_TYPE source, LOCAL_VIDEO_EVENT_TYPE event) {
+ (void)source;
+ (void)event;
+ }
+
+ /**
+ * @brief Occurs when the local video stream state changes.
+ *
+ * @details
+ * When the status of the local video changes, the SDK triggers this callback to report the current
+ * local video state and the reason for the state change.
+ * Applicable scenarios: You can use this callback to stay updated on the state changes of the local
+ * video stream, and take corresponding measures based on the reasons for the state changes, to
+ * better manage and debug issues related to the video stream.
+ * Call timing: - The SDK triggers this callback under the following circumstances, with the
+ * `state` as LOCAL_VIDEO_STREAM_STATE_FAILED, and the `reason` as
+ * `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE`:
+ * - The app switches to the background, and the system revokes the camera resource.
+ * - For Android 9 and later versions, after an app is in the background for a period, the system
+ * automatically revokes camera permissions.
+ * - For Android 6 and later versions, if the camera is held by a third-party app for a certain
+ * duration and then released, the SDK triggers this callback and reports the
+ * `onLocalVideoStateChanged` (`LOCAL_VIDEO_STREAM_STATE_CAPTURING, LOCAL_VIDEO_STREAM_REASON_OK`)
+ * callback.
+ * - The camera starts normally, but does not output video frames for four consecutive seconds.
+ * - When the camera outputs captured video frames, if the SDK detects 15 consecutive duplicate
+ * video frames, it triggers this callback, with the `state` as `LOCAL_VIDEO_STREAM_STATE_CAPTURING`
+ * and the `reason` as `LOCAL_VIDEO_STREAM_REASON_CAPTURE_FAILURE`. Note:
+ * - Note that the video frame duplication detection is only available for video frames with a
+ * resolution greater than 200 × 200, a frame rate greater than or equal to 10 fps, and a bitrate
+ * less than 20 Kbps.
+ * - Normally, if there is an error in video capturing, the issue can be troubleshot through the
+ * `reason` parameter in this callback. However, on some devices, when there is an issue with
+ * capturing (such as freezing), the Android system will not throw any error callbacks, so the SDK
+ * cannot report the reason for the change in local video status. In this case, you can determine if
+ * there is no video frame being captured by checking the following: this callback reports the
+ * `state` as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` or `LOCAL_VIDEO_STREAM_STATE_ENCODING`, and the
+ * `captureFrameRate` in the `onLocalVideoStats` callback is 0.
 *
- * @note For some device models, the SDK does not trigger this callback when the state of the local
- * video changes while the local video capturing device is in use, so you have to make your own
- * timeout judgment.
+ * @note
+ * - Note that the video frame duplication detection is only available for video frames with a
+ * resolution greater than 200 × 200, a frame rate greater than or equal to 10 fps, and a bitrate
+ * less than 20 Kbps.
+ * - Normally, if there is an error in video capturing, the issue can be troubleshot through the
+ * `reason` parameter in this callback. However, on some devices, when there is an issue with
+ * capturing (such as freezing), the Android system will not throw any error callbacks, so the SDK
+ * cannot report the reason for the change in local video status.
In this case, you can determine if + * there is no video frame being captured by checking the following: this callback reports the + * `state` as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` or `LOCAL_VIDEO_STREAM_STATE_ENCODING`, and the + * `captureFrameRate` in the `onLocalVideoStats` callback is 0. + * + * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`. + * @param state The state of the local video, see `LOCAL_VIDEO_STREAM_STATE`. + * @param reason The reasons for changes in local video state. See `LOCAL_VIDEO_STREAM_REASON`. * - * @param source The video source type: #VIDEO_SOURCE_TYPE. - * @param state The state of the local video. See #LOCAL_VIDEO_STREAM_STATE. - * @param reason The detailed error information. See #LOCAL_VIDEO_STREAM_REASON. */ virtual void onLocalVideoStateChanged(VIDEO_SOURCE_TYPE source, LOCAL_VIDEO_STREAM_STATE state, LOCAL_VIDEO_STREAM_REASON reason) { (void)source; @@ -1879,15 +2328,18 @@ class IRtcEngineEventHandler { } /** - * Occurs when the remote video state changes. + * @brief Occurs when the remote video stream state changes. * - * @note This callback does not work properly when the number of users (in the voice/video call - * channel) or hosts (in the live streaming channel) in the channel exceeds 17. + * @note This callback does not work properly when the number of users (in the communication + * profile) or hosts (in the live streaming channel) in a channel exceeds 32. + * + * @param uid The ID of the remote user whose video state changes. + * @param state The state of the remote video. See `REMOTE_VIDEO_STATE`. + * @param reason The reason for the remote video state change. See `REMOTE_VIDEO_STATE_REASON`. + * @param elapsed Time elapsed (ms) from the local user calling the `joinChannel(const char* token, + * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` method until + * the SDK triggers this callback. * - * @param uid The ID of the user whose video state has changed. 
- * @param state The remote video state: #REMOTE_VIDEO_STATE.
- * @param reason The reason of the remote video state change: #REMOTE_VIDEO_STATE_REASON.
- * @param elapsed The time elapsed (ms) from the local client calling `joinChannel` until this callback is triggered.
 */
 virtual void onRemoteVideoStateChanged(uid_t uid, REMOTE_VIDEO_STATE state, REMOTE_VIDEO_STATE_REASON reason, int elapsed) {
 (void)uid;
@@ -1896,12 +2348,21 @@ class IRtcEngineEventHandler {
 (void)elapsed;
 }

- /** Occurs when the renderer receives the first frame of the remote video.
+ /**
+ * @brief Occurs when the renderer receives the first frame of the remote video.
+ *
+ * @note This callback is only triggered when the video frame is rendered by the SDK; it will not be
+ * triggered if the user employs custom video rendering. You need to implement this independently
+ * using methods outside the SDK.
 *
 * @param uid The user ID of the remote user sending the video stream.
- * @param width The width (px) of the video frame.
+ * @param width The width (px) of the video stream.
 * @param height The height (px) of the video stream.
- * @param elapsed The time elapsed (ms) from the local user calling `joinChannel` until the SDK triggers this callback.
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token,
+ * const char* channelId, const char* info, uid_t uid)` or
+ * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions&
+ * options)` until the SDK triggers this callback.
+ *
 */
 virtual void onFirstRemoteVideoFrame(uid_t uid, int width, int height, int elapsed) {
 (void)uid;
@@ -1911,96 +2372,117 @@ class IRtcEngineEventHandler {
 }

 /**
- * Occurs when a remote user or broadcaster joins the channel.
+ * @brief Occurs when a remote user (in the communication profile)/ host (in the live streaming
+ * profile) joins the channel.
* - * - In the COMMUNICATION channel profile, this callback indicates that a remote user joins the channel. - * The SDK also triggers this callback to report the existing users in the channel when a user joins the + * @details + * - In a communication channel, this callback indicates that a remote user joins the channel. The + * SDK also triggers this callback to report the existing users in the channel when a user joins the * channel. - * In the LIVE_BROADCASTING channel profile, this callback indicates that a host joins the channel. The - * SDK also triggers this callback to report the existing hosts in the channel when a host joins the - * channel. Agora recommends limiting the number of hosts to 17. - * - * The SDK triggers this callback under one of the following circumstances: - * - A remote user/host joins the channel by calling the `joinChannel` method. + * - In a live-broadcast channel, this callback indicates that a host joins the channel. The SDK + * also triggers this callback to report the existing hosts in the channel when a host joins the + * channel. Agora recommends limiting the number of co-hosts to 32, with a maximum of 17 video + * hosts. + * Call timing: The SDK triggers this callback under one of the following circumstances: + * - A remote user/host joins the channel. * - A remote user switches the user role to the host after joining the channel. * - A remote user/host rejoins the channel after a network interruption. * - * @param uid The ID of the remote user or broadcaster joining the channel. - * @param elapsed The time elapsed (ms) from the local user calling `joinChannel` or `setClientRole` + * @param uid The ID of the user or host who joins the channel. 
+ * @param elapsed Time delay (ms) from the local user calling `joinChannel(const char* token, const + * char* channelId, const char* info, uid_t uid)` or `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)` * until this callback is triggered. - */ + * + */ virtual void onUserJoined(uid_t uid, int elapsed) { (void)uid; (void)elapsed; } /** - * Occurs when a remote user or broadcaster goes offline. + * @brief Occurs when a remote user (in the communication profile)/ host (in the live streaming + * profile) leaves the channel. + * + * @details + * There are generally two reasons for users to become offline: + * - Leave the channel: When a user/host leaves the channel, the user/host sends a goodbye message. + * - Drop offline: When no data packet of the user or host is received for a certain period of time + * (20 seconds for the communication profile, and more for the live broadcast profile), the SDK + * assumes that the user/host drops offline. A poor network connection may lead to false detections. + * It is recommended to use the Agora RTM SDK for reliable offline detection. + * Call timing: This callback is triggered when a remote user (in the communication profile) or host + * (in the live streaming profile) leaves a channel. * - * There are two reasons for a user to go offline: - * - Leave the channel: When the user leaves the channel, the user sends a goodbye message. When this - * message is received, the SDK determines that the user leaves the channel. - * - Drop offline: When no data packet of the user is received for a certain period of time, the SDK assumes - * that the user drops offline. A poor network connection may lead to false detection, so we recommend using - * the RTM SDK for reliable offline detection. - * - The user switches the user role from a broadcaster to an audience. + * @param uid The ID of the user who leaves the channel or goes offline. 
+ * @param reason Reasons why a remote user (in the communication profile) or host (in the live + * streaming profile) goes offline. See `USER_OFFLINE_REASON_TYPE`. * - * @param uid The ID of the remote user or broadcaster who leaves the channel or drops offline. - * @param reason The reason why the remote user goes offline: #USER_OFFLINE_REASON_TYPE. */ virtual void onUserOffline(uid_t uid, USER_OFFLINE_REASON_TYPE reason) { (void)uid; (void)reason; } - /** Occurs when a remote user's audio stream playback pauses/resumes. - - The SDK triggers this callback when the remote user stops or resumes sending the audio stream by - calling the `muteLocalAudioStream` method. - - @note This callback can be inaccurate when the number of users (in the `COMMUNICATION` profile) or hosts (in the `LIVE_BROADCASTING` profile) in the channel exceeds 17. - - @param uid The user ID. - @param muted Whether the remote user's audio stream is muted/unmuted: - - true: Muted. - - false: Unmuted. - */ + /** + * @brief Occurs when a remote user (in the communication profile) or a host (in the live streaming + * profile) stops/resumes sending the audio stream. + * + * @details + * The SDK triggers this callback when the remote user stops or resumes sending the audio stream by + * calling the `muteLocalAudioStream` method. + * + * @note This callback does not work properly when the number of users (in the communication + * profile) or hosts (in the live streaming channel) in a channel exceeds 32. + * + * @param uid The user ID. + * @param muted Whether the remote user's audio stream is muted: + * - `true`: User's audio stream is muted. + * - `false`: User's audio stream is unmuted. + * + */ virtual void onUserMuteAudio(uid_t uid, bool muted) { (void)uid; (void)muted; } - /** Occurs when a remote user pauses or resumes sending the video stream. + /** + * @brief Occurs when a remote user stops or resumes publishing the video stream. 
+ * + * @details + * When a remote user calls `muteLocalVideoStream` to stop or resume publishing the video stream, + * the SDK triggers this callback to report to the local user the state of the streams published by + * the remote user. + * + * @note This callback can be inaccurate when the number of users (in the communication profile) or + * hosts (in the live streaming profile) in a channel exceeds 32. + * + * @param uid The user ID of the remote user. + * @param muted Whether the remote user stops publishing the video stream: + * - `true`: The remote user stops publishing the video stream. + * - `false`: The remote user resumes publishing the video stream. * - * When a remote user calls `muteLocalVideoStream` to stop or resume publishing the video stream, the - * SDK triggers this callback to report the state of the remote user's publishing stream to the local - * user. - - @note This callback is invalid when the number of users or broadacasters in a - channel exceeds 20. - - @param userId ID of the remote user. - @param muted Whether the remote user stops publishing the video stream: - - true: The remote user has paused sending the video stream. - - false: The remote user has resumed sending the video stream. */ virtual void onUserMuteVideo(uid_t uid, bool muted) { (void)uid; (void)muted; } - /** Occurs when a remote user enables or disables the video module. - - Once the video function is disabled, the users cannot see any video. - - The SDK triggers this callback when a remote user enables or disables the video module by calling the - `enableVideo` or `disableVideo` method. - - @param uid The ID of the remote user. - @param enabled Whether the video of the remote user is enabled: - - true: The remote user has enabled video. - - false: The remote user has disabled video. - */ + /** + * @brief Occurs when a remote user enables or disables the video module. + * + * @details + * Once the video module is disabled, the user can only use a voice call. 
The user cannot send or + * receive any video. + * The SDK triggers this callback when a remote user enables or disables the video module by calling + * the `enableVideo` or `disableVideo` method. + * + * @param uid The user ID of the remote user. + * @param enabled + * - `true`: The video module is enabled. + * - `false`: The video module is disabled. + * + */ virtual void onUserEnableVideo(uid_t uid, bool enabled) { (void)uid; (void)enabled; @@ -2016,89 +2498,108 @@ class IRtcEngineEventHandler { (void)state; } - /** Occurs when a remote user enables or disables local video capturing. - - The SDK triggers this callback when the remote user resumes or stops capturing the video stream by - calling the `enableLocalVideo` method. - - @param uid The ID of the remote user. - @param enabled Whether the specified remote user enables/disables local video: - - `true`: The remote user has enabled local video capturing. - - `false`: The remote user has disabled local video capturing. - */ + /** + * @brief Occurs when a specific remote user enables/disables the local video capturing function. + * + * @details + * The SDK triggers this callback when the remote user resumes or stops capturing the video stream + * by calling the `enableLocalVideo` method. + * + * @param uid The user ID of the remote user. + * @param enabled Whether the specified remote user enables/disables local video capturing: + * - `true`: The video module is enabled. Other users in the channel can see the video of this + * remote user. + * - `false`: The video module is disabled. Other users in the channel can no longer receive the + * video stream from this remote user, while this remote user can still receive the video streams + * from other users. + * + */ virtual void onUserEnableLocalVideo(uid_t uid, bool enabled) __deprecated { (void)uid; (void)enabled; } - /** Reports the statistics of the audio stream from each remote user/host. 
- - The SDK triggers this callback once every two seconds for each remote user who is sending audio - streams. If a channel includes multiple remote users, the SDK triggers this callback as many times. - - @param stats Statistics of the received remote audio streams. See RemoteAudioStats. + /** + * @brief Reports the transport-layer statistics of each remote audio stream. + * + * @details + * The SDK triggers this callback once every two seconds for each remote user who is sending audio + * streams. If a channel includes multiple remote users, the SDK triggers this callback as many + * times. + * + * @param stats The statistics of the received remote audio streams. See `RemoteAudioStats`. + * */ virtual void onRemoteAudioStats(const RemoteAudioStats& stats) { (void)stats; } - /** Reports the statistics of the local audio stream. + /** + * @brief Reports the statistics of the local audio stream. * + * @details * The SDK triggers this callback once every two seconds. * - * @param stats The statistics of the local audio stream. - * See LocalAudioStats. + * @param stats Local audio statistics. See `LocalAudioStats`. + * */ virtual void onLocalAudioStats(const LocalAudioStats& stats) { (void)stats; } - /** Reports the statistics of the local video stream. + /** + * @brief Reports the statistics of the local video stream. * - * The SDK triggers this callback once every two seconds for each - * user/host. If there are multiple users/hosts in the channel, the SDK - * triggers this callback as many times. + * @details + * The SDK triggers this callback once every two seconds to report the statistics of the local video + * stream. * - * @note If you have called the `enableDualStreamMode` - * method, this callback reports the statistics of the high-video - * stream (high bitrate, and high-resolution video stream). + * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`. + * @param stats The statistics of the local video stream. See `LocalVideoStats`. 
*
- * @param source The video source type. See #VIDEO_SOURCE_TYPE.
- * @param stats Statistics of the local video stream. See LocalVideoStats.
 */
 virtual void onLocalVideoStats(VIDEO_SOURCE_TYPE source, const LocalVideoStats& stats) {
 (void)source;
 (void)stats;
 }

- /** Reports the statistics of the video stream from each remote user/host.
+ /**
+ * @brief Reports the statistics of the video stream sent by each remote user.
+ *
+ * @details
+ * Reports the statistics of the video stream from the remote users. The SDK triggers this callback
+ * once every two seconds for each remote user. If a channel has multiple users/hosts sending video
+ * streams, the SDK triggers this callback as many times.
 *
- * The SDK triggers this callback once every two seconds for each remote user. If a channel has
- * multiple users/hosts sending video streams, the SDK triggers this callback as many times.
+ * @param stats Statistics of the remote video stream. See `RemoteVideoStats`.
 *
- * @param stats Statistics of the remote video stream. See
- * RemoteVideoStats.
 */
 virtual void onRemoteVideoStats(const RemoteVideoStats& stats) {
 (void)stats;
 }

 /**
- * Occurs when the camera turns on and is ready to capture the video.
+ * @brief Occurs when the camera turns on and is ready to capture the video.
+ *
 * @deprecated Use `LOCAL_VIDEO_STREAM_STATE_CAPTURING(1)` in onLocalVideoStateChanged instead.
- * This callback indicates that the camera has been successfully turned on and you can start to capture video.
+ *
+ * @details
+ * This callback indicates that the camera has been successfully turned on and you can start to
+ * capture video.
+ *
 */
 virtual void onCameraReady() __deprecated {}

 /**
- * Occurs when the camera focus area changes.
+ * @brief Occurs when the camera focus area changes.
 *
- * @note This method is for Andriod and iOS only.
+ * @note This callback is for Android and iOS only.
 *
- * @param x The x coordinate of the changed camera focus area.
- * @param y The y coordinate of the changed camera focus area. + * @param x The x-coordinate of the changed camera focus area. + * @param y The y-coordinate of the changed camera focus area. * @param width The width of the changed camera focus area. * @param height The height of the changed camera focus area. + * */ virtual void onCameraFocusAreaChanged(int x, int y, int width, int height) { (void)x; @@ -2107,12 +2608,19 @@ class IRtcEngineEventHandler { (void)height; } /** - * Occurs when the camera exposure area changes. + * @brief Occurs when the camera exposure area changes. + * + * @details + * The SDK triggers this callback when the local user changes the camera exposure position by + * calling `setCameraExposurePosition`. + * + * @note This callback is for Android and iOS only. * * @param x The x coordinate of the changed camera exposure area. * @param y The y coordinate of the changed camera exposure area. * @param width The width of the changed camera exposure area. * @param height The height of the changed exposure area. + * */ virtual void onCameraExposureAreaChanged(int x, int y, int width, int height) { (void)x; @@ -2120,36 +2628,44 @@ class IRtcEngineEventHandler { (void)width; (void)height; } -#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) +#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__) /** - * Reports the face detection result of the local user. + * @brief Reports the face detection result of the local user. * - * Once you enable face detection by calling enableFaceDetection(true), you can get the following - * information on the local user in real-time: + * @details + * Once you enable face detection by calling `enableFaceDetection` `(true)`, you can get the + * following information on the local user in real-time: * - The width and height of the local video. * - The position of the human face in the local view. * - The distance between the human face and the screen. 
- *
- * This value is based on the fitting calculation of the local video size and the position of the human face.
+ * This value is based on the fitting calculation of the local video size and the position of the
+ * human face.
 *
 * @note
 * - This callback is for Android and iOS only.
 * - When it is detected that the face in front of the camera disappears, the callback will be
- * triggered immediately. In the state of no face, the trigger frequency of the callback will be
- * reduced to save power consumption on the local device.
+ * triggered immediately. When no human face is detected, the frequency of this callback being
+ * triggered will be decreased to reduce power consumption on the local device.
 * - The SDK stops triggering this callback when a human face is in close proximity to the screen.
- * On Android, the value of `distance` reported in this callback may be slightly different from the
- * actual distance. Therefore, Agora does not recommend using it for accurate calculation.
+ * - On Android, the value of `distance` reported in this callback may be slightly different from the
+ * actual distance. Therefore, Agora does not recommend using it for accurate calculation.
 *
 * @param imageWidth The width (px) of the video image captured by the local camera.
 * @param imageHeight The height (px) of the video image captured by the local camera.
- * @param vecRectangle A Rectangle array of length 'numFaces', which represents the position and size of the human face on the local video:
- * - x: The x-coordinate (px) of the human face in the local view. Taking the top left corner of the view as the origin, the x-coordinate represents the horizontal position of the human face relative to the origin.
- * - y: The y-coordinate (px) of the human face in the local view. Taking the top left corner of the view as the origin, the y-coordinate represents the vertical position of the human face relative to the origin.
- * - width: The width (px) of the human face in the captured view. - * - height: The height (px) of the human face in the captured view. - * @param vecDistance An int array of length 'numFaces', which represents distance (cm) between the human face and the screen. - * @param numFaces The number of faces detected. If the value is 0, it means that no human face is detected. + * @param vecRectangle An array of `numFaces`, representing the detected face information: + * - `x`: The x-coordinate (px) of the human face in the local view. Taking the top left corner of + * the view as the origin, the x-coordinate represents the horizontal position of the human face + * relative to the origin. + * - `y`: The y-coordinate (px) of the human face in the local view. Taking the top left corner of + * the view as the origin, the y-coordinate represents the vertical position of the human face + * relative to the origin. + * - `width`: The width (px) of the human face in the captured view. + * - `height`: The height (px) of the human face in the captured view. + * @param vecDistance An array of `numFaces`, representing the distance (cm) between a face and the + * device screen. + * @param numFaces The number of faces detected. If the value is 0, it means that no human face is + * detected. + * */ virtual void onFacePositionChanged(int imageWidth, int imageHeight, const Rectangle* vecRectangle, const int* vecDistance, @@ -2162,33 +2678,48 @@ class IRtcEngineEventHandler { } #endif /** - * Occurs when the video stops playing. - * @deprecated Use `LOCAL_VIDEO_STREAM_STATE_STOPPED(0)` in the onLocalVideoStateChanged callback instead. + * @brief Occurs when the video stops playing. + * + * @deprecated Use `LOCAL_VIDEO_STREAM_STATE_STOPPED(0)` in the onLocalVideoStateChanged callback + * instead. + * + * @details + * The application can use this callback to change the configuration of the `view` (for example, + * displaying other pictures in the view) after the video stops playing. 
* - * The app can use this callback to change the configuration of the view (for example, displaying - * other pictures in the view) after the video stops playing. */ virtual void onVideoStopped() __deprecated {} - /** Occurs when the playback state of the music file changes. + /** + * @brief Occurs when the playback state of the music file changes. + * + * @details + * This callback occurs when the playback state of the music file changes, and reports the current + * state and error code. + * + * @param state The playback state of the music file. See `AUDIO_MIXING_STATE_TYPE`. + * @param reason Error code. See `AUDIO_MIXING_REASON_TYPE`. * - * This callback occurs when the playback state of the music file changes, and reports the current state and error code. - - @param state The playback state of the music file. See #AUDIO_MIXING_STATE_TYPE. - @param reason The reason for the change of the music file playback state. See #AUDIO_MIXING_REASON_TYPE. */ virtual void onAudioMixingStateChanged(AUDIO_MIXING_STATE_TYPE state, AUDIO_MIXING_REASON_TYPE reason) { (void)state; (void)reason; } - /** Occurs when the state of the rhythm player changes. - When you call the \ref IRtcEngine::startRhythmPlayer "startRhythmPlayer" - method and the state of rhythm player changes, the SDK triggers this - callback. - - @param state The state code. See #RHYTHM_PLAYER_STATE_TYPE. - @param reason The error code. See #RHYTHM_PLAYER_REASON. + /** + * @brief Occurs when the state of virtual metronome changes. + * + * @details + * When the state of the virtual metronome changes, the SDK triggers this callback to report the + * current state of the virtual metronome. This callback indicates the state of the local audio + * stream and enables you to troubleshoot issues when audio exceptions occur. + * + * @note This callback is for Android and iOS only. + * + * @param state For the current virtual metronome status, see `RHYTHM_PLAYER_STATE_TYPE`. 
+ * @param reason For the error codes and error messages related to virtual metronome errors, see + * `RHYTHM_PLAYER_REASON`. + * */ virtual void onRhythmPlayerStateChanged(RHYTHM_PLAYER_STATE_TYPE state, RHYTHM_PLAYER_REASON reason) { (void)state; @@ -2196,43 +2727,62 @@ class IRtcEngineEventHandler { } /** - * Occurs when the SDK cannot reconnect to the server 10 seconds after its connection to the server is - * interrupted. + * @brief Occurs when the SDK cannot reconnect to Agora's edge server 10 seconds after its + * connection to the server is interrupted. + * + * @details + * The SDK triggers this callback when it cannot connect to the server 10 seconds after calling the + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` method, regardless of whether it is in the channel. If the SDK fails to + * rejoin the channel 20 minutes after being disconnected from Agora's edge server, the SDK stops + * rejoining the channel. * - * The SDK triggers this callback when it cannot connect to the server 10 seconds after calling - * `joinChannel`, regardless of whether it is in the channel or not. If the SDK fails to rejoin - * the channel 20 minutes after being disconnected from Agora's edge server, the SDK stops rejoining the channel. */ virtual void onConnectionLost() {} - /** Occurs when the connection between the SDK and the server is interrupted. + /** + * @brief Occurs when the connection between the SDK and the server is interrupted. + * * @deprecated Use `onConnectionStateChanged` instead. - - The SDK triggers this callback when it loses connection with the serer for more - than 4 seconds after the connection is established. After triggering this - callback, the SDK tries to reconnect to the server. If the reconnection fails - within a certain period (10 seconds by default), the onConnectionLost() - callback is triggered. 
If the SDK fails to rejoin the channel 20 minutes after
- being disconnected from Agora's edge server, the SDK stops rejoining the channel.
-
- */
+ *
+ * @details
+ * The SDK triggers this callback when it loses connection with the server for more than four
+ * seconds after the connection is established. After triggering this callback, the SDK tries to
+ * reconnect to the server. You can use this callback to implement pop-up reminders. The differences
+ * between this callback and `onConnectionLost` are as follows:
+ * - The SDK triggers the `onConnectionInterrupted` callback when it loses connection with the
+ * server for more than four seconds after it successfully joins the channel.
+ * - The SDK triggers the `onConnectionLost` callback when it loses connection with the server for
+ * more than 10 seconds, whether or not it joins the channel.
+ * If the SDK fails to rejoin the channel 20 minutes after being disconnected from Agora's edge
+ * server, the SDK stops rejoining the channel.
+ *
+ */
 virtual void onConnectionInterrupted() __deprecated {}

- /** Occurs when your connection is banned by the Agora Server.
+ /**
+ * @brief Occurs when the connection is banned by the Agora server.
+ *
 * @deprecated Use `onConnectionStateChanged` instead.
 */
 virtual void onConnectionBanned() __deprecated {}

- /** Occurs when the local user receives the data stream from the remote user.
+ /**
+ * @brief Occurs when the local user receives the data stream from the remote user.
+ *
+ * @details
+ * The SDK triggers this callback when the local user receives the stream message that the remote
+ * user sends by calling the `sendStreamMessage` method.
 *
- * The SDK triggers this callback when the user receives the data stream that another user sends
- * by calling the \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage" method.
+ * @note If you need a more comprehensive solution for low-latency, high-concurrency, and scalable + * real-time messaging and status synchronization, it is recommended to use `Signaling`. + * + * @param uid The ID of the remote user sending the message. + * @param streamId The stream ID of the received message. + * @param data The data received. + * @param length The data length (byte). + * @param sentTs The time when the data stream is sent. * - * @param uid ID of the user who sends the data stream. - * @param streamId The ID of the stream data. - * @param data The data stream. - * @param length The length (byte) of the data stream. - * @param sentTs The time when the data stream sent. */ virtual void onStreamMessage(uid_t uid, int streamId, const char* data, size_t length, uint64_t sentTs) { (void)uid; @@ -2242,17 +2792,22 @@ class IRtcEngineEventHandler { (void)sentTs; } - /** Occurs when the local user does not receive the data stream from the remote user. + /** + * @brief Occurs when the local user does not receive the data stream from the remote user. * - * The SDK triggers this callback when the user fails to receive the data stream that another user sends - * by calling the \ref agora::rtc::IRtcEngine::sendStreamMessage "sendStreamMessage" method. + * @details + * The SDK triggers this callback when the local user fails to receive the stream message that the + * remote user sends by calling the `sendStreamMessage` method. * - * @param uid ID of the user who sends the data stream. - * @param streamId The ID of the stream data. - * @param code The error code. + * @note If you need a more comprehensive solution for low-latency, high-concurrency, and scalable + * real-time messaging and status synchronization, it is recommended to use `Signaling`. + * + * @param uid The ID of the remote user sending the message. + * @param streamId The stream ID of the received message. + * @param code Error code. * @param missed The number of lost messages. 
- * @param cached The number of incoming cached messages when the data stream is - * interrupted. + * @param cached Number of incoming cached messages when the data stream is interrupted. + * */ virtual void onStreamMessageError(uid_t uid, int streamId, int code, int missed, int cached) { (void)uid; @@ -2263,26 +2818,93 @@ class IRtcEngineEventHandler { } /** - * Occurs when the token expires. + * @brief Occurs when the local user receives data via Reliable Data Transmission (RDT) from a remote user. * - * When the token expires during a call, the SDK triggers this callback to remind the app to renew the token. + * @technical preview * - * Upon receiving this callback, generate a new token at your app server and call - * `joinChannel` to pass the new token to the SDK. + * @details The SDK triggers this callback when the user receives the data stream that another user sends + * by calling the \ref agora::rtc::IRtcEngine::sendRdtMessage "sendRdtMessage" method. * + * @param userId ID of the user who sends the data. + * @param type The RDT stream type. See RdtStreamType. + * @param data The data received. + * @param length The length (byte) of the data. */ - virtual void onRequestToken() {} + virtual void onRdtMessage(uid_t userId, RdtStreamType type, const char *data, size_t length) { + (void)userId; + (void)type; + (void)data; + (void)length; + }; + + /** + * @brief Occurs when the RDT tunnel state changed + * + * @technical preview + * + * @param userId ID of the user who sends the data. + * @param state The RDT tunnel state. See RdtState. + */ + virtual void onRdtStateChanged(uid_t userId, RdtState state) { + (void)userId; + (void)state; + } + + /** + * @brief Occurs when the local user receives media control message sent by a remote user. + * + * @technical preview + * + * @details The SDK triggers this callback when the user receives data sent by a remote user using the sendMediaControlMessage method. + * + * @param userId ID of the user who sends the data. 
+ * @param data The data received. + * @param length The length (byte) of the data. + */ + virtual void onMediaControlMessage(uid_t userId, const char* data, size_t length) { + (void)userId; + (void)data; + (void)length; + } /** - * Occurs when the token will expire in 30 seconds. + * @brief Occurs when the token expires. + * + * @details + * The SDK triggers this callback if the token expires. + * When receiving this callback, you need to generate a new token on your token server and you can + * renew your token through one of the following ways: + * - In scenarios involving one channel: + * - Call `renewToken` to pass in the new token. + * - Call `leaveChannel(const LeaveChannelOptions& options)` to leave the current channel and then + * pass in the new token when + * you call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to join a channel. + * - In scenarios involving multiple channels: Call `updateChannelMediaOptionsEx` to pass in the new + * token. * - * When the token is about to expire in 30 seconds, the SDK triggers this callback to remind the app to renew the token. + */ + virtual void onRequestToken() {} - * Upon receiving this callback, generate a new token at your app server and call - * \ref IRtcEngine::renewToken "renewToken" to pass the new Token to the SDK. + /** + * @brief Occurs when the token expires in 30 seconds. + * + * @details + * When receiving this callback, you need to generate a new token on your token server and you can + * renew your token through one of the following ways: + * - In scenarios involving one channel: + * - Call `renewToken` to pass in the new token. + * - Call `leaveChannel(const LeaveChannelOptions& options)` to leave the current channel and then + * pass in the new token when + * you call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to join a channel. 
+ * - In scenarios involving multiple channels: Call `updateChannelMediaOptionsEx` to pass in the new + * token. + * Call timing: The SDK triggers this callback 30 seconds before the token expires, reminding the + * app to update the token. * + * @param token The token that is about to expire. * - * @param token The token that will expire in 30 seconds. */ virtual void onTokenPrivilegeWillExpire(const char* token) { (void)token; @@ -2297,87 +2919,108 @@ class IRtcEngineEventHandler { (void)error; } - /** Occurs when the first local audio frame is published. + /** + * @brief Occurs when the first audio frame is published. * + * @details * The SDK triggers this callback under one of the following circumstances: - * - The local client enables the audio module and calls `joinChannel` successfully. - * - The local client calls `muteLocalAudioStream(true)` and `muteLocalAudioStream(false)` in sequence. + * - The local client enables the audio module and calls `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)` successfully. + * - The local client calls `muteLocalAudioStream` (`true`) and `muteLocalAudioStream` (`false`) in + * sequence. * - The local client calls `disableAudio` and `enableAudio` in sequence. * - The local client calls `pushAudioFrame` to successfully push the audio frame to the SDK. * - * @param elapsed The time elapsed (ms) from the local user calling `joinChannel` to the SDK triggers this callback. + * @param elapsed Time elapsed (ms) from the local user calling `joinChannel(const char* token, + * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the SDK + * triggers this callback. + * */ virtual void onFirstLocalAudioFramePublished(int elapsed) { (void)elapsed; } /** - * Occurs when the SDK decodes the first remote audio frame for playback. + * @brief Occurs when the SDK decodes the first remote audio frame for playback. 
* * @deprecated Use `onRemoteAudioStateChanged` instead. + * + * @details * The SDK triggers this callback under one of the following circumstances: * - The remote user joins the channel and sends the audio stream for the first time. - * - The remote user's audio is offline and then goes online to re-send audio. It means the local user cannot - * receive audio in 15 seconds. Reasons for such an interruption include: + * - The remote user's audio is offline and then goes online to re-send audio. It means the local + * user cannot receive audio in 15 seconds. Reasons for such an interruption include: * - The remote user leaves channel. * - The remote user drops offline. - * - The remote user calls muteLocalAudioStream to stop sending the audio stream. - * - The remote user calls disableAudio to disable audio. - * @param uid User ID of the remote user sending the audio stream. - * @param elapsed The time elapsed (ms) from the loca user calling `joinChannel` - * until this callback is triggered. + * - The remote user calls `muteLocalAudioStream` to stop sending the audio stream. + * - The remote user calls `disableAudio` to disable audio. + * + * @param uid The user ID of the remote user. + * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token, + * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the + * SDK triggers this callback. + * */ virtual void onFirstRemoteAudioDecoded(uid_t uid, int elapsed) __deprecated { (void)uid; (void)elapsed; } - /** Occurs when the SDK receives the first audio frame from a specific remote user. + /** + * @brief Occurs when the SDK receives the first audio frame from a specific remote user. + * * @deprecated Use `onRemoteAudioStateChanged` instead. * - * @param uid ID of the remote user. - * @param elapsed The time elapsed (ms) from the loca user calling `joinChannel` - * until this callback is triggered. + * @param uid The user ID of the remote user. 
+ * @param elapsed The time elapsed (ms) from the local user calling `joinChannel(const char* token, + * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` until the + * SDK triggers this callback. + * */ virtual void onFirstRemoteAudioFrame(uid_t uid, int elapsed) __deprecated { (void)uid; (void)elapsed; } - /** Occurs when the local audio state changes. + /** + * @brief Occurs when the local audio stream state changes. + * + * @details + * When the state of the local audio stream changes (including the state of the audio capture and + * encoding), the SDK triggers this callback to report the current state. This callback indicates + * the state of the local audio stream, and allows you to troubleshoot issues when audio exceptions + * occur. * - * When the state of the local audio stream changes (including the state of the audio capture and encoding), the SDK - * triggers this callback to report the current state. This callback indicates the state of the local audio stream, - * and allows you to troubleshoot issues when audio exceptions occur. + * @note When the state is `LOCAL_AUDIO_STREAM_STATE_FAILED` (3), you can view the error information + * in the `error` parameter. * - * @note - * When the state is `LOCAL_AUDIO_STREAM_STATE_FAILED(3)`, see the `error` - * parameter for details. + * @param state The state of the local audio. See `LOCAL_AUDIO_STREAM_STATE`. + * @param reason Reasons for local audio state changes. See `LOCAL_AUDIO_STREAM_REASON`. * - * @param state State of the local audio. See #LOCAL_AUDIO_STREAM_STATE. - * @param reason The reason information of the local audio. - * See #LOCAL_AUDIO_STREAM_REASON. */ virtual void onLocalAudioStateChanged(LOCAL_AUDIO_STREAM_STATE state, LOCAL_AUDIO_STREAM_REASON reason) { (void)state; (void)reason; } - /** Occurs when the remote audio state changes. + /** + * @brief Occurs when the remote audio state changes. 
* - * When the audio state of a remote user (in the voice/video call channel) or host (in the live streaming channel) - * changes, the SDK triggers this callback to report the current state of the remote audio stream. + * @details + * When the audio state of a remote user (in a voice/video call channel) or host (in a live + * streaming channel) changes, the SDK triggers this callback to report the current state of the + * remote audio stream. * - * @note This callback does not work properly when the number of users (in the voice/video call channel) or hosts - * (in the live streaming channel) in the channel exceeds 17. + * @note This callback does not work properly when the number of users (in the communication + * profile) or hosts (in the live streaming channel) in a channel exceeds 32. + * + * @param uid The ID of the remote user whose audio state changes. + * @param state The state of the remote audio. See `REMOTE_AUDIO_STATE`. + * @param reason The reason of the remote audio state change. See `REMOTE_AUDIO_STATE_REASON`. + * @param elapsed Time elapsed (ms) from the local user calling the `joinChannel(const char* token, + * const char* channelId, uid_t uid, const ChannelMediaOptions& options)` method until + * the SDK triggers this callback. * - * @param uid ID of the remote user whose audio state changes. - * @param state State of the remote audio. See #REMOTE_AUDIO_STATE. - * @param reason The reason of the remote audio state change. - * See #REMOTE_AUDIO_STATE_REASON. - * @param elapsed Time elapsed (ms) from the local user calling the - * `joinChannel` method until the SDK - * triggers this callback. */ virtual void onRemoteAudioStateChanged(uid_t uid, REMOTE_AUDIO_STATE state, REMOTE_AUDIO_STATE_REASON reason, int elapsed) { (void)uid; @@ -2387,19 +3030,23 @@ class IRtcEngineEventHandler { } /** - * Occurs when an active speaker is detected. + * @brief Occurs when the most active remote speaker is detected. 
* - * After a successful call of `enableAudioVolumeIndication`, the SDK continuously detects which remote user has the - * loudest volume. During the current period, the remote user, who is detected as the loudest for the most times, - * is the most active user. + * @details + * After a successful call of `enableAudioVolumeIndication`, the SDK continuously detects which + * remote user has the loudest volume. During the current period, the remote user whose volume is + * detected as the loudest for the most times, is the most active user. + * When the number of users is no less than two and an active remote speaker exists, the SDK + * triggers this callback and reports the `uid` of the most active remote speaker. + * - If the most active remote speaker is always the same user, the SDK triggers the + * `onActiveSpeaker` callback only once. + * - If the most active remote speaker changes to another user, the SDK triggers this callback again + * and reports the `uid` of the new active remote speaker. * - * When the number of users is no less than two and an active remote speaker exists, the SDK triggers this callback and reports the uid of the most active remote speaker. - * - If the most active remote speaker is always the same user, the SDK triggers the `onActiveSpeaker` callback only once. - * - If the most active remote speaker changes to another user, the SDK triggers this callback again and reports the uid of the new active remote speaker. + * @param uid The user ID of the most active speaker. * - * @param userId The ID of the active speaker. A `uid` of 0 means the local user. */ - virtual void onActiveSpeaker(uid_t uid) { + virtual void onActiveSpeaker(uid_t uid) { (void)uid; } @@ -2409,22 +3056,29 @@ class IRtcEngineEventHandler { */ virtual void onContentInspectResult(media::CONTENT_INSPECT_RESULT result) { (void)result; } - /** Reports the result of taking a video snapshot. + /** + * @brief Reports the result of taking a video snapshot. 
* - * After a successful `takeSnapshot` method call, the SDK triggers this callback to report whether the snapshot is - * successfully taken, as well as the details for that snapshot. + * @details + * After a successful `takeSnapshot(uid_t uid, const char* filePath)` method call, the SDK triggers + * this callback to report + * whether the snapshot is successfully taken as well as the details for the snapshot taken. * - * @param uid The user ID. A `uid` of 0 indicates the local user. + * @param uid The user ID. A `uid` of 0 indicates the local user. * @param filePath The local path of the snapshot. * @param width The width (px) of the snapshot. * @param height The height (px) of the snapshot. - * @param errCode The message that confirms success or gives the reason why the snapshot is not successfully taken: + * @param errCode The message that confirms success or gives the reason why the snapshot is not + * successfully taken: * - 0: Success. - * - < 0: Failure. + * - < 0: Failure: * - -1: The SDK fails to write data to a file or encode a JPEG image. - * - -2: The SDK does not find the video stream of the specified user within one second after the `takeSnapshot` method call succeeds. - * - -3: Calling the `takeSnapshot` method too frequently. Call the `takeSnapshot` method after receiving the `onSnapshotTaken` - * callback from the previous call. + * - -2: The SDK does not find the video stream of the specified user within one second after the + * `takeSnapshot(uid_t uid, const char* filePath)` method call succeeds. The possible reasons are: + * local capture stops, remote + * end stops publishing, or video data processing is blocked. + * - -3: Calling the `takeSnapshot(uid_t uid, const char* filePath)` method too frequently. 
+ * */ virtual void onSnapshotTaken(uid_t uid, const char* filePath, int width, int height, int errCode) { (void)uid; @@ -2435,11 +3089,27 @@ class IRtcEngineEventHandler { } /** - * Occurs when the user role switches in the interactive live streaming. + * @brief Occurs when the user role or the audience latency level changes. + * + * @details + * Call timing: This callback will be triggered in any of the following situations: + * - Calling `setClientRole(CLIENT_ROLE_TYPE role)` or `setClientRole(CLIENT_ROLE_TYPE role, const + * ClientRoleOptions& options)` to set the user role or audience latency + * level **after joining a channel** + * - Calling `setClientRole(CLIENT_ROLE_TYPE role)` or `setClientRole(CLIENT_ROLE_TYPE role, const + * ClientRoleOptions& options)` and set the user role to `AUDIENCE` + * **before joining a channel**. + * + * @note This callback will not be triggered when you call `setClientRole(CLIENT_ROLE_TYPE role)` or + * `setClientRole(CLIENT_ROLE_TYPE role, const ClientRoleOptions& options)` + * and set the user role to `BROADCASTER` **before joining a channel**. + * + * @param oldRole Role that the user switches from: `CLIENT_ROLE_TYPE`. + * @param newRole Role that the user switches to: `CLIENT_ROLE_TYPE`. + * @param newRoleOptions Since + * v4.1.0 + * Properties of the role that the user switches to. See `ClientRoleOptions`. * - * @param oldRole The old role of the user: #CLIENT_ROLE_TYPE. - * @param newRole The new role of the user: #CLIENT_ROLE_TYPE. - * @param newRoleOptions The client role options of the new role: #ClientRoleOptions. */ virtual void onClientRoleChanged(CLIENT_ROLE_TYPE oldRole, CLIENT_ROLE_TYPE newRole, const ClientRoleOptions& newRoleOptions) { (void)oldRole; @@ -2448,23 +3118,39 @@ class IRtcEngineEventHandler { } /** - * Occurs when the user role in a Live-Broadcast channel fails to switch, for example, from a broadcaster - * to an audience or vice versa. + * @brief Occurs when switching a user role fails. 
+ * + * @details + * This callback informs you about the reason for failing to switch and your current user role. + * Call timing: The SDK triggers this callback when the local user calls + * `setClientRole(CLIENT_ROLE_TYPE role)` or + * `setClientRole(CLIENT_ROLE_TYPE role, const ClientRoleOptions& options)` after joining a channel + * to switch the user role but the switching fails. + * + * @param reason The reason for a user role switch failure. See `CLIENT_ROLE_CHANGE_FAILED_REASON`. + * @param currentRole Current user role. See `CLIENT_ROLE_TYPE`. * - * @param reason The reason for failing to change the client role: #CLIENT_ROLE_CHANGE_FAILED_REASON. - * @param currentRole The current role of the user: #CLIENT_ROLE_TYPE. */ virtual void onClientRoleChangeFailed(CLIENT_ROLE_CHANGE_FAILED_REASON reason, CLIENT_ROLE_TYPE currentRole) { (void)reason; (void)currentRole; } - /** Occurs when the audio device volume changes. - @param deviceType The device type, see #MEDIA_DEVICE_TYPE - @param volume The volume of the audio device. - @param muted Whether the audio device is muted: - - true: The audio device is muted. - - false: The audio device is not muted. + /** + * @brief Reports the volume change of the audio device or app. + * + * @details + * Occurs when the volume on the playback device, audio capture device, or the volume of the app + * changes. + * + * @note This callback is for Windows and macOS only. + * + * @param deviceType The device type. See `MEDIA_DEVICE_TYPE`. + * @param volume The volume value. The range is [0, 255]. + * @param muted Whether the audio device is muted: + * - `true`: The audio device is muted. + * - `false`: The audio device is not muted. + * 
* - * When the media push state changes, the SDK triggers this callback and reports the URL address and the current state - * of the media push. This callback indicates the state of the media push. When exceptions occur, you can troubleshoot - * issues by referring to the detailed error descriptions in the error code. + * @details + * When the state of Media Push changes, the SDK triggers this callback and reports the URL address + * and the current state of the Media Push. This callback indicates the state of the Media Push. + * When exceptions occur, you can troubleshoot issues by referring to the detailed error + * descriptions in the error code parameter. + * + * @param url The URL address where the state of the Media Push changes. + * @param state The current state of the Media Push. See `RTMP_STREAM_PUBLISH_STATE`. + * @param reason Reasons for the changes in the Media Push status. See `RTMP_STREAM_PUBLISH_REASON`. * - * @param url The URL address where the state of the media push changes. - * @param state The current state of the media push: #RTMP_STREAM_PUBLISH_STATE. - * @param reason The detailed error information for the media push: #RTMP_STREAM_PUBLISH_REASON. */ virtual void onRtmpStreamingStateChanged(const char* url, RTMP_STREAM_PUBLISH_STATE state, RTMP_STREAM_PUBLISH_REASON reason) { @@ -2490,10 +3179,12 @@ class IRtcEngineEventHandler { (void)reason; } - /** Reports events during the media push. + /** + * @brief Reports events during the Media Push. + * + * @param url The URL for Media Push. + * @param eventCode The event code of Media Push. See `RTMP_STREAMING_EVENT`. * - * @param url The URL for media push. - * @param eventCode The event code of media push. See RTMP_STREAMING_EVENT for details. */ virtual void onRtmpStreamingEvent(const char* url, RTMP_STREAMING_EVENT eventCode) { (void)url; @@ -2501,62 +3192,37 @@ class IRtcEngineEventHandler { } /** - * Occurs when the publisher's transcoding settings are updated. 
+ * @brief Occurs when the publisher's transcoding is updated. * - * When the `LiveTranscoding` class in \ref IRtcEngine::setLiveTranscoding "setLiveTranscoding" - * updates, the SDK triggers this callback to report the update information. + * @details + * When the `LiveTranscoding` class in the `startRtmpStreamWithTranscoding` method updates, the SDK + * triggers the `onTranscodingUpdated` callback to report the update information. + * + * @note If you call the `startRtmpStreamWithTranscoding` method to set the `LiveTranscoding` class + * for the first time, the SDK does not trigger this callback. * - * @note - * If you call the `setLiveTranscoding` method to set the `LiveTranscoding` class for the first time, the SDK - * does not trigger this callback. */ virtual void onTranscodingUpdated() {} - /** Occurs when the local audio route changes (for Android, iOS, and macOS only). - - The SDK triggers this callback when the local audio route switches to an - earpiece, speakerphone, headset, or Bluetooth device. - @param routing The current audio output routing: - - -1: Default. - - 0: Headset. - - 1: Earpiece. - - 2: Headset with no microphone. - - 3: Speakerphone. - - 4: Loudspeaker. - - 5: Bluetooth headset. + /** + * @brief Occurs when the local audio route changes. + * + * @note This method is for Android, iOS and macOS only. + * + * @param routing The current audio routing. See `AudioRoute`. + * */ virtual void onAudioRoutingChanged(int routing) { (void)routing; } /** - * Occurs when the state of the media stream relay changes. + * @brief Occurs when the state of the media stream relay changes. * - * The SDK reports the state of the current media relay and possible error messages in this - * callback. + * @details + * The SDK returns the state of the current media relay with any error message. + * + * @param state The state code. See `CHANNEL_MEDIA_RELAY_STATE`. + * @param code The error code of the channel media relay. See `CHANNEL_MEDIA_RELAY_ERROR`. 
* - * @param state The state code: - * - `RELAY_STATE_IDLE(0)`: The SDK is initializing. - * - `RELAY_STATE_CONNECTING(1)`: The SDK tries to relay the media stream to the destination - * channel. - * - `RELAY_STATE_RUNNING(2)`: The SDK successfully relays the media stream to the destination - * channel. - * - `RELAY_STATE_FAILURE(3)`: A failure occurs. See the details in `code`. - * @param code The error code: - * - `RELAY_OK(0)`: The state is normal. - * - `RELAY_ERROR_SERVER_ERROR_RESPONSE(1)`: An error occurs in the server response. - * - `RELAY_ERROR_SERVER_NO_RESPONSE(2)`: No server response. You can call the leaveChannel method - * to leave the channel. - * - `RELAY_ERROR_NO_RESOURCE_AVAILABLE(3)`: The SDK fails to access the service, probably due to - * limited resources of the server. - * - `RELAY_ERROR_FAILED_JOIN_SRC(4)`: Fails to send the relay request. - * - `RELAY_ERROR_FAILED_JOIN_DEST(5)`: Fails to accept the relay request. - * - `RELAY_ERROR_FAILED_PACKET_RECEIVED_FROM_SRC(6)`: The server fails to receive the media - * stream. - * - `RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST(7)`: The server fails to send the media stream. - * - `RELAY_ERROR_SERVER_CONNECTION_LOST(8)`: The SDK disconnects from the server due to poor - * network connections. You can call the leaveChannel method to leave the channel. - * - `RELAY_ERROR_INTERNAL_ERROR(9)`: An internal error occurs in the server. - * - `RELAY_ERROR_SRC_TOKEN_EXPIRED(10)`: The token of the source channel has expired. - * - `RELAY_ERROR_DEST_TOKEN_EXPIRED(11)`: The token of the destination channel has expired. */ virtual void onChannelMediaRelayStateChanged(int state, int code) { (void)state; @@ -2564,58 +3230,51 @@ class IRtcEngineEventHandler { } /** - * Occurs when the published media stream falls back to an audio-only stream due to poor network conditions or - * switches back to video stream after the network conditions improve. 
- * - * If you call `setLocalPublishFallbackOption` and set `option` as `STREAM_FALLBACK_OPTION_AUDIO_ONLY(2)`, this - * callback is triggered when the locally published stream falls back to audio-only mode due to poor uplink - * conditions, or when the audio stream switches back to the video after the uplink network condition improves. - * Once the published stream falls back to audio only, the remote app receives the `onRemoteVideoStateChanged` callback. + * @brief Occurs when the remote media stream falls back to the audio-only stream due to poor + * network conditions or switches back to the video stream after the network conditions improve. * - * @param isFallbackOrRecover Whether the published stream fell back to audio-only or switched back to the video: - * - `true`: The published stream fell back to audio-only due to poor network conditions. - * - `false`: The published stream switched back to the video after the network conditions improved. - */ - virtual void onLocalPublishFallbackToAudioOnly(bool isFallbackOrRecover) { - (void)isFallbackOrRecover; - } - - /** - * Occurs when the remote media stream falls back to audio-only stream due to poor network conditions or - * switches back to video stream after the network conditions improve. + * @details + * If you call `setRemoteSubscribeFallbackOption` and set `option` to + * `STREAM_FALLBACK_OPTION_AUDIO_ONLY`, the SDK triggers this callback in the following situations: + * - The downstream network condition is poor, and the subscribed video stream is downgraded to + * audio-only stream. + * - The downstream network condition has improved, and the subscribed stream has been restored to + * video stream. 
* - * If you call `setRemoteSubscribeFallbackOption` and set `option` as `STREAM_FALLBACK_OPTION_AUDIO_ONLY(2)`, this - * callback is triggered when the remotely subscribed media stream falls back to audio-only mode due to poor downlink - * conditions, or when the remotely subscribed media stream switches back to the video after the downlink network - * condition improves. + * @note Once the remote media stream switches to the low-quality video stream due to weak network + * conditions, you can monitor the stream switch between a high-quality and low-quality stream in + * the `onRemoteVideoStats` callback. * - * @note Once the remote media stream is switched to the low stream due to poor network conditions, you can monitor - * the stream switch between a high and low stream in the `onRemoteVideoStats` callback. + * @param uid The user ID of the remote user. + * @param isFallbackOrRecover - `true`: The subscribed media stream falls back to audio-only due to + * poor network conditions. + * - `false`: The subscribed media stream switches back to the video stream after the network + * conditions improve. * - * @param uid ID of the remote user sending the stream. - * @param isFallbackOrRecover Whether the remote media stream fell back to audio-only or switched back to the video: - * - `true`: The remote media stream fell back to audio-only due to poor network conditions. - * - `false`: The remote media stream switched back to the video stream after the network conditions improved. */ virtual void onRemoteSubscribeFallbackToAudioOnly(uid_t uid, bool isFallbackOrRecover) { (void)uid; (void)isFallbackOrRecover; } - /** Reports the transport-layer statistics of each remote audio stream. + /** + * @brief Reports the transport-layer statistics of each remote audio stream. + * * @deprecated Use `onRemoteAudioStats` instead. 
- - This callback reports the transport-layer statistics, such as the packet loss rate and network time delay, once every - two seconds after the local user receives an audio packet from a remote user. During a call, when the user receives - the audio packet sent by the remote user/host, the callback is triggered every 2 seconds. - - @param uid ID of the remote user whose audio data packet is received. - @param delay The network time delay (ms) from the sender to the receiver. - @param lost The Packet loss rate (%) of the audio packet sent from the remote - user. - @param rxKBitRate Received bitrate (Kbps) of the audio packet sent from the - remote user. - */ + * + * @details + * This callback reports the transport-layer statistics, such as the packet loss rate and network + * time delay after the local user receives an audio packet from a remote user. During a call, when + * the user receives the audio packet sent by the remote user, the callback is triggered every 2 + * seconds. + * + * @param uid The ID of the remote user sending the audio streams. + * @param delay The network delay (ms) from the remote user to the receiver. + * @param lost The packet loss rate (%) of the audio packet sent from the remote user to the + * receiver. + * @param rxKBitRate The bitrate of the received audio (Kbps). + * + */ virtual void onRemoteAudioTransportStats(uid_t uid, unsigned short delay, unsigned short lost, unsigned short rxKBitRate) __deprecated { (void)uid; (void)delay; @@ -2623,23 +3282,23 @@ class IRtcEngineEventHandler { (void)rxKBitRate; } - /** Reports the transport-layer statistics of each remote video stream. + /** + * @brief Reports the transport-layer statistics of each remote video stream. + * * @deprecated Use `onRemoteVideoStats` instead. - - This callback reports the transport-layer statistics, such as the packet loss rate and network time - delay, once every two seconds after the local user receives a video packet from a remote user. 
- - During a call, when the user receives the video packet sent by the remote user/host, the callback is - triggered every 2 seconds. - - @param uid ID of the remote user whose video packet is received. - @param delay The network time delay (ms) from the remote user sending the - video packet to the local user. - @param lost The packet loss rate (%) of the video packet sent from the remote - user. - @param rxKBitRate The bitrate (Kbps) of the video packet sent from - the remote user. - */ + * + * @details + * This callback reports the transport-layer statistics, such as the packet loss rate and network + * time delay after the local user receives a video packet from a remote user. + * During a call, when the user receives the video packet sent by the remote user/host, the callback + * is triggered every 2 seconds. + * + * @param uid The ID of the remote user sending the video packets. + * @param delay The network delay (ms) from the sender to the receiver. + * @param lost The packet loss rate (%) of the video packet sent from the remote user. + * @param rxKBitRate The bitrate of the received video (Kbps). + * + */ virtual void onRemoteVideoTransportStats(uid_t uid, unsigned short delay, unsigned short lost, unsigned short rxKBitRate) __deprecated { (void)uid; (void)delay; @@ -2647,13 +3306,16 @@ class IRtcEngineEventHandler { (void)rxKBitRate; } - /** Occurs when the network connection state changes. + /** + * @brief Occurs when the network connection state changes. * + * @details * When the network connection state changes, the SDK triggers this callback and reports the current * connection state and the reason for the change. - - @param state The current connection state. See #CONNECTION_STATE_TYPE. - @param reason The reason for a connection state change. See #CONNECTION_CHANGED_REASON_TYPE. + * + * @param state The current connection state. See `CONNECTION_STATE_TYPE`. + * @param reason The reason for a connection state change. 
See `CONNECTION_CHANGED_REASON_TYPE`. + * */ virtual void onConnectionStateChanged( CONNECTION_STATE_TYPE state, CONNECTION_CHANGED_REASON_TYPE reason) { @@ -2661,87 +3323,89 @@ class IRtcEngineEventHandler { (void)reason; } - /** Occurs when the WIFI message need be sent to the user. + /** + * @brief Occurs when the local network type changes. * - * @param reason The reason of notifying the user of a message. - * @param action Suggest an action for the user. - * @param wlAccMsg The message content of notifying the user. - */ - virtual void onWlAccMessage(WLACC_MESSAGE_REASON reason, WLACC_SUGGEST_ACTION action, const char* wlAccMsg) { - (void)reason; - (void)action; - (void)wlAccMsg; - } - - /** Occurs when SDK statistics wifi acceleration optimization effect. + * @details + * This callback occurs when the connection state of the local user changes. You can get the + * connection state and reason for the state change in this callback. When the network connection is + * interrupted, this callback indicates whether the interruption is caused by a network type change + * or poor network conditions. * - * @param currentStats Instantaneous value of optimization effect. - * @param averageStats Average value of cumulative optimization effect. - */ - virtual void onWlAccStats(const WlAccStats& currentStats, const WlAccStats& averageStats) { - (void)currentStats; - (void)averageStats; - } - - /** Occurs when the local network type changes. + * @param type The type of the local network connection. See `NETWORK_TYPE`. * - * This callback occurs when the connection state of the local user changes. You can get the - * connection state and reason for the state change in this callback. When the network connection - * is interrupted, this callback indicates whether the interruption is caused by a network type - * change or poor network conditions. - - @param type The type of the local network connection. See #NETWORK_TYPE. 
*/ virtual void onNetworkTypeChanged(NETWORK_TYPE type) { (void)type; } - /** Reports the built-in encryption errors. + /** + * @brief Reports the built-in encryption errors. * + * @details * When encryption is enabled by calling `enableEncryption`, the SDK triggers this callback if an * error occurs in encryption or decryption on the sender or the receiver side. - - @param errorType The error type. See #ENCRYPTION_ERROR_TYPE. + * + * @param errorType Details about the error type. See `ENCRYPTION_ERROR_TYPE`. + * */ virtual void onEncryptionError(ENCRYPTION_ERROR_TYPE errorType) { (void)errorType; } - /** Occurs when the SDK cannot get the device permission. + /** + * @brief Occurs when the SDK cannot get the device permission. * + * @details * When the SDK fails to get the device permission, the SDK triggers this callback to report which * device permission cannot be got. * - * @note This method is for Android and iOS only. - - @param permissionType The type of the device permission. See #PERMISSION_TYPE. - */ + * @param permissionType The type of the device permission. See `PERMISSION_TYPE`. + * + */ virtual void onPermissionError(PERMISSION_TYPE permissionType) { (void)permissionType; } - /** Occurs when the local user registers a user account. +#if defined(__ANDROID__) + /** + * Reports the permission granted. + * @param permission {@link PERMISSION} + */ + virtual void onPermissionGranted(agora::rtc::PERMISSION_TYPE permissionType) {} +#endif + + /** + * @brief Occurs when the local user registers a user account. 
* + * @details * After the local user successfully calls `registerLocalUserAccount` to register the user account - * or calls `joinChannelWithUserAccount` to join a channel, the SDK triggers the callback and + * or calls `joinChannelWithUserAccount(const char* token, const char* channelId, const char* + * userAccount, const ChannelMediaOptions& options)` to join a channel, the SDK triggers the + * callback and * informs the local user's UID and User Account. - - @param uid The ID of the local user. - @param userAccount The user account of the local user. + * + * @param uid The ID of the local user. + * @param userAccount The user account of the local user. + * */ virtual void onLocalUserRegistered(uid_t uid, const char* userAccount) { (void)uid; (void)userAccount; } - /** Occurs when the SDK gets the user ID and user account of the remote user. - - After a remote user joins the channel, the SDK gets the UID and user account of the remote user, - caches them in a mapping table object (`userInfo`), and triggers this callback on the local client. - - @param uid The ID of the remote user. - @param info The `UserInfo` object that contains the user ID and user account of the remote user. - */ + /** + * @brief Occurs when the SDK gets the user ID and user account of the remote user. + * + * @details + * After a remote user joins the channel, the SDK gets the UID and user account of the remote user, + * caches them in a mapping table object, and triggers this callback on the local client. + * + * @param uid The user ID of the remote user. + * @param info The UserInfo object that contains the user ID and user account of the remote user. + * See `UserInfo` for details. + * + */ virtual void onUserInfoUpdated(uid_t uid, const UserInfo& info) { (void)uid; (void)info; @@ -2759,11 +3423,20 @@ class IRtcEngineEventHandler { } /** - * Reports the tracing result of video rendering event of the user. - * + * @brief Video frame rendering event callback. 
+ * + * @details + * After calling the `startMediaRenderingTracing` method or joining a channel, the SDK triggers this + * callback to report the events of video frame rendering and the indicators during the rendering + * process. Developers can optimize the indicators to improve the efficiency of the first video + * frame rendering. + * * @param uid The user ID. - * @param currentEvent The current event of the tracing result: #MEDIA_TRACE_EVENT. - * @param tracingInfo The tracing result: #VideoRenderingTracingInfo. + * @param currentEvent The current video frame rendering event. See `MEDIA_TRACE_EVENT`. + * @param tracingInfo The indicators during the video frame rendering process. Developers need to + * reduce the value of indicators as much as possible in order to improve the efficiency of the + * first video frame rendering. See `VideoRenderingTracingInfo`. + * */ virtual void onVideoRenderingTracingResult(uid_t uid, MEDIA_TRACE_EVENT currentEvent, VideoRenderingTracingInfo tracingInfo) { (void)uid; @@ -2772,10 +3445,16 @@ class IRtcEngineEventHandler { } /** - * Occurs when local video transcoder stream has an error. + * @brief Occurs when there's an error during the local video mixing. + * + * @details + * When you fail to call `startLocalVideoTranscoder` or `updateLocalTranscoderConfiguration`, the + * SDK triggers this callback to report the reason. + * + * @param stream The video streams that cannot be mixed during video mixing. See + * `TranscodingVideoStream`. + * @param error The reason for local video mixing error. See `VIDEO_TRANSCODER_ERROR`. * - * @param stream Stream type of TranscodingVideoStream. - * @param error Error code of VIDEO_TRANSCODER_ERROR. */ virtual void onLocalVideoTranscoderError(const TranscodingVideoStream& stream, VIDEO_TRANSCODER_ERROR error){ (void)stream; @@ -2795,13 +3474,14 @@ class IRtcEngineEventHandler { } /** - * Occurs when the audio subscribing state changes. + * @brief Occurs when the audio subscribing state changes. 
* - * @param channel The name of the channel. - * @param uid The ID of the remote user. - * @param oldState The previous subscribing status: #STREAM_SUBSCRIBE_STATE. - * @param newState The current subscribing status: #STREAM_SUBSCRIBE_STATE. + * @param channel The channel name. + * @param uid The user ID of the remote user. + * @param oldState The previous subscribing status. See `STREAM_SUBSCRIBE_STATE`. + * @param newState The current subscribing status. See `STREAM_SUBSCRIBE_STATE`. * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * */ virtual void onAudioSubscribeStateChanged(const char* channel, uid_t uid, STREAM_SUBSCRIBE_STATE oldState, STREAM_SUBSCRIBE_STATE newState, int elapseSinceLastState) { (void)channel; @@ -2812,13 +3492,14 @@ class IRtcEngineEventHandler { } /** - * Occurs when the video subscribing state changes. + * @brief Occurs when the video subscribing state changes. * - * @param channel The name of the channel. - * @param uid The ID of the remote user. - * @param oldState The previous subscribing status: #STREAM_SUBSCRIBE_STATE. - * @param newState The current subscribing status: #STREAM_SUBSCRIBE_STATE. + * @param channel The channel name. + * @param uid The user ID of the remote user. + * @param oldState The previous subscribing status. See `STREAM_SUBSCRIBE_STATE`. + * @param newState The current subscribing status. See `STREAM_SUBSCRIBE_STATE`. * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * */ virtual void onVideoSubscribeStateChanged(const char* channel, uid_t uid, STREAM_SUBSCRIBE_STATE oldState, STREAM_SUBSCRIBE_STATE newState, int elapseSinceLastState) { (void)channel; @@ -2829,12 +3510,13 @@ class IRtcEngineEventHandler { } /** - * Occurs when the audio publishing state changes. + * @brief Occurs when the audio publishing state changes. * - * @param channel The name of the channel. 
- * @param oldState The previous publishing state: #STREAM_PUBLISH_STATE. - * @param newState The current publishing state: #STREAM_PUBLISH_STATE. + * @param channel The channel name. + * @param oldState The previous publishing state. See `STREAM_PUBLISH_STATE`. + * @param newState The current publishing state. See `STREAM_PUBLISH_STATE`. + * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * */ virtual void onAudioPublishStateChanged(const char* channel, STREAM_PUBLISH_STATE oldState, STREAM_PUBLISH_STATE newState, int elapseSinceLastState) { (void)channel; @@ -2844,13 +3526,14 @@ } /** - * Occurs when the video publishing state changes. + * @brief Occurs when the video publishing state changes. * - * @param source The video source type. - * @param channel The name of the channel. - * @param oldState The previous publishing state: #STREAM_PUBLISH_STATE. - * @param newState The current publishing state: #STREAM_PUBLISH_STATE. + * @param channel The channel name. + * @param source The type of the video source. See `VIDEO_SOURCE_TYPE`. + * @param oldState The previous publishing state. See `STREAM_PUBLISH_STATE`. + * @param newState The current publishing state. See `STREAM_PUBLISH_STATE`. + * @param elapseSinceLastState The time elapsed (ms) from the previous state to the current state. + * */ virtual void onVideoPublishStateChanged(VIDEO_SOURCE_TYPE source, const char* channel, STREAM_PUBLISH_STATE oldState, STREAM_PUBLISH_STATE newState, int elapseSinceLastState) { (void)source; @@ -2861,13 +3544,23 @@ } /** - * Occurs when receive a video transcoder stream which has video layout info. + * @brief Occurs when the local user receives a mixed video stream carrying layout information.
+ * + * @details + * When the local user receives a mixed video stream sent by the video mixing server for the first + * time, or when there is a change in the layout information of the mixed stream, the SDK triggers + * this callback, reporting the layout information of each sub-video stream within the mixed video + * stream. + * + * @note This callback is for Android and iOS only. + * + * @param uid User ID who published this mixed video stream. + * @param width Width (px) of the mixed video stream. + * @param height Height (px) of the mixed video stream. + * @param layoutCount The number of layout information in the mixed video stream. + * @param layoutlist Layout information of a specific sub-video stream within the mixed stream. See + * `VideoLayout`. * - * @param uid user id of the transcoded stream. - * @param width width of the transcoded stream. - * @param height height of the transcoded stream. - * @param layoutCount count of layout info in the transcoded stream. - * @param layoutlist video layout info list of the transcoded stream. */ virtual void onTranscodedStreamLayoutInfo(uid_t uid, int width, int height, int layoutCount,const VideoLayout* layoutlist) { (void)uid; @@ -2883,22 +3576,24 @@ * @param uid ID of the remote user. * @param metadata The pointer of metadata * @param length Size of metadata - * @technical preview + * @technical preview */ virtual void onAudioMetadataReceived(uid_t uid, const char* metadata, size_t length) { (void)uid; (void)metadata; (void)length; } - + /** - * The event callback of the extension. + * @brief The event callback of the extension. * + * @details * To listen for events while the extension is running, you need to register this callback. - * - * @param context The context of the extension. + * + * @param context The context information of the extension, see `ExtensionContext`. + * @param key The key of the extension. * @param value The value of the extension key.
+ * */ virtual void onExtensionEventWithContext(const ExtensionContext &context, const char* key, const char* value) { (void)context; @@ -2907,36 +3602,44 @@ } /** - * Occurs when the extension is enabled. - * - * After a successful creation of filter , the extension triggers this callback. - * - * @param context The context of the extension. + * @brief Occurs when the extension is enabled. + * + * @details + * The callback is triggered after the extension is successfully enabled. + * + * @param context The context information of the extension, see `ExtensionContext`. + * */ virtual void onExtensionStartedWithContext(const ExtensionContext &context) { (void)context; } /** - * Occurs when the extension is disabled. - * - * After a successful destroy filter, the extension triggers this callback. - * - * @param context The context of the extension. + * @brief Occurs when the extension is disabled. + * + * @details + * The callback is triggered after the extension is successfully disabled. + * + * @param context The context information of the extension, see `ExtensionContext`. + * */ virtual void onExtensionStoppedWithContext(const ExtensionContext &context) { (void)context; } /** - * Occurs when the extension runs incorrectly. - * - * When the extension runs in error, the extension triggers - * this callback and reports the error code and reason. + * @brief Occurs when the extension runs incorrectly. + * + * @details + * In case of extension enabling failure or runtime errors, the extension triggers this callback and + * reports the error code along with the reasons. + * + * @param context The context information of the extension, see `ExtensionContext`. + * @param error Error code. For details, see the extension documentation provided by the extension + * provider. + * @param message Reason. For details, see the extension documentation provided by the extension + * provider. * - * @param context The context of the extension.
- * @param error The error code. For details, see the extension documentation provided by the extension provider. - * @param message The error message. For details, see the extension documentation provided by the extension provider. */ virtual void onExtensionErrorWithContext(const ExtensionContext &context, int error, const char* message) { (void)context; @@ -2953,6 +3656,36 @@ class IRtcEngineEventHandler { virtual void onSetRtmFlagResult(int code) { (void)code; } + + /** + * @brief Report the multipath transmission statistics + * + * @post This callback is triggered after you set `enableMultipath` to `true` to enable multipath transmission. + * + * @since 4.6.0 + * + * @param stats The multipath statistics. See the MultipathStats structure for details. + */ virtual void onMultipathStats(const MultipathStats& stats) { + (void)stats; + } + + /** + * @brief Callback for `renewToken` call result. + * + * @since 4.6.0 + * + * @details + * This callback is triggered after the user calls the `renewToken` method to update the token, and + * is used to notify the app of the result. + * + * @param token Token. + * @param code Error code. See `RENEW_TOKEN_ERROR_CODE`. + * + */ + virtual void onRenewTokenResult(const char* token, RENEW_TOKEN_ERROR_CODE code) { + (void)token; + (void)code; + } }; /** @@ -2963,9 +3696,10 @@ class IVideoDeviceCollection { virtual ~IVideoDeviceCollection() {} /** - * Gets the total number of the indexed video capture devices in the system. + * @brief Gets the total number of the indexed video devices in the system. * - * @return The total number of the indexed video capture devices. + * @return + * The total number of the indexed video devices in the system. */ virtual int getCount() = 0; @@ -2994,7 +3728,7 @@ class IVideoDeviceCollection { char deviceIdUTF8[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Releases all the resources occupied by the IVideoDeviceCollection object. 
+ * @brief Releases all the resources occupied by the `IVideoDeviceCollection` object. */ virtual void release() = 0; }; @@ -3006,15 +3740,17 @@ class IVideoDeviceManager { public: virtual ~IVideoDeviceManager() {} /** - * Enumerates the video devices. + * @brief Enumerates the video devices. * + * @details * This method returns an `IVideoDeviceCollection` object including all video devices in the system. * With the `IVideoDeviceCollection` object, the application can enumerate video devices. The - * application must call the release method to release the returned object after using it. + * application must call the `release` method to release the returned object after using it. + * + * @note This method is for Windows and macOS only. * * @return - * - Success: An `IVideoDeviceCollection` object including all video devices in the system. - * - Failure: NULL. + * - Success: One `IVideoDeviceCollection` object including all video devices in the system. */ virtual IVideoDeviceCollection* enumerateVideoDevices() = 0; @@ -3031,8 +3767,12 @@ class IVideoDeviceManager { virtual int setDevice(const char deviceIdUTF8[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Retrieves the current video capture device. - * @param deviceIdUTF8 Output parameter. The device ID. The maximum length is #MAX_DEVICE_ID_LENGTH_TYPE. + * @brief Retrieves the current video capture device. + * + * @note This method is for Windows and macOS only. + * + * @param deviceIdUTF8 An output parameter. The device ID. The maximum length is + * `MAX_DEVICE_ID_LENGTH_TYPE`. * * @return * - 0: Success. 
@@ -3040,39 +3780,45 @@ class IVideoDeviceManager { */ virtual int getDevice(char deviceIdUTF8[MAX_DEVICE_ID_LENGTH]) = 0; -#if defined(_WIN32) || (defined(__linux__) && !defined(__ANDROID__)) || \ +#if defined(_WIN32) || (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__)) || \ (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) /** - * Gets the number of video formats supported by the specified video capture device. + * @brief Gets the number of video formats supported by the specified video capture device. * + * @details * Video capture devices may support multiple video formats, and each format supports different * combinations of video frame width, video frame height, and frame rate. - * * You can call this method to get how many video formats the specified video capture device can * support, and then call `getCapability` to get the specific video frame information in the * specified video format. * + * @note This method is for Windows and macOS only. + * * @param deviceIdUTF8 The ID of the video capture device. * * @return - * - 0: Success. Returns the number of video formats supported by this device. For example: If the + * - > 0: Success. Returns the number of video formats supported by this device. For example: If the * specified camera supports 10 different video formats, the return value is 10. - * - < 0: Failure. + * - ≤ 0: Failure. */ virtual int numberOfCapabilities(const char* deviceIdUTF8) = 0; /** - * Gets the detailed video frame information of the video capture device in the specified video format. + * @brief Gets the detailed video frame information of the video capture device in the specified + * video format. * - * After calling `numberOfCapabilities` to get the number of video formats supported by the video capture - * device, you can call this method to get the specific video frame information supported by the - * specified index number. 
+ * @details + * After calling `numberOfCapabilities` to get the number of video formats supported by the video + * capture device, you can call this method to get the specific video frame information supported by + * the specified index number. * - * @param deviceIdUTF8 ID of the video capture device. - * @param deviceCapabilityNumber The index number of the video format. If the return value of `numberOfCapabilities` - * is i, the value range of this parameter is [0,i). - * @param capability Output parameter. Indicates the specific information of the specified video format, - * including width (px), height (px), and frame rate (fps). See VideoFormat. + * @note This method is for Windows and macOS only. + * + * @param deviceIdUTF8 The ID of the video capture device. + * @param deviceCapabilityNumber The index number of the video format. If the return value of + * `numberOfCapabilities` is i, the value range of this parameter is [0,i). + * @param capability An output parameter. Indicates the specific information of the specified video + * format, including width (px), height (px), and frame rate (fps). See `VideoFormat`. * * @return * - 0: Success. @@ -3104,34 +3850,238 @@ class IVideoDeviceManager { virtual int stopDeviceTest() = 0; /** - * Releases all the resources occupied by the `IVideoDeviceManager` object. + * @brief Releases all the resources occupied by the `IVideoDeviceManager` object. + * + * @note This method is for Windows and macOS only. + * */ virtual void release() = 0; }; /** - * The context of IRtcEngine. + * @brief Provides methods to manage and configure video effects, such as beauty, style makeup, and filter. + * + * @since v4.6.0 + */ +class IVideoEffectObject : public RefCountInterface { + public: + virtual ~IVideoEffectObject() {} + + /** + * @brief Types of applicable video effect nodes. + * + * @since v4.6.0 + */ + enum class VIDEO_EFFECT_NODE_ID : uint32_t { + /** + * (1): Beauty effect node. 
+ */ + BEAUTY = 1U << 0, + /** + * (2): Style makeup effect node. + */ + STYLE_MAKEUP = 1U << 1, + /** + * (4): Filter effect node. + */ + FILTER = 1U << 2, + }; + + /** + * @brief Actions that can be performed on video effect nodes. + * + * @since v4.6.0 + */ + enum VIDEO_EFFECT_ACTION { + /** + * (1): Save the current parameters of the video effect. + */ + SAVE = 1, + /** + * (2): Reset the video effect to default parameters. + */ + RESET = 2, + }; + + /** + * @brief Adds or updates the video effect for the specified node ID and template. + * + * @since v4.6.0 + * + * @note + * Priority rules: + * - The `VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP` node takes precedence over the + * `VIDEO_EFFECT_NODE_ID::FILTER` parameter. + * - To apply the `VIDEO_EFFECT_NODE_ID::FILTER` parameter, you must first remove the + * `VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP` node: + * ``` + * removeVideoEffect(VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP); + * addOrUpdateVideoEffect(VIDEO_EFFECT_NODE_ID::FILTER, "template name"); + * ``` + * + * @param nodeId The unique identifier or combination of identifiers for the video effect node. See + * `VIDEO_EFFECT_NODE_ID`. + * Examples: + * - Single effect: `VIDEO_EFFECT_NODE_ID::BEAUTY` + * - Combined effects: `VIDEO_EFFECT_NODE_ID::BEAUTY | VIDEO_EFFECT_NODE_ID::STYLE_MAKEUP` + * @param templateName The name of the effect template. If set to null or an empty string, the SDK + * loads the default configuration from the resource package. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int addOrUpdateVideoEffect(uint32_t nodeId, const char* templateName) = 0; + + /** + * @brief Removes the video effect with the specified node ID. + * + * @since v4.6.0 + * + * @param nodeId The unique identifier of the video effect node to remove. See + * `VIDEO_EFFECT_NODE_ID`. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ + virtual int removeVideoEffect(uint32_t nodeId) = 0; + + /** + * @brief Performs an action on the specified video effect node. + * + * @since v4.6.0 + * + * @param nodeId The unique identifier of the video effect node. + * @param actionId The action to perform. See `VIDEO_EFFECT_ACTION`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int performVideoEffectAction(uint32_t nodeId, VIDEO_EFFECT_ACTION actionId) = 0; + + /** + * @brief Sets the float parameter for video effects. + * + * @since v4.6.0 + * + * @param option The category of the parameter option. + * @param key The key name of the parameter. + * @param param The float value to set. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setVideoEffectFloatParam(const char* option, const char* key, float param) = 0; + + /** + * @brief Sets an integer parameter for video effects. + * + * @since v4.6.0 + * + * @param option The category of the option to which the parameter belongs. + * @param key The key name of the parameter. + * @param param The integer parameter value to set. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setVideoEffectIntParam(const char* option, const char* key, int param) = 0; + + /** + * @brief Sets the boolean parameter for video effects. + * + * @since v4.6.0 + * + * @param option The category of the parameter option. + * @param key The key name of the parameter. + * @param param The boolean value to set. + * - `true`: Enables the option. + * - `false`: Disables the option. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setVideoEffectBoolParam(const char* option, const char* key, bool param) = 0; + + /** + * @brief Retrieves `float` type parameters in video effects. + * + * @since v4.6.0 + * + * @details + * Used to retrieve the value of a `float` type parameter corresponding to the specified option and + * key in video effects. 
+ * + * @param option The category of the option to which the parameter belongs. + * @param key The key name of the parameter. + * + * @return + * - If the parameter exists, returns the corresponding `float` value. + * - If the parameter does not exist or an error occurs, returns 0.0f. + */ + virtual float getVideoEffectFloatParam(const char* option, const char* key) = 0; + + /** + * @brief Retrieves integer parameters in video effects. + * + * @since v4.6.0 + * + * @details + * Used to retrieve integer-type parameters in video effects. + * + * @param option The category of the parameter option. + * @param key The key name of the parameter. + * + * @return + * - If the parameter exists, returns the corresponding integer value. + * - If the parameter does not exist or an error occurs, returns 0. + */ + virtual int getVideoEffectIntParam(const char* option, const char* key) = 0; + + /** + * @brief Gets the boolean parameter in video effects. + * + * @since v4.6.0 + * + * @param option The option category to which the parameter belongs. + * @param key The key name of the parameter. + * + * @return + * - `true`: The parameter is enabled. + * - `false`: The parameter is not enabled or does not exist. + */ + virtual bool getVideoEffectBoolParam(const char* option, const char* key) = 0; + +}; + +/** + * @brief Configurations for the `RtcEngineContext` instance. */ struct RtcEngineContext { /** - * The event handler for IRtcEngine. + * The event handler for `IRtcEngine`. See `IRtcEngineEventHandler`. */ IRtcEngineEventHandler* eventHandler; /** * The App ID issued by Agora for your project. Only users in apps with the same App ID can join the - * same channel and communicate with each other. An App ID can only be used to create one `IRtcEngine` - * instance. To change your App ID, call release to destroy the current IRtcEngine instance, and then - * create a new one. + * same channel and communicate with each other. 
An App ID can only be used to create one + * `IRtcEngine` instance. To change your App ID, call `release` to destroy the current `IRtcEngine` + * instance, and then create a new one. */ const char* appId; /** - * - For Android, it is the context of Activity or Application. - * - For Windows, it is the window handle of app. Once set, this parameter enables you to plug - * or unplug the video devices while they are powered. + * - For Windows, it is the window handle of the app. Once set, this parameter enables you to + * connect or disconnect the video devices while they are powered. + * - For Android, it is the context of Android Activity. */ void* context; /** - * The channel profile. See #CHANNEL_PROFILE_TYPE. + * The channel profile. See `CHANNEL_PROFILE_TYPE`. */ CHANNEL_PROFILE_TYPE channelProfile; @@ -3141,37 +4091,35 @@ struct RtcEngineContext { const char* license; /** - * The audio application scenario. See #AUDIO_SCENARIO_TYPE. - * - * @note Agora recommends the following scenarios: - * - `AUDIO_SCENARIO_DEFAULT(0)` - * - `AUDIO_SCENARIO_GAME_STREAMING(3)` + * The audio scenarios. Under different audio scenarios, the device uses different volume types. See + * `AUDIO_SCENARIO_TYPE`. */ AUDIO_SCENARIO_TYPE audioScenario; /** - * The region for connection. This is an advanced feature and applies to scenarios that have regional restrictions. - * - * For the regions that Agora supports, see #AREA_CODE. The area codes support bitwise operation. - * - * After specifying the region, the app integrated with the Agora SDK connects to the Agora servers - * within that region. + * The region for connection. This is an advanced feature and applies to scenarios that have + * regional restrictions. For details on supported regions, see `AREA_CODE`. The area codes support + * bitwise operation. */ unsigned int areaCode; /** - * The log files that the SDK outputs. See LogConfig. 
- *
- * By default, the SDK generates five SDK log files and five API call log files with the following rules:
- * - The SDK log files are: `agorasdk.log`, `agorasdk.1.log`, `agorasdk.2.log`, `agorasdk.3.log`, and `agorasdk.4.log`.
- * - The API call log files are: `agoraapi.log`, `agoraapi.1.log`, `agoraapi.2.log`, `agoraapi.3.log`, and `agoraapi.4.log`.
- * - The default size for each SDK log file is 1,024 KB; the default size for each API call log file is 2,048 KB. These log files are encoded in UTF-8.
+ * Sets the log file size. See `LogConfig`.
+ * By default, the SDK generates five SDK log files and five API call log files with the following
+ * rules:
+ * - The SDK log files are: `agorasdk.log`, `agorasdk.1.log`, `agorasdk.2.log`, `agorasdk.3.log`,
+ * and `agorasdk.4.log`.
+ * - The API call log files are: `agoraapi.log`, `agoraapi.1.log`, `agoraapi.2.log`,
+ * `agoraapi.3.log`, and `agoraapi.4.log`.
+ * - The default size of each SDK log file and API log file is 2,048 KB. These log files are encoded
+ * in UTF-8.
 * - The SDK writes the latest logs in `agorasdk.log` or `agoraapi.log`.
- * - When `agorasdk.log` is full, the SDK processes the log files in the following order:
- *   - Delete the `agorasdk.4.log` file (if any).
- *   - Rename `agorasdk.3.log` to `agorasdk.4.log`.
- *   - Rename `agorasdk.2.log` to `agorasdk.3.log`.
- *   - Rename `agorasdk.1.log` to `agorasdk.2.log`.
- *   - Create a new `agorasdk.log` file.
+ * - When `agorasdk.log` is full, the SDK processes the log files in the following order:
+ * 1. Delete the `agorasdk.4.log` file (if any).
+ * 2. Rename `agorasdk.3.log` to `agorasdk.4.log`.
+ * 3. Rename `agorasdk.2.log` to `agorasdk.3.log`.
+ * 4. Rename `agorasdk.1.log` to `agorasdk.2.log`.
+ * 5. Create a new `agorasdk.log` file.
+ * - The overwrite rules for the `agoraapi.log` file are the same as for `agorasdk.log`.
*/ commons::LogConfig logConfig; @@ -3181,7 +4129,7 @@ struct RtcEngineContext { Optional threadPriority; /** - * Whether to use egl context in the current thread as sdk‘s root egl context, + * Whether to use egl context in the current thread as sdk's root egl context, * which is shared by all egl related modules. eg. camera capture, video renderer. * * @note @@ -3190,16 +4138,20 @@ struct RtcEngineContext { bool useExternalEglContext; /** - * Determines whether to enable domain limit - * -true: only connect to servers which already parsed by DNS - * -false: (Default) connect to servers with no limit + * Whether to enable domain name restriction: + * - `true`: Enables the domain name restriction. This value is suitable for scenarios where IoT + * devices use IoT cards for network access. The SDK will only connect to servers in the domain name + * or IP whitelist that has been reported to the operator. + * - `false`: (Default) Disables the domain name restriction. This value is suitable for most common + * scenarios. */ bool domainLimit; /** - * Whether to automatically register Agora extensions when initializing RtcEngine. - * -true: (Default) Automatically register Agora extensions. - * -false: Do not automatically register Agora extensions. The user calls EnableExtension to manually register an Agora extension. + * Whether to automatically register the Agora extensions when initializing `IRtcEngine`: + * - `true`: (Default) Automatically register the Agora extensions when initializing `IRtcEngine`. + * - `false`: Do not register the Agora extensions when initializing `IRtcEngine`. You need to call + * `enableExtension` to register the Agora extensions. */ bool autoRegisterAgoraExtensions; @@ -3215,16 +4167,17 @@ class IMetadataObserver { public: virtual ~IMetadataObserver() {} - /** The metadata type. - * - * @note We only support video metadata for now. + /** + * @brief Metadata type of the observer. We only support video metadata for now. 
*/ enum METADATA_TYPE { - /** -1: (Not supported) Unknown. + /** + * -1: The type of metadata is unknown. */ UNKNOWN_METADATA = -1, - /** 0: (Supported) Video metadata. + /** + * 0: The type of metadata is video. */ VIDEO_METADATA = 0, }; @@ -3238,124 +4191,184 @@ class IMetadataObserver { MAX_METADATA_SIZE_IN_BYTE = 1024 }; - /** Metadata. + /** + * @brief Media metadata. */ struct Metadata { - /** The channel ID of the `metadata`. + /** + * The channel name. */ const char* channelId; - /** The User ID that sent the metadata. - * - For the receiver: The user ID of the user who sent the `metadata`. - * - For the sender: Ignore this value. + /** + * The user ID. + * - For the recipient: The ID of the remote user who sent the `Metadata`. + * - For the sender: Ignore it. */ unsigned int uid; - /** The buffer size of the sent or received `metadata`. + /** + * The buffer size of the sent or received `Metadata`. */ unsigned int size; - /** The buffer address of the sent or received `metadata`. + /** + * The buffer address of the received `Metadata`. */ unsigned char *buffer; - /** The NTP timestamp (ms) when the metadata is sent. - * @note If the receiver is audience, the receiver cannot get the NTP timestamp (ms). + /** + * The timestamp (ms) of when the `Metadata` is sent. */ long long timeStampMs; Metadata() : channelId(NULL), uid(0), size(0), buffer(NULL), timeStampMs(0) {} }; - /** Occurs when the SDK requests the maximum size of the metadata. - * - * - * After successfully complete the registration by calling `registerMediaMetadataObserver`, the SDK - * triggers this callback once every video frame is sent. You need to specify the maximum size of - * the metadata in the return value of this callback. - * - * @return The maximum size of the buffer of the metadata that you want to use. The highest value is - * 1024 bytes. Ensure that you set the return value. 
- */ - virtual int getMaxMetadataSize() { return DEFAULT_METADATA_SIZE_IN_BYTE; } - - /** Occurs when the local user receives the metadata. - - @note Ensure that the size of the metadata does not exceed the value set in the `getMaxMetadataSize` callback. - - @param metadata The metadata that the user wants to send. For details, see Metadata. - @param source_type The video data type: #VIDEO_SOURCE_TYPE. - @return - - true: Send. - - false: Do not send. + /** + * @brief Occurs when the SDK requests the maximum size of the metadata. + * + * @details + * After successfully complete the registration by calling `registerMediaMetadataObserver`, the SDK + * triggers this callback once every video frame is sent. You need to specify the maximum size of + * the metadata in the return value of this callback. + * + * @return + * The maximum size of the `buffer` of the metadata that you want to use. The highest value is 1024 + * bytes. Ensure that you set the return value. */ - virtual bool onReadyToSendMetadata(Metadata &metadata, VIDEO_SOURCE_TYPE source_type) = 0; + virtual int getMaxMetadataSize() { return DEFAULT_METADATA_SIZE_IN_BYTE; } - /** Occurs when the local user receives the metadata. + /** + * @brief Occurs when the SDK is ready to send metadata. * - * @param metadata The metadata received. See Metadata. + * @details + * This callback is triggered when the SDK is ready to send metadata. + * + * @note Ensure that the size of the metadata does not exceed the value set in the + * `getMaxMetadataSize` callback. + * + * @param source_type Video data type. See `VIDEO_SOURCE_TYPE`. + * @param metadata The metadata that the user wants to send. See `Metadata`. + * + * @return + * - `true`: Send the video frame. + * - `false`: Do not send the video frame. + */ + virtual bool onReadyToSendMetadata(Metadata &metadata, VIDEO_SOURCE_TYPE source_type) = 0; + + /** + * @brief Occurs when the local user receives the metadata. + * + * @param metadata The metadata received. 
See `Metadata`. * - * @note If the receiver is audience, the receiver cannot get the NTP timestamp (ms) - * that the metadata sends. */ virtual void onMetadataReceived(const Metadata& metadata) = 0; }; -// The reason codes for media streaming -// GENERATED_JAVA_ENUM_PACKAGE: io.agora.streaming +/** + * @brief Reasons for the changes in CDN streaming status. + * + * @deprecated v4.6.0. + */ enum DIRECT_CDN_STREAMING_REASON { // No error occurs. + /** + * 0: No error. + */ DIRECT_CDN_STREAMING_REASON_OK = 0, // A general error occurs (no specified reason). + /** + * 1: A general error; no specific reason. You can try to push the media stream again. + */ DIRECT_CDN_STREAMING_REASON_FAILED = 1, // Audio publication error. + /** + * 2: An error occurs when pushing audio streams. For example, the local audio capture device is not + * working properly, is occupied by another process, or does not get the permission required. + */ DIRECT_CDN_STREAMING_REASON_AUDIO_PUBLICATION = 2, // Video publication error. + /** + * 3: An error occurs when pushing video streams. For example, the local video capture device is not + * working properly, is occupied by another process, or does not get the permission required. + */ DIRECT_CDN_STREAMING_REASON_VIDEO_PUBLICATION = 3, + /** + * 4: Fails to connect to the CDN. + */ DIRECT_CDN_STREAMING_REASON_NET_CONNECT = 4, // Already exist stream name. + /** + * 5: The URL is already being used. Use a new URL for streaming. + */ DIRECT_CDN_STREAMING_REASON_BAD_NAME = 5, }; -// The connection state of media streaming -// GENERATED_JAVA_ENUM_PACKAGE: io.agora.streaming +/** + * @brief The current CDN streaming state. + * + * @deprecated v4.6.0. + */ enum DIRECT_CDN_STREAMING_STATE { + /** + * 0: The initial state before the CDN streaming starts. + */ DIRECT_CDN_STREAMING_STATE_IDLE = 0, + /** + * 1: Streams are being pushed to the CDN. The SDK returns this value when you call the + * `startDirectCdnStreaming` method to push streams to the CDN. 
+ */ DIRECT_CDN_STREAMING_STATE_RUNNING = 1, + /** + * 2: Stops pushing streams to the CDN. The SDK returns this value when you call the + * `stopDirectCdnStreaming` method to stop pushing streams to the CDN. + */ DIRECT_CDN_STREAMING_STATE_STOPPED = 2, + /** + * 3: Fails to push streams to the CDN. You can troubleshoot the issue with the information reported + * by the `onDirectCdnStreamingStateChanged` callback, and then push streams to the CDN again. + */ DIRECT_CDN_STREAMING_STATE_FAILED = 3, + /** + * 4: Tries to reconnect the Agora server to the CDN. The SDK attempts to reconnect a maximum of 10 + * times; if the connection is not restored, the streaming state becomes + * DIRECT_CDN_STREAMING_STATE_FAILED. + */ DIRECT_CDN_STREAMING_STATE_RECOVERING = 4, }; /** - * The statistics of the Direct Cdn Streams. + * @brief The statistics of the current CDN streaming. + * + * @deprecated v4.6.0. */ struct DirectCdnStreamingStats { /** - * Width of the video pushed by rtmp. + * The width (px) of the video frame. */ int videoWidth; /** - * Height of the video pushed by rtmp. + * The height (px) of the video frame. */ int videoHeight; /** - * The frame rate of the video pushed by rtmp. + * The frame rate (fps) of the current video frame. */ int fps; /** - * Real-time bit rate of the video streamed by rtmp. + * The bitrate (bps) of the current video frame. */ int videoBitrate; /** - * Real-time bit rate of the audio pushed by rtmp. + * The bitrate (bps) of the current audio frame. */ int audioBitrate; }; @@ -3363,16 +4376,26 @@ struct DirectCdnStreamingStats { /** * The event handler for direct cdn streaming * + * @deprecated v4.6.0. + * */ class IDirectCdnStreamingEventHandler { public: virtual ~IDirectCdnStreamingEventHandler() {} /** - * Event callback of direct cdn streaming - * @param state Current status - * @param reason Reason Code - * @param message Message + * @brief Occurs when the CDN streaming state changes. 
+ * + * @details + * When the host directly pushes streams to the CDN, if the streaming state changes, the SDK + * triggers this callback to report the changed streaming state, error codes, and other information. + * You can troubleshoot issues by referring to this callback. + * + * @param state The current CDN streaming state. See `DIRECT_CDN_STREAMING_STATE`. + * @param reason Reasons for changes in the status of CDN streaming. See + * `DIRECT_CDN_STREAMING_REASON`. + * @param message The information about the changed streaming state. + * */ virtual void onDirectCdnStreamingStateChanged(DIRECT_CDN_STREAMING_STATE state, DIRECT_CDN_STREAMING_REASON reason, const char* message) { (void)state; @@ -3380,37 +4403,49 @@ class IDirectCdnStreamingEventHandler { (void)message; }; + /** + * @brief Reports the CDN streaming statistics. + * + * @details + * When the host directly pushes media streams to the CDN, the SDK triggers this callback every one + * second. + * + * @param stats The statistics of the current CDN streaming. See `DirectCdnStreamingStats`. + * + */ virtual void onDirectCdnStreamingStats(const DirectCdnStreamingStats& stats) { (void)stats; }; }; /** - * The channel media options. + * @brief The media setting options for the host. + * + * @deprecated v4.6.0. */ struct DirectCdnStreamingMediaOptions { /** - * Determines whether to publish the video of the camera track. - * - true: Publish the video track of the camera capturer. - * - false: (Default) Do not publish the video track of the camera capturer. + * Sets whether to publish the video captured by the camera: + * - `true`: Publish the video captured by the camera. + * - `false`: (Default) Do not publish the video captured by the camera. */ Optional publishCameraTrack; /** - * Determines whether to publish the recorded audio. - * - true: Publish the recorded audio. - * - false: (Default) Do not publish the recorded audio. 
+ * Sets whether to publish the audio captured by the microphone: + * - `true`: Publish the audio captured by the microphone. + * - `false`: (Default) Do not publish the audio captured by the microphone. */ Optional publishMicrophoneTrack; /** - * Determines whether to publish the audio of the custom audio track. - * - true: Publish the audio of the custom audio track. - * - false: (Default) Do not publish the audio of the custom audio track. + * Sets whether to publish the captured audio from a custom source: + * - `true`: Publish the captured audio from a custom source. + * - `false`: (Default) Do not publish the captured audio from the custom source. */ Optional publishCustomAudioTrack; /** - * Determines whether to publish the video of the custom video track. - * - true: Publish the video of the custom video track. - * - false: (Default) Do not publish the video of the custom video track. + * Sets whether to publish the captured video from a custom source: + * - `true`: Publish the captured video from a custom source. + * - `false`: (Default) Do not publish the captured video from the custom source. */ Optional publishCustomVideoTrack; /** @@ -3425,8 +4460,8 @@ struct DirectCdnStreamingMediaOptions { */ Optional publishMediaPlayerId; /** - * The custom video track id which will used to publish. - * You can get the VideoTrackId after calling createCustomVideoTrack() of IRtcEngine. + * The video track ID returned by calling the `createCustomVideoTrack` method. The default value is + * 0. */ Optional customVideoTrackId; @@ -3523,6 +4558,20 @@ struct ExtensionInfo { class IMediaPlayer; class IMediaRecorder; +/** + * @brief Callback triggered when `IRtcEngine` is released. + * + * @since v4.6.0 + * + * @details + * This callback is triggered when the `release` method is called to asynchronously release the + * `IRtcEngine` object. + * Call timing: This callback is triggered when the `release` method is called to asynchronously + * release the `IRtcEngine` object. 
+ * + */ +using RtcEngineReleaseCallback = void(*)(); + /** * The IRtcEngine class, which is the basic interface of the Agora SDK that implements the core functions of real-time communication. * @@ -3532,52 +4581,63 @@ class IMediaRecorder; class IRtcEngine : public agora::base::IEngineBase { public: /** - * Releases the IRtcEngine object. + * @brief Releases the `IRtcEngine` instance. * + * @details * This method releases all resources used by the Agora SDK. Use this method for apps in which users - * occasionally make voice or video calls. When users do not make calls, you can free up resources for - * other operations. - * + * occasionally make voice or video calls. When users do not make calls, you can free up resources + * for other operations. * After a successful method call, you can no longer use any method or callback in the SDK anymore. - * If you want to use the real-time communication functions again, you must call `createAgoraRtcEngine` - * and `initialize` to create a new `IRtcEngine` instance. + * If you want to use the real-time communication functions again, you must call + * `createAgoraRtcEngine` and `initialize` to create a new `IRtcEngine` instance. * - * @note If you want to create a new `IRtcEngine` instance after destroying the current one, ensure - * that you wait till the `release` method execution to complete. + * @note Agora does not recommend you calling `release` in any callback of the SDK. Otherwise, the + * SDK cannot release the resources until the callbacks return results, which may result in a + * deadlock. * - * @param sync Determines whether this method is a synchronous call. - * - `true`: This method is a synchronous call, which means that the result of this method call - * returns after the IRtcEngine object resources are released. Do not call this method - * in any callback generated by the SDK, or it may result in a deadlock. - * - `false`: This method is an asynchronous call. 
The result returns immediately even when the - * IRtcEngine object resources are not released. + * @param callback (Optional) Callback function pointer for setting the destruction mode of the + * engine to either synchronous or asynchronous. See `RtcEngineReleaseCallback`. + * - Non `nullptr`: Destroy the engine asynchronously. The method will return immediately, at which + * point the engine resources may not have been fully released yet. After the engine is destroyed, + * the SDK triggers `RtcEngineReleaseCallback`. + * - `nullptr`: Destroy the engine synchronously. This method only returns after the engine + * resources have been fully released. * */ - AGORA_CPP_API static void release(bool sync = false); + AGORA_CPP_API static void release(RtcEngineReleaseCallback callback = nullptr); /** - * Initializes `IRtcEngine`. + * @brief Initializes `IRtcEngine`. * + * @details + * Call timing: Before calling other APIs, you must call `createAgoraRtcEngine` and `initialize` to + * create and initialize the `IRtcEngine` object. + * + * @note + * The SDK supports creating only one `IRtcEngine` instance for an app. * All called methods provided by the `IRtcEngine` class are executed asynchronously. Agora * recommends calling these methods in the same thread. * - * @note - * - Before calling other APIs, you must call `createAgoraRtcEngine` and `initialize `to create and - * initialize the `IRtcEngine` object. - * - The SDK supports creating only one `IRtcEngine` instance for an app. + * @param context Configurations for the `IRtcEngine` instance. See `RtcEngineContext`. * - * @param context The RtcEngineContext object. * @return * - 0: Success. * - < 0: Failure. + * - -1: A general error occurs (no specified reason). + * - -2: The parameter is invalid. + * - -7: The SDK is not initialized. + * - -22: The resource request failed. The SDK fails to allocate resources because your app + * consumes too much system resource or the system resources are insufficient. 
+ * - -101: The App ID is invalid. */ virtual int initialize(const RtcEngineContext& context) = 0; /** - * Gets the pointer to the specified interface. + * @brief Gets the pointer to the specified interface. + * + * @param iid The ID of the interface. See `INTERFACE_ID_TYPE`. + * @param inter An output parameter. The pointer to the specified interface. * - * @param iid The ID of the interface. See #INTERFACE_ID_TYPE for details. - * @param inter Output parameter. The pointer to the specified interface. * @return * - 0: Success. * - < 0: Failure. @@ -3586,313 +4646,436 @@ class IRtcEngine : public agora::base::IEngineBase { /** - * Gets the SDK version. - * @param build The build number. - * @return The version of the current SDK in the string format. + * @brief Gets the SDK version. + * + * @param build The SDK build index. + * + * @return + * The SDK version number. The format is a string. */ virtual const char* getVersion(int* build) = 0; /** - * Gets the warning or error description. - * @param code The error code or warning code reported by the SDK. - * @return The specific error or warning description. + * @brief Gets the warning or error description. + * + * @param code The error code reported by the SDK. + * + * @return + * The specific error description. */ virtual const char* getErrorDescription(int code) = 0; /** - * Queries the capacity of the current device codec. + * @brief Queries the video codec capabilities of the SDK. * - * @param codec_info An array of the codec cap information: CodecCapInfo. - * @param size The array size. - * @return - * 0: Success. - * < 0: Failure. + * @param codecInfo Input and output parameter. An array representing the video codec capabilities + * of the SDK. See `CodecCapInfo`. + * - Input value: One `CodecCapInfo` defined by the user when executing this method, representing + * the video codec capability to be queried. 
+ * - Output value: The `CodecCapInfo` after the method is executed, representing the actual video
+ * codec capabilities of the SDK.
+ * @param size Input and output parameter, represent the size of the `CodecCapInfo` array.
+ * - Input value: Size of the `CodecCapInfo` defined by the user when executing the method.
+ * - Output value: Size of the output `CodecCapInfo` after this method is executed.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
 */
 virtual int queryCodecCapability(CodecCapInfo* codecInfo, int& size) = 0;

-  /**
-   * Queries the score of the current device.
+  /**
+   * @brief Queries device score.
 *
- * @return
- * > 0: If the value is greater than 0, it means that the device score has been retrieved and represents the score value.
- * Most devices score between 60-100, with higher scores indicating better performance.
- *
- * < 0: Failure.
+ * @details
+ * Applicable scenarios: In high-definition or ultra-high-definition video scenarios, you can first
+ * call this method to query the device's score. If the returned score is low (for example, below
+ * 60), you need to lower the video resolution to avoid affecting the video experience. The minimum
+ * device score required for different business scenarios is varied. For specific score
+ * recommendations, please contact `technical support`.
+ *
+ * @return
+ * - >0: The method call succeeds, the value is the current device's score, the range is [0,100],
+ * the larger the value, the stronger the device capability. Most devices are rated between 60 and
+ * 100.
+ * - < 0: Failure.
 */
 virtual int queryDeviceScore() = 0;

 /**
- * Preload a channel.
+ * @brief Preloads a channel with `token`, `channelId`, and `uid`.
 *
- * This method enables users to preload a channel. 
+ * @details + * When audience members need to switch between different channels frequently, calling the method + * can help shortening the time of joining a channel, thus reducing the time it takes for audience + * members to hear and see the host. + * If you join a preloaded channel, leave it and want to rejoin the same channel, you do not need to + * call this method unless the token for preloading the channel expires. + * Call timing: To improve the user experience of preloading channels, Agora recommends that before + * joining the channel, calling this method as early as possible once confirming the channel name + * and user information. * - * A successful call of this method will reduce the time of joining the same channel. - * - * Note: - * 1. The SDK supports preloading up to 20 channels. Once the preloaded channels exceed the limit, the SDK will keep the latest 20 available. - * 2. Renew the token of the preloaded channel by calling this method with the same 'channelId' and 'uid'. - * - * @param token The token generated on your server for authentication. - * @param channelId The channel name. This parameter signifies the channel in which users engage in - * real-time audio and video interaction. Under the premise of the same App ID, users who fill in - * the same channel ID enter the same channel for audio and video interaction. The string length - * must be less than 64 bytes. Supported character scopes are: + * @note + * - When calling this method, ensure you set the user role as audience and do not set the audio + * scenario as `AUDIO_SCENARIO_CHORUS`, otherwise, this method does not take effect. + * - You also need to make sure that the channel name, user ID and token passed in for preloading + * are the same as the values passed in when joinning the channel, otherwise, this method does not + * take effect. + * - One `IRtcEngine` instance supports preloading 20 channels at most. 
When exceeding this limit,
+ * the latest 20 preloaded channels take effect.
+ * Failing to preload a channel does not mean that you can't join a channel, nor will it increase
+ * the time of joining a channel.
+ *
+ * @param token The token generated on your server for authentication. When the token for
+ * preloading channels expires, you can update the token based on the number of channels you
+ * preload.
+ * - When preloading one channel, calling this method to pass in the new token.
+ * - When preloading more than one channels:
+ * - If you use a wildcard token for all preloaded channels, call `updatePreloadChannelToken` to
+ * update the token. Note: When generating a wildcard token, ensure the user ID is not set as 0. See
+ * `Secure authentication with tokens`.
+ * - If you use different tokens to preload different channels, call this method to pass in your
+ * user ID, channel name and the new token.
+ * @param channelId The channel name that you want to preload. This parameter signifies the channel
+ * in which users engage in real-time audio and video interaction. Under the premise of the same App
+ * ID, users who fill in the same channel ID enter the same channel for audio and video interaction.
+ * The string length must be less than 64 bytes. Supported characters (89 characters in total):
 * - All lowercase English letters: a to z.
 * - All uppercase English letters: A to Z.
 * - All numeric characters: 0 to 9.
- * - The space character.
- * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-",
- * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
 * @param uid The user ID. This parameter is used to identify the user in the channel for real-time
 * audio and video interaction. 
You need to set and manage user IDs yourself, and ensure that each * user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value - * range is 1 to 232-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user - * ID and returns it in the onJoinChannelSuccess callback. Your application must record and maintain - * the returned user ID, because the SDK does not do so. + * range is 1 to 2^32-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user + * ID and `onJoinChannelSuccess` returns it in the callback. Your application must record and + * maintain the returned user ID, because the SDK does not do so. * * @return * - 0: Success. * - < 0: Failure. - * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` * object before calling this method. - * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to - * preload the channel again. + * - -102: The channel name is invalid. You need to pass in a valid channel name and join the + * channel again. */ virtual int preloadChannel(const char* token, const char* channelId, uid_t uid) = 0; /** - * Preload a channel. - * - * This method enables users to preload a channel. + * @brief Preloads a channel with `token`, `channelId`, and `userAccount`. * - * A successful call of this method will reduce the time of joining the same channel. - * - * Note: - * 1. The SDK supports preloading up to 20 channels. Once the preloaded channels exceed the limit, the SDK will keep the latest 20 available. - * 2. Renew the token of the preloaded channel by calling this method with the same 'channelId' and 'userAccount'. 
+ * @details + * When audience members need to switch between different channels frequently, calling the method + * can help shortening the time of joining a channel, thus reducing the time it takes for audience + * members to hear and see the host. + * If you join a preloaded channel, leave it and want to rejoin the same channel, you do not need to + * call this method unless the token for preloading the channel expires. + * Call timing: To improve the user experience of preloading channels, Agora recommends that before + * joining the channel, calling this method as early as possible once confirming the channel name + * and user information. * - * @param token The token generated on your server for authentication. - * @param channelId The channel name. This parameter signifies the channel in which users engage in - * real-time audio and video interaction. Under the premise of the same App ID, users who fill in - * the same channel ID enter the same channel for audio and video interaction. The string length - * must be less than 64 bytes. Supported character scopes are: + * @note + * - When calling this method, ensure you set the user role as audience and do not set the audio + * scenario as `AUDIO_SCENARIO_CHORUS`, otherwise, this method does not take effect. + * - You also need to make sure that the User Account, channel ID and token passed in for preloading + * are the same as the values passed in when joining the channel, otherwise, this method does not + * take effect. + * - One `IRtcEngine` instance supports preloading 20 channels at most. When exceeding this limit, + * the latest 20 preloaded channels take effect. + * Failing to preload a channel does not mean that you can't join a channel, nor will it increase + * the time of joining a channel. + * + * @param token The token generated on your server for authentication. See .When the token for + * preloading channels expires, you can update the token based on the number of channels you + * preload. 
+ * - When preloading one channel, calling this method to pass in the new token. + * - When preloading more than one channels: + * - If you use a wildcard token for all preloaded channels, call `updatePreloadChannelToken` to + * update the token.Note: When generating a wildcard token, ensure the user ID is not set as 0. See + * `Secure authentication with tokens`. + * - If you use different tokens to preload different channels, call this method to pass in your + * user ID, channel name and the new token. + * @param channelId The channel name that you want to preload. This parameter signifies the channel + * in which users engage in real-time audio and video interaction. Under the premise of the same App + * ID, users who fill in the same channel ID enter the same channel for audio and video interaction. + * The string length must be less than 64 bytes. Supported characters (89 characters in total): * - All lowercase English letters: a to z. * - All uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", - * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". - * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: - * - All lowercase English letters: a to z. - * - All uppercase English letters: A to Z. + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," + * @param userAccount The user account. This parameter is used to identify the user in the channel + * for real-time audio and video engagement. You need to set and manage user accounts yourself and + * ensure that each user account in the same channel is unique. The maximum length of this parameter + * is 255 bytes. 
Ensure that you set this parameter and do not set it as NULL. Supported characters
+ * are as follows (89 in total):
+ * - The 26 lowercase English letters: a to z.
+ * - The 26 uppercase English letters: A to Z.
 * - All numeric characters: 0 to 9.
- * - The space character.
- * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",".
+ * - Space
+ * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]",
+ * "^", "_", "{", "}", "|", "~", ","
 *
 * @return
 * - 0: Success.
 * - < 0: Failure.
- * - -2: The parameter is invalid. For example, the userAccount parameter is empty.
- * You need to pass in a valid parameter and preload the channel again.
- * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine
+ * - -2: The parameter is invalid. For example, the User Account is empty. You need to pass in a
+ * valid parameter and preload the channel again.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
 * object before calling this method.
- * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to
- * preload the channel again.
+ * - -102: The channel name is invalid. You need to pass in a valid channel name and preload the
+ * channel again.
 */
 virtual int preloadChannelWithUserAccount(const char* token, const char* channelId, const char* userAccount) = 0;

 /**
- * Update token of the preloaded channels.
- *
- * An easy way to update all preloaded channels' tokens, if all preloaded channels use the same token.
+ * @brief Updates the wildcard token for preloading channels.
 *
- * If preloaded channels use different tokens, we need to call the 'preloadChannel' method with the same 'channelId'
- * and 'uid' or 'userAccount' to update the corresponding token. 
+ * @details
+ * You need to maintain the life cycle of the wildcard token by yourself. When the token expires,
+ * you need to generate a new wildcard token and then call this method to pass in the new token.
+ * Applicable scenarios: In scenarios involving multiple channels, such as switching between
+ * different channels, using a wildcard token means users do not need to apply for a new token every
+ * time they join a new channel, which can save users time for switching channels and reduce the
+ * pressure on your token server.
 *
- * @param token The token generated on your server for authentication.
+ * @param token The new token.
 *
 * @return
 * - 0: Success.
 * - < 0: Failure.
- * - -2: The token is invalid. You need to pass in a valid token and update the token again.
- * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine
+ * - -2: The parameter is invalid. For example, the token is invalid. You need to pass in a valid
+ * parameter and join the channel again.
+ * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine`
 * object before calling this method.
 */
 virtual int updatePreloadChannelToken(const char* token) = 0;

 /**
- * Joins a channel.
- *
- * This method enables users to join a channel. Users in the same channel can talk to each other,
- * and multiple users in the same channel can start a group chat. Users with different App IDs
- * cannot call each other.
+ * @brief Joins a channel.
 *
- * A successful call of this method triggers the following callbacks:
+ * @details
+ * By default, the user subscribes to the audio and video streams of all the other users in the
+ * channel, giving rise to usage and **billings**. To stop subscribing to a specified stream or all
+ * remote streams, call the corresponding `mute` methods.
+ * Call timing: Call this method after `initialize`. 
+ * Related callbacks: A successful call of this method triggers the following callbacks: * - The local client: The `onJoinChannelSuccess` and `onConnectionStateChanged` callbacks. - * - The remote client: `onUserJoined`, if the user joining the channel is in the Communication - * profile or is a host in the Live-broadcasting profile. + * - The remote client: The `onUserJoined` callback, if a user joining the channel in the + * COMMUNICATION profile, or a host joining a channel in the LIVE_BROADCASTING profile. + * When the connection between the local client and Agora's server is interrupted due to poor + * network conditions, the SDK tries reconnecting to the server. When the local client successfully + * rejoins the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client. * - * When the connection between the client and Agora's server is interrupted due to poor network - * conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins - * the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client. - * - * @note Once a user joins the channel, the user subscribes to the audio and video streams of all - * the other users in the channel by default, giving rise to usage and billing calculation. To - * stop subscribing to a specified stream or all remote streams, call the corresponding `mute` methods. - * - * @param token The token generated on your server for authentication. + * @note + * - This method only supports users joining one channel at a time. + * - Users with different App IDs cannot call each other. + * - Before joining a channel, ensure that the App ID you use to generate a token is the same as + * that you pass in the `initialize` method; otherwise, you may fail to join the channel with the + * token. + * + * @param token The token generated on your server for authentication. 
See .Note: + * - (Recommended) If your project has enabled the security mode (using APP ID and Token for + * authentication), this parameter is required. + * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is + * optional. You will automatically exit the channel 24 hours after successfully joining in. + * - If you need to join different channels at the same time or switch between channels, Agora + * recommends using a wildcard token so that you don't need to apply for a new token every time + * joining a channel. See `Secure authentication with tokens`. * @param channelId The channel name. This parameter signifies the channel in which users engage in * real-time audio and video interaction. Under the premise of the same App ID, users who fill in * the same channel ID enter the same channel for audio and video interaction. The string length - * must be less than 64 bytes. Supported character scopes are: + * must be less than 64 bytes. Supported characters (89 characters in total): * - All lowercase English letters: a to z. * - All uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", - * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," * @param info (Optional) Reserved for future use. * @param uid The user ID. This parameter is used to identify the user in the channel for real-time * audio and video interaction. You need to set and manage user IDs yourself, and ensure that each * user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value - * range is 1 to 232-1. 
If the user ID is not assigned (or set to 0), the SDK assigns a random user - * ID and returns it in the onJoinChannelSuccess callback. Your application must record and maintain - * the returned user ID, because the SDK does not do so. + * range is 1 to 2^32-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user + * ID and `onJoinChannelSuccess` returns it in the callback. Your application must record and + * maintain the returned user ID, because the SDK does not do so. * * @return * - 0: Success. * - < 0: Failure. - * - -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set - * to an integer, or the value of a member in the `ChannelMediaOptions` structure is invalid. You need - * to pass in a valid parameter and join the channel again. - * - -3: Failes to initialize the `IRtcEngine` object. You need to reinitialize the IRtcEngine object. - * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine + * - -2: The parameter is invalid. For example, the token is invalid, the `uid` parameter is not + * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass + * in a valid parameter and join the channel again. + * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine` + * object. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` * object before calling this method. - * - -8: The internal state of the IRtcEngine object is wrong. The typical cause is that you call - * this method to join the channel without calling `stopEchoTest` to stop the test after calling - * `startEchoTest` to start a call loop test. You need to call `stopEchoTest` before calling this method. - * - -17: The request to join the channel is rejected. The typical cause is that the user is in the - * channel. 
Agora recommends using the `onConnectionStateChanged` callback to get whether the user is - * in the channel. Do not call this method to join the channel unless you receive the - * `CONNECTION_STATE_DISCONNECTED(1)` state. - * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to + * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after + * calling `startEchoTest` to start a call loop test, you call this method to join the channel + * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling + * this method. + * - -17: The request to join the channel is rejected. The typical cause is that the user is + * already in the channel. Agora recommends that you use the `onConnectionStateChanged` callback to + * see whether the user is in the channel. Do not call this method to join the channel unless you + * receive the `CONNECTION_STATE_DISCONNECTED` (1) state. + * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to * rejoin the channel. - * - -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel. + * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the + * channel. */ virtual int joinChannel(const char* token, const char* channelId, const char* info, uid_t uid) = 0; /** - * Joins a channel with media options. + * @brief Joins a channel with media options. * - * This method enables users to join a channel. Users in the same channel can talk to each other, - * and multiple users in the same channel can start a group chat. Users with different App IDs - * cannot call each other. 
- * - * A successful call of this method triggers the following callbacks: + * @details + * Compared to `joinChannel(const char* token, const char* channelId, const char* info, uid_t uid)`, + * this method has the `options` parameter which is used to set + * media options, such as whether to publish audio and video streams within a channel, or whether to + * automatically subscribe to the audio and video streams of all remote users when joining a + * channel. By default, the user subscribes to the audio and video streams of all the other users in + * the channel, giving rise to usage and **billings**. To stop subscribing to other streams, set the + * `options` parameter or call the corresponding `mute` methods. + * Call timing: Call this method after `initialize`. + * Related callbacks: A successful call of this method triggers the following callbacks: * - The local client: The `onJoinChannelSuccess` and `onConnectionStateChanged` callbacks. - * - The remote client: `onUserJoined`, if the user joining the channel is in the Communication - * profile or is a host in the Live-broadcasting profile. - * - * When the connection between the client and Agora's server is interrupted due to poor network - * conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins - * the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client. - * - * Compared to `joinChannel`, this method adds the options parameter to configure whether to - * automatically subscribe to all remote audio and video streams in the channel when the user - * joins the channel. By default, the user subscribes to the audio and video streams of all - * the other users in the channel, giving rise to usage and billings. To unsubscribe, set the - * `options` parameter or call the `mute` methods accordingly. 
+ * - The remote client: The `onUserJoined` callback, if a user joining the channel in the + * COMMUNICATION profile, or a host joining a channel in the LIVE_BROADCASTING profile. + * When the connection between the local client and Agora's server is interrupted due to poor + * network conditions, the SDK tries reconnecting to the server. When the local client successfully + * rejoins the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client. * * @note - * - This method allows users to join only one channel at a time. - * - Ensure that the app ID you use to generate the token is the same app ID that you pass in the - * `initialize` method; otherwise, you may fail to join the channel by token. - * - * @param token The token generated on your server for authentication. - * + * - This method only supports users joining one channel at a time. + * - Users with different App IDs cannot call each other. + * - Before joining a channel, ensure that the App ID you use to generate a token is the same as + * that you pass in the `initialize` method; otherwise, you may fail to join the channel with the + * token. + * + * @param token The token generated on your server for authentication. See .Note: + * - (Recommended) If your project has enabled the security mode (using APP ID and Token for + * authentication), this parameter is required. + * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is + * optional. You will automatically exit the channel 24 hours after successfully joining in. + * - If you need to join different channels at the same time or switch between channels, Agora + * recommends using a wildcard token so that you don't need to apply for a new token every time + * joining a channel. See `Secure authentication with tokens`. * @param channelId The channel name. This parameter signifies the channel in which users engage in * real-time audio and video interaction. 
Under the premise of the same App ID, users who fill in * the same channel ID enter the same channel for audio and video interaction. The string length - * must be less than 64 bytes. Supported character scopes are: + * must be less than 64 bytes. Supported characters (89 characters in total): * - All lowercase English letters: a to z. * - All uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", - * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," * @param uid The user ID. This parameter is used to identify the user in the channel for real-time * audio and video interaction. You need to set and manage user IDs yourself, and ensure that each * user ID in the same channel is unique. This parameter is a 32-bit unsigned integer. The value - * range is 1 to 232-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user - * ID and returns it in the `onJoinChannelSuccess` callback. Your application must record and maintain - * the returned user ID, because the SDK does not do so. - * @param options The channel media options: ChannelMediaOptions. + * range is 1 to 2^32-1. If the user ID is not assigned (or set to 0), the SDK assigns a random user + * ID and `onJoinChannelSuccess` returns it in the callback. Your application must record and + * maintain the returned user ID, because the SDK does not do so. + * @param options The channel media options. See `ChannelMediaOptions`. * * @return * - 0: Success. * - < 0: Failure. - * - -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set - * to an integer, or the value of a member in the `ChannelMediaOptions` structure is invalid. 
You need - * to pass in a valid parameter and join the channel again. - * - -3: Failes to initialize the `IRtcEngine` object. You need to reinitialize the IRtcEngine object. - * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine + * - -2: The parameter is invalid. For example, the token is invalid, the `uid` parameter is not + * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass + * in a valid parameter and join the channel again. + * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine` + * object. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` * object before calling this method. - * - -8: The internal state of the IRtcEngine object is wrong. The typical cause is that you call - * this method to join the channel without calling `stopEchoTest` to stop the test after calling - * `startEchoTest` to start a call loop test. You need to call `stopEchoTest` before calling this method. - * - -17: The request to join the channel is rejected. The typical cause is that the user is in the - * channel. Agora recommends using the `onConnectionStateChanged` callback to get whether the user is - * in the channel. Do not call this method to join the channel unless you receive the - * `CONNECTION_STATE_DISCONNECTED(1)` state. - * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to + * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after + * calling `startEchoTest` to start a call loop test, you call this method to join the channel + * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling + * this method. + * - -17: The request to join the channel is rejected. The typical cause is that the user is + * already in the channel. 
Agora recommends that you use the `onConnectionStateChanged` callback to + * see whether the user is in the channel. Do not call this method to join the channel unless you + * receive the `CONNECTION_STATE_DISCONNECTED` (1) state. + * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to * rejoin the channel. - * - -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel. + * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the + * channel. */ virtual int joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options) = 0; /** - * Updates the channel media options after joining the channel. + * @brief Updates the channel media options after joining the channel. + * + * @param options The channel media options. See `ChannelMediaOptions`. * - * @param options The channel media options: ChannelMediaOptions. * @return * - 0: Success. * - < 0: Failure. + * - -2: The value of a member in `ChannelMediaOptions` is invalid. For example, the token or the + * user ID is invalid. You need to fill in a valid parameter. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. + * - -8: The internal state of the `IRtcEngine` object is wrong. The possible reason is that the + * user is not in the channel. Agora recommends that you use the `onConnectionStateChanged` callback + * to see whether the user is in the channel. If you receive the `CONNECTION_STATE_DISCONNECTED` (1) + * or `CONNECTION_STATE_FAILED` (5) state, the user is not in the channel. You need to call + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` to join a channel before calling this method. */ virtual int updateChannelMediaOptions(const ChannelMediaOptions& options) = 0; /** - * Leaves the channel. 
+ * @brief Leaves a channel. * - * This method allows a user to leave the channel, for example, by hanging up or exiting a call. - * - * This method is an asynchronous call, which means that the result of this method returns even before - * the user has not actually left the channel. Once the user successfully leaves the channel, the - * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback. + * @details + * After calling this method, the SDK terminates the audio and video interaction, leaves the current + * channel, and releases all resources related to the session. + * After joining the channel, you must call this method to end the call; otherwise, the next call + * cannot be started. + * Call timing: Call this method after joining a channel. + * Related callbacks: A successful call of this method triggers the following callbacks: + * - The local client: The `onLeaveChannel` callback will be triggered. + * - The remote client: The `onUserOffline` callback will be triggered after the remote host leaves + * the channel. * * @note - * If you call \ref release "release" immediately after this method, the leaveChannel process will be - * interrupted, and the SDK will not trigger the `onLeaveChannel` callback. + * If you call `release` immediately after calling this method, the SDK does not trigger the + * `onLeaveChannel` callback. + * - This method call is asynchronous. When this method returns, it does not necessarily mean that + * the user has left the channel. + * - If you have called `joinChannelEx` to join multiple channels, calling this method will leave + * all the channels you joined. * * @return * - 0: Success. * - < 0: Failure. + * - -1: A general error occurs (no specified reason). + * - -2: The parameter is invalid. + * - -7: The SDK is not initialized. */ virtual int leaveChannel() = 0; /** - * Leaves the channel. - * - * @param options The leave channel options. 
- * - * This method allows a user to leave the channel, for example, by hanging up or exiting a call. + * @brief Sets channel options and leaves the channel. * - * This method is an asynchronous call, which means that the result of this method returns even before - * the user has not actually left the channel. Once the user successfully leaves the channel, the - * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback. + * @details + * After calling this method, the SDK terminates the audio and video interaction, leaves the current + * channel, and releases all resources related to the session. + * After joining a channel, you must call this method or `leaveChannel()` to end the call, + * otherwise, the next call cannot be started. If you have called `joinChannelEx` to join multiple + * channels, calling this method will leave all the channels you joined. + * Call timing: Call this method after joining a channel. + * Related callbacks: A successful call of this method triggers the following callbacks: + * - The local client: The `onLeaveChannel` callback will be triggered. + * - The remote client: The `onUserOffline` callback will be triggered after the remote host leaves + * the channel. * * @note - * If you call \ref release "release" immediately after this method, the leaveChannel process will be - * interrupted, and the SDK will not trigger the `onLeaveChannel` callback. + * If you call `release` immediately after calling this method, the SDK does not trigger the + * `onLeaveChannel` callback. + * This method call is asynchronous. When this method returns, it does not necessarily mean that the + * user has left the channel. + * + * @param options The options for leaving the channel. See `LeaveChannelOptions`. * * @return * - 0: Success. @@ -3901,96 +5084,176 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int leaveChannel(const LeaveChannelOptions& options) = 0; /** - * Renews the token. 
+ * @brief Renews the token. * - * Once a token is enabled and used, it expires after a certain period of time. - * - * Under the following circumstances, generate a new token on your server, and then call this method to - * renew it. Failure to do so results in the SDK disconnecting from the server. - * - The \ref IRtcEngineEventHandler onTokenPrivilegeWillExpire "onTokenPrivilegeWillExpire" callback is triggered; - * - The \ref IRtcEngineEventHandler::onRequestToken "onRequestToken" callback is triggered; - * - The `ERR_TOKEN_EXPIRED(-109)` error is reported. + * @details + * This method is used to update the token. After successfully calling this method, the SDK will + * trigger the `onRenewTokenResult` callback. A token will expire after a certain period of time, at + * which point the SDK will be unable to establish a connection with the server. + * Call timing: In any of the following cases, Agora recommends that you generate a new token on + * your server and then call this method to renew your token: + * - Receiving the `onTokenPrivilegeWillExpire` callback reporting the token is about to expire. + * - Receiving the `onRequestToken` callback reporting the token has expired. + * - Receiving the `onConnectionStateChanged` callback reporting `CONNECTION_CHANGED_TOKEN_EXPIRED` + * (9). * * @param token The new token. + * * @return * - 0: Success. * - < 0: Failure. + * - -2: The parameter is invalid. For example, the token is empty. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. + * - 110: Invalid token. Ensure the following: + * - The user ID specified when generating the token is consistent with the user ID used when + * joining the channel. + * - The generated token is the same as the token passed in to join the channel. */ virtual int renewToken(const char* token) = 0; /** - * Sets the channel profile. + * @brief Sets the channel profile. 
* - * The IRtcEngine differentiates channel profiles and applies different optimization algorithms accordingly. - * For example, it prioritizes smoothness and low latency for a video call, and prioritizes video quality - * for a video broadcast. + * @details + * You can call this method to set the channel profile. The SDK adopts different optimization + * strategies for different channel profiles. For example, in a live streaming scenario, the SDK + * prioritizes video quality. After initializing the SDK, the default channel profile is the live + * streaming profile. + * Call timing: Call this method before joining a channel. * * @note - * - To ensure the quality of real-time communication, we recommend that all users in a channel use the - * same channel profile. - * - Call this method before calling `joinChannel`. You cannot set the channel profile - * once you have joined the channel. + * To ensure the quality of real-time communication, Agora recommends that all users in a channel + * use the same channel profile. + * In different channel scenarios, the default audio routing of the SDK is different. See + * `setDefaultAudioRouteToSpeakerphone`. + * + * @param profile The channel profile. See `CHANNEL_PROFILE_TYPE`. * - * @param profile The channel profile: #CHANNEL_PROFILE_TYPE. * @return * - 0: Success. * - < 0: Failure. - * - -8(ERR_INVALID_STATE): The current status is invalid, only allowed to be called when the connection is disconnected. + * - -2: The parameter is invalid. + * - -7: The SDK is not initialized. */ virtual int setChannelProfile(CHANNEL_PROFILE_TYPE profile) = 0; /** - * Sets the role of a user. - * - * This method sets the user role as either BROADCASTER or AUDIENCE (default). - * - The broadcaster sends and receives streams. - * - The audience receives streams only. - * - * By default, all users are audience regardless of the channel profile. - * Call this method to change the user role to BROADCASTER so that the user can - * send a stream. 
+ * @brief Sets the client role.
+ *
+ * @details
+ * By default, the SDK sets the user role as audience. You can call this method to set the user role
+ * as host. The user role (`role`) determines the users' permissions at the SDK level, including
+ * whether they can publish audio and video streams in a channel.
+ * Call timing: You can call this method either before or after joining a channel.
+ * If you call this method to set the user role as the host before joining the channel and set the
+ * local video property through the `setupLocalVideo` method, the local video preview is
+ * automatically enabled when the user joins the channel.
+ * If you call this method to set the user role after joining a channel, the SDK will automatically
+ * call the `muteLocalAudioStream` and `muteLocalVideoStream` methods to change the state for
+ * publishing audio and video streams.
+ * Related callbacks: If you call this method to switch the user role after joining the channel, the
+ * SDK triggers the following callbacks:
+ * - Triggers `onClientRoleChanged` on the local client. Note: Calling this method before joining a
+ * channel and setting the `role` to `AUDIENCE` will trigger this callback as well.
+ * - Triggers `onUserJoined` or `onUserOffline` on the remote client.
+ * If you call this method to set the user role after joining a channel but encounter a failure, the
+ * SDK triggers the `onClientRoleChangeFailed` callback to report the reason for the failure and the
+ * current user role.
 *
 * @note
- * After calling the setClientRole() method to CLIENT_ROLE_AUDIENCE, the SDK stops audio recording.
- * However, CLIENT_ROLE_AUDIENCE will keep audio recording with AUDIO_SCENARIO_CHATROOM(5).
- * Normally, app developer can also use mute api to achieve the same result, and we implement
- * this 'non-orthogonal' behavior only to make API backward compatible. 
+ * When calling this method before joining a channel and setting the user role to `BROADCASTER`, the
+ * `onClientRoleChanged` callback will not be triggered on the local client.
+ * Calling this method before joining a channel and setting the `role` to `AUDIENCE` will trigger
+ * this callback as well.
 *
- * @param role The role of the client: #CLIENT_ROLE_TYPE.
+ * @param role The user role. See `CLIENT_ROLE_TYPE`.
+ * Note: If you set the user role as an audience member, you cannot publish audio and video streams
+ * in the channel. If you want to publish media streams in a channel during live streaming, ensure
+ * you set the user role as broadcaster.
 *
 * @return
 * - 0: Success.
 * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -7: The SDK is not initialized.
 */
 virtual int setClientRole(CLIENT_ROLE_TYPE role) = 0;

- /** Sets the role of the user, such as a host or an audience (default), before joining a channel in the live interactive streaming.
- *
- * This method can be used to switch the user role in the live interactive streaming after the user joins a channel.
- *
- * In the `LIVE_BROADCASTING` profile, when a user switches user roles after joining a channel, a successful \ref agora::rtc::IRtcEngine::setClientRole "setClientRole" method call triggers the following callbacks:
- * - The local client: \ref agora::rtc::IRtcEngineEventHandler::onClientRoleChanged "onClientRoleChanged"
- * - The remote client: \ref agora::rtc::IRtcEngineEventHandler::onUserJoined "onUserJoined" or \ref agora::rtc::IRtcEngineEventHandler::onUserOffline "onUserOffline" (BECOME_AUDIENCE)
- *
- * @note
- * This method applies only to the `LIVE_BROADCASTING` profile.
- *
- * @param role Sets the role of the user. See #CLIENT_ROLE_TYPE.
- * @param options Sets the audience latency level of the user. See #ClientRoleOptions.
- *
- * @return
- * - 0(ERR_OK): Success.
- * - < 0: Failure. 
- * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INALID_ARGUMENT): The parameter is invalid. - * - -7(ERR_NOT_INITIALIZED): The SDK is not initialized. - * - -8(ERR_INVALID_STATE): The channel profile is not `LIVE_BROADCASTING`. - */ + /** + * @brief Sets the user role and the audience latency level in a live streaming scenario. + * + * @details + * By default,the SDK sets the user role as audience. You can call this method to set the user role + * as host. The user role ( `roles` ) determines the users' permissions at the SDK level, including + * whether they can publish audio and video streams in a channel. + * The difference between this method and `setClientRole(CLIENT_ROLE_TYPE role)` is that, this + * method supports + * setting the `audienceLatencyLevel`. `audienceLatencyLevel` needs to be used together with `role` + * to determine the level of service that users can enjoy within their permissions. For example, an + * audience member can choose to receive remote streams with low latency or ultra-low latency. + * Call timing: You can call this method either before or after joining a channel. + * If you call this method to set the user role as the host before joining the channel and set the + * local video property through the `setupLocalVideo` method, the local video preview is + * automatically enabled when the user joins the channel. + * If you call this method to set the user role after joining a channel, the SDK will automatically + * call the `muteLocalAudioStream` and `muteLocalVideoStream` method to change the state for + * publishing audio and video streams. + * Related callbacks: If you call this method to switch the user role after joining the channel, the + * SDK triggers the following callbacks: + * - Triggers `onClientRoleChanged` on the local client.Note: Calling this method before joining a + * channel and set the `role` to `AUDIENCE` will trigger this callback as well. 
+ * - Triggers `onUserJoined` or `onUserOffline` on the remote client.
+ * If you call this method to set the user role after joining a channel but encounter a failure, the
+ * SDK triggers the `onClientRoleChangeFailed` callback to report the reason for the failure and the
+ * current user role.
+ *
+ * @note
+ * When the user role is set to host, the audience latency level can only be set to
+ * AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY.
+ * When calling this method before joining a channel and setting the `role` to `BROADCASTER`, the
+ * `onClientRoleChanged` callback will not be triggered on the local client.
+ * Calling this method before joining a channel and setting the `role` to `AUDIENCE` will trigger
+ * this callback as well.
+ *
+ * @param role The user role. See `CLIENT_ROLE_TYPE`.
+ * Note: If you set the user role as an audience member, you cannot publish audio and video streams
+ * in the channel. If you want to publish media streams in a channel during live streaming, ensure
+ * you set the user role as broadcaster.
+ * @param options The detailed options of a user, including the user level. See `ClientRoleOptions`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -5: The request is rejected.
+ * - -7: The SDK is not initialized.
+ */
 virtual int setClientRole(CLIENT_ROLE_TYPE role, const ClientRoleOptions& options) = 0;
 
- /** Starts a video call test.
+ /**
+ * @brief Starts an audio and video call loopback test.
+ *
+ * @details
+ * To test whether the user's local sending and receiving streams are normal, you can call this
+ * method to perform an audio and video call loop test, which tests whether the audio and video
+ * devices and the user's upstream and downstream networks are working properly.
+ * After starting the test, the user needs to make a sound or face the camera. The audio or video is
+ * output after about two seconds. 
If the audio playback is normal, the audio device and the user's + * upstream and downstream networks are working properly; if the video playback is normal, the video + * device and the user's upstream and downstream networks are working properly. + * Call timing: You can call this method either before or after joining a channel. * - * @param config: configuration for video call test. + * @note + * - When calling in a channel, make sure that no audio or video stream is being published. + * - After calling this method, call `stopEchoTest` to end the test; otherwise, the user cannot + * perform the next audio and video call loop test and cannot join the channel. + * - In live streaming scenarios, this method only applies to hosts. + * + * @param config The configuration of the audio and video call loop test. See + * `EchoTestConfiguration`. * * @return * - 0: Success. @@ -3998,20 +5261,59 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int startEchoTest(const EchoTestConfiguration& config) = 0; - /** Stops the audio call test. - @return int - - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Stops the audio call test. + * + * @details + * After calling `startEchoTest`, you must call this method to end the test; otherwise, the user + * cannot perform the next audio and video call loop test and cannot join the channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -5(ERR_REFUSED): Failed to stop the echo test. The echo test may not be running. + */ virtual int stopEchoTest() = 0; #if defined(__APPLE__) && TARGET_OS_IOS - /** Enables the SDK use AVCaptureMultiCamSession or AVCaptureSession. Applies to iOS 13.0+ only. - * @param enabled Whether to enable multi-camera when capturing video: - * - true: Enable multi-camera, and the SDK uses AVCaptureMultiCamSession. - * - false: Disable multi-camera, and the SDK uses AVCaptureSession. - * @param config The config for secondary camera capture session. See #CameraCapturerConfiguration. 
+ /** + * @brief Enables or disables multi-camera capture. + * + * @details + * In scenarios where there are existing cameras to capture video, Agora recommends that you use the + * following steps to capture and publish video with multiple cameras:1. Call this method to enable + * multi-channel camera capture. + * 2. Call `startPreview(VIDEO_SOURCE_TYPE sourceType)` to start the local video preview. + * 3. Call `startCameraCapture`, and set `sourceType` to start video capture with the second camera. + * 4. Call `joinChannelEx`, and set `publishSecondaryCameraTrack` to `true` to publish the video + * stream captured by the second camera in the channel. + * If you want to disable multi-channel camera capture, use the following steps:1. Call + * `stopCameraCapture`. + * 2. Call this method with `enabled` set to `false`. + * This method applies to iOS only. + * When using this function, ensure that the system version is 13.0 or later. + * The minimum iOS device types that support multi-camera capture are as follows: + * - iPhone XR + * - iPhone XS + * - iPhone XS Max + * - iPad Pro 3rd generation and later + * + * @note + * You can call this method before and after `startPreview(VIDEO_SOURCE_TYPE sourceType)` to enable + * multi-camera capture: + * - If it is enabled before `startPreview(VIDEO_SOURCE_TYPE sourceType)`, the local video preview + * shows the image captured + * by the two cameras at the same time. + * - If it is enabled after `startPreview(VIDEO_SOURCE_TYPE sourceType)`, the SDK stops the current + * camera capture first, + * and then enables the primary camera and the second camera. The local video preview appears black + * for a short time, and then automatically returns to normal. + * + * @param enabled Whether to enable multi-camera video capture mode: + * - `true`: Enable multi-camera capture mode; the SDK uses multiple cameras to capture video. + * - `false`: Disable multi-camera capture mode; the SDK uses a single camera to capture video. 
+ * @param config Capture configuration for the second camera. See `CameraCapturerConfiguration`. + * * @return * - 0: Success. * - < 0: Failure. @@ -4019,15 +5321,28 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableMultiCamera(bool enabled, const CameraCapturerConfiguration& config) = 0; #endif /** - * Enables the video. + * @brief Enables the video module. * - * You can call this method either before joining a channel or during a call. - * If you call this method before entering a channel, the service starts the video; if you call it - * during a call, the audio call switches to a video call. + * @details + * The video module is disabled by default, call this method to enable it. If you need to disable + * the video module later, you need to call `disableVideo`. + * Call timing: This method can be called either before joining a channel or while in the channel: + * - If called before joining a channel, it enables the video module. + * - If called during an audio-only call, the audio call automatically switches to a video call. + * Related callbacks: A successful call of this method triggers the `onRemoteVideoStateChanged` + * callback on the remote client. * * @note - * This method controls the underlying states of the Engine. It is still - * valid after one leaves the channel. + * - This method enables the internal engine and is valid after leaving the channel. + * - Calling this method will reset the entire engine, resulting in a slow response time. You can + * use the following methods to independently control a specific function of the video module based + * on your actual needs: + * - `enableLocalVideo`: Whether to enable the camera to create the local video stream. + * - `muteLocalVideoStream`: Whether to publish the local video stream. + * - `muteRemoteVideoStream`: Whether to subscribe to and play the remote video stream. + * - `muteAllRemoteVideoStreams`: Whether to subscribe to and play all remote video streams. 
+ * - A successful call of this method resets `enableLocalVideo`, `muteRemoteVideoStream`, and
+ * `muteAllRemoteVideoStreams`. Proceed with caution.
 *
 * @return
 * - 0: Success.
@@ -4036,22 +5351,44 @@ class IRtcEngine : public agora::base::IEngineBase {
 */
 virtual int enableVideo() = 0;
 
 /**
- * Disables the video.
+ * @brief Disables the video module.
 *
- * This method stops capturing the local video and receiving any remote video.
- * To enable the local preview function, call \ref enableLocalVideo "enableLocalVideo" (true).
- * @return int
+ * @details
+ * This method is used to disable the video module.
+ * Call timing: This method can be called either before or after joining the channel.
+ * - If it is called before joining the channel, the audio-only mode is enabled.
+ * - If it is called after joining the channel, it switches from video mode to audio-only mode.
+ * Then, calling `enableVideo` can switch to video mode again.
+ * Related callbacks: A successful call of this method triggers the `onUserEnableVideo` (`false`)
+ * callback on the remote client.
+ *
+ * @note
+ * - This method affects the internal engine and can be called after leaving the channel.
+ * - Calling this method will reset the entire engine, resulting in a slow response time. You can
+ * use the following methods to independently control a specific function of the video module based
+ * on your actual needs:
+ * - `enableLocalVideo`: Whether to enable the camera to create the local video stream.
+ * - `muteLocalVideoStream`: Whether to publish the local video stream.
+ * - `muteRemoteVideoStream`: Whether to subscribe to and play the remote video stream.
+ * - `muteAllRemoteVideoStreams`: Whether to subscribe to and play all remote video streams.
+ *
+ * @return
 * - 0: Success.
 * - < 0: Failure.
 */
 virtual int disableVideo() = 0;
 
 /**
- * Starts the local video preview before joining a channel.
+ * @brief Enables the local video preview. 
+ * + * @details + * You can call this method to enable local video preview. + * Call timing: This method must be called after `enableVideo` and `setupLocalVideo`. * - * Once you call this method to start the local video preview, if you leave - * the channel by calling \ref leaveChannel "leaveChannel", the local video preview remains until - * you call \ref stopPreview "stopPreview" to disable it. + * @note + * - The local preview enables the mirror mode by default. + * - After leaving the channel, local preview remains enabled. You need to call `stopPreview()` + * to disable local preview. * * @return * - 0: Success. @@ -4060,8 +5397,20 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int startPreview() = 0; /** - * Starts the local video preview for specific source type. - * @param sourceType - The video source type. + * @brief Enables the local video preview and specifies the video source for the preview. + * + * @details + * This method is used to start local video preview and specify the video source that appears in the + * preview screen. + * Call timing: This method must be called after `enableVideo` and `setupLocalVideo`. + * + * @note + * - The local preview enables the mirror mode by default. + * - After leaving the channel, local preview remains enabled. You need to call `stopPreview()` + * to disable local preview. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. + * * @return * - 0: Success. * - < 0: Failure. @@ -4069,7 +5418,12 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int startPreview(VIDEO_SOURCE_TYPE sourceType) = 0; /** - * Stops the local video preview and the video. + * @brief Stops the local video preview. + * + * @details + * Applicable scenarios: After calling `startPreview()` to start the preview, if you want to + * stop the local video preview, call this method. + * Call timing: Call this method before joining a channel or after leaving a channel. 
* * @return * - 0: Success. @@ -4078,290 +5432,621 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int stopPreview() = 0; /** - * Stops the local video preview for specific source type. - * @param sourceType - The video source type. + * @brief Stops the local video preview. + * + * @details + * Applicable scenarios: After calling `startPreview(VIDEO_SOURCE_TYPE sourceType)` to start the + * preview, if you want to + * stop the local video preview, call this method. + * Call timing: Call this method before joining a channel or after leaving a channel. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. + * * @return * - 0: Success. * - < 0: Failure. */ virtual int stopPreview(VIDEO_SOURCE_TYPE sourceType) = 0; - /** Starts the last-mile network probe test. - - This method starts the last-mile network probe test before joining a channel - to get the uplink and downlink last-mile network statistics, including the - bandwidth, packet loss, jitter, and round-trip time (RTT). - - Call this method to check the uplink network quality before users join a - channel or before an audience switches to a host. Once this method is - enabled, the SDK returns the following callbacks: - - \ref IRtcEngineEventHandler::onLastmileQuality "onLastmileQuality": the - SDK triggers this callback depending on the network - conditions. This callback rates the network conditions and is more closely - linked to the user experience. - - \ref IRtcEngineEventHandler::onLastmileProbeResult "onLastmileProbeResult": - the SDK triggers this callback within 30 seconds depending on the network - conditions. This callback returns the real-time statistics of the network - conditions and is more objective. - - @note - - Do not call other methods before receiving the - \ref IRtcEngineEventHandler::onLastmileQuality "onLastmileQuality" and - \ref IRtcEngineEventHandler::onLastmileProbeResult "onLastmileProbeResult" - callbacks. 
Otherwise, the callbacks may be interrupted. - - In the Live-broadcast profile, a host should not call this method after - joining a channel. - - @param config Sets the configurations of the last-mile network probe test. See - LastmileProbeConfig. - - @return - - 0: Success. - - < 0: Failure. - */ - virtual int startLastmileProbeTest(const LastmileProbeConfig& config) = 0; - - /** Stops the last-mile network probe test. */ - virtual int stopLastmileProbeTest() = 0; - /** - * Sets the video encoder configuration. + * @brief Starts the last mile network probe test. * - * Each configuration profile corresponds to a set of video parameters, including - * the resolution, frame rate, and bitrate. + * @details + * This method starts the last-mile network probe test before joining a channel to get the uplink + * and downlink last mile network statistics, including the bandwidth, packet loss, jitter, and + * round-trip time (RTT). + * Call timing: Do not call other methods before receiving the `onLastmileQuality` and + * `onLastmileProbeResult` callbacks. Otherwise, the callbacks may be interrupted. + * Related callbacks: After successfully calling this method, the SDK sequentially triggers the + * following 2 callbacks: + * - `onLastmileQuality`: The SDK triggers this callback within two seconds depending on the network + * conditions. This callback rates the network conditions and is more closely linked to the user + * experience. + * - `onLastmileProbeResult`: The SDK triggers this callback within 30 seconds depending on the + * network conditions. This callback returns the real-time statistics of the network conditions and + * is more objective. * - * The parameters specified in this method are the maximum values under ideal network conditions. - * If the video engine cannot render the video using the specified parameters due - * to poor network conditions, the parameters further down the list are considered - * until a successful configuration is found. 
+ * @param config The configurations of the last-mile network probe test. See `LastmileProbeConfig`. * - * @param config The local video encoder configuration: VideoEncoderConfiguration. * @return * - 0: Success. * - < 0: Failure. */ - virtual int setVideoEncoderConfiguration(const VideoEncoderConfiguration& config) = 0; + virtual int startLastmileProbeTest(const LastmileProbeConfig& config) = 0; - /** Enables/Disables image enhancement and sets the options. - * - * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method. + /** + * @brief Stops the last mile network probe test. * - * @param enabled Sets whether or not to enable image enhancement: - * - true: enables image enhancement. - * - false: disables image enhancement. - * @param options Sets the image enhancement option. See BeautyOptions. + * @return + * - 0: Success. + * - < 0: Failure. */ - virtual int setBeautyEffectOptions(bool enabled, const BeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; - /** Enables/Disables face shape and sets the beauty options. + virtual int stopLastmileProbeTest() = 0; + + /** + * @brief Sets the video encoder configuration. * - * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method. + * @details + * Sets the encoder configuration for the local video. Each configuration profile corresponds to a + * set of video parameters, including the resolution, frame rate, and bitrate. + * Call timing: You can call this method either before or after joining a channel. If the user does + * not need to reset the video encoding properties after joining the channel, Agora recommends + * calling this method before `enableVideo` to reduce the time to render the first video frame. * - * @param enabled Sets whether or not to enable face shape: - * - true: enables face shape. - * - false: disables face shape. - * @param options Sets the face shape beauty option. 
See FaceShapeBeautyOptions. - */ - virtual int setFaceShapeBeautyOptions(bool enabled, const FaceShapeBeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; - /** Enables/Disables face shape and sets the area options. + * @note + * - Both this method and the `getMirrorApplied` method support setting the mirroring effect. Agora + * recommends that you only choose one method to set it up. Using both methods at the same time + * causes the mirroring effect to overlap, and the mirroring settings fail. + * - The `config` specified in this method is the maximum value under ideal network conditions. If + * the video engine cannot render the video using the specified `config` due to unreliable network + * conditions, the parameters further down the list are considered until a successful configuration + * is found. * - * @note Call this method after calling the \ref IRtcEngine::setFaceShapeBeautyOptions "setFaceShapeBeautyOptions" method. + * @param config Video profile. See `VideoEncoderConfiguration`. * - * @param options Sets the face shape area option. See FaceShapeAreaOptions. + * @return + * - 0: Success. + * - < 0: Failure. */ - virtual int setFaceShapeAreaOptions(const FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; - - /** Gets the face shape beauty options. + virtual int setVideoEncoderConfiguration(const VideoEncoderConfiguration& config) = 0; + + /** + * @brief Sets the image enhancement options. * - * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method. + * @details + * Enables or disables image enhancement, and sets the options. + * Call timing: Call this method after calling `enableVideo` or `startPreview(VIDEO_SOURCE_TYPE + * sourceType)`. * - * @param options Gets the face shape beauty option. See FaceShapeBeautyOptions. 
- */ - virtual int getFaceShapeBeautyOptions(FaceShapeBeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; - - /** Gets the face shape area options. + * @note + * - This method relies on the image enhancement dynamic library + * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * - This feature has high requirements on device performance. When calling this method, the SDK + * automatically checks the capabilities of the current device. * - * @note Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method. + * @param enabled Whether to enable the image enhancement function: + * - `true`: Enable the image enhancement function. + * - `false`: (Default) Disable the image enhancement function. + * @param options The image enhancement options. See `BeautyOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. * - * @param shapeArea The face area. See FaceShapeAreaOptions::FACE_SHAPE_AREA. - * @param options Gets the face area beauty option. See FaceShapeAreaOptions. + * @return + * - 0: Success. + * - < 0: Failure. + * - -4: The current device does not support this feature. Possible reasons include: + * - The current device capabilities do not meet the requirements for image enhancement. Agora + * recommends you replace it with a high-performance device. 
*/ - virtual int getFaceShapeAreaOptions(agora::rtc::FaceShapeAreaOptions::FACE_SHAPE_AREA shapeArea, FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; - + virtual int setBeautyEffectOptions(bool enabled, const BeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; /** - * Sets low-light enhancement. - * - * @since v4.0.0 - * - * The low-light enhancement feature can adaptively adjust the brightness value of the video captured in situations with low or uneven lighting, such as backlit, cloudy, or dark scenes. It restores or highlights the image details and improves the overall visual effect of the video. + * @brief Sets the face shape options and specifies the media source. * - * You can call this method to enable the low-light enhancement feature and set the options of the low-light enhancement effect. + * @details + * Calling this method allows for modifying various parts of the face, achieving slimming, enlarging + * eyes, slimming nose, and other minor cosmetic effects all at once using preset parameters, + * supporting fine-tuning the overall modification intensity. + * Call timing: Call this method after calling `enableVideo`. * * @note - * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` - * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". - * - The low-light enhancement feature has certain performance requirements on devices. If your device overheats after you enable low-light enhancement, Agora recommends modifying the low-light enhancement options to a less performance-consuming level or disabling low-light enhancement entirely. + * - This method only applies to Android 4.4 or later. 
+ * - This method relies on the image enhancement dynamic library + * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * - This feature has high requirements on device performance. When calling this method, the SDK + * automatically checks the capabilities of the current device. * - * @param enabled Sets whether to enable low-light enhancement: - * - `true`: Enable. - * - `false`: (Default) Disable. - * @param options The low-light enhancement options. See LowlightEnhanceOptions. + * @param enabled Whether to enable the face shape effect: + * - `true`: Enable the face shape effect. + * - `false`: (Default) Disable the face shape effect. + * @param options Face shaping style options, see `FaceShapeBeautyOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. * * @return * - 0: Success. * - < 0: Failure. + * - -4: The current device does not support this feature. Possible reasons include: + * - The current device capabilities do not meet the requirements for image enhancement. Agora + * recommends you replace it with a high-performance device. + * - The current device version is lower than Android 4.4 and does not support this feature. + * Agora recommends you replace the device or upgrade the operating system. 
*/ - virtual int setLowlightEnhanceOptions(bool enabled, const LowlightEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + virtual int setFaceShapeBeautyOptions(bool enabled, const FaceShapeBeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; /** - * Sets video noise reduction. + * @brief Sets the image enhancement options for facial areas and specifies the media source. * - * @since v4.0.0 - * - * Underlit environments and low-end video capture devices can cause video images to contain significant noise, which affects video quality. In real-time interactive scenarios, video noise also consumes bitstream resources and reduces encoding efficiency during encoding. - * - * You can call this method to enable the video noise reduction feature and set the options of the video noise reduction effect. + * @details + * If the preset beauty effects implemented in the `setFaceShapeBeautyOptions` method do not meet + * expectations, you can use this method to set beauty area options, individually fine-tune each + * part of the face, and achieve a more refined beauty effect. + * Call timing: Call this method after calling `setFaceShapeBeautyOptions`. * * @note - * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` - * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". - * - The video noise reduction feature has certain performance requirements on devices. If your device overheats after you enable video noise reduction, Agora recommends modifying the video noise reduction options to a less performance-consuming level or disabling video noise reduction entirely. + * - This method only applies to Android 4.4 or later. 
+ * - This method relies on the image enhancement dynamic library + * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * - This feature has high requirements on device performance. When calling this method, the SDK + * automatically checks the capabilities of the current device. * - * @param enabled Sets whether to enable video noise reduction: - * - `true`: Enable. - * - `false`: (Default) Disable. - * @param options The video noise reduction options. See VideoDenoiserOptions. + * @param options Facial enhancement areas, see `FaceShapeAreaOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. * * @return * - 0: Success. * - < 0: Failure. + * - -4: The current device does not support this feature. Possible reasons include: + * - The current device capabilities do not meet the requirements for image enhancement. Agora + * recommends you replace it with a high-performance device. + * - The current device version is lower than Android 4.4 and does not support this feature. + * Agora recommends you replace the device or upgrade the operating system. */ - virtual int setVideoDenoiserOptions(bool enabled, const VideoDenoiserOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + virtual int setFaceShapeAreaOptions(const FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** - * Sets color enhancement. + * @brief Gets the beauty effect options. * - * @since v4.0.0 + * @details + * Calling this method can retrieve the current settings of the beauty effect. 
+ * Applicable scenarios: When the user opens the beauty style and style intensity menu in the app, + * you can call this method to get the current beauty effect options, then refresh the menu in the + * user interface according to the results, and update the UI. + * Call timing: Call this method after calling `enableVideo`. * - * The video images captured by the camera can have color distortion. The color enhancement feature intelligently adjusts video characteristics such as saturation and contrast to enhance the video color richness and color reproduction, making the video more vivid. + * @param options Face shaping style options, see `FaceShapeBeautyOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. * - * You can call this method to enable the color enhancement feature and set the options of the color enhancement effect. + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int getFaceShapeBeautyOptions(FaceShapeBeautyOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + + /** + * @brief Gets the facial beauty area options. * - * @note - * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` - * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". - * - The color enhancement feature has certain performance requirements on devices. 
If your device overheats after you enable color enhancement, Agora recommends modifying the color enhancement options to a less performance-consuming level or disabling color enhancement entirely. + * @details + * Calling this method can retrieve the current settings of the beauty effect. + * Applicable scenarios: When the user opens the facial beauty area and shaping intensity menu in + * the app, you can call this method to get the current beauty effect options, then refresh the menu + * in the user interface according to the results, and update the UI. + * Call timing: Call this method after calling `enableVideo`. * - * @param enabled Sets whether to enable color enhancement: - * - `true`: Enable. - * - `false`: (Default) Disable. - * @param options The color enhancement options. See ColorEnhanceOptions. + * @param shapeArea Facial enhancement areas. See `FACE_SHAPE_AREA`. + * @param options Facial enhancement areas, see `FaceShapeAreaOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. * * @return * - 0: Success. * - < 0: Failure. */ - virtual int setColorEnhanceOptions(bool enabled, const ColorEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + virtual int getFaceShapeAreaOptions(agora::rtc::FaceShapeAreaOptions::FACE_SHAPE_AREA shapeArea, FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; /** - * Enables/Disables the virtual background. (beta function) + * @brief Sets the filter effect options and specifies the media source. 
* - * @since v3.7.200 + * @since v4.4.1 * - * After enabling the virtual background function, you can replace the original background image of the local user - * with a custom background image. After the replacement, all users in the channel can see the custom background - * image. + * @details + * Call timing: Call this method after calling `enableVideo`. * * @note - * - Before calling this method, ensure that you have integrated the - * `libagora_segmentation_extension.dll` (Windows)/`AgoraVideoSegmentationExtension.framework` (macOS) dynamic - * library into the project folder. - * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". - * - This function requires a high-performance device. Agora recommends that you use this function on devices with - * an i5 CPU and better. - * - Agora recommends that you use this function in scenarios that meet the following conditions: - * - A high-definition camera device is used, and the environment is uniformly lit. - * - The captured video image is uncluttered, the user's portrait is half-length and largely unobstructed, and the - * background is a single color that differs from the color of the user's clothing. + * - This method relies on the image enhancement dynamic library + * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * - This feature has high requirements on device performance. When calling this method, the SDK + * automatically checks the capabilities of the current device. * - * @param enabled Sets whether to enable the virtual background: - * - true: Enable. - * - false: Disable. - * @param backgroundSource The custom background image. See VirtualBackgroundSource. **Note**: To adapt the - * resolution of the custom background image to the resolution of the SDK capturing video, the SDK scales and crops - * the custom background image while ensuring that the content of the custom background image is not distorted. 
+ * @param enabled Whether to enable the filter effect: + * - `true`: Yes. + * - `false`: (Default) No. + * @param options The filter effect options. See `FilterEffectOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. * * @return * - 0: Success. * - < 0: Failure. */ - virtual int enableVirtualBackground(bool enabled, VirtualBackgroundSource backgroundSource, SegmentationProperty segproperty, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + virtual int setFilterEffectOptions(bool enabled, const FilterEffectOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** - * Initializes the video view of a remote user. + * @brief Creates a video effect object. * - * This method initializes the video view of a remote stream on the local device. It affects only the - * video view that the local user sees. + * @since v4.6.0 * - * Usually the app should specify the `uid` of the remote video in the method call before the - * remote user joins the channel. If the remote `uid` is unknown to the app, set it later when the - * app receives the \ref IRtcEngineEventHandler::onUserJoined "onUserJoined" callback. + * @details + * Creates an `IVideoEffectObject` video effect object and returns its pointer. * - * To unbind the remote user from the view, set `view` in VideoCanvas as `null`. - * - * @note - * Ensure that you call this method in the UI thread. + * @param bundlePath The path to the video effect bundle. + * @param type The media source type. See `MEDIA_SOURCE_TYPE`. * - * @param canvas The remote video view settings: VideoCanvas. 
- * @return int - * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_SUCCESS = 0, - * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_IMAGE_NOT_EXIST = -1, - * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_COLOR_FORMAT_NOT_SUPPORTED = -2, - * VIRTUAL_BACKGROUND_SOURCE_STATE_REASON_DEVICE_NOT_SUPPORTED = -3, + * @return + * - The `IVideoEffectObject` object pointer, if the method call succeeds. + * - An empty pointer, if the method call fails. */ - virtual int setupRemoteVideo(const VideoCanvas& canvas) = 0; + virtual agora_refptr createVideoEffectObject(const char* bundlePath, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** - * Initializes the local video view. + * @brief Destroys a video effect object. * - * This method initializes the video view of the local stream on the local device. It affects only - * the video view that the local user sees, not the published local video stream. + * @since v4.6.0 * - * To unbind the local video from the view, set `view` in VideoCanvas as `null`. + * @param videoEffectObject The video effect object to be destroyed. See `IVideoEffectObject`. * - * @note - * Call this method before joining a channel. - * - * @param canvas The local video view setting: VideoCanvas. * @return * - 0: Success. * - < 0: Failure. */ - virtual int setupLocalVideo(const VideoCanvas& canvas) = 0; + virtual int destroyVideoEffectObject(agora_refptr videoEffectObject) = 0; /** - * Sets the Video application scenario. + * @brief Sets low-light enhancement. * - * @since v4.2.0 + * @since v4.0.0 + * + * @details + * You can call this method to enable the color enhancement feature and set the options of the color + * enhancement effect. + * Applicable scenarios: The low-light enhancement feature can adaptively adjust the brightness + * value of the video captured in situations with low or uneven lighting, such as backlit, cloudy, + * or dark scenes. 
It restores or highlights the image details and improves the overall visual
+ * effect of the video.
+ * Call timing: Call this method after calling `enableVideo`.
+ *
+ * @note
+ * - This method relies on the image enhancement dynamic library
+ * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ * - The low-light enhancement feature has certain performance requirements on devices. If your
+ * device overheats after you enable low-light enhancement, Agora recommends modifying the
+ * low-light enhancement options to a less performance-consuming level or disabling low-light
+ * enhancement entirely.
+ * - If you want to prioritize image quality ( LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY ) when using the
+ * low-light enhancement function, you need to first call `setVideoDenoiserOptions` to achieve video
+ * noise reduction; the specific corresponding relationship is as follows:
+ * - When low light enhancement is set to automatic mode ( LOW_LIGHT_ENHANCE_AUTO ), video noise
+ * reduction needs to be set to prioritize image quality ( VIDEO_DENOISER_LEVEL_HIGH_QUALITY ) and
+ * automatic mode ( VIDEO_DENOISER_AUTO ).
+ * - When low-light enhancement is set to manual mode ( LOW_LIGHT_ENHANCE_MANUAL ), video noise
+ * reduction needs to be set to prioritize image quality ( VIDEO_DENOISER_LEVEL_HIGH_QUALITY ) and
+ * manual mode ( VIDEO_DENOISER_MANUAL ).
+ *
+ * @param enabled Whether to enable low-light enhancement:
+ * - `true`: Enable low-light enhancement.
+ * - `false`: (Default) Disable low-light enhancement.
+ * @param options The low-light enhancement options. See `LowlightEnhanceOptions`.
+ * @param type The type of the media source to which the filter effect is applied. 
See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setLowlightEnhanceOptions(bool enabled, const LowlightEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** + * @brief Sets video noise reduction. * - * You can call this method to set the expected video scenario. - * The SDK will optimize the video experience for each scenario you set. + * @since v4.0.0 * + * @details + * You can call this method to enable the video noise reduction feature and set the options of the + * video noise reduction effect. + * Applicable scenarios: dark environments and low-end video capture devices can cause video images + * to contain significant noise, which affects video quality. In real-time interactive scenarios, + * video noise also consumes bitstream resources and reduces encoding efficiency during encoding. + * Call timing: Call this method after calling `enableVideo`. * - * @param scenarioType The video application scenario. See #ApplicationScenarioType. + * @note + * - This method relies on the image enhancement dynamic library + * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * - Video noise reduction has certain requirements for equipment performance. If your device + * overheats after you enable video noise reduction, Agora recommends modifying the video noise + * reduction options to a less performance-consuming level or disabling video noise reduction + * entirely. 
+ * If the noise reduction implemented by this method does not meet your needs, Agora recommends that + * you call the `setBeautyEffectOptions` method to enable the beauty and skin smoothing function to + * achieve better video noise reduction effects. The recommended `BeautyOptions` settings for + * intense noise reduction effect are as follows: + * - `lighteningContrastLevel` LIGHTENING_CONTRAST_NORMAL + * - `lighteningLevel`: 0.0 + * - `smoothnessLevel`: 0.5 + * - `rednessLevel`: 0.0 + * - `sharpnessLevel`: 0.1 + * + * @param enabled Whether to enable video noise reduction: + * - `true`: Enable video noise reduction. + * - `false`: (Default) Disable video noise reduction. + * @param options The video noise reduction options. See `VideoDenoiserOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. * * @return * - 0: Success. * - < 0: Failure. - * - ERR_FAILED (1): A general error occurs (no specified reason). - * - ERR_NOT_SUPPORTED (4): Unable to set video application scenario. - * - ERR_NOT_INITIALIZED (7): The SDK is not initialized. + */ + virtual int setVideoDenoiserOptions(bool enabled, const VideoDenoiserOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** + * @brief Sets color enhancement. + * + * @since v4.0.0 + * + * @details + * The video images captured by the camera can have color distortion. The color enhancement feature + * intelligently adjusts video characteristics such as saturation and contrast to enhance the video + * color richness and color reproduction, making the video more vivid. 
+ * You can call this method to enable the color enhancement feature and set the options of the color + * enhancement effect. + * + * @note + * - Call this method after calling `enableVideo`. + * - The color enhancement feature has certain performance requirements on devices. With color + * enhancement turned on, Agora recommends that you change the color enhancement level to one that + * consumes less performance or turn off color enhancement if your device is experiencing severe + * heat problems. + * - This method relies on the image enhancement dynamic library + * `libagora_clear_vision_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * + * @param enabled Whether to enable color enhancement: + * - `true` Enable color enhancement. + * - `false`: (Default) Disable color enhancement. + * @param options The color enhancement options. See `ColorEnhanceOptions`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setColorEnhanceOptions(bool enabled, const ColorEnhanceOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + + /** + * @brief Enables/Disables the virtual background. + * + * @since v3.7.200 + * + * @details + * The virtual background feature enables the local user to replace their original background with a + * static image, dynamic video, blurred background, or portrait-background segmentation to achieve + * picture-in-picture effect. Once the virtual background feature is enabled, all users in the + * channel can see the custom background. 
+ * Call this method after calling `enableVideo` or `startPreview(VIDEO_SOURCE_TYPE sourceType)`.
+ *
+ * @note
+ * - Using a video as your virtual background will lead to continuous increase in memory usage,
+ * which may cause issues such as app crashes. Therefore, it is recommended to reduce the resolution
+ * and frame rate of the video when using it.
+ * - This feature has high requirements on device performance. When calling this method, the SDK
+ * automatically checks the capabilities of the current device. Agora recommends you use virtual
+ * background on devices with the following processors:
+ * - Snapdragon 700 series 750G and later
+ * - Snapdragon 800 series 835 and later
+ * - Dimensity 700 series 720 and later
+ * - Kirin 800 series 810 and later
+ * - Kirin 900 series 980 and later
+ * - Devices with an i5 CPU and better
+ * - Devices with an A9 chip and better, as follows:
+ * - iPhone 6S and later
+ * - iPad Air 3rd generation and later
+ * - iPad 5th generation and later
+ * - iPad Pro 1st generation and later
+ * - iPad mini 5th generation and later
+ * - Agora recommends that you use this feature in scenarios that meet the following conditions:
+ * - A high-definition camera device is used, and the environment is uniformly lit.
+ * - There are few objects in the captured video. Portraits are half-length and unobstructed.
+ * Ensure that the background is a solid color that is different from the color of the user's
+ * clothing.
+ * - This method relies on the virtual background dynamic library
+ * `libagora_segmentation_extension.dll`. If the dynamic library is deleted, the function cannot be
+ * enabled normally.
+ *
+ * @param enabled Whether to enable virtual background:
+ * - `true`: Enable virtual background.
+ * - `false`: Disable virtual background.
+ * @param backgroundSource The custom background. See `VirtualBackgroundSource`. 
To adapt the + * resolution of the custom background image to that of the video captured by the SDK, the SDK + * scales and crops the custom background image while ensuring that the content of the custom + * background image is not distorted. + * @param segproperty Processing properties for background images. See `SegmentationProperty`. + * @param type The type of the media source to which the filter effect is applied. See + * `MEDIA_SOURCE_TYPE`.Attention: In this method, this parameter supports only the following two + * settings: + * - Use the default value `PRIMARY_CAMERA_SOURCE` if you use camera to capture local video. + * - Set this parameter to `CUSTOM_VIDEO_SOURCE` if you use custom video source. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -4: The device capabilities do not meet the requirements for the virtual background feature. + * Agora recommends you try it on devices with higher performance. + */ + virtual int enableVirtualBackground(bool enabled, VirtualBackgroundSource backgroundSource, SegmentationProperty segproperty, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + + /** + * @brief Initializes the video view of a remote user. + * + * @details + * This method initializes the video view of a remote stream on the local device. It affects only + * the video view that the local user sees. Call this method to bind the remote video stream to a + * video view and to set the rendering and mirror modes of the video view. + * You need to specify the ID of the remote user in this method. If the remote user ID is unknown to + * the application, set it after the app receives the `onUserJoined` callback. + * To unbind the remote user from the view, set the `view` parameter to NULL. + * Once the remote user leaves the channel, the SDK unbinds the remote user. 
+ * In the scenarios of custom layout for mixed videos on the mobile end, you can call this method + * and set a separate `view` for rendering each sub-video stream of the mixed video stream. + * + * @note + * - To update the rendering or mirror mode of the remote video view during a call, use the + * `setRemoteRenderMode` method. + * - When using the recording service, the app does not need to bind a view, as it does not send a + * video stream. If your app does not recognize the recording service, bind the remote user to the + * view when the SDK triggers the `onFirstRemoteVideoDecoded` callback. + * + * @param canvas The remote video view and settings. See `VideoCanvas`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setupRemoteVideo(const VideoCanvas& canvas) = 0; + + /** + * @brief Initializes the local video view. + * + * @details + * This method initializes the video view of a local stream on the local device. It only affects the + * video seen by the local user and does not impact the publishing of the local video. Call this + * method to bind the local video stream to a video view ( `view` ) and to set the rendering and + * mirror modes of the video view. + * The binding remains valid after leaving the channel. To stop rendering or unbind the local video + * from the view, set `view` as NULL. + * Applicable scenarios: After initialization, call this method to set the local video and then join + * the channel. + * In real-time interactive scenarios, if you need to simultaneously view multiple preview frames in + * the local video preview, and each frame is at a different observation position along the video + * link, you can repeatedly call this method to set different `view` s and set different observation + * positions for each `view. 
` For example, by setting the video source to the camera and then + * configuring two `view` s with `position` setting to POSITION_POST_CAPTURER_ORIGIN and + * POSITION_POST_CAPTURER, you can simultaneously preview the raw, unprocessed video frame and the + * video frame that has undergone preprocessing (image enhancement effects, virtual background, + * watermark) in the local video preview. + * Call timing: You can call this method either before or after joining a channel. + * + * @note To update only the rendering or mirror mode of the local video view during a call, call + * `setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode)` + * instead. + * + * @param canvas The local video view and settings. See `VideoCanvas`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setupLocalVideo(const VideoCanvas& canvas) = 0; + + /** + * @brief Sets video application scenarios. + * + * @since v4.2.0 + * + * @details + * After successfully calling this method, the SDK will automatically enable the best practice + * strategies and adjust key performance metrics based on the specified scenario, to optimize the + * video experience. + * + * @note Call this method before joining a channel. + * + * @param scenarioType The type of video application scenario. See + * `VIDEO_APPLICATION_SCENARIO_TYPE`.`APPLICATION_SCENARIO_MEETING` (1) is suitable for meeting + * scenarios. The SDK automatically enables the following strategies: + * - In meeting scenarios where low-quality video streams are required to have a high bitrate, the + * SDK automatically enables multiple technologies used to deal with network congestions, to enhance + * the performance of the low-quality streams and to ensure the smooth reception by subscribers. + * - The SDK monitors the number of subscribers to the high-quality video stream in real time and + * dynamically adjusts its configuration based on the number of subscribers. 
+ * - If nobody subscribes to the high-quality stream, the SDK automatically reduces its bitrate
+ * and frame rate to save upstream bandwidth.
+ * - If someone subscribes to the high-quality stream, the SDK resets the high-quality stream to
+ * the `VideoEncoderConfiguration` configuration used in the most recent calling of
+ * `setVideoEncoderConfiguration`. If no configuration has been set by the user previously, the
+ * following values are used:
+ * - Resolution: (Windows and macOS) 1280 × 720; (Android and iOS) 960 × 540
+ * - Frame rate: 15 fps
+ * - Bitrate: (Windows and macOS) 1600 Kbps; (Android and iOS) 1000 Kbps
+ * - The SDK monitors the number of subscribers to the low-quality video stream in real time and
+ * dynamically enables or disables it based on the number of subscribers.Note: If the user has
+ * called `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`
+ * to set never sending the low-quality video stream (
+ * `DISABLE_SIMULCAST_STREAM` ), the dynamic adjustment of the low-quality stream in meeting
+ * scenarios will not take effect.
+ * - If nobody subscribes to the low-quality stream, the SDK automatically disables it to save
+ * upstream bandwidth.
+ * - If someone subscribes to the low-quality stream, the SDK enables the low-quality stream and
+ * resets it to the `SimulcastStreamConfig` configuration used in the most recent calling of
+ * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`. If no
+ * configuration has been set by the user previously, the following
+ * values are used:
+ * - Resolution: 480 × 272
+ * - Frame rate: 15 fps
+ * - Bitrate: 500 Kbps
+ * `APPLICATION_SCENARIO_1V1` (2) This is applicable to the `one to one live` scenario. 
To meet the + * requirements for low latency and high-quality video in this scenario, the SDK optimizes its + * strategies, improving performance in terms of video quality, first frame rendering, latency on + * mid-to-low-end devices, and smoothness under weak network conditions.Attention: This enumeration + * value is only applicable to the broadcaster vs. broadcaster scenario. + * `APPLICATION_SCENARIO_LIVESHOW` (3) This is applicable to the `show room` scenario. In this + * scenario, fast video rendering and high image quality are crucial. The SDK implements several + * performance optimizations, including automatically enabling accelerated audio and video frame + * rendering to minimize first-frame latency (no need to call `enableInstantMediaRendering` ), and + * B-frame encoding to achieve better image quality and bandwidth efficiency. The SDK also provides + * enhanced video quality and smooth playback, even in poor network conditions or on lower-end + * devices. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -1: A general error occurs (no specified reason). + * - -4: Video application scenarios are not supported. Possible reasons include that you use the + * Voice SDK instead of the Video SDK. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. */ virtual int setVideoScenario(VIDEO_APPLICATION_SCENARIO_TYPE scenarioType) = 0; @@ -4386,13 +6071,24 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setVideoQoEPreference(VIDEO_QOE_PREFERENCE_TYPE qoePreference) = 0; /** - * Enables the audio. + * @brief Enables the audio module. * - * The audio is enabled by default. + * @details + * The audio module is enabled by default After calling `disableAudio` to disable the audio module, + * you can call this method to re-enable it. + * Call timing: This method can be called either before or after joining the channel. 
It is still + * valid after one leaves channel. * * @note - * This method controls the underlying states of the Engine. It is still - * valid after one leaves channel. + * - Calling this method will reset the entire engine, resulting in a slow response time. You can + * use the following methods to independently control a specific function of the audio module based + * on your actual needs: + * - `enableLocalAudio`: Whether to enable the microphone to create the local audio stream. + * - `muteLocalAudioStream`: Whether to publish the local audio stream. + * - `muteRemoteAudioStream`: Whether to subscribe and play the remote audio stream. + * - `muteAllRemoteAudioStreams`: Whether to subscribe to and play all remote audio streams. + * - A successful call of this method resets `enableLocalAudio`, `muteRemoteAudioStream`, and + * `muteAllRemoteAudioStreams`. Proceed it with caution. * * @return * - 0: Success. @@ -4401,34 +6097,51 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableAudio() = 0; /** - * Disables the audio. + * @brief Disables the audio module. * - * @note - * This method controls the underlying states of the Engine. It is still + * @details + * The audio module is enabled by default, and you can call this method to disable the audio module. + * Call timing: This method can be called either before or after joining the channel. It is still * valid after one leaves channel. * + * @note + * This method resets the internal engine and takes some time to take effect. Agora recommends using + * the following API methods to control the audio modules separately: + * - `enableLocalAudio`: Whether to enable the microphone to create the local audio stream. + * - `enableLoopbackRecording`: Whether to enable loopback audio capturing. + * - `muteLocalAudioStream`: Whether to publish the local audio stream. + * - `muteRemoteAudioStream`: Whether to subscribe and play the remote audio stream. 
+ * - `muteAllRemoteAudioStreams`: Whether to subscribe to and play all remote audio streams. + * * @return * - 0: Success. * - < 0: Failure. */ virtual int disableAudio() = 0; - + /** - * Sets the audio parameters and application scenarios. + * @brief Sets the audio profile and audio scenario. * - * @deprecated This method is deprecated. You can use the - * \ref IRtcEngine::setAudioProfile(AUDIO_PROFILE_TYPE) "setAudioProfile" - * method instead. To set the audio scenario, call the \ref IRtcEngine::initialize "initialize" - * method and pass value in the `audioScenario` member in the RtcEngineContext struct. + * @deprecated This method is deprecated. You can use the `setAudioProfile(AUDIO_PROFILE_TYPE profile) = 0` + * method instead. To set the audio scenario, call the `initialize` method and pass value in the + * `audioScenario` member in the RtcEngineContext struct. * - * @note - * - Call this method before calling the `joinChannel` method. - * - In scenarios requiring high-quality audio, we recommend setting `profile` as `MUSIC_HIGH_QUALITY`(4) - * and `scenario` as `AUDIO_SCENARIO_GAME_STREAMING`(3). + * @details + * Applicable scenarios: This method is suitable for various audio scenarios. You can choose as + * needed. For example, in scenarios with high audio quality requirements such as music teaching, it + * is recommended to set `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY`(4) and `scenario` to + * `AUDIO_SCENARIO_GAME_STREAMING`(3). + * Call timing: You can call this method either before or after joining a channel. + * + * @note Due to iOS system restrictions, some audio routes cannot be recognized in call volume mode. + * Therefore, if you need to use an external sound card, it is recommended to set the audio scenario + * to `AUDIO_SCENARIO_GAME_STREAMING`(3). In this scenario, the SDK will switch to media volume to + * avoid this issue. 
* - * @param profile Sets the sample rate, bitrate, encoding mode, and the number of channels: - * #AUDIO_PROFILE_TYPE. - * @param scenario Sets the audio application scenarios: #AUDIO_SCENARIO_TYPE. + * @param profile The audio profile, including the sampling rate, bitrate, encoding mode, and the + * number of channels. See `AUDIO_PROFILE_TYPE`. + * @param scenario The audio scenarios. Under different audio scenarios, the device uses different + * volume types. See `AUDIO_SCENARIO_TYPE`. * * @return * - 0: Success. @@ -4437,15 +6150,18 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setAudioProfile(AUDIO_PROFILE_TYPE profile, AUDIO_SCENARIO_TYPE scenario) __deprecated = 0; /** - * Sets the audio profile. + * @brief Sets audio profiles. * - * @note - * - Call this method before calling the `joinChannel` method. - * - In scenarios requiring high-quality audio, Agora recommends setting `profile` as `MUSIC_HIGH_QUALITY`(4). - * - To set the audio scenario, call the \ref IRtcEngine::initialize "initialize" - * method and pass value in the `audioScenario` member in the RtcEngineContext struct. + * @details + * If you need to set the audio scenario, you can either call `setAudioScenario`, or `initialize` + * and set the `audioScenario` in `RtcEngineContext`. + * Applicable scenarios: This method is suitable for various audio scenarios. You can choose as + * needed. For example, in scenarios with high audio quality requirements such as music teaching, it + * is recommended to set `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY`(4). + * Call timing: You can call this method either before or after joining a channel. * - * @param profile The audio profile, such as the sample rate, bitrate and codec type: #AUDIO_PROFILE_TYPE. + * @param profile The audio profile, including the sampling rate, bitrate, encoding mode, and the + * number of channels. See `AUDIO_PROFILE_TYPE`. * * @return * - 0: Success. 
@@ -4453,31 +6169,53 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int setAudioProfile(AUDIO_PROFILE_TYPE profile) = 0; /** - * Set the audio scenario. + * @brief Sets audio scenarios. + * + * @details + * Applicable scenarios: This method is suitable for various audio scenarios. You can choose as + * needed. For example, in scenarios such as music teaching that require high sound quality, it is + * recommended to set `scenario` to `AUDIO_SCENARIO_GAME_STREAMING`(3). + * Call timing: You can call this method either before or after joining a channel. + * + * @note Due to iOS system restrictions, some audio routes cannot be recognized in call volume mode. + * Therefore, if you need to use an external sound card, it is recommended to set the audio scenario + * to `AUDIO_SCENARIO_GAME_STREAMING`(3). In this scenario, the SDK will switch to media volume to + * avoid this issue. + * + * @param scenario The audio scenarios. Under different audio scenarios, the device uses different + * volume types. See `AUDIO_SCENARIO_TYPE`. + * - * @param scenario The audio scenario: #AUDIO_SCENARIO_TYPE. * @return * - 0: Success. * - < 0: Failure. */ virtual int setAudioScenario(AUDIO_SCENARIO_TYPE scenario) = 0; /** - * Enables or disables the local audio capture. - * - * The audio function is enabled by default. This method disables or re-enables the - * local audio function, that is, to stop or restart local audio capture and - * processing. - * - * This method does not affect receiving or playing the remote audio streams, - * and `enableLocalAudio` (false) is applicable to scenarios where the user wants - * to receive remote audio streams without sending any audio stream to other users - * in the channel. + * @brief Enables or disables the local audio capture. + * + * @details + * The audio function is enabled by default when users join a channel. This method disables or + * re-enables the local audio function to stop or restart local audio capturing.
+ * The difference between this method and `muteLocalAudioStream` are as follows: + * - `enableLocalAudio`: Disables or re-enables the local audio capturing and processing. If you + * disable or re-enable local audio capturing using the `enableLocalAudio` method, the local user + * might hear a pause in the remote audio playback. + * - `muteLocalAudioStream`: Sends or stops sending the local audio streams without affecting the + * audio capture status. + * Applicable scenarios: This method does not affect receiving the remote audio streams. + * `enableLocalAudio` `(false)` is suitable for scenarios where the user wants to receive remote + * audio streams without sending locally captured audio. + * Call timing: You can call this method either before or after joining a channel. Calling it before + * joining a channel only sets the device state, and it takes effect immediately after you join the + * channel. + * Related callbacks: Once the local audio function is disabled or re-enabled, the SDK triggers the + * `onLocalAudioStateChanged` callback, which reports `LOCAL_AUDIO_STREAM_STATE_STOPPED` (0) or + * `LOCAL_AUDIO_STREAM_STATE_RECORDING` (1). * - * @param enabled Determines whether to disable or re-enable the local audio function: - * - true: (Default) Re-enable the local audio function, that is, to start local - * audio capture and processing. - * - false: Disable the local audio function, that is, to stop local audio - * capture and processing. + * @param enabled + * - `true`: (Default) Re-enable the local audio function, that is, to start the + * local audio capturing device (for example, the microphone). + * - `false`: Disable the local audio function, that is, to stop local audio capturing. * * @return * - 0: Success. @@ -4486,29 +6224,24 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableLocalAudio(bool enabled) = 0; /** - Stops or resumes sending the local audio stream. 
- - After calling this method successfully, the SDK triggers the - \ref IRtcEngineEventHandler::onRemoteAudioStateChanged "onRemoteAudioStateChanged" - callback with the following parameters: - - REMOTE_AUDIO_STATE_STOPPED(0) and REMOTE_AUDIO_REASON_REMOTE_MUTED(5). - - REMOTE_AUDIO_STATE_DECODING(2) and REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6). - - @note - - When `mute` is set as `true`, this method does not disable the - microphone, which does not affect any ongoing recording. - - If you call \ref IRtcEngine::setChannelProfile "setChannelProfile" after - this method, the SDK resets whether or not to mute the local audio - according to the channel profile and user role. Therefore, we recommend - calling this method after the `setChannelProfile` method. - - @param mute Determines whether to send or stop sending the local audio stream: - - true: Stop sending the local audio stream. - - false: (Default) Send the local audio stream. - - @return - - 0: Success. - - < 0: Failure. + * @brief Stops or resumes publishing the local audio stream. + * + * @details + * This method is used to control whether to publish the locally captured audio stream. If you call + * this method to stop publishing locally captured audio streams, the audio capturing device will + * still work and won't be affected. + * Call timing: This method can be called either before or after joining the channel. + * Related callbacks: After successfully calling this method, the local end triggers callback + * `onAudioPublishStateChanged`; the remote end triggers `onUserMuteAudio` and + * `onRemoteAudioStateChanged` callbacks. + * + * @param mute Whether to stop publishing the local audio stream: + * - `true`: Stops publishing the local audio stream. + * - `false`: (Default) Resumes publishing the local audio stream. + * + * @return + * - 0: Success. + * - < 0: Failure. 
*/ virtual int muteLocalAudioStream(bool mute) = 0; @@ -4523,9 +6256,9 @@ class IRtcEngine : public agora::base::IEngineBase { - If you call muteAllRemoteAudioStreams(true) after joining a channel, the local use stops receiving any audio stream from any user in the channel, including any user who joins the channel after you call this method. - - If you call muteAllRemoteAudioStreams(true) after leaving a channel, the - local user does not receive any audio stream the next time the user joins a - channel. + - If you call muteAllRemoteAudioStreams(true) after leaving a channel, + the local user does not receive any audio stream the next time the user + joins a channel. After you successfully call muteAllRemoteAudioStreams(true), you can take the following actions: @@ -4553,19 +6286,46 @@ class IRtcEngine : public agora::base::IEngineBase { - 0: Success. - < 0: Failure. */ + /** + * @brief Stops or resumes subscribing to the audio streams of all remote users. + * + * @details + * After successfully calling this method, the local user stops or resumes subscribing to the audio + * streams of all remote users, including all subsequent users. + * Call timing: Call this method after joining a channel. + * + * @note + * If you call this method and then call `enableAudio` or `disableAudio`, the latest call will + * prevail. + * By default, the SDK subscribes to the audio streams of all remote users when joining a channel. + * To modify this behavior, you can set `autoSubscribeAudio` to `false` when calling + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` + * to join the channel, which will cancel the subscription to the audio streams of all users + * upon joining the channel. + * + * @param mute Whether to stop subscribing to the audio streams of all remote users: + * - `true`: Stops subscribing to the audio streams of all remote users. 
+ * - `false`: (Default) Subscribes to the audio streams of all remote users by default. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int muteAllRemoteAudioStreams(bool mute) = 0; /** - * Stops or resumes receiving the audio stream of a specified user. + * @brief Stops or resumes subscribing to the audio stream of a specified user. * - * @note - * You can call this method before or after joining a channel. If a user - * leaves a channel, the settings in this method become invalid. + * @details + * Call timing: Call this method after joining a channel. + * Related callbacks: After a successful method call, the SDK triggers the + * `onAudioSubscribeStateChanged` callback. * - * @param uid The ID of the specified user. - * @param mute Whether to stop receiving the audio stream of the specified user: - * - true: Stop receiving the audio stream of the specified user. - * - false: (Default) Resume receiving the audio stream of the specified user. + * @param uid The user ID of the specified user. + * @param mute Whether to subscribe to the specified remote user's audio stream. + * - `true`: Stop subscribing to the audio stream of the specified user. + * - `false`: (Default) Subscribe to the audio stream of the specified user. * * @return * - 0: Success. @@ -4574,11 +6334,22 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int muteRemoteAudioStream(uid_t uid, bool mute) = 0; /** - * Stops or resumes sending the local video stream. + * @brief Stops or resumes publishing the local video stream. * - * @param mute Determines whether to send or stop sending the local video stream: - * - true: Stop sending the local video stream. - * - false: (Default) Send the local video stream. + * @details + * This method is used to control whether to publish the locally captured video stream. If you call + * this method to stop publishing locally captured video streams, the video capturing device will + * still work and won't be affected. 
+ * Compared to `enableLocalVideo` (`false`), which can also cancel the publishing of local video + * stream by turning off the local video stream capture, this method responds faster. + * Call timing: This method can be called either before or after joining the channel. + * Related callbacks: After successfully calling this method, the local end triggers callback + * `onVideoPublishStateChanged`; the remote end triggers `onUserMuteVideo` and + * `onRemoteVideoStateChanged` callbacks. + * + * @param mute Whether to stop publishing the local video stream. + * - `true`: Stop publishing the local video stream. + * - `false`: (Default) Publish the local video stream. * * @return * - 0: Success. @@ -4587,24 +6358,29 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int muteLocalVideoStream(bool mute) = 0; /** - * Disables or re-enables the local video capture. - * - * Once you enable the video using \ref enableVideo "enableVideo", the local video is enabled - * by default. This method disables or re-enables the local video capture. + * @brief Enables/Disables the local video capture. * - * `enableLocalVideo(false)` applies to scenarios when the user wants to watch the remote video - * without sending any video stream to the other user. + * @details + * This method disables or re-enables the local video capture, and does not affect receiving the + * remote video stream. + * After calling `enableVideo`, the local video capture is enabled by default. + * If you call `enableLocalVideo` (`false`) to disable local video capture within the channel, it + * also simultaneously stops publishing the video stream within the channel. If you want to restart + * video capture, you can call `enableLocalVideo` (`true`) and then call `updateChannelMediaOptions` + * to set the `options` parameter to publish the locally captured video stream in the channel.
+ * After the local video capturer is successfully disabled or re-enabled, the SDK triggers the + * `onRemoteVideoStateChanged` callback on the remote client. * * @note - * Call this method after `enableVideo`. Otherwise, this method may not work properly. + * - You can call this method either before or after joining a channel. However, if you call it + * before joining, the settings will only take effect once you have joined the channel. + * - This method enables the internal engine and is valid after leaving the channel. * - * @param enabled Determines whether to disable or re-enable the local video, including - * the capturer, renderer, and sender: - * - true: (Default) Re-enable the local video. - * - false: Disable the local video. Once the local video is disabled, the remote - * users can no longer receive the video stream of this user, while this user - * can still receive the video streams of other remote users. When you set - * `enabled` as `false`, this method does not require a local camera. + * @param enabled Whether to enable the local video capture. + * - `true`: (Default) Enable the local video capture. + * - `false`: Disable the local video capture. Once the local video is disabled, the remote users + * cannot receive the video stream of the local user, while the local user can still receive the + * video streams of remote users. When set to `false`, this method does not require a local camera. * * @return * - 0: Success. @@ -4651,12 +6427,66 @@ class IRtcEngine : public agora::base::IEngineBase { - 0: Success. - < 0: Failure. */ + /** + * @brief Stops or resumes subscribing to the video streams of all remote users. + * + * @details + * After successfully calling this method, the local user stops or resumes subscribing to the video + * streams of all remote users, including all subsequent users. + * Call timing: Call this method after joining a channel. 
+ * + * @note + * If you call this method and then call `enableVideo` or `disableVideo`, the latest call will + * prevail. + * By default, the SDK subscribes to the video streams of all remote users when joining a channel. + * To modify this behavior, you can set `autoSubscribeVideo` to `false` when calling + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` + * to join the channel, which will cancel the subscription to the video streams of all users + * upon joining the channel. + * + * @param mute Whether to stop subscribing to the video streams of all remote users. + * - `true`: Stop subscribing to the video streams of all remote users. + * - `false`: (Default) Subscribe to the video streams of all remote users by default. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int muteAllRemoteVideoStreams(bool mute) = 0; /** - * Sets the default stream type of the remote video if the remote user has enabled dual-stream. + * @brief Sets the default video stream type to subscribe to. * - * @param streamType Sets the default video stream type: #VIDEO_STREAM_TYPE. + * @details + * Depending on the default behavior of the sender and the specific settings when calling + * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`, the + * scenarios for the receiver calling this method are as follows: + * - The SDK enables low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) on the + * sender side by default, meaning only the high-quality video stream is transmitted. Only the + * receiver with the role of the **host** can call this method to initiate a low-quality video stream + * request. Once the sender receives the request, it starts automatically sending the low-quality + * video stream. At this point, all users in the channel can call this method to switch to + * low-quality video stream subscription mode.
+ * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& + * streamConfig)` and sets `mode` to `DISABLE_SIMULCAST_STREAM` + * (never send low-quality video stream), then calling this method will have no effect. + * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& + * streamConfig)` and sets `mode` to `ENABLE_SIMULCAST_STREAM` + * (always send low-quality video stream), both the host and audience receivers can call this method + * to switch to low-quality video stream subscription mode. + * The SDK will dynamically adjust the size of the corresponding video stream based on the size of + * the video window to save bandwidth and computing resources. The default aspect ratio of the + * low-quality video stream is the same as that of the high-quality video stream. According to the + * current aspect ratio of the high-quality video stream, the system will automatically allocate the + * resolution, frame rate, and bitrate of the low-quality video stream. + * Call timing: Call this method before joining a channel. The SDK does not support changing the + * default subscribed video stream type after joining a channel. + * + * @note If you call both this method and `setRemoteVideoStreamType`, the setting of + * `setRemoteVideoStreamType` takes effect. + * + * @param streamType The default video-stream type. See `VIDEO_STREAM_TYPE`. * * @return * - 0: Success. @@ -4665,16 +6495,17 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setRemoteDefaultVideoStreamType(VIDEO_STREAM_TYPE streamType) = 0; /** - * Stops or resumes receiving the video stream of a specified user. + * @brief Stops or resumes subscribing to the video stream of a specified user. * - * @note - * You can call this method before or after joining a channel. If a user - * leaves a channel, the settings in this method become invalid. + * @details + * Call timing: Call this method after joining a channel. 
+ * Related callbacks: After a successful method call, the SDK triggers the + * `onVideoSubscribeStateChanged` callback. * - * @param uid The ID of the specified user. - * @param mute Whether to stop receiving the video stream of the specified user: - * - true: Stop receiving the video stream of the specified user. - * - false: (Default) Resume receiving the video stream of the specified user. + * @param uid The user ID of the specified user. + * @param mute Whether to subscribe to the specified remote user's video stream. + * - `true`: Stop subscribing to the video streams of the specified user. + * - `false`: (Default) Subscribe to the video stream of the specified user. * * @return * - 0: Success. @@ -4683,17 +6514,38 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int muteRemoteVideoStream(uid_t uid, bool mute) = 0; /** - * Sets the remote video stream type. - * - * If the remote user has enabled the dual-stream mode, by default the SDK receives the high-stream video by - * Call this method to switch to the low-stream video. + * @brief Sets the video stream type to subscribe to. + * + * @details + * Depending on the default behavior of the sender and the specific settings when calling + * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`, the + * scenarios for the receiver calling this method are as follows: + * - The SDK enables low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) on the + * sender side by default, meaning only the high-quality video stream is transmitted. Only the + * receiver with the role of the **host** can call this method to initiate a low-quality video stream + * request. Once the sender receives the request, it starts automatically sending the low-quality + * video stream. At this point, all users in the channel can call this method to switch to + * low-quality video stream subscription mode.
+ * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& + * streamConfig)` and sets `mode` to `DISABLE_SIMULCAST_STREAM` + * (never send low-quality video stream), then calling this method will have no effect. + * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& + * streamConfig)` and sets `mode` to `ENABLE_SIMULCAST_STREAM` + * (always send low-quality video stream), both the host and audience receivers can call this method + * to switch to low-quality video stream subscription mode. + * The SDK will dynamically adjust the size of the corresponding video stream based on the size of + * the video window to save bandwidth and computing resources. The default aspect ratio of the + * low-quality video stream is the same as that of the high-quality video stream. According to the + * current aspect ratio of the high-quality video stream, the system will automatically allocate the + * resolution, frame rate, and bitrate of the low-quality video stream. * * @note - * This method applies to scenarios where the remote user has enabled the dual-stream mode using - * \ref enableDualStreamMode "enableDualStreamMode"(true) before joining the channel. + * - You can call this method either before or after joining a channel. + * - If you call both this method and `setRemoteDefaultVideoStreamType`, the setting of this method + * takes effect. * - * @param uid ID of the remote user sending the video stream. - * @param streamType Sets the video stream type: #VIDEO_STREAM_TYPE. + * @param uid The user ID. + * @param streamType The video stream type, see `VIDEO_STREAM_TYPE`. * * @return * - 0: Success. @@ -4702,11 +6554,25 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setRemoteVideoStreamType(uid_t uid, VIDEO_STREAM_TYPE streamType) = 0; /** - * Sets the remote video subscription options + * @brief Options for subscribing to remote video streams. 
* + * @details + * When a remote user has enabled dual-stream mode, you can call this method to choose the option + * for subscribing to the video streams sent by the remote user. The default subscription behavior + * of the SDK for remote video streams depends on the type of registered video observer: + * - If the `IVideoFrameObserver` observer is registered, the default is to subscribe to both raw + * data and encoded data. + * - If the `IVideoEncodedFrameObserver` observer is registered, the default is to subscribe only to + * the encoded data. + * - If both types of observers are registered, the default behavior follows the last registered + * video observer. For example, if the last registered observer is the `IVideoFrameObserver` + * observer, the default is to subscribe to both raw data and encoded data. + * If you want to modify the default behavior, or set different subscription options for different + * `uids`, you can call this method to set it. + * + * @param uid The user ID of the remote user. + * @param options The video subscription options. See `VideoSubscriptionOptions`. * - * @param uid ID of the remote user sending the video stream. - * @param options Sets the video subscription options. * @return * - 0: Success. * - < 0: Failure. @@ -4714,14 +6580,27 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setRemoteVideoSubscriptionOptions(uid_t uid, const VideoSubscriptionOptions &options) = 0; /** - * Sets the blocklist of subscribe remote stream audio. + * @brief Sets the blocklist of subscriptions for audio streams. * - * @param uidList The id list of users whose audio you do not want to subscribe to. - * @param uidNumber The number of uid in uidList. + * @details + * You can call this method to specify the audio streams of a user that you do not want to subscribe + * to. 
* * @note - * If uid is in uidList, the remote user's audio will not be subscribed, - * even if muteRemoteAudioStream(uid, false) and muteAllRemoteAudioStreams(false) are operated. + * - You can call this method either before or after joining a channel. + * - The blocklist is not affected by the setting in `muteRemoteAudioStream`, + * `muteAllRemoteAudioStreams`, and `autoSubscribeAudio` in `ChannelMediaOptions`. + * - Once the blocklist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect. + * + * @param uidList The user ID list of users that you do not want to subscribe to. + * If you want to specify the audio streams of a user that you do not want to subscribe to, add the + * user ID in this list. If you want to remove a user from the blocklist, you need to call the + * `setSubscribeAudioBlocklist` method to update the user ID list; this means you only add the `uid` + * of users that you do not want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. * * @return * - 0: Success. @@ -4730,16 +6609,26 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setSubscribeAudioBlocklist(uid_t* uidList, int uidNumber) = 0; /** - * Sets the allowlist of subscribe remote stream audio. + * @brief Sets the allowlist of subscriptions for audio streams. * - * @param uidList The id list of users whose audio you want to subscribe to. - * @param uidNumber The number of uid in uidList. + * @details + * You can call this method to specify the audio streams of a user that you want to subscribe to. * * @note - * If uid is in uidList, the remote user's audio will be subscribed, - * even if muteRemoteAudioStream(uid, true) and muteAllRemoteAudioStreams(true) are operated. + * - You can call this method either before or after joining a channel. 
+ * - The allowlist is not affected by the setting in `muteRemoteAudioStream`, + * `muteAllRemoteAudioStreams` and `autoSubscribeAudio` in `ChannelMediaOptions`. + * - Once the allowlist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect. * - * If a user is in the blocklist and allowlist at the same time, only the blocklist takes effect. + * @param uidList The user ID list of users that you want to subscribe to. + * If you want to specify the audio streams of a user for subscription, add the user ID in this + * list. If you want to remove a user from the allowlist, you need to call the + * `setSubscribeAudioAllowlist` method to update the user ID list; this means you only add the `uid` + * of users that you want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. * * @return * - 0: Success. @@ -4748,14 +6637,27 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setSubscribeAudioAllowlist(uid_t* uidList, int uidNumber) = 0; /** - * Sets the blocklist of subscribe remote stream video. + * @brief Sets the blocklist of subscriptions for video streams. * - * @param uidList The id list of users whose video you do not want to subscribe to. - * @param uidNumber The number of uid in uidList. + * @details + * You can call this method to specify the video streams of a user that you do not want to subscribe + * to. * * @note - * If uid is in uidList, the remote user's video will not be subscribed, - * even if muteRemoteVideoStream(uid, false) and muteAllRemoteVideoStreams(false) are operated. + * - You can call this method either before or after joining a channel. + * - The blocklist is not affected by the setting in `muteRemoteVideoStream`, + * `muteAllRemoteVideoStreams` and `autoSubscribeVideo` in `ChannelMediaOptions`.
+ * - Once the blocklist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect. + * + * @param uidList The user ID list of users that you do not want to subscribe to. + * If you want to specify the video streams of a user that you do not want to subscribe to, add the + * user ID of that user in this list. If you want to remove a user from the blocklist, you need to + * call the `setSubscribeVideoBlocklist` method to update the user ID list; this means you only add + * the `uid` of users that you do not want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. * * @return * - 0: Success. @@ -4764,16 +6666,26 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setSubscribeVideoBlocklist(uid_t* uidList, int uidNumber) = 0; /** - * Sets the allowlist of subscribe remote stream video. + * @brief Sets the allowlist of subscriptions for video streams. * - * @param uidList The id list of users whose video you want to subscribe to. - * @param uidNumber The number of uid in uidList. + * @details + * You can call this method to specify the video streams of a user that you want to subscribe to. * * @note - * If uid is in uidList, the remote user's video will be subscribed, - * even if muteRemoteVideoStream(uid, true) and muteAllRemoteVideoStreams(true) are operated. + * - You can call this method either before or after joining a channel. + * - The allowlist is not affected by the setting in `muteRemoteVideoStream`, + * `muteAllRemoteVideoStreams` and `autoSubscribeVideo` in `ChannelMediaOptions`. + * - Once the allowlist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect.
* - * If a user is in the blocklist and allowlist at the same time, only the blocklist takes effect. + * @param uidList The user ID list of users that you want to subscribe to. + * If you want to specify the video streams of a user for subscription, add the user ID of that user + * in this list. If you want to remove a user from the allowlist, you need to call the + * `setSubscribeVideoAllowlist` method to update the user ID list; this means you only add the `uid` + * of users that you want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. * * @return * - 0: Success. @@ -4782,26 +6694,32 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setSubscribeVideoAllowlist(uid_t* uidList, int uidNumber) = 0; /** - * Enables the `onAudioVolumeIndication` callback to report on which users are speaking - * and the speakers' volume. + * @brief Enables the reporting of users' volume indication. * - * Once the \ref IRtcEngineEventHandler::onAudioVolumeIndication "onAudioVolumeIndication" - * callback is enabled, the SDK returns the volume indication in the at the time interval set - * in `enableAudioVolumeIndication`, regardless of whether any user is speaking in the channel. + * @details + * This method enables the SDK to regularly report the volume information to the app of the local + * user who sends a stream and remote users (three users at most) whose instantaneous volumes are + * the highest. + * Call timing: This method can be called either before or after joining the channel. + * Related callbacks: The SDK triggers the `onAudioVolumeIndication` callback according to the + * interval you set if this method is successfully called and there are users publishing streams in + * the channel. * * @param interval Sets the time interval between two consecutive volume indications: - * - <= 0: Disables the volume indication. 
- * - > 0: Time interval (ms) between two consecutive volume indications, - * and should be integral multiple of 200 (less than 200 will be set to 200). - * @param smooth The smoothing factor that sets the sensitivity of the audio volume - * indicator. The value range is [0, 10]. The greater the value, the more sensitive the - * indicator. The recommended value is 3. - * @param reportVad - * - `true`: Enable the voice activity detection of the local user. Once it is enabled, the `vad` parameter of the - * `onAudioVolumeIndication` callback reports the voice activity status of the local user. - * - `false`: (Default) Disable the voice activity detection of the local user. Once it is disabled, the `vad` parameter - * of the `onAudioVolumeIndication` callback does not report the voice activity status of the local user, except for - * the scenario where the engine automatically detects the voice activity of the local user. + * - ≤ 0: Disables the volume indication. + * - > 0: Time interval (ms) between two consecutive volume indications. Ensure this parameter is + * set to a value greater than 10, otherwise you will not receive the `onAudioVolumeIndication` + * callback. Agora recommends that this value is set as greater than 100. + * @param smooth The smoothing factor that sets the sensitivity of the audio volume indicator. The + * value ranges between 0 and 10. The recommended value is 3. The greater the value, the more + * sensitive the indicator. + * @param reportVad - `true`: Enables the voice activity detection of the local user. Once it is + * enabled, the `vad` parameter of the `onAudioVolumeIndication` callback reports the voice activity + * status of the local user. + * - `false`: (Default) Disables the voice activity detection of the local user. 
Once it is + * disabled, the `vad` parameter of the `onAudioVolumeIndication` callback does not report the voice + * activity status of the local user, except for the scenario where the engine automatically detects + * the voice activity of the local user. * * @return * - 0: Success. @@ -4809,49 +6727,62 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int enableAudioVolumeIndication(int interval, int smooth, bool reportVad) = 0; - /** Starts an audio recording. - - The SDK allows recording during a call, which supports either one of the - following two formats: - - - .wav: Large file size with high sound fidelity - - .aac: Small file size with low sound fidelity - - Ensure that the directory to save the recording file exists and is writable. - This method is usually called after the joinChannel() method. - The recording automatically stops when the leaveChannel() method is - called. - - @param filePath Full file path of the recording file. The string of the file - name is in UTF-8 code. - @param quality Sets the audio recording quality: #AUDIO_RECORDING_QUALITY_TYPE. - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Starts client-side audio recording with recording configuration. + * + * @details + * The SDK supports recording on the client during a call. After calling this method, you can record + * the audio of users in the channel and obtain a recording file. The recording file supports the + * following formats only: + * - WAV: Higher audio fidelity, larger file size. For example, with a sample rate of 32000 Hz, a + * 10-minute recording is about 73 MB. + * - AAC: Lower audio fidelity, smaller file size. For example, with a sample rate of 32000 Hz and + * recording quality set to AUDIO_RECORDING_QUALITY_MEDIUM, a 10-minute recording is about 2 MB. + * Recording automatically stops when the user leaves the channel. + * Call timing: This method must be called after joining a channel. 
+ * + * @param config Recording configuration. See `AudioRecordingConfiguration`. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + */ virtual int startAudioRecording(const char* filePath, AUDIO_RECORDING_QUALITY_TYPE quality) = 0; - /** Starts an audio recording. - - The SDK allows recording during a call, which supports either one of the - following two formats: - - - .wav: Large file size with high sound fidelity - - .aac: Small file size with low sound fidelity - - Ensure that the directory to save the recording file exists and is writable. - This method is usually called after the joinChannel() method. - The recording automatically stops when the leaveChannel() method is - called. - - @param filePath Full file path of the recording file. The string of the file - name is in UTF-8 code. - @param sampleRate Sample rate, value should be 16000, 32000, 44100, or 48000. - @param quality Sets the audio recording quality: #AUDIO_RECORDING_QUALITY_TYPE. - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Starts client-side audio recording and sets the recording sample rate. + * + * @details + * The SDK supports recording on the client during a call. After calling this method, you can record + * the audio of all users in the channel and obtain a recording file that includes all voices. The + * recording file supports the following formats only: + * - .wav: Large file size, higher audio fidelity. + * - .aac: Smaller file size, lower audio fidelity. + * + * @note + * - Make sure the path you specify in this method exists and is writable. + * - This method must be called after `joinChannel(const char* token, const char* channelId, uid_t + * uid, const ChannelMediaOptions& options)`. If `leaveChannel(const LeaveChannelOptions& options)` + * is called while recording is in progress, the recording will automatically stop. 
+ * - To ensure recording quality, when `sampleRate` is set to 44.1 kHz or 48 kHz, it is recommended + * to set `quality` to `AUDIO_RECORDING_QUALITY_MEDIUM` + * or `AUDIO_RECORDING_QUALITY_HIGH`. + * + * @param filePath The absolute path where the recording file will be saved locally, including the + * file name and extension. For example: `C:\music\audio.aac`. + * Note: + * Make sure the specified path exists and is writable. + * @param sampleRate Recording sample rate (Hz). You can set it to one of the following values: + * - 16000 + * - 32000 (default) + * - 44100 + * - 48000 + * @param quality Recording quality. See `AUDIO_RECORDING_QUALITY_TYPE`. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + */ virtual int startAudioRecording(const char* filePath, int sampleRate, AUDIO_RECORDING_QUALITY_TYPE quality) = 0; @@ -4876,68 +6807,94 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int startAudioRecording(const AudioRecordingConfiguration& config) = 0; - /** register encoded audio frame observer - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Registers an encoded audio observer. + * + * @note + * - Call this method after joining a channel. + * - You can call this method or `startAudioRecording [3/3]` to set the recording type and quality + * of audio files, but Agora does not recommend using this method and `startAudioRecording [3/3]` at + * the same time. Only the method called later will take effect. + * + * @param config Observer settings for the encoded audio. See `AudioEncodedFrameObserverConfig`. + * @param observer The encoded audio observer. See `IAudioEncodedFrameObserver`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int registerAudioEncodedFrameObserver(const AudioEncodedFrameObserverConfig& config, IAudioEncodedFrameObserver *observer) = 0; - /** Stops the audio recording on the client. 
- - The recording automatically stops when the leaveChannel() method is called. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Stops client-side audio recording. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. + */ virtual int stopAudioRecording() = 0; - /** - * Creates a media player source object and return its pointer. If full featured - * media player source is supported, it will create it, or it will create a simple - * media player. + /** + * @brief Creates a media player object. + * + * @details + * Before calling any APIs in the `IMediaPlayer` class, you need to call this method to create an + * instance of the media player. If you need to create multiple instances, you can call this method + * multiple times. + * Call timing: You can call this method either before or after joining a channel. * * @return - * - The pointer to \ref rtc::IMediaPlayerSource "IMediaPlayerSource", - * if the method call succeeds. - * - The empty pointer NULL, if the method call fails. + * - An `IMediaPlayer` object, if the method call succeeds. + * - An empty pointer, if the method call fails. */ virtual agora_refptr createMediaPlayer() = 0; /** - * Destroy a media player source instance. - * If a media player source instance is destroyed, the video and audio of it cannot - * be published. + * @brief Destroys the media player instance. * - * @param media_player The pointer to \ref rtc::IMediaPlayerSource. + * @param media_player `IMediaPlayer` object. * * @return - * - >0: The id of media player source instance. + * - ≥ 0: Success. Returns the ID of media player instance. * - < 0: Failure. */ virtual int destroyMediaPlayer(agora_refptr media_player) = 0; /** - * Creates a media recorder object and return its pointer. + * @brief Creates an audio and video recording object. 
+ * + * @details + * Before starting to record audio and video streams, you need to call this method to create a + * recording object. The SDK supports recording multiple audio and video streams from local or + * remote users. You can call this method multiple times to create recording objects, and use the + * `info` + * parameter to specify the channel name and the user ID of the stream to be recorded. + * After successful creation, you need to call `setMediaRecorderObserver` to register an observer + * for the recording object to listen for related callbacks, and then call `startRecording` to begin + * recording. + * + * @param info Information about the audio and video stream to be recorded. See + * `RecorderStreamInfo`. * - * @param info The RecorderStreamInfo object. It contains the user ID and the channel name. - * * @return - * - The pointer to \ref rtc::IMediaRecorder "IMediaRecorder", - * if the method call succeeds. - * - The empty pointer NULL, if the method call fails. + * - If the method call succeeds: Returns an `IMediaRecorder` object. + * - If the method call fails: Returns a null pointer. */ virtual agora_refptr createMediaRecorder(const RecorderStreamInfo& info) = 0; /** - * Destroy a media recorder object. + * @brief Destroys an audio and video recording object. + * + * @details + * When you no longer need to record audio and video streams, you can call this method to destroy + * the corresponding recording object. If recording is in progress, call `stopRecording` first, then + * call this method to destroy the recording object. * - * @param mediaRecorder The pointer to \ref rtc::IMediaRecorder. + * @param mediaRecorder The `IMediaRecorder` object to be destroyed. * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int destroyMediaRecorder(agora_refptr mediaRecorder) = 0; @@ -4982,6 +6939,57 @@ class IRtcEngine : public agora::base::IEngineBase { - 0: Success. 
- < 0: Failure. */ + /** + * @brief Starts playing the music file. + * + * @details + * For the audio file formats supported by this method, see `What formats of audio files does the + * Agora RTC SDK support`. If the local music file does not exist, the SDK does not support the file + * format, or the SDK cannot access the music file URL, the SDK reports + * AUDIO_MIXING_REASON_CAN_NOT_OPEN. + * Call timing: You can call this method either before or after joining a channel. + * Related callbacks: A successful method call triggers the `onAudioMixingStateChanged` + * (`AUDIO_MIXING_STATE_PLAYING`) callback. When the audio mixing file playback finishes, the SDK + * triggers the `onAudioMixingStateChanged` (`AUDIO_MIXING_STATE_STOPPED`) callback on the local + * client. + * + * @note + * - If you call this method to play short sound effect files, you may encounter playback failure. + * Agora recommends using `playEffect` instead to play such files. + * - If you need to call this method multiple times, ensure that the time interval between calling + * this method is more than 500 ms. + * - On Android, there are following considerations: + * - To use this method, ensure that the Android device is v4.2 or later, and the API version is + * v16 or later. + * - If you need to play an online music file, Agora does not recommend using the redirected URL + * address. Some Android devices may fail to open a redirected URL address. + * - If you call this method on an emulator, ensure that the music file is in the `/sdcard/` + * directory and the format is MP3. + * + * @param filePath The file path. The SDK supports URLs and absolute path of local files. The + * absolute path needs to be accurate to the file name and extension. Supported audio formats + * include MP3, AAC, M4A, MP4, WAV, and 3GP. See `Supported Audio Formats`. 
+ * Attention: If you have preloaded an audio effect into memory by calling `preloadEffect`, ensure + * that the value of this parameter is the same as that of `filePath` in `preloadEffect`. + * @param loopback Whether to only play music files on the local client: + * - `true`: Only play music files on the local client so that only the local user can hear the + * music. + * - `false`: Publish music files to remote clients so that both the local user and remote users can + * hear the music. + * @param cycle The number of times the music file plays. + * - >0: The number of times for playback. For example, 1 represents playing 1 time. + * - -1: Play the audio file in an infinite loop. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -1: A general error occurs (no specified reason). + * - -2: The parameter is invalid. + * - -3: The SDK is not ready. + * - The audio module is disabled. + * - The program is not complete. + * - The initialization of `IRtcEngine` fails. Reinitialize the `IRtcEngine`. + */ virtual int startAudioMixing(const char* filePath, bool loopback, int cycle) = 0; /** Starts playing and mixing the music file. @@ -5027,164 +7035,324 @@ class IRtcEngine : public agora::base::IEngineBase { - 0: Success. - < 0: Failure. */ + /** + * @brief Starts playing the music file. + * + * @details + * For the audio file formats supported by this method, see `What formats of audio files does the + * Agora RTC SDK support`. If the local music file does not exist, the SDK does not support the file + * format, or the SDK cannot access the music file URL, the SDK reports + * AUDIO_MIXING_REASON_CAN_NOT_OPEN. + * Call timing: You can call this method either before or after joining a channel. + * Related callbacks: A successful method call triggers the `onAudioMixingStateChanged` + * (`AUDIO_MIXING_STATE_PLAYING`) callback. 
When the audio mixing file playback finishes, the SDK + * triggers the `onAudioMixingStateChanged` (`AUDIO_MIXING_STATE_STOPPED`) callback on the local + * client. + * + * @note + * - If you call this method to play short sound effect files, you may encounter playback failure. + * Agora recommends using `playEffect` instead to play such files. + * - If you need to call this method multiple times, ensure that the time interval between calling + * this method is more than 500 ms. + * - On Android, there are following considerations: + * - To use this method, ensure that the Android device is v4.2 or later, and the API version is + * v16 or later. + * - If you need to play an online music file, Agora does not recommend using the redirected URL + * address. Some Android devices may fail to open a redirected URL address. + * - If you call this method on an emulator, ensure that the music file is in the `/sdcard/` + * directory and the format is MP3. + * + * @param filePath File path: + * - Android: The file path, which needs to be accurate to the file name and suffix. Agora supports + * URL addresses, absolute paths, or file paths that start with `/assets/`. You might encounter + * permission issues if you use an absolute path to access a local file, so Agora recommends using a + * URI address instead. For example`: + * content://com.android.providers.media.documents/document/audio%3A14441` + * - Windows: The absolute path or URL address (including the suffixes of the filename) of the audio + * effect file. For example`: C:\music\audio.mp4`. + * @param loopback Whether to only play music files on the local client: + * - `true`: Only play music files on the local client so that only the local user can hear the + * music. + * - `false`: Publish music files to remote clients so that both the local user and remote users can + * hear the music. + * @param cycle The number of times the music file plays. + * - >0: The number of times for playback. 
For example, 1 represents playing 1 time. + * - -1: Play the audio file in an infinite loop. + * @param startPos The playback position (ms) of the music file. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -1: A general error occurs (no specified reason). + * - -2: The parameter is invalid. + * - -3: The SDK is not ready. + * - The audio module is disabled. + * - The program is not complete. + * - The initialization of `IRtcEngine` fails. Reinitialize the `IRtcEngine`. + */ virtual int startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos) = 0; - /** Stops playing and mixing the music file. - - Call this method when you are in a channel. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Stops playing the music file. + * + * @details + * After calling `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)` to + * play a music file, you can call this method to stop the + * playing. If you only need to pause the playback, call `pauseAudioMixing`. + * Call timing: Call this method after joining a channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int stopAudioMixing() = 0; - /** Pauses playing and mixing the music file. - - Call this method when you are in a channel. + /** + * @brief Pauses playing and mixing the music file. + * + * @details + * After calling `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)` to + * play a music file, you can call this method to pause + * the playing. If you need to stop the playback, call `stopAudioMixing`. + * Call timing: Call this method after joining a channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int pauseAudioMixing() = 0; - @return - - 0: Success. - - < 0: Failure. - */ - virtual int pauseAudioMixing() = 0; - - /** Resumes playing and mixing the music file. - - Call this method when you are in a channel. - - @return - - 0: Success. - - < 0: Failure. 
- */ + /** + * @brief Resumes playing and mixing the music file. + * + * @details + * After calling `pauseAudioMixing` to pause the playback, you can call this method to resume the + * playback. + * Call timing: Call this method after joining a channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int resumeAudioMixing() = 0; - /** Select audio track for the music file. - - Call this method when you are in a channel. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Selects the audio track used during playback. + * + * @details + * After getting the track index of the audio file, you can call this method to specify any track to + * play. For example, if different tracks of a multi-track file store songs in different languages, + * you can call this method to set the playback language. + * + * @note + * - For the supported formats of audio files, see + * `https://docs.agora.io/en/help/general-product-inquiry/audio_format#extended-audio-file-formats`. + * - You need to call this method after calling `startAudioMixing(const char* filePath, bool + * loopback, int cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @param index The audio track you want to specify. The value should be greater than 0 and less + * than that of returned by `getAudioTrackCount`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int selectAudioTrack(int index) = 0; - /** Get audio track count of the music file. - - Call this method when you are in a channel. - - @return - - ≥ 0: Audio track count of the music file, if the method call is successful. - - < 0: Failure. + /** + * @brief Gets the index of audio tracks of the current music file. + * + * @note You need to call this method after calling `startAudioMixing(const char* filePath, bool + * loopback, int cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. 
+ * + * @return + * - The SDK returns the index of the audio tracks if the method call succeeds. + * - < 0: Failure. */ virtual int getAudioTrackCount() = 0; - /** Adjusts the volume during audio mixing. - - Call this method when you are in a channel. - - @note This method does not affect the volume of audio effect file playback - invoked by the \ref IRtcEngine::playEffect "playEffect" method. - - @param volume The audio mixing volume. The value ranges between 0 and 100 - (default). - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Adjusts the volume during audio mixing. + * + * @details + * This method adjusts the audio mixing volume on both the local client and remote clients. + * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int + * cycle, int startPos)`. + * + * @note This method does not affect the volume of the audio file set in the `playEffect` method. + * + * @param volume Audio mixing volume. The value ranges between 0 and 100. The default value is 100, + * which means the original volume. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int adjustAudioMixingVolume(int volume) = 0; - /** Adjusts the audio mixing volume for publishing (for remote users). - @note Call this method when you are in a channel. - @param volume Audio mixing volume for publishing. The value ranges between 0 and 100 (default). - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Adjusts the volume of audio mixing for publishing. + * + * @details + * This method adjusts the volume of audio mixing for publishing (sending to other users). + * Call timing: Call this method after calling `startAudioMixing(const char* filePath, bool + * loopback, int cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @param volume The volume of audio mixing for local playback. The value ranges between 0 and 100 + * (default). 
100 represents the original volume. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int adjustAudioMixingPublishVolume(int volume) = 0; - /** Retrieves the audio mixing volume for publishing. - This method helps troubleshoot audio volume related issues. - @note Call this method when you are in a channel. - @return - - ≥ 0: The audio mixing volume for publishing, if this method call succeeds. The value range is [0,100]. - - < 0: Failure. + /** + * @brief Retrieves the audio mixing volume for publishing. + * + * @details + * This method helps troubleshoot audio volume‑related issues. + * + * @note You need to call this method after calling `startAudioMixing(const char* filePath, bool + * loopback, int cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @return + * - ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100]. + * - < 0: Failure. */ virtual int getAudioMixingPublishVolume() = 0; - /** Adjusts the audio mixing volume for local playback. - @note Call this method when you are in a channel. - @param volume Audio mixing volume for local playback. The value ranges between 0 and 100 (default). - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Adjusts the volume of audio mixing for local playback. + * + * @details + * Call timing: You need to call this method after calling `startAudioMixing(const char* filePath, + * bool loopback, int cycle, int startPos)` and receiving + * the `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @param volume The volume of audio mixing for local playback. The value ranges between 0 and 100 + * (default). 100 represents the original volume. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int adjustAudioMixingPlayoutVolume(int volume) = 0; - /** Retrieves the audio mixing volume for local playback. - This method helps troubleshoot audio volume related issues. 
- @note Call this method when you are in a channel. - @return - - ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100]. - - < 0: Failure. + /** + * @brief Retrieves the audio mixing volume for local playback. + * + * @details + * You can call this method to get the local playback volume of the mixed audio file, which helps in + * troubleshooting volume‑related issues. + * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int + * cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @return + * - ≥ 0: The audio mixing volume, if this method call succeeds. The value range is [0,100]. + * - < 0: Failure. */ virtual int getAudioMixingPlayoutVolume() = 0; - /** Gets the duration (ms) of the music file. - - Call this API when you are in a channel. - - @return - - Returns the audio mixing duration, if the method call is successful. - - < 0: Failure. + /** + * @brief Retrieves the duration (ms) of the music file. + * + * @details + * Retrieves the total duration (ms) of the audio. + * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int + * cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @return + * - ≥ 0: The audio mixing duration, if this method call succeeds. + * - < 0: Failure. */ virtual int getAudioMixingDuration() = 0; - /** Gets the playback position (ms) of the music file. - - Call this method when you are in a channel. - - @return - - ≥ 0: The current playback position of the audio mixing, if this method - call succeeds. - - < 0: Failure. + /** + * @brief Retrieves the playback position (ms) of the music file. + * + * @details + * Retrieves the playback position (ms) of the audio. 
+ * + * @note + * - You need to call this method after calling `startAudioMixing(const char* filePath, bool + * loopback, int cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * - If you need to call `getAudioMixingCurrentPosition` multiple times, ensure that the time + * interval between calling this method is more than 500 ms. + * + * @return + * - ≥ 0: The current playback position (ms) of the audio mixing, if this method call succeeds. 0 + * represents that the current music file does not start playing. + * - < 0: Failure. */ virtual int getAudioMixingCurrentPosition() = 0; - /** Sets the playback position of the music file to a different starting - position (the default plays from the beginning). - - @param pos The playback starting position (ms) of the audio mixing file. - - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the audio mixing position. + * + * @details + * Call this method to set the playback position of the music file to a different starting position + * (the default plays from the beginning). + * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int + * cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @param pos Integer. The playback position (ms). + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setAudioMixingPosition(int pos /*in ms*/) = 0; - /** In dual-channel music files, different audio data can be stored on the left and right channels. - * According to actual needs, you can set the channel mode as the original mode, - * the left channel mode, the right channel mode or the mixed mode - - @param mode The mode of channel mode - - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the channel mode of the current audio file. 
+ * + * @details + * In a stereo music file, the left and right channels can store different audio data. According to + * your needs, you can set the channel mode to original mode, left channel mode, right channel mode, + * or mixed channel mode. + * Applicable scenarios: For example, in the KTV scenario, the left channel of the music file stores + * the musical accompaniment, and the right channel stores the original singer's vocals. You can set + * according to actual needs: + * - If you only want to hear the accompaniment, use this method to set the audio file's channel + * mode to left channel mode. + * - If you need to hear both the accompaniment and the original vocals simultaneously, call this + * method to set the channel mode to mixed mode. + * Call timing: Call this method after `startAudioMixing(const char* filePath, bool loopback, int + * cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. + * + * @note This method only applies to stereo audio files. + * + * @param mode The channel mode. See `AUDIO_MIXING_DUAL_MONO_MODE`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setAudioMixingDualMonoMode(media::AUDIO_MIXING_DUAL_MONO_MODE mode) = 0; - /** Sets the pitch of the local music file. - * - * When a local music file is mixed with a local human voice, call this method to set the pitch of the local music file only. + /** + * @brief Sets the pitch of the local music file. * - * @note Call this method after calling \ref IRtcEngine::startAudioMixing "startAudioMixing" and - * receiving the \ref IRtcEngineEventHandler::onAudioMixingStateChanged "onAudioMixingStateChanged" (AUDIO_MIXING_STATE_PLAYING) callback. + * @details + * When a local music file is mixed with a local human voice, call this method to set the pitch of + * the local music file only. 
+ * Call timing: You need to call this method after calling `startAudioMixing(const char* filePath, + * bool loopback, int cycle, int startPos)` and receiving + * the `onAudioMixingStateChanged` `(AUDIO_MIXING_STATE_PLAYING)` callback. * - * @param pitch Sets the pitch of the local music file by chromatic scale. The default value is 0, - * which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value between - * consecutive values is a chromatic value. The greater the absolute value of this parameter, the - * higher or lower the pitch of the local music file. + * @param pitch Sets the pitch of the local music file by the chromatic scale. The default value is + * 0, which means keeping the original pitch. The value ranges from -12 to 12, and the pitch value + * between consecutive values is a chromatic value. The greater the absolute value of this + * parameter, the higher or lower the pitch of the local music file. * * @return * - 0: Success. @@ -5193,12 +7361,15 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setAudioMixingPitch(int pitch) = 0; /** - * Sets the playback speed of the current music file. + * @brief Sets the playback speed of the current audio file. * - * @note Call this method after calling \ref IRtcEngine::startAudioMixing(const char*,bool,bool,int,int) "startAudioMixing" [2/2] - * and receiving the \ref IRtcEngineEventHandler::onAudioMixingStateChanged "onAudioMixingStateChanged" (AUDIO_MIXING_STATE_PLAYING) callback. + * @details + * Ensure you call this method after calling `startAudioMixing(const char* filePath, bool loopback, + * int cycle, int startPos)` and receiving the + * `onAudioMixingStateChanged` callback reporting the state as `AUDIO_MIXING_STATE_PLAYING`. * - * @param speed The playback speed. Agora recommends that you limit this value to between 50 and 400, defined as follows: + * @param speed The playback speed. 
Agora recommends that you set this to a value between 50 and + * 400, defined as follows: * - 50: Half the original speed. * - 100: The original speed. * - 400: 4 times the original speed. @@ -5208,116 +7379,145 @@ class IRtcEngine : public agora::base::IEngineBase { * - < 0: Failure. */ virtual int setAudioMixingPlaybackSpeed(int speed) = 0; - + /** - * Gets the volume of audio effects. + * @brief Retrieves the volume of the audio effects. + * + * @details + * The volume is an integer ranging from 0 to 100. The default value is 100, which means the + * original volume. + * + * @note Call this method after `playEffect`. * * @return - * - ≥ 0: The volume of audio effects. The value ranges between 0 and 100 (original volume). + * - Volume of the audio effects, if this method call succeeds. * - < 0: Failure. */ virtual int getEffectsVolume() = 0; - /** Sets the volume of audio effects. + /** + * @brief Sets the volume of the audio effects. * - * @param volume The volume of audio effects. The value ranges between 0 - * and 100 (original volume). + * @details + * Call timing: Call this method after `playEffect`. + * + * @param volume The playback volume. The value range is [0, 100]. The default value is 100, which + * represents the original volume. * * @return * - 0: Success. * - < 0: Failure. */ virtual int setEffectsVolume(int volume) = 0; - /** Preloads a specified audio effect. - * - * This method preloads only one specified audio effect into the memory each time - * it is called. To preload multiple audio effects, call this method multiple times. - * - * After preloading, you can call \ref IRtcEngine::playEffect "playEffect" - * to play the preloaded audio effect or call - * \ref IRtcEngine::playAllEffects "playAllEffects" to play all the preloaded - * audio effects. + /** + * @brief Preloads a specified audio effect file into the memory. * + * @details + * Ensure the size of all preloaded files does not exceed the limit. 
+ * For the audio file formats supported by this method, see `What formats of audio files does the + * Agora RTC SDK support`. + * Call timing: Agora recommends that you call this method before joining a channel. + * * @note - * - To ensure smooth communication, limit the size of the audio effect file. - * - Agora recommends calling this method before joining the channel. - * - * @param soundId The ID of the audio effect. - * @param filePath The absolute path of the local audio effect file or the URL - * of the online audio effect file. Supported audio formats: mp3, mp4, m4a, aac, - * 3gp, mkv, and wav. + * - If preloadEffect is called before playEffect is executed, the file resource will not be closed after playEffect. + * The next time playEffect is executed, it will directly seek to play at the beginning. + * - If preloadEffect is not called before playEffect is executed, the resource will be destroyed after playEffect. + * The next time playEffect is executed, it will try to reopen the file and play it from the beginning. + * + * @param soundId The audio effect ID. The ID of each audio effect file is unique. + * @param filePath File path: + * - Android: The file path, which needs to be accurate to the file name and suffix. Agora supports + * URL addresses, absolute paths, or file paths that start with `/assets/`. You might encounter + * permission issues if you use an absolute path to access a local file, so Agora recommends using a + * URI address instead. For example: + * `content://com.android.providers.media.documents/document/audio%3A14441` + * - Windows: The absolute path or URL address (including the suffixes of the filename) of the audio + * effect file. For example: `C:\music\audio.mp4`. + * - iOS or macOS: The absolute path or URL address (including the suffixes of the filename) of the audio effect file. For example: `/var/mobile/Containers/Data/audio.mp4`. + * @param startPos The playback position (ms) of the audio effect file. 
* * @return * - 0: Success. * - < 0: Failure. */ virtual int preloadEffect(int soundId, const char* filePath, int startPos = 0) = 0; - /** Plays a specified audio effect. - * - * After calling \ref IRtcEngine::preloadEffect "preloadEffect", you can call - * this method to play the specified audio effect for all users in - * the channel. - * - * This method plays only one specified audio effect each time it is called. - * To play multiple audio effects, call this method multiple times. - * - * @note - * - Agora recommends playing no more than three audio effects at the same time. - * - The ID and file path of the audio effect in this method must be the same - * as that in the \ref IRtcEngine::preloadEffect "preloadEffect" method. - * - * @param soundId The ID of the audio effect. - * @param filePath The absolute path of the local audio effect file or the URL - * of the online audio effect file. Supported audio formats: mp3, mp4, m4a, aac, - * 3gp, mkv, and wav. - * @param loopCount The number of times the audio effect loops: - * - `-1`: Play the audio effect in an indefinite loop until - * \ref IRtcEngine::stopEffect "stopEffect" or - * \ref IRtcEngine::stopAllEffects "stopAllEffects" - * - `0`: Play the audio effect once. - * - `1`: Play the audio effect twice. - * @param pitch The pitch of the audio effect. The value ranges between 0.5 and 2.0. - * The default value is `1.0` (original pitch). The lower the value, the lower the pitch. + /** + * @brief Plays the specified local or online audio effect file. + * + * @details + * To play multiple audio effect files at the same time, call this method multiple times with + * different `soundId` and `filePath`. To achieve the optimal user experience, Agora recommends that + * you do not play more than three audio files at the same time. + * Call timing: You can call this method either before or after joining a channel. 
+ * Related callbacks: After the playback of an audio effect file completes, the SDK triggers the + * `onAudioEffectFinished` callback. + * + * @note + * - If you need to play an online audio effect file, Agora recommends that you cache the online + * audio effect file to your local device, call `preloadEffect` to preload the file into memory, and + * then call this method to play the audio effect. Otherwise, you might encounter playback failures + * or no sound during playback due to loading timeouts or failures. + * - If preloadEffect is called before playEffect is executed, the file resource will not be closed after playEffect. + * The next time playEffect is executed, it will directly seek to play at the beginning. + * - If preloadEffect is not called before playEffect is executed, the resource will be destroyed after playEffect. + * The next time playEffect is executed, it will try to reopen the file and play it from the beginning. + * + * @param soundId The audio effect ID. The ID of each audio effect file is unique. Attention: If you + * have preloaded an audio effect into memory by calling `preloadEffect`, ensure that the value of + * this parameter is the same as that of `soundId` in `preloadEffect`. + * @param filePath The file path. The SDK supports URLs and absolute path of local files. The + * absolute path needs to be accurate to the file name and extension. Supported audio formats + * include MP3, AAC, M4A, MP4, WAV, and 3GP. See `Supported Audio Formats`. + * Attention: If you have preloaded an audio effect into memory by calling `preloadEffect`, ensure + * that the value of this parameter is the same as that of `filePath` in `preloadEffect`. + * @param loopCount The number of times the audio effect loops. + * - ≥ 0: The number of playback times. For example, 1 means looping one time, which means playing + * the audio effect two times in total. + * - -1: Play the audio file in an infinite loop. + * @param pitch The pitch of the audio effect. 
The value range is 0.5 to 2.0. The default value is + * 1.0, which means the original pitch. The lower the value, the lower the pitch. * @param pan The spatial position of the audio effect. The value ranges between -1.0 and 1.0: - * - `-1.0`: The audio effect displays to the left. - * - `0.0`: The audio effect displays ahead. - * - `1.0`: The audio effect displays to the right. - * @param gain The volume of the audio effect. The value ranges between 0 and 100. - * The default value is `100` (original volume). The lower the value, the lower - * the volume of the audio effect. - * @param publish Sets whether to publish the audio effect to the remote: - * - true: Publish the audio effect to the remote. - * - false: (Default) Do not publish the audio effect to the remote. + * - -1.0: The audio effect is heard on the left of the user. + * - 0.0: The audio effect is heard in front of the user. + * - 1.0: The audio effect is heard on the right of the user. + * @param gain The volume of the audio effect. The value range is 0.0 to 100.0. The default value is + * 100.0, which means the original volume. The smaller the value, the lower the volume. + * @param publish Whether to publish the audio effect to the remote users: + * - `true`: Publish the audio effect to the remote users. Both the local user and remote users can + * hear the audio effect. + * - `false`: Do not publish the audio effect to the remote users. Only the local user can hear the + * audio effect. + * @param startPos The playback position (ms) of the audio effect file. * * @return * - 0: Success. * - < 0: Failure. */ virtual int playEffect(int soundId, const char* filePath, int loopCount, double pitch, double pan, int gain, bool publish = false, int startPos = 0) = 0; - /** Plays all audio effects. + /** + * @brief Plays all audio effect files. 
* - * After calling \ref IRtcEngine::preloadEffect "preloadEffect" multiple times - * to preload multiple audio effects into the memory, you can call this - * method to play all the specified audio effects for all users in - * the channel. + * @details + * After calling `preloadEffect` multiple times to preload multiple audio effects into the memory, + * you can call this method to play all the specified audio effects for all users in the channel. * * @param loopCount The number of times the audio effect loops: - * - `-1`: Play the audio effect in an indefinite loop until - * \ref IRtcEngine::stopEffect "stopEffect" or - * \ref IRtcEngine::stopAllEffects "stopAllEffects" - * - `0`: Play the audio effect once. - * - `1`: Play the audio effect twice. - * @param pitch The pitch of the audio effect. The value ranges between 0.5 and 2.0. - * The default value is `1.0` (original pitch). The lower the value, the lower the pitch. + * - -1: Play the audio effect files in an indefinite loop until you call `stopEffect` or + * `stopAllEffects`. + * - 0: Play the audio effect once. + * - 1: Play the audio effect twice. + * @param pitch The pitch of the audio effect. The value ranges between 0.5 and 2.0. The default + * value is 1.0 (original pitch). The lower the value, the lower the pitch. * @param pan The spatial position of the audio effect. The value ranges between -1.0 and 1.0: - * - `-1.0`: The audio effect displays to the left. - * - `0.0`: The audio effect displays ahead. - * - `1.0`: The audio effect displays to the right. - * @param gain The volume of the audio effect. The value ranges between 0 and 100. - * The default value is `100` (original volume). The lower the value, the lower - * the volume of the audio effect. - * @param publish Sets whether to publish the audio effect to the remote: - * - true: Publish the audio effect to the remote. - * - false: (Default) Do not publish the audio effect to the remote. + * - -1.0: The audio effect shows on the left. 
+ * - 0: The audio effect shows ahead. + * - 1.0: The audio effect shows on the right. + * @param gain The volume of the audio effect. The value range is [0, 100]. The default value is 100 + * (original volume). The smaller the value, the lower the volume. + * @param publish Whether to publish the audio effect to the remote users: + * - `true`: Publish the audio effect to the remote users. Both the local user and remote users can + * hear the audio effect. + * - `false`: (Default) Do not publish the audio effect to the remote users. Only the local user can + * hear the audio effect. * * @return * - 0: Success. @@ -5325,86 +7525,119 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int playAllEffects(int loopCount, double pitch, double pan, int gain, bool publish = false) = 0; - /** Gets the volume of the specified audio effect. + /** + * @brief Gets the volume of a specified audio effect file. * - * @param soundId The ID of the audio effect. + * @param soundId The ID of the audio effect file. * * @return - * - ≥ 0: The volume of the specified audio effect. The value ranges - * between 0 and 100 (original volume). + * - ≥ 0: Returns the volume of the specified audio effect, if the method call is successful. The + * value ranges between 0 and 100. 100 represents the original volume. * - < 0: Failure. */ virtual int getVolumeOfEffect(int soundId) = 0; - /** Sets the volume of the specified audio effect. + /** + * @brief Sets the volume of a specified audio effect file. + * + * @details + * Call timing: Call this method after `playEffect`. * - * @param soundId The ID of the audio effect. - * @param volume The volume of the specified audio effect. The value ranges - * between 0 and 100 (original volume). + * @param soundId The ID of the audio effect. The unique ID of each audio effect file. + * @param volume The playback volume. The value range is [0, 100]. The default value is 100, which + * represents the original volume. * * @return * - 0: Success. 
* - < 0: Failure. */ virtual int setVolumeOfEffect(int soundId, int volume) = 0; - /** Pauses playing the specified audio effect. + /** + * @brief Pauses a specified audio effect file. * - * @param soundId The ID of the audio effect. + * @param soundId The audio effect ID. The ID of each audio effect file is unique. * * @return * - 0: Success. * - < 0: Failure. */ virtual int pauseEffect(int soundId) = 0; - /** Pauses playing audio effects. + /** + * @brief Pauses all audio effects. * * @return * - 0: Success. * - < 0: Failure. */ virtual int pauseAllEffects() = 0; - /** Resumes playing the specified audio effect. + /** + * @brief Resumes playing a specified audio effect. * - * @param soundId The ID of the audio effect. + * @param soundId The audio effect ID. The ID of each audio effect file is unique. * * @return * - 0: Success. * - < 0: Failure. */ virtual int resumeEffect(int soundId) = 0; - /** Resumes playing audio effects. + /** + * @brief Resumes playing all audio effect files. + * + * @details + * After you call `pauseAllEffects` to pause the playback, you can call this method to resume the + * playback. + * Call timing: Call this method after `pauseAllEffects`. * * @return * - 0: Success. * - < 0: Failure. */ virtual int resumeAllEffects() = 0; - /** Stops playing the specified audio effect. + /** + * @brief Stops playing a specified audio effect. * - * @param soundId The ID of the audio effect. + * @details + * When you no longer need to play the audio effect, you can call this method to stop the playback. + * If you only need to pause the playback, call `pauseEffect`. + * Call timing: Call this method after `playEffect`. + * + * @param soundId The ID of the audio effect. Each audio effect has a unique ID. * * @return * - 0: Success. * - < 0: Failure. */ virtual int stopEffect(int soundId) = 0; - /** Stops playing audio effects. + /** + * @brief Stops playing all audio effects. 
+ * + * @details + * When you no longer need to play the audio effect, you can call this method to stop the playback. + * If you only need to pause the playback, call `pauseAllEffects`. + * Call timing: Call this method after `playEffect`. * * @return * - 0: Success. * - < 0: Failure. */ virtual int stopAllEffects() = 0; - /** Releases the specified preloaded audio effect from the memory. + /** + * @brief Releases a specified preloaded audio effect from the memory. * - * @param soundId The ID of the audio effect. + * @details + * After loading the audio effect file into memory using `preloadEffect`, if you need to release the + * audio effect file, call this method. + * Call timing: You can call this method either before or after joining a channel. + * + * @param soundId The ID of the audio effect. Each audio effect has a unique ID. * * @return * - 0: Success. * - < 0: Failure. */ virtual int unloadEffect(int soundId) = 0; - /** Releases preloaded audio effects from the memory. + /** + * @brief Releases all preloaded audio effects from the memory. * * @return * - 0: Success. @@ -5412,146 +7645,176 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int unloadAllEffects() = 0; /** - * Gets the duration of the audio effect file. - * @note - * - Call this method after joining a channel. - * - For the audio file formats supported by this method, see [What formats of audio files does the Agora RTC SDK support](https://docs.agora.io/en/faq/audio_format). + * @brief Retrieves the duration of the audio effect file. * - * @param filePath The absolute path or URL address (including the filename extensions) - * of the music file. For example: `C:\music\audio.mp4`. - * When you access a local file on Android, Agora recommends passing a URI address or the path starts - * with `/assets/` in this parameter. + * @note Call this method after joining a channel. 
+ * + * @param filePath File path: + * - Android: The file path, which needs to be accurate to the file name and suffix. Agora supports + * URL addresses, absolute paths, or file paths that start with `/assets/`. You might encounter + * permission issues if you use an absolute path to access a local file, so Agora recommends using a + * URI address instead. For example: + * `content://com.android.providers.media.documents/document/audio%3A14441` + * - Windows: The absolute path or URL address (including the suffixes of the filename) of the audio + * effect file. For example: `C:\music\audio.mp4`. + * - iOS or macOS: The absolute path or URL address (including the suffixes of the filename) of the audio effect file. For example: `/var/mobile/Containers/Data/audio.mp4`. * * @return - * - ≥ 0: A successful method call. Returns the total duration (ms) of - * the specified audio effect file. + * - The total duration (ms) of the specified audio effect file, if the method call succeeds. * - < 0: Failure. - * - `-22(ERR_RESOURCE_LIMITED)`: Cannot find the audio effect file. Please - * set a correct `filePath`. */ virtual int getEffectDuration(const char* filePath) = 0; /** - * Sets the playback position of an audio effect file. + * @brief Sets the playback position of an audio effect file. + * + * @details * After a successful setting, the local audio effect file starts playing at the specified position. * - * @note Call this method after \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" . + * @note Call this method after `playEffect`. * - * @param soundId Audio effect ID. Ensure that this parameter is set to the - * same value as in \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" . + * @param soundId The audio effect ID. The ID of each audio effect file is unique. * @param pos The playback position (ms) of the audio effect file. * * @return * - 0: Success. * - < 0: Failure. 
- * - `-22(ERR_RESOURCE_LIMITED)`: Cannot find the audio effect file. Please - * set a correct `soundId`. */ virtual int setEffectPosition(int soundId, int pos) = 0; /** - * Gets the playback position of the audio effect file. - * @note Call this method after \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" . + * @brief Retrieves the playback position of the audio effect file. + * + * @note Call this method after `playEffect`. * - * @param soundId Audio effect ID. Ensure that this parameter is set to the - * same value as in \ref IRtcEngine::playEffect(int,const char*,int,double,double,int,bool,int) "playEffect" . + * @param soundId The audio effect ID. The ID of each audio effect file is unique. * * @return - * - ≥ 0: A successful method call. Returns the playback position (ms) of - * the specified audio effect file. + * - The playback position (ms) of the specified audio effect file, if the method call succeeds. * - < 0: Failure. - * - `-22(ERR_RESOURCE_LIMITED)`: Cannot find the audio effect file. Please - * set a correct `soundId`. */ virtual int getEffectCurrentPosition(int soundId) = 0; - /** Enables/Disables stereo panning for remote users. - - Ensure that you call this method before joinChannel to enable stereo panning for remote users so that the local user can track the position of a remote user by calling \ref agora::rtc::IRtcEngine::setRemoteVoicePosition "setRemoteVoicePosition". - - @param enabled Sets whether or not to enable stereo panning for remote users: - - true: enables stereo panning. - - false: disables stereo panning. - - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Enables or disables stereo panning for remote users. + * + * @details + * Ensure that you call this method before joining a channel to enable stereo panning for remote + * users so that the local user can track the position of a remote user by calling + * `setRemoteVoicePosition`. 
+ * + * @param enabled Whether to enable stereo panning for remote users: + * - `true`: Enable stereo panning. + * - `false`: Disable stereo panning. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int enableSoundPositionIndication(bool enabled) = 0; - /** Sets the sound position and gain of a remote user. - - When the local user calls this method to set the sound position of a remote user, the sound difference between the left and right channels allows the local user to track the real-time position of the remote user, creating a real sense of space. This method applies to massively multiplayer online games, such as Battle Royale games. - - @note - - For this method to work, enable stereo panning for remote users by calling the \ref agora::rtc::IRtcEngine::enableSoundPositionIndication "enableSoundPositionIndication" method before joining a channel. - - This method requires hardware support. For the best sound positioning, we recommend using a wired headset. - - Ensure that you call this method after joining a channel. - - @param uid The ID of the remote user. - @param pan The sound position of the remote user. The value ranges from -1.0 to 1.0: - - 0.0: the remote sound comes from the front. - - -1.0: the remote sound comes from the left. - - 1.0: the remote sound comes from the right. - @param gain Gain of the remote user. The value ranges from 0.0 to 100.0. The default value is 100.0 (the original gain of the remote user). The smaller the value, the less the gain. - - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the 2D position (the position on the horizontal plane) of the remote user's voice. + * + * @details + * This method sets the 2D position and volume of a remote user, so that the local user can easily + * hear and identify the remote user's position. 
+ * When the local user calls this method to set the voice position of a remote user, the voice + * difference between the left and right channels allows the local user to track the real-time + * position of the remote user, creating a sense of space. This method applies to massive + * multiplayer online games, such as Battle Royale games. + * + * @note + * - For this method to work, enable stereo panning for remote users by calling the + * `enableSoundPositionIndication` method before joining a channel. + * - For the best voice positioning, Agora recommends using a wired headset. + * - Call this method after joining a channel. + * + * @param uid The user ID of the remote user. + * @param pan The voice position of the remote user. The value ranges from -1.0 to 1.0: + * - 0.0: (Default) The remote voice comes from the front. + * - -1.0: The remote voice comes from the left. + * - 1.0: The remote voice comes from the right. + * @param gain The volume of the remote user. The value ranges from 0.0 to 100.0. The default value + * is 100.0 (the original volume of the remote user). The smaller the value, the lower the volume. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setRemoteVoicePosition(uid_t uid, double pan, double gain) = 0; - /** enable spatial audio - - @param enabled enable/disable spatial audio: - - true: enable spatial audio. - - false: disable spatial audio. - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Enables or disables the spatial audio effect. + * + * @details + * After enabling the spatial audio effect, you can call `setRemoteUserSpatialAudioParams` to set + * the spatial audio effect parameters of the remote user. + * + * @note + * - You can call this method either before or after joining a channel. + * - This method relies on the spatial audio dynamic library `libagora_spatial_audio_extension.dll`. + * If the dynamic library is deleted, the function cannot be enabled normally. 
+ * + * @param enabled Whether to enable the spatial audio effect: + * - `true`: Enable the spatial audio effect. + * - `false`: Disable the spatial audio effect. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int enableSpatialAudio(bool enabled) = 0; - /** Sets remote user parameters for spatial audio - - @param uid The ID of the remote user. - @param param spatial audio parameters: SpatialAudioParams. - - @return int - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the spatial audio effect parameters of the remote user. + * + * @details + * Call this method after `enableSpatialAudio`. After successfully setting the spatial audio effect + * parameters of the remote user, the local user can hear the remote user with a sense of space. + * + * @param uid The user ID. This parameter must be the same as the user ID passed in when the user + * joined the channel. + * @param params The spatial audio parameters. See `SpatialAudioParams`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setRemoteUserSpatialAudioParams(uid_t uid, const agora::SpatialAudioParams& params) = 0; - /** Sets an SDK preset voice beautifier effect. - * - * Call this method to set an SDK preset voice beautifier effect for the local user who sends an - * audio stream. After setting a voice beautifier effect, all users in the channel can hear the - * effect. - * - * You can set different voice beautifier effects for different scenarios. See *Set the Voice - * Beautifier and Audio Effects*. + /** + * @brief Sets a preset voice beautifier effect. * - * To achieve better audio effect quality, Agora recommends calling \ref - * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to - * `AUDIO_SCENARIO_GAME_STREAMING(3)` and the `profile` parameter to - * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before - * calling this method. 
+ * @details + * Call this method to set a preset voice beautifier effect for the local user who sends an audio + * stream. After setting a voice beautifier effect, all users in the channel can hear the effect. + * You can set different voice beautifier effects for different scenarios. + * Call timing: This method can be called either before or after joining the channel. + * To achieve better vocal effects, it is recommended that you call the following APIs before + * calling this method: + * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely + * `AUDIO_SCENARIO_GAME_STREAMING` (3). + * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5). * * @note - * - You can call this method either before or after joining a channel. - * - Do not set the `profile` parameter of \ref IRtcEngine::setAudioProfile "setAudioProfile" to - * `AUDIO_PROFILE_SPEECH_STANDARD(1)` or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call - * fails. - * - This method works best with the human voice. Agora does not recommend using this method for - * audio containing music. 
- * - After calling this method, Agora recommends not calling the following methods, because they - * can override \ref IRtcEngine::setAudioEffectParameters "setAudioEffectParameters": - * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset" - * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset" - * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch" - * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization" - * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb" - * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters" + * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to + * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take + * effect. + * - This method has the best effect on human voice processing, and Agora does not recommend calling + * this method to process audio data containing music. + * - After calling `setVoiceBeautifierPreset`, Agora does not recommend calling the following + * methods, otherwise the effect set by `setVoiceBeautifierPreset` will be overwritten: + * - `setAudioEffectPreset` + * - `setAudioEffectParameters` + * - `setLocalVoicePitch` + * - `setLocalVoiceEqualization` + * - `setLocalVoiceReverb` + * - `setVoiceBeautifierParameters` + * - `setVoiceConversionPreset` + * - This method relies on the voice beautifier dynamic library + * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. * - * @param preset The options for SDK preset voice beautifier effects: #VOICE_BEAUTIFIER_PRESET. + * @param preset The preset voice beautifier effect options: `VOICE_BEAUTIFIER_PRESET`. * * @return * - 0: Success. @@ -5559,38 +7822,41 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int setVoiceBeautifierPreset(VOICE_BEAUTIFIER_PRESET preset) = 0; - /** Sets an SDK preset audio effect. 
- * - * Call this method to set an SDK preset audio effect for the local user who sends an audio - * stream. This audio effect does not change the gender characteristics of the original voice. - * After setting an audio effect, all users in the channel can hear the effect. - * - * You can set different audio effects for different scenarios. See *Set the Voice Beautifier and - * Audio Effects*. + /** + * @brief Sets an SDK preset audio effect. * - * To achieve better audio effect quality, Agora recommends calling \ref - * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to - * `AUDIO_SCENARIO_GAME_STREAMING(3)` before calling this method. + * @details + * Call this method to set an SDK preset audio effect for the local user who sends an audio stream. + * This audio effect does not change the gender characteristics of the original voice. After setting + * an audio effect, all users in the channel can hear the effect. + * Call timing: This method can be called either before or after joining the channel. + * To achieve better vocal effects, it is recommended that you call the following APIs before + * calling this method: + * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely + * `AUDIO_SCENARIO_GAME_STREAMING` (3). + * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5). * * @note - * - You can call this method either before or after joining a channel. - * - Do not set the profile `parameter` of `setAudioProfile` to `AUDIO_PROFILE_SPEECH_STANDARD(1)` - * or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call fails. - * - This method works best with the human voice. Agora does not recommend using this method for - * audio containing music. 
- * - If you call this method and set the `preset` parameter to enumerators except - * `ROOM_ACOUSTICS_3D_VOICE` or `PITCH_CORRECTION`, do not call \ref - * IRtcEngine::setAudioEffectParameters "setAudioEffectParameters"; otherwise, - * `setAudioEffectParameters` overrides this method. - * - After calling this method, Agora recommends not calling the following methods, because they - * can override `setAudioEffectPreset`: - * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset" - * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch" - * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization" - * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb" - * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters" + * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to + * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take + * effect. + * - If you call `setAudioEffectPreset` and set enumerators except for `ROOM_ACOUSTICS_3D_VOICE` or + * `PITCH_CORRECTION`, do not call `setAudioEffectParameters`; otherwise, `setAudioEffectPreset` is + * overridden. + * - After calling `setAudioEffectPreset`, Agora does not recommend you to call the following + * methods, otherwise the effect set by `setAudioEffectPreset` will be overwritten: + * - `setVoiceBeautifierPreset` + * - `setLocalVoicePitch` + * - `setLocalVoiceEqualization` + * - `setLocalVoiceReverb` + * - `setVoiceBeautifierParameters` + * - `setVoiceConversionPreset` + * - This method relies on the voice beautifier dynamic library + * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. * - * @param preset The options for SDK preset audio effects. See #AUDIO_EFFECT_PRESET. + * @param preset The options for SDK preset audio effects. See `AUDIO_EFFECT_PRESET`. * * @return * - 0: Success. 
@@ -5598,37 +7864,43 @@ class IRtcEngine : public agora::base::IEngineBase {
   */
  virtual int setAudioEffectPreset(AUDIO_EFFECT_PRESET preset) = 0;

-  /** Sets an SDK preset voice conversion.
-   *
-   * Call this method to set an SDK preset voice conversion for the local user who sends an audio
-   * stream. After setting an voice conversion, all users in the channel can hear the effect.
-   *
-   * You can set different voice conversion for different scenarios. See *Set the Voice Beautifier and
-   * Audio Effects*.
+  /**
+   * @brief Sets a preset voice changing effect.
    *
-   * To achieve better voice conversion quality, Agora recommends calling \ref
-   * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to
-   * `AUDIO_SCENARIO_GAME_STREAMING(3)` before calling this method.
+   * @details
+   * Call this method to set a preset voice changing effect for the local user who publishes an audio
+   * stream in a channel. After setting the voice changing effect, all users in the channel can hear
+   * the effect. You can set different voice changing effects for the user depending on different
+   * scenarios.
+   * Call timing: This method can be called either before or after joining the channel.
+   * To achieve better vocal effects, it is recommended that you call the following APIs before
+   * calling this method:
+   * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely
+   * `AUDIO_SCENARIO_GAME_STREAMING` (3).
+   * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to
+   * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5).
    *
    * @note
-   * - You can call this method either before or after joining a channel.
-   * - Do not set the profile `parameter` of `setAudioProfile` to `AUDIO_PROFILE_SPEECH_STANDARD(1)`
-   * or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call fails.
-   * - This method works best with the human voice.
Agora does not recommend using this method for - * audio containing music. - * - If you call this method and set the `preset` parameter to enumerators, - * - After calling this method, Agora recommends not calling the following methods, because they - * can override `setVoiceConversionPreset`: - * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset" - * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset" - * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch" - * - \ref IRtcEngine::setLocalVoiceFormant "setLocalVoiceFormant" - * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization" - * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb" - * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters" - * - \ref IRtcEngine::setAudioEffectParameters "setAudioEffectParameters" + * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to + * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take + * effect. + * - This method has the best effect on human voice processing, and Agora does not recommend calling + * this method to process audio data containing music. + * - After calling `setVoiceConversionPreset`, Agora does not recommend you to call the following + * methods, otherwise the effect set by `setVoiceConversionPreset` will be overwritten: + * - `setAudioEffectPreset` + * - `setAudioEffectParameters` + * - `setVoiceBeautifierPreset` + * - `setVoiceBeautifierParameters` + * - `setLocalVoicePitch` + * - `setLocalVoiceFormant` + * - `setLocalVoiceEqualization` + * - `setLocalVoiceReverb` + * - This method relies on the voice beautifier dynamic library + * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. * - * @param preset The options for SDK preset voice conversion. See #VOICE_CONVERSION_PRESET. 
+   * @param preset The options for the preset voice changing effects: `VOICE_CONVERSION_PRESET`.
    *
    * @return
    * - 0: Success.
@@ -5636,76 +7908,75 @@ class IRtcEngine : public agora::base::IEngineBase {
   */
  virtual int setVoiceConversionPreset(VOICE_CONVERSION_PRESET preset) = 0;

-  /** Sets parameters for SDK preset audio effects.
+  /**
+   * @brief Sets parameters for SDK preset audio effects.
    *
-   * Call this method to set the following parameters for the local user who send an audio stream:
+   * @details
+   * Call this method to set the following parameters for the local user who sends an audio stream:
    * - 3D voice effect: Sets the cycle period of the 3D voice effect.
    * - Pitch correction effect: Sets the basic mode and tonic pitch of the pitch correction effect.
    * Different songs have different modes and tonic pitches. Agora recommends bounding this method
    * with interface elements to enable users to adjust the pitch correction interactively.
-   *
-   * After setting parameters, all users in the channel can hear the relevant effect.
-   *
-   * You can call this method directly or after \ref IRtcEngine::setAudioEffectPreset
-   * "setAudioEffectPreset". If you call this method after \ref IRtcEngine::setAudioEffectPreset
-   * "setAudioEffectPreset", ensure that you set the preset parameter of `setAudioEffectPreset` to
-   * `ROOM_ACOUSTICS_3D_VOICE` or `PITCH_CORRECTION` and then call this method to set the same
-   * enumerator; otherwise, this method overrides `setAudioEffectPreset`.
+   * After setting the audio parameters, all users in the channel can hear the effect.
+   * To achieve better vocal effects, it is recommended that you call the following APIs before
+   * calling this method:
+   * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely
+   * `AUDIO_SCENARIO_GAME_STREAMING` (3).
+ * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5). * * @note * - You can call this method either before or after joining a channel. - * - To achieve better audio effect quality, Agora recommends calling \ref - * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `scenario` parameter to - * `AUDIO_SCENARIO_GAME_STREAMING(3)` before calling this method. - * - Do not set the `profile` parameter of \ref IRtcEngine::setAudioProfile "setAudioProfile" to - * `AUDIO_PROFILE_SPEECH_STANDARD(1)` or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call - * fails. - * - This method works best with the human voice. Agora does not recommend using this method for - * audio containing music. - * - After calling this method, Agora recommends not calling the following methods, because they - * can override `setAudioEffectParameters`: - * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset" - * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset" - * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch" - * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization" - * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb" - * - \ref IRtcEngine::setVoiceBeautifierParameters "setVoiceBeautifierParameters" + * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to + * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take + * effect. + * - This method has the best effect on human voice processing, and Agora does not recommend calling + * this method to process audio data containing music. 
+ * - After calling `setAudioEffectParameters`, Agora does not recommend you to call the following + * methods, otherwise the effect set by `setAudioEffectParameters` will be overwritten: + * - `setAudioEffectPreset` + * - `setVoiceBeautifierPreset` + * - `setLocalVoicePitch` + * - `setLocalVoiceEqualization` + * - `setLocalVoiceReverb` + * - `setVoiceBeautifierParameters` + * - `setVoiceConversionPreset` + * - This method relies on the voice beautifier dynamic library + * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * * @param preset The options for SDK preset audio effects: - * - 3D voice effect: `ROOM_ACOUSTICS_3D_VOICE`. - * - Call \ref IRtcEngine::setAudioProfile "setAudioProfile" and set the `profile` parameter to - * `AUDIO_PROFILE_MUSIC_STANDARD_STEREO(3)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before + * - `ROOM_ACOUSTICS_3D_VOICE`, 3D voice effect: + * - You need to set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to + * `AUDIO_PROFILE_MUSIC_STANDARD_STEREO` (3) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5) before * setting this enumerator; otherwise, the enumerator setting does not take effect. - * - If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear + * - If the 3D voice effect is enabled, users need to use stereo audio playback devices to hear * the anticipated voice effect. - * - Pitch correction effect: `PITCH_CORRECTION`. To achieve better audio effect quality, Agora - * recommends calling \ref IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` - * parameter to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or - * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before setting this enumerator. - * @param param1 - * - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE`, the `param1` sets the cycle period of the - * 3D voice effect. The value range is [1,60] and the unit is a second. 
The default value is 10 - * seconds, indicating that the voice moves around you every 10 seconds. - * - If you set `preset` to `PITCH_CORRECTION`, `param1` sets the basic mode of the pitch + * - `PITCH_CORRECTION`, Pitch correction effect: + * @param param1 - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE`, `param1` sets the cycle period + * of the 3D voice effect. The value range is [1,60] and the unit is seconds. The default value is + * 10, indicating that the voice moves around you every 10 seconds. + * - If you set `preset` to `PITCH_CORRECTION`, `param1` indicates the basic mode of the pitch * correction effect: - * - `1`: (Default) Natural major scale. - * - `2`: Natural minor scale. - * - `3`: Japanese pentatonic scale. - * @param param2 - * - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE`, you need to set `param2` to `0`. - * - If you set `preset` to `PITCH_CORRECTION`, `param2` sets the tonic pitch of the pitch + * - `1`: (Default) Natural major scale. + * - `2`: Natural minor scale. + * - `3`: Japanese pentatonic scale. + * @param param2 - If you set `preset` to `ROOM_ACOUSTICS_3D_VOICE` , you need to set `param2` to + * `0`. + * - If you set `preset` to `PITCH_CORRECTION`, `param2` indicates the tonic pitch of the pitch * correction effect: - * - `1`: A - * - `2`: A# - * - `3`: B - * - `4`: (Default) C - * - `5`: C# - * - `6`: D - * - `7`: D# - * - `8`: E - * - `9`: F - * - `10`: F# - * - `11`: G - * - `12`: G# + * - `1`: A + * - `2`: A# + * - `3`: B + * - `4`: (Default) C + * - `5`: C# + * - `6`: D + * - `7`: D# + * - `8`: E + * - `9`: F + * - `10`: F# + * - `11`: G + * - `12`: G# * * @return * - 0: Success. @@ -5713,40 +7984,46 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int setAudioEffectParameters(AUDIO_EFFECT_PRESET preset, int param1, int param2) = 0; - /** Sets parameters for SDK preset voice beautifier effects. + /** + * @brief Sets parameters for the preset voice beautifier effects. 
* + * @details * Call this method to set a gender characteristic and a reverberation effect for the singing * beautifier effect. This method sets parameters for the local user who sends an audio stream. - * - * After you call this method successfully, all users in the channel can hear the relevant effect. - * - * To achieve better audio effect quality, before you call this method, Agora recommends calling - * \ref IRtcEngine::setAudioProfile "setAudioProfile", and setting the `scenario` parameter as - * `AUDIO_SCENARIO_GAME_STREAMING(3)` and the `profile` parameter as - * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)`. + * After setting the audio parameters, all users in the channel can hear the effect. + * To achieve better vocal effects, it is recommended that you call the following APIs before + * calling this method: + * - Call `setAudioScenario` to set the audio scenario to high-quality audio scenario, namely + * `AUDIO_SCENARIO_GAME_STREAMING` (3). + * - Call `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to set the `profile` parameter to + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` (4) or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` (5). * * @note * - You can call this method either before or after joining a channel. - * - Do not set the `profile` parameter of \ref IRtcEngine::setAudioProfile "setAudioProfile" as - * `AUDIO_PROFILE_SPEECH_STANDARD(1)` or `AUDIO_PROFILE_IOT(6)`; otherwise, this method call does - * not take effect. - * - This method works best with the human voice. Agora does not recommend using this method for - * audio containing music. 
- * - After you call this method, Agora recommends not calling the following methods, because they - * can override `setVoiceBeautifierParameters`: - * - \ref IRtcEngine::setAudioEffectPreset "setAudioEffectPreset" - * - \ref IRtcEngine::setAudioEffectParameters "setAudioEffectParameters" - * - \ref IRtcEngine::setVoiceBeautifierPreset "setVoiceBeautifierPreset" - * - \ref IRtcEngine::setLocalVoicePitch "setLocalVoicePitch" - * - \ref IRtcEngine::setLocalVoiceEqualization "setLocalVoiceEqualization" - * - \ref IRtcEngine::setLocalVoiceReverb "setLocalVoiceReverb" - * - * @param preset The options for SDK preset voice beautifier effects: - * - `SINGING_BEAUTIFIER`: Singing beautifier effect. + * - Do not set the `profile` parameter in `setAudioProfile(AUDIO_PROFILE_TYPE profile)` to + * `AUDIO_PROFILE_SPEECH_STANDARD` (1) or `AUDIO_PROFILE_IOT` (6), or the method does not take + * effect. + * - This method has the best effect on human voice processing, and Agora does not recommend calling + * this method to process audio data containing music. + * - After calling `setVoiceBeautifierParameters`, Agora does not recommend calling the following + * methods, otherwise the effect set by `setVoiceBeautifierParameters` will be overwritten: + * - `setAudioEffectPreset` + * - `setAudioEffectParameters` + * - `setVoiceBeautifierPreset` + * - `setLocalVoicePitch` + * - `setLocalVoiceEqualization` + * - `setLocalVoiceReverb` + * - `setVoiceConversionPreset` + * - This method relies on the voice beautifier dynamic library + * `libagora_audio_beauty_extension.dll`. If the dynamic library is deleted, the function cannot be + * enabled normally. + * + * @param preset The option for the preset audio effect: + * - `SINGING_BEAUTIFIER`: The singing beautifier effect. * @param param1 The gender characteristics options for the singing voice: * - `1`: A male-sounding voice. * - `2`: A female-sounding voice. 
- * @param param2 The reverberation effects options: + * @param param2 The reverberation effect options for the singing voice: * - `1`: The reverberation effect sounds like singing in a small room. * - `2`: The reverberation effect sounds like singing in a large room. * - `3`: The reverberation effect sounds like singing in a hall. @@ -5773,99 +8050,149 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setVoiceConversionParameters(VOICE_CONVERSION_PRESET preset, int param1, int param2) = 0; - /** Changes the voice pitch of the local speaker. - - @param pitch The voice pitch. The value ranges between 0.5 and 2.0. The lower - the value, the lower the voice pitch. The default value is 1.0 (no change to - the local voice pitch). - - @return - - 0: Success. - - -1: Failure. - */ + /** + * @brief Changes the voice pitch of the local speaker. + * + * @details + * Call timing: This method can be called either before or after joining the channel. + * + * @param pitch The local voice pitch. The value range is [0.5,2.0]. The lower the value, the lower + * the pitch. The default value is 1.0 (no change to the pitch). + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int setLocalVoicePitch(double pitch) = 0; - /** Changes the voice formant ratio for local speaker. - - @param formantRatio The voice formant ratio. The value ranges between -1.0 and 1.0. - The lower the value, the deeper the sound, and the higher the value, the more it - sounds like a child. The default value is 0.0 (the local user's voice will not be changed). - - @return - - 0: Success. - - -1: Failure. - */ + /** + * @brief Sets the formant ratio to change the timbre of human voice. + * + * @details + * Formant ratio affects the timbre of voice. The smaller the value, the deeper the sound will be, + * and the larger, the sharper. After you set the formant ratio, all users in the channel can hear + * the changed voice. 
If you want to change the timbre and pitch of voice at the same time, Agora + * recommends using this method together with `setLocalVoicePitch`. + * Applicable scenarios: You can call this method to set the formant ratio of local audio to change + * the timbre of human voice. + * Call timing: This method can be called either before or after joining the channel. + * + * @param formantRatio The formant ratio. The value range is [-1.0, 1.0]. The default value is 0.0, + * which means do not change the timbre of the voice.Note: Agora recommends setting this value + * within the range of [-0.4, 0.6]. Otherwise, the voice may be seriously distorted. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int setLocalVoiceFormant(double formantRatio) = 0; - /** Sets the local voice equalization effect. - - @param bandFrequency The band frequency ranging from 0 to 9, representing the - respective 10-band center frequencies of the voice effects, including 31, 62, - 125, 500, 1k, 2k, 4k, 8k, and 16k Hz. - @param bandGain Gain of each band in dB. The value ranges from -15 to 15. The - default value is 0. - @return - - 0: Success. - - -1: Failure. - */ + /** + * @brief Sets the local voice equalization effect. + * + * @details + * Call timing: This method can be called either before or after joining the channel. + * + * @param bandFrequency The band frequency. The value ranges between 0 and 9; representing the + * respective 10-band center frequencies of the voice effects, including 31, 62, 125, 250, 500, 1k, + * 2k, 4k, 8k, and 16k Hz. See `AUDIO_EQUALIZATION_BAND_FREQUENCY`. + * @param bandGain The gain of each band in dB. The value ranges between -15 and 15. The default + * value is 0. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int setLocalVoiceEqualization(AUDIO_EQUALIZATION_BAND_FREQUENCY bandFrequency, int bandGain) = 0; - /** Sets the local voice reverberation. - - @param reverbKey The reverberation key: #AUDIO_REVERB_TYPE. 
-   @param value The value of the reverberation key: #AUDIO_REVERB_TYPE.
-   @return
-   - 0: Success.
-   - -1: Failure.
-   */
+  /**
+   * @brief Sets the local voice reverberation.
+   *
+   * @details
+   * The SDK provides an easier-to-use method, `setAudioEffectPreset`, to directly implement preset
+   * reverb effects such as pop, R&B, and KTV.
+   *
+   * @note You can call this method either before or after joining a channel.
+   *
+   * @param reverbKey The reverberation key. Agora provides five reverberation keys, see
+   * `AUDIO_REVERB_TYPE`.
+   * @param value The value of the reverberation key.
+   *
+   * @return
+   * - 0: Success.
+   * - < 0: Failure.
+   */
  virtual int setLocalVoiceReverb(AUDIO_REVERB_TYPE reverbKey, int value) = 0;

-  /** Sets preset audio playback effect for remote headphones after remote audio is mixed.
-
-   @param preset The preset key: #HEADPHONE_EQUALIZER_PRESET.
-   - HEADPHONE_EQUALIZER_OFF = 0x00000000 : Turn off the eualizer effect for headphones.
-   - HEADPHONE_EQUALIZER_OVEREAR = 0x04000001 : For over-ear headphones only.
-   - HEADPHONE_EQUALIZER_INEAR = 0x04000002 : For in-ear headphones only.
-   @return
-   - 0: Success.
-   - < 0: Failure.
-   - -1(ERR_FAILED): A general error occurs (no specified reason).
-   */
+  /**
+   * @brief Sets the preset headphone equalization effect.
+   *
+   * @details
+   * This method is mainly used in spatial audio effect scenarios. You can select the preset headphone
+   * equalizer to listen to the audio to achieve the expected audio experience.
+   *
+   * @note If the headphones you use already have a good equalization effect, you may not get a
+   * significant improvement when you call this method, and could even diminish the experience.
+   *
+   * @param preset The preset headphone equalization effect. See `HEADPHONE_EQUALIZER_PRESET`.
+   *
+   * @return
+   * - 0: Success.
+   * - < 0: Failure.
+   * - -1: A general error occurs (no specified reason).
+ */ virtual int setHeadphoneEQPreset(HEADPHONE_EQUALIZER_PRESET preset) = 0; - /** Sets the parameters of audio playback effect for remote headphones after remote audio is mixed. - - @param lowGain The higher the parameter value, the deeper the sound. The value range is [-10,10]. - @param highGain The higher the parameter value, the sharper the sound. The value range is [-10,10]. - @return - - 0: Success. - - < 0: Failure. - - -1(ERR_FAILED): A general error occurs (no specified reason). - */ + /** + * @brief Sets the low- and high-frequency parameters of the headphone equalizer. + * + * @details + * In a spatial audio effect scenario, if the preset headphone equalization effect is not achieved + * after calling the `setHeadphoneEQPreset` method, you can further adjust the headphone + * equalization effect by calling this method. + * + * @param lowGain The low-frequency parameters of the headphone equalizer. The value range is + * [-10,10]. The larger the value, the deeper the sound. + * @param highGain The high-frequency parameters of the headphone equalizer. The value range is + * [-10,10]. The larger the value, the sharper the sound. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -1: A general error occurs (no specified reason). + */ virtual int setHeadphoneEQParameters(int lowGain, int highGain) = 0; - /** Enables or disables the voice AI tuner. + /** + * @brief Enables or disables the voice AI tuner. + * + * @details + * The voice AI tuner supports enhancing sound quality and adjusting tone style. + * Applicable scenarios: Social entertainment scenes including online KTV, online podcast and live + * streaming in showrooms, where high sound quality is required. + * Call timing: This method can be called either before or after joining the channel. * - * @param enabled Determines whether to enable the voice AI tuner: - * - true: Enable the voice AI tuner - * - false: (default) Disable the voice AI tuner. 
+ * @param enabled Whether to enable the voice AI tuner: + * - `true`: Enables the voice AI tuner. + * - `false`: (Default) Disable the voice AI tuner. + * @param type Voice AI tuner sound types, see `VOICE_AI_TUNER_TYPE`. * - * @param type. The options for SDK voice AI tuner types. See #VOICE_AI_TUNER_TYPE. * @return * - 0: Success. * - < 0: Failure. */ virtual int enableVoiceAITuner(bool enabled, VOICE_AI_TUNER_TYPE type) = 0; - /** **DEPRECATED** Specifies an SDK output log file. + /** + * @brief Sets the log file. * - * The log file records all log data for the SDK’s operation. Ensure that the - * directory for the log file exists and is writable. + * @details + * Specifies an SDK output log file. The log file records all log data for the SDK’s operation. + * Call timing: This method needs to be called immediately after `initialize`, otherwise the output + * log may be incomplete. * - * @note - * Ensure that you call this method immediately after \ref initialize "initialize", - * or the output log may not be complete. + * @note Ensure that the directory for the log file exists and is writable. * - * @param filePath File path of the log file. The string of the log file is in UTF-8. + * @param filePath The complete path of the log files. These log files are encoded in UTF-8. * * @return * - 0: Success. @@ -5874,22 +8201,17 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setLogFile(const char* filePath) = 0; /** - * Sets the output log filter level of the SDK. + * @brief Sets the log output level of the SDK. * - * You can use one or a combination of the filters. The log filter level follows the - * sequence of `OFF`, `CRITICAL`, `ERROR`, `WARNING`, `INFO`, and `DEBUG`. Choose a filter level - * and you will see logs preceding that filter level. For example, if you set the log filter level to - * `WARNING`, you see the logs within levels `CRITICAL`, `ERROR`, and `WARNING`. + * @details + * This method sets the output log level of the SDK. 
You can use one or a combination of the log + * filter levels. The log level follows the sequence of `LOG_FILTER_OFF`, `LOG_FILTER_CRITICAL`, + * `LOG_FILTER_ERROR`, `LOG_FILTER_WARN`, `LOG_FILTER_INFO`, and `LOG_FILTER_DEBUG`. Choose a level + * to see the logs preceding that level. + * If, for example, you set the log level to `LOG_FILTER_WARN`, you see the logs within levels + * `LOG_FILTER_CRITICAL`, `LOG_FILTER_ERROR` and `LOG_FILTER_WARN`. * - * @param filter The log filter level: - * - `LOG_FILTER_DEBUG(0x80f)`: Output all API logs. Set your log filter as DEBUG - * if you want to get the most complete log file. - * - `LOG_FILTER_INFO(0x0f)`: Output logs of the CRITICAL, ERROR, WARNING, and INFO - * level. We recommend setting your log filter as this level. - * - `LOG_FILTER_WARNING(0x0e)`: Output logs of the CRITICAL, ERROR, and WARNING level. - * - `LOG_FILTER_ERROR(0x0c)`: Output logs of the CRITICAL and ERROR level. - * - `LOG_FILTER_CRITICAL(0x08)`: Output logs of the CRITICAL level. - * - `LOG_FILTER_OFF(0)`: Do not output any log. + * @param filter The output log level of the SDK. See `LOG_FILTER_TYPE`. * * @return * - 0: Success. @@ -5898,16 +8220,12 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setLogFilter(unsigned int filter) = 0; /** - * Sets the output log level of the SDK. + * @brief Sets the output log level of the SDK. * - * You can set the SDK to ouput the log files of the specified level. + * @details + * Choose a level to see the logs preceding that level. * - * @param level The log level: - * - `LOG_LEVEL_NONE (0x0000)`: Do not output any log file. - * - `LOG_LEVEL_INFO (0x0001)`: (Recommended) Output log files of the INFO level. - * - `LOG_LEVEL_WARN (0x0002)`: Output log files of the WARN level. - * - `LOG_LEVEL_ERROR (0x0004)`: Output log files of the ERROR level. - * - `LOG_LEVEL_FATAL (0x0008)`: Output log files of the FATAL level. + * @param level The log level. See `LOG_LEVEL`. * * @return * - 0: Success. 
@@ -5916,15 +8234,34 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setLogLevel(commons::LOG_LEVEL level) = 0; /** - * Sets the log file size (KB). + * @brief Sets the log file size. * - * The SDK has two log files, each with a default size of 512 KB. If you set - * `fileSizeInBytes` as 1024 KB, the SDK outputs log files with a total - * maximum size of 2 MB. - * If the total size of the log files exceed the set value, - * the new output log files overwrite the old output log files. + * @details + * By default, the SDK generates five SDK log files and five API call log files with the following + * rules: + * - The SDK log files are: `agorasdk.log`, `agorasdk.1.log`, `agorasdk.2.log`, `agorasdk.3.log`, + * and `agorasdk.4.log`. + * - The API call log files are: `agoraapi.log`, `agoraapi.1.log`, `agoraapi.2.log`, + * `agoraapi.3.log`, and `agoraapi.4.log`. + * - The default size of each SDK log file and API log file is 2,048 KB. These log files are encoded + * in UTF-8. + * - The SDK writes the latest logs in `agorasdk.log` or `agoraapi.log`. + * - When `agorasdk.log` is full, the SDK processes the log files in the following order: 1. Delete + * the `agorasdk.4.log` file (if any). + * 2. Rename `agorasdk.3.log` to `agorasdk.4.log`. + * 3. Rename `agorasdk.2.log` to `agorasdk.3.log`. + * 4. Rename `agorasdk.1.log` to `agorasdk.2.log`. + * 5. Create a new `agorasdk.log` file. + * - The overwrite rules for the `agoraapi.log` file are the same as for `agorasdk.log`. + * + * @note This method is used to set the size of the `agorasdk.log` file only and does not affect the + * `agoraapi.log` file. + * + * @param fileSizeInKBytes The size (KB) of an `agorasdk.log` file. The value range is [128,20480]. + * The default value is 2,048 KB. If you set `fileSizeInKBytes` smaller than 128 KB, the SDK + * automatically adjusts it to 128 KB; if you set `fileSizeInKBytes` greater than 20,480 KB, the SDK + * automatically adjusts it to 20,480 KB. 
* - * @param fileSizeInKBytes The SDK log file size (KB). * @return * - 0: Success. * - < 0: Failure. @@ -5959,18 +8296,25 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int writeLog(commons::LOG_LEVEL level, const char* fmt, ...) = 0; /** - * Updates the display mode of the local video view. + * @brief Updates the display mode of the local video view. * - * After initializing the local video view, you can call this method to update its rendering mode. - * It affects only the video view that the local user sees, not the published local video stream. + * @details + * After initializing the local video view, you can call this method to update its rendering and + * mirror modes. It affects only the video view that the local user sees and does not impact the + * publishing of the local video. + * Call timing: - Ensure that you have called the `setupLocalVideo` method to initialize the local + * video view before calling this method. + * - During a call, you can call this method as many times as necessary to update the display mode + * of the local video view. * - * @note - * - Ensure that you have called \ref setupLocalVideo "setupLocalVideo" to initialize the local video - * view before this method. - * - During a call, you can call this method as many times as necessary to update the local video view. + * @note This method only takes effect on the primary camera `(PRIMARY_CAMERA_SOURCE)`. In scenarios + * involving custom video capture or the use of alternative video sources, you need to use + * `setupLocalVideo` instead of this method. * - * @param renderMode Sets the local display mode. See #RENDER_MODE_TYPE. - * @param mirrorMode Sets the local mirror mode. See #VIDEO_MIRROR_MODE_TYPE. + * @param renderMode The local video display mode. See `RENDER_MODE_TYPE`. + * @param mirrorMode The mirror mode of the local video view. See `VIDEO_MIRROR_MODE_TYPE`. 
+ * Attention: If you use a front camera, the SDK enables the mirror mode by default; if you use a + * rear camera, the SDK disables the mirror mode by default. * * @return * - 0: Success. @@ -5979,20 +8323,21 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode) = 0; /** - * Updates the display mode of the video view of a remote user. + * @brief Updates the display mode of the video view of a remote user. * + * @details * After initializing the video view of a remote user, you can call this method to update its * rendering and mirror modes. This method affects only the video view that the local user sees. * * @note - * - Ensure that you have called \ref setupRemoteVideo "setupRemoteVideo" to initialize the remote video - * view before calling this method. + * - Call this method after initializing the remote view by calling the `setupRemoteVideo` method. * - During a call, you can call this method as many times as necessary to update the display mode * of the video view of a remote user. * - * @param uid ID of the remote user. - * @param renderMode Sets the remote display mode. See #RENDER_MODE_TYPE. - * @param mirrorMode Sets the mirror type. See #VIDEO_MIRROR_MODE_TYPE. + * @param uid The user ID of the remote user. + * @param renderMode The rendering mode of the remote user view. For details, see + * `RENDER_MODE_TYPE`. + * @param mirrorMode The mirror mode of the remote user view. See `VIDEO_MIRROR_MODE_TYPE`. * * @return * - 0: Success. @@ -6000,7 +8345,47 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int setRemoteRenderMode(uid_t uid, media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode) = 0; - + /** + * @brief Sets the maximum frame rate for rendering local video. 
+ * + * @details + * Applicable scenarios: In scenarios where the requirements for video rendering frame rate are not + * high (such as screen sharing or online education), you can call this method to set the maximum + * frame rate for local video rendering. The SDK will attempt to keep the actual frame rate of local + * rendering close to this value, which helps to reduce CPU consumption and improve system + * performance. + * Call timing: You can call this method either before or after joining a channel. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. + * @param targetFps The capture frame rate (fps) of the local video. Supported values are: 1, 7, 10, + * 15, 24, 30, 60. CAUTION: Set this parameter to a value lower than the actual video frame rate; + * otherwise, the settings do not take effect. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setLocalRenderTargetFps(VIDEO_SOURCE_TYPE sourceType, int targetFps) = 0; + /** + * @brief Sets the maximum frame rate for rendering remote video. + * + * @details + * Applicable scenarios: In scenarios where the video rendering frame rate is not critical (e.g., + * screen sharing, online education) or when the remote users are using mid-to-low-end devices, you + * can call this method to set the maximum frame rate for video rendering on the remote client. The + * SDK will attempt to render the actual frame rate as close as possible to this value, which helps + * to reduce CPU consumption and improve system performance. + * Call timing: You can call this method either before or after joining a channel. + * + * @param targetFps The maximum frame rate (fps) for rendering the remote video. Supported values are: 1, 7, 10, + * 15, 24, 30, 60. CAUTION: Set this parameter to a value lower than the actual video frame rate; + * otherwise, the settings do not take effect. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ + virtual int setRemoteRenderTargetFps(int targetFps) = 0; // The following APIs are either deprecated and going to deleted. /** @@ -6023,11 +8408,9 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setLocalRenderMode(media::base::RENDER_MODE_TYPE renderMode) __deprecated = 0; /** - * Sets the local video mirror mode. + * @brief Sets the local video mirror mode. * - * Use this method before calling the \ref startPreview "startPreview" method, or the mirror mode - * does not take effect until you call the `startPreview` method again. - * @param mirrorMode Sets the local video mirror mode. See #VIDEO_MIRROR_MODE_TYPE. + * @param mirrorMode The local video mirror mode. See `VIDEO_MIRROR_MODE_TYPE`. * * @return * - 0: Success. @@ -6036,15 +8419,26 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setLocalVideoMirrorMode(VIDEO_MIRROR_MODE_TYPE mirrorMode) __deprecated = 0; /** - * Enables or disables the dual video stream mode. + * @brief Enables or disables dual-stream mode on the sender side. * - * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream - * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream) - * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType". + * @details + * Dual streams are a pairing of a high-quality video stream and a low-quality video stream: + * - High-quality video stream: High bitrate, high resolution. + * - Low-quality video stream: Low bitrate, low resolution. + * After you enable dual-stream mode, you can call `setRemoteVideoStreamType` to choose to receive + * either the high-quality video stream or the low-quality video stream on the subscriber side. + * + * @note + * - This method is applicable to all types of streams from the sender, including but not limited to + * video streams collected from cameras, screen sharing streams, and custom-collected video streams. 
+ * - If you need to enable dual video streams in a multi-channel scenario, you can call the + * `enableDualStreamModeEx` method. + * - You can call this method either before or after joining a channel. + * + * @param enabled Whether to enable dual-stream mode: + * - `true`: Enable dual-stream mode. + * - `false`: (Default) Disable dual-stream mode. * - * @param enabled - * - true: Enable the dual-stream mode. - * - false: (default) Disable the dual-stream mode. * @return * - 0: Success. * - < 0: Failure. @@ -6052,17 +8446,30 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableDualStreamMode(bool enabled) __deprecated = 0; /** - * Enables or disables the dual video stream mode. + * @brief Sets the dual-stream mode on the sender side and the low-quality video stream. * - * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream - * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream) - * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType". + * @details + * You can call this method to enable or disable the dual-stream mode on the publisher side. Dual + * streams are a pairing of a high-quality video stream and a low-quality video stream: + * - High-quality video stream: High bitrate, high resolution. + * - Low-quality video stream: Low bitrate, low resolution. + * After you enable dual-stream mode, you can call `setRemoteVideoStreamType` to choose to receive + * either the high-quality video stream or the low-quality video stream on the subscriber side. + * + * @note + * - This method is applicable to all types of streams from the sender, including but not limited to + * video streams collected from cameras, screen sharing streams, and custom-collected video streams. + * - If you need to enable dual video streams in a multi-channel scenario, you can call the + * `enableDualStreamModeEx` method. 
+ * - You can call this method either before or after joining a channel. + * + * @param enabled Whether to enable dual-stream mode: + * - `true`: Enable dual-stream mode. + * - `false`: (Default) Disable dual-stream mode. + * @param streamConfig The configuration of the low-quality video stream. See + * `SimulcastStreamConfig`.Note: When setting `mode` to `DISABLE_SIMULCAST_STREAM`, setting + * `streamConfig` will not take effect. * - * @param enabled - * - true: Enable the dual-stream mode. - * - false: (default) Disable the dual-stream mode. - * @param streamConfig - * - The minor stream config * @return * - 0: Success. * - < 0: Failure. @@ -6071,14 +8478,32 @@ class IRtcEngine : public agora::base::IEngineBase { /** - * Enables, disables or auto enable the dual video stream mode. + * @brief Sets the dual-stream mode on the sender side. * - * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream - * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream) - * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType". + * @details + * The SDK defaults to enabling low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) + * on the sender side, which means the sender does not actively send low-quality video stream. The + * receiving end with the role of the **host** can initiate a low-quality video stream request by + * calling `setRemoteVideoStreamType`, and upon receiving the request, the sending end automatically + * starts sending low-quality stream. + * - If you want to modify this behavior, you can call this method and set `mode` to + * `DISABLE_SIMULCAST_STREAM` (never send low-quality video streams) or `ENABLE_SIMULCAST_STREAM` + * (always send low-quality video streams). + * - If you want to restore the default behavior after making changes, you can call this method + * again with `mode` set to `AUTO_SIMULCAST_STREAM`. 
+ * + * @note + * The difference and connection between this method and `enableDualStreamMode(bool enabled)` is as + * follows: + * - When calling this method and setting `mode` to `DISABLE_SIMULCAST_STREAM`, it has the same + * effect as `enableDualStreamMode(bool enabled)` `(false)`. + * - When calling this method and setting `mode` to `ENABLE_SIMULCAST_STREAM`, it has the same + * effect as `enableDualStreamMode(bool enabled)` `(true)`. + * - Both methods can be called before and after joining a channel. If both methods are used, the + * settings in the method called later takes precedence. + * + * @param mode The mode in which the video stream is sent. See `SIMULCAST_STREAM_MODE`. * - * @param mode - * - The dual stream mode * @return * - 0: Success. * - < 0: Failure. @@ -6086,31 +8511,61 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setDualStreamMode(SIMULCAST_STREAM_MODE mode) = 0; /** - * Sets the multi-layer video stream configuration. + * @brief Sets the simulcast video stream configuration. + * + * @since v4.6.0 * - * If multi-layer is configured, the subscriber can choose to receive the coresponding layer - * of video stream using {@link setRemoteVideoStreamType setRemoteVideoStreamType}. + * @details + * You can call this method to set video streams with different resolutions for the same video + * source. The subscribers can call `setRemoteVideoStreamType` to select which stream layer to + * receive. The broadcaster can publish up to four layers of video streams: one main stream (highest + * resolution) and three additional streams of different quality levels. * - * @param simulcastConfig - * - The configuration for multi-layer video stream. It includes seven layers, ranging from - * STREAM_LAYER_1 to STREAM_LOW. A maximum of 3 layers can be enabled simultaneously. + * @param simulcastConfig This configuration includes seven layers, from STREAM_LAYER_1 to + * STREAM_LOW, with a maximum of three layers enabled simultaneously. 
See `SimulcastConfig`. * * @return * - 0: Success. * - < 0: Failure. - * @technical preview */ virtual int setSimulcastConfig(const SimulcastConfig& simulcastConfig) = 0; /** - * Enables, disables or auto enable the dual video stream mode. + * @brief Sets dual-stream mode configuration on the sender side. + * + * @details + * The SDK defaults to enabling low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) + * on the sender side, which means the sender does not actively send low-quality video stream. The + * receiving end with the role of the **host** can initiate a low-quality video stream request by + * calling `setRemoteVideoStreamType`, and upon receiving the request, the sending end automatically + * starts sending low-quality stream. + * - If you want to modify this behavior, you can call this method and set `mode` to + * `DISABLE_SIMULCAST_STREAM` (never send low-quality video streams) or `ENABLE_SIMULCAST_STREAM` + * (always send low-quality video streams). + * - If you want to restore the default behavior after making changes, you can call this method + * again with `mode` set to `AUTO_SIMULCAST_STREAM`. + * The difference between this method and `setDualStreamMode(SIMULCAST_STREAM_MODE mode)` is that + * this method can also + * configure the low-quality video stream, and the SDK sends the stream according to the + * configuration in `streamConfig`. + * + * @note + * The difference and connection between this method and `enableDualStreamMode(bool enabled, const + * SimulcastStreamConfig& streamConfig)` is as follows: + * - When calling this method and setting `mode` to `DISABLE_SIMULCAST_STREAM`, it has the same + * effect as calling `enableDualStreamMode(bool enabled, const SimulcastStreamConfig& streamConfig)` + * and setting `enabled` to `false`. 
+ * - When calling this method and setting `mode` to `ENABLE_SIMULCAST_STREAM`, it has the same + * effect as calling `enableDualStreamMode(bool enabled, const SimulcastStreamConfig& streamConfig)` + * and setting `enabled` to `true`. + * - Both methods can be called before and after joining a channel. If both methods are used, the + * settings in the method called later takes precedence. * - * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream - * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video stream) - * video using \ref setRemoteVideoStreamType "setRemoteVideoStreamType". + * @param mode The mode in which the video stream is sent. See `SIMULCAST_STREAM_MODE`. + * @param streamConfig The configuration of the low-quality video stream. See + * `SimulcastStreamConfig`.Note: When setting `mode` to `DISABLE_SIMULCAST_STREAM`, setting + * `streamConfig` will not take effect. * - * @param mode Dual stream mode: #SIMULCAST_STREAM_MODE. - * @param streamConfig Configurations of the low stream: SimulcastStreamConfig. * @return * - 0: Success. * - < 0: Failure. @@ -6118,15 +8573,23 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig) = 0; /** - * Sets the external audio track. + * @brief Sets whether to enable the local playback of external audio source. * - * @note - * Ensure that you call this method before joining the channel. + * @details + * After calling this method to enable the local playback of external audio source, if you need to + * stop local playback, you can call this method again and set `enabled` to `false`. + * You can call `adjustCustomAudioPlayoutVolume` to adjust the local playback volume of the custom + * audio track. + * + * @note Ensure you have called the `createCustomAudioTrack` method to create a custom audio track + * before calling this method. 
+ * + * @param trackId The audio track ID. Set this parameter to the custom audio track ID returned in + * `createCustomAudioTrack`. + * @param enabled Whether to play the external audio source: + * - `true`: Play the external audio source. + * - `false`: (Default) Do not play the external source. * - * @param trackId custom audio track id. - * @param enabled Determines whether to local playback the external audio track: - * - true: Local playback the external audio track. - * - false: Local don`t playback the external audio track. * @return * - 0: Success. * - < 0: Failure. @@ -6134,19 +8597,23 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableCustomAudioLocalPlayback(track_id_t trackId, bool enabled) = 0; /** - * Sets the audio recording format for the - * \ref agora::media::IAudioFrameObserver::onRecordAudioFrame "onRecordAudioFrame" callback. + * @brief Sets the format of the captured raw audio data. * - * @param sampleRate The sample rate (Hz) of the audio data returned in the `onRecordAudioFrame` callback, which can set be - * as 8000, 16000, 32000, 44100, or 48000. - * @param channel The number of audio channels of the audio data returned in the `onRecordAudioFrame` callback, which can - * be set as 1 or 2: + * @details + * The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and + * `channel` parameters set in this method.Sample interval (sec) = `samplePerCall` /( `sampleRate` × + * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the + * `onRecordAudioFrame` callback according to the sampling interval. + * Call timing: Call this method before joining a channel. + * + * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, + * 32000, 44100, or 48000 Hz. + * @param channel The number of audio channels. You can set the value as 1 or 2. * - 1: Mono. * - 2: Stereo. - * @param mode This mode is deprecated. - * @param samplesPerCall not support. 
Sampling points in the called data returned in - * onRecordAudioFrame(). For example, it is usually set as 1024 for stream - * pushing. + * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`. + * @param samplesPerCall The number of data samples, such as 1024 for the Media Push. + * * @return * - 0: Success. * - < 0: Failure. @@ -6156,20 +8623,23 @@ class IRtcEngine : public agora::base::IEngineBase { int samplesPerCall) = 0; /** - * Sets the audio playback format for the - * \ref agora::media::IAudioFrameObserver::onPlaybackAudioFrame "onPlaybackAudioFrame" callback. + * @brief Sets the format of the raw audio playback data. + * + * @details + * The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and + * `channel` parameters set in this method.Sample interval (sec) = `samplePerCall` /( `sampleRate` × + * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the + * `onPlaybackAudioFrame` callback according to the sampling interval. + * Call timing: Call this method before joining a channel. + * + * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, + * 24000, 32000, 44100, or 48000 Hz. + * @param channel The number of audio channels. You can set the value as 1 or 2. + * - 1: Mono. + * - 2: Stereo. + * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`. + * @param samplesPerCall The number of data samples, such as 1024 for the Media Push. * - * @param sampleRate Sets the sample rate (Hz) of the audio data returned in the `onPlaybackAudioFrame` callback, - * which can set be as 8000, 16000, 32000, 44100, or 48000. - * @param channel The number of channels of the audio data returned in the `onPlaybackAudioFrame` callback, which - * can be set as 1 or 2: - * - 1: Mono - * - 2: Stereo - * @param mode Deprecated. The use mode of the onPlaybackAudioFrame() callback: - * agora::rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE. 
- * @param samplesPerCall not support. Sampling points in the called data returned in - * onPlaybackAudioFrame(). For example, it is usually set as 1024 for stream - * pushing. * @return * - 0: Success. * - < 0: Failure. @@ -6179,37 +8649,53 @@ class IRtcEngine : public agora::base::IEngineBase { int samplesPerCall) = 0; /** - * Sets the mixed audio format for the - * \ref agora::media::IAudioFrameObserver::onMixedAudioFrame "onMixedAudioFrame" callback. + * @brief Sets the format of the raw audio data after mixing for audio capture and playback. * - * @param sampleRate The sample rate (Hz) of the audio data returned in the `onMixedAudioFrame` callback, which can set - * be as 8000, 16000, 32000, 44100, or 48000. - * @param channel The number of channels of the audio data in `onMixedAudioFrame` callback, which can be set as 1 or 2: - * - 1: Mono - * - 2: Stereo - * @param samplesPerCall not support. Sampling points in the called data returned in - * `onMixedAudioFrame`. For example, it is usually set as 1024 for stream pushing. - * @return - * - 0: Success. - * - < 0: Failure. - */ + * @details + * The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and + * `channel` parameters set in this method.Sample interval (sec) = `samplePerCall` /( `sampleRate` × + * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the `onMixedAudioFrame` + * callback according to the sampling interval. + * Call timing: Call this method before joining a channel. + * + * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, + * 32000, 44100, or 48000 Hz. + * @param channel The number of audio channels. You can set the value as 1 or 2. + * - 1: Mono. + * - 2: Stereo. + * @param samplesPerCall The number of data samples, such as 1024 for the Media Push. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ virtual int setMixedAudioFrameParameters(int sampleRate, int channel, int samplesPerCall) = 0; /** - * Sets the audio ear monitoring format for the - * \ref agora::media::IAudioFrameObserver::onEarMonitoringAudioFrame "onEarMonitoringAudioFrame" callback. + * @brief Sets the format of the in-ear monitoring raw audio data. + * + * @details + * This method is used to set the in-ear monitoring audio data format reported by the + * `onEarMonitoringAudioFrame` callback. + * + * @note + * - Before calling this method, you need to call `enableInEarMonitoring`, and set + * `includeAudioFilters` to `EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS` or + * `EAR_MONITORING_FILTER_NOISE_SUPPRESSION`. + * - The SDK calculates the sampling interval based on the `samplesPerCall`, `sampleRate` and + * `channel` parameters set in this method.Sample interval (sec) = `samplePerCall` /( `sampleRate` × + * `channel` ). Ensure that the sample interval ≥ 0.01 (s). The SDK triggers the + * `onEarMonitoringAudioFrame` callback according to the sampling interval. + * + * @param sampleRate The sample rate of the audio data reported in the `onEarMonitoringAudioFrame` + * callback, which can be set as 8,000, 16,000, 32,000, 44,100, or 48,000 Hz. + * @param channel The number of audio channels reported in the `onEarMonitoringAudioFrame` callback. + * - 1: Mono. + * - 2: Stereo. + * @param mode The use mode of the audio frame. See `RAW_AUDIO_FRAME_OP_MODE_TYPE`. + * @param samplesPerCall The number of data samples reported in the `onEarMonitoringAudioFrame` + * callback, such as 1,024 for the Media Push. * - * @param sampleRate Sets the sample rate (Hz) of the audio data returned in the `onEarMonitoringAudioFrame` callback, - * which can set be as 8000, 16000, 32000, 44100, or 48000. - * @param channel The number of channels of the audio data returned in the `onEarMonitoringAudioFrame` callback, which - * can be set as 1 or 2: - * - 1: Mono - * - 2: Stereo - * @param mode Deprecated. 
The use mode of the onEarMonitoringAudioFrame() callback: - * agora::rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE. - * @param samplesPerCall not support. Sampling points in the called data returned in - * onEarMonitoringAudioFrame(). For example, it is usually set as 1024 for stream - * pushing. * @return * - 0: Success. * - < 0: Failure. @@ -6219,16 +8705,19 @@ class IRtcEngine : public agora::base::IEngineBase { int samplesPerCall) = 0; /** - * Sets the audio playback format before mixing in the - * \ref agora::media::IAudioFrameObserver::onPlaybackAudioFrameBeforeMixing "onPlaybackAudioFrameBeforeMixing" - * callback. + * @brief Sets the format of the raw audio playback data before mixing. + * + * @details + * The SDK triggers the `onPlaybackAudioFrameBeforeMixing` callback according to the sampling + * interval. + * Call timing: Call this method before joining a channel. + * + * @param sampleRate The sample rate returned in the callback, which can be set as 8000, 16000, + * 32000, 44100, or 48000 Hz. + * @param channel The number of audio channels. You can set the value as 1 or 2. + * - 1: Mono. + * - 2: Stereo. * - * @param sampleRate The sample rate (Hz) of the audio data returned in - * `onPlaybackAudioFrameBeforeMixing`, which can set be as 8000, 16000, 32000, 44100, or 48000. - * @param channel Number of channels of the audio data returned in `onPlaybackAudioFrameBeforeMixing`, - * which can be set as 1 or 2: - * - 1: Mono - * - 2: Stereo * @return * - 0: Success. * - < 0: Failure. @@ -6236,18 +8725,55 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setPlaybackAudioFrameBeforeMixingParameters(int sampleRate, int channel) = 0; /** - * Enable the audio spectrum monitor. + * @brief Sets the format of audio data in the `onPlaybackAudioFrameBeforeMixing` callback. + * + * @details + * Used to set the sample rate, number of channels, and number of samples per callback for the audio + * data returned in the `onPlaybackAudioFrameBeforeMixing` callback. 
+ * + * @param sampleRate Set the sample rate returned in the `onPlaybackAudioFrameBeforeMixing` + * callback. It can be set as the following values: 8000, 16000, 32000, 44100, or 48000. + * @param channel Set the number of channels for the audio data returned in the + * `onPlaybackAudioFrameBeforeMixing` callback. It can be set to: + * - 1: Mono. + * - 2: Stereo. + * @param samplesPerCall Set the number of data samples returned in the + * `onPlaybackAudioFrameBeforeMixing` callback. In the RTMP streaming scenario, it is recommended to + * set it to 1024. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setPlaybackAudioFrameBeforeMixingParameters(int sampleRate, int channel, int samplesPerCall) = 0; + + /** + * @brief Turns on audio spectrum monitoring. + * + * @details + * If you want to obtain the audio spectrum data of local or remote users, you can register the + * audio spectrum observer and enable audio spectrum monitoring. * - * @param intervalInMS Sets the time interval(ms) between two consecutive audio spectrum callback. - * The default value is 100. This param should be larger than 10. + * @note You can call this method either before or after joining a channel. + * + * @param intervalInMS The interval (in milliseconds) at which the SDK triggers the + * `onLocalAudioSpectrum` and `onRemoteAudioSpectrum` callbacks. The default value is 100. Do not + * set this parameter to a value less than 10, otherwise calling this method would fail. * * @return * - 0: Success. * - < 0: Failure. + * - -2: Invalid parameters. */ virtual int enableAudioSpectrumMonitor(int intervalInMS = 100) = 0; /** - * Disalbe the audio spectrum monitor. + * @brief Disables audio spectrum monitoring. + * + * @details + * After calling `enableAudioSpectrumMonitor`, if you want to disable audio spectrum monitoring, you + * can call this method. + * + * @note You can call this method either before or after joining a channel. * * @return * - 0: Success. 
@@ -6256,52 +8782,73 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int disableAudioSpectrumMonitor() = 0; /** - * Registers an audio spectrum observer. + * @brief Registers an audio spectrum observer. + * + * @details + * After successfully registering the audio spectrum observer and calling + * `enableAudioSpectrumMonitor` to enable the audio spectrum monitoring, the SDK reports the + * callback that you implement in the `IAudioSpectrumObserver` class according to the time interval + * you set. * - * You need to implement the `IAudioSpectrumObserver` class in this method, and register the following callbacks - * according to your scenario: - * - \ref agora::media::IAudioSpectrumObserver::onAudioSpectrumComputed "onAudioSpectrumComputed": Occurs when - * the SDK receives the audio data and at set intervals. + * @note You can call this method either before or after joining a channel. + * + * @param observer The audio spectrum observer. See `IAudioSpectrumObserver`. * - * @param observer A pointer to the audio spectrum observer: \ref agora::media::IAudioSpectrumObserver - * "IAudioSpectrumObserver". * @return * - 0: Success. * - < 0: Failure. */ virtual int registerAudioSpectrumObserver(agora::media::IAudioSpectrumObserver * observer) = 0; /** - * Releases the audio spectrum observer. + * @brief Unregisters the audio spectrum observer. + * + * @details + * After calling `registerAudioSpectrumObserver`, if you want to disable audio spectrum monitoring, + * you can call this method. + * + * @note You can call this method either before or after joining a channel. + * + * @param observer The audio spectrum observer. See `IAudioSpectrumObserver`. * - * @param observer The pointer to the audio spectrum observer: \ref agora::media::IAudioSpectrumObserver - * "IAudioSpectrumObserver". * @return * - 0: Success. * - < 0: Failure. 
*/ virtual int unregisterAudioSpectrumObserver(agora::media::IAudioSpectrumObserver * observer) = 0; - /** Adjusts the recording volume. - - @param volume The recording volume, which ranges from 0 to 400: - - - 0: Mute the recording volume. - - 100: The Original volume. - - 400: (Maximum) Four times the original volume with signal clipping - protection. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Adjusts the capturing signal volume. + * + * @details + * If you only need to mute the audio signal, Agora recommends that you use `muteRecordingSignal` + * instead. + * Call timing: This method can be called either before or after joining the channel. + * + * @param volume The volume of the user. The value range is [0,400]. + * - 0: Mute. + * - 100: (Default) The original volume. + * - 400: Four times the original volume (amplifying the audio signals by four times). + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int adjustRecordingSignalVolume(int volume) = 0; /** - * Mute or resume recording signal volume. + * @brief Whether to mute the recording signal. + * + * @details + * If you have already called `adjustRecordingSignalVolume` to adjust the recording signal volume, + * when you call this method and set it to `true`, the SDK behaves as follows:1. Records the + * adjusted volume. + * 2. Mutes the recording signal. + * When you call this method again and set it to `false`, the recording signal volume will be + * restored to the volume recorded by the SDK before muting. + * Call timing: This method can be called either before or after joining the channel. * - * @param mute Determines whether to mute or resume the recording signal volume. - * - true: Mute the recording signal volume. - * - false: (Default) Resume the recording signal volume. + * @param mute - `true`: Mute the recording signal. + * - `false`: (Default) Do not mute the recording signal. * * @return * - 0: Success. 
@@ -6309,80 +8856,67 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int muteRecordingSignal(bool mute) = 0; - /** Adjusts the playback volume. - - @param volume The playback volume, which ranges from 0 to 400: - - - 0: Mute the recoridng volume. - - 100: The Original volume. - - 400: (Maximum) Four times the original volume with signal clipping - protection. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Adjusts the playback signal volume of all remote users. + * + * @details + * This method is used to adjust the signal volume of all remote users mixed and played locally. If + * you need to adjust the signal volume of a specified remote user played locally, it is recommended + * that you call `adjustUserPlaybackSignalVolume` instead. + * Call timing: This method can be called either before or after joining the channel. + * + * @param volume The volume of the user. The value range is [0,400]. + * - 0: Mute. + * - 100: (Default) The original volume. + * - 400: Four times the original volume (amplifying the audio signals by four times). + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int adjustPlaybackSignalVolume(int volume) = 0; - /* - * Adjust the playback volume of the user specified by uid. + /** + * @brief Adjusts the playback signal volume of a specified remote user. * - * You can call this method to adjust the playback volume of the user specified by uid - * in call. If you want to adjust playback volume of the multi user, you can call this - * this method multi times. + * @details + * You can call this method to adjust the playback volume of a specified remote user. To adjust the + * playback volume of different remote users, call the method as many times, once for each remote + * user. + * Call timing: Call this method after joining a channel. * - * @note - * Please call this method after join channel. - * This method adjust the playback volume of specified user. 
+ * @param uid The user ID of the remote user. + * @param volume The volume of the user. The value range is [0,400]. + * - 0: Mute. + * - 100: (Default) The original volume. + * - 400: Four times the original volume (amplifying the audio signals by four times). * - * @param uid Remote user ID. - * @param volume The playback volume of the specified remote user. The value ranges between 0 and 400, including the following: - * 0: Mute. - * 100: (Default) Original volume. - * 400: Four times the original volume with signal-clipping protection. * @return * - 0: Success. * - < 0: Failure. */ virtual int adjustUserPlaybackSignalVolume(uid_t uid, int volume) = 0; - /** Sets the fallback option for the published video stream based on the network conditions. - - If `option` is set as #STREAM_FALLBACK_OPTION_AUDIO_ONLY (2), the SDK will: - - - Disable the upstream video but enable audio only when the network conditions deteriorate and cannot support both video and audio. - - Re-enable the video when the network conditions improve. - - When the published video stream falls back to audio only or when the audio-only stream switches back to the video, the SDK triggers the \ref agora::rtc::IRtcEngineEventHandler::onLocalPublishFallbackToAudioOnly "onLocalPublishFallbackToAudioOnly" callback. - - @note - - Agora does not recommend using this method for CDN live streaming, because the remote CDN live user will have a noticeable lag when the published video stream falls back to audio only. - - Ensure that you call this method before joining a channel. - - @param option Sets the fallback option for the published video stream: - - #STREAM_FALLBACK_OPTION_DISABLED (0): (Default) No fallback behavior for the published video stream when the uplink network condition is poor. The stream quality is not guaranteed. - - #STREAM_FALLBACK_OPTION_AUDIO_ONLY (2): The published video stream falls back to audio only when the uplink network condition is poor. - - @return - - 0: Success. 
- - < 0: Failure. - */ - virtual int setLocalPublishFallbackOption(STREAM_FALLBACK_OPTIONS option) = 0; - - /** Sets the fallback option for the remotely subscribed video stream based on the network conditions. - - The default setting for `option` is #STREAM_FALLBACK_OPTION_VIDEO_STREAM_LOW (1), where the remotely subscribed video stream falls back to the low-stream video (low resolution and low bitrate) under poor downlink network conditions. - - If `option` is set as #STREAM_FALLBACK_OPTION_AUDIO_ONLY (2), the SDK automatically switches the video from a high-stream to a low-stream, or disables the video when the downlink network conditions cannot support both audio and video to guarantee the quality of the audio. The SDK monitors the network quality and restores the video stream when the network conditions improve. - - When the remotely subscribed video stream falls back to audio only or when the audio-only stream switches back to the video stream, the SDK triggers the \ref agora::rtc::IRtcEngineEventHandler::onRemoteSubscribeFallbackToAudioOnly "onRemoteSubscribeFallbackToAudioOnly" callback. - - @note Ensure that you call this method before joining a channel. - - @param option Sets the fallback option for the remotely subscribed video stream. See #STREAM_FALLBACK_OPTIONS. - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the fallback option for the subscribed video stream based on the network conditions. + * + * @details + * An unstable network affects the audio and video quality in a video call or interactive live video + * streaming. If `option` is set as `STREAM_FALLBACK_OPTION_VIDEO_STREAM_LOW` or + * `STREAM_FALLBACK_OPTION_AUDIO_ONLY`, the SDK automatically switches the video from a high-quality + * stream to a low-quality stream or disables the video when the downlink network conditions cannot + * support both audio and video to guarantee the quality of the audio. 
Meanwhile, the SDK + * continuously monitors network quality and resumes subscribing to audio and video streams when the + * network quality improves. + * When the subscribed video stream falls back to an audio-only stream, or recovers from an + * audio-only stream to an audio-video stream, the SDK triggers the + * `onRemoteSubscribeFallbackToAudioOnly` callback. + * + * @param option Fallback options for the subscribed stream. See `STREAM_FALLBACK_OPTIONS`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setRemoteSubscribeFallbackOption(STREAM_FALLBACK_OPTIONS option) = 0; @@ -6454,20 +8988,30 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int getExtensionProperty(const char* provider, const char* extension, const ExtensionInfo& extensionInfo, const char* key, char* value, int buf_len) = 0; - /** Enables loopback recording. + /** + * @brief Enables loopback audio capturing. * - * If you enable loopback recording, the output of the default sound card is mixed into - * the audio stream sent to the other end. + * @details + * If you enable loopback audio capturing, the output of the sound card is mixed into the audio + * stream sent to the other end. * - * @note This method is for Windows only. + * @note + * - This method applies to the macOS and Windows only. + * - You can call this method either before or after joining a channel. + * - If you call the `disableAudio` method to disable the audio module, audio capturing will be + * disabled as well. If you need to enable audio capturing, call the `enableAudio` method to enable + * the audio module and then call the `enableLoopbackRecording` method. + * + * @param enabled Sets whether to enable loopback audio capturing. + * - `true`: Enable sound card capturing. You can find the name of the virtual sound card in your + * system's**Audio Devices > Output**. + * - `false`: Disable sound card capturing. 
The name of the virtual sound card will not be shown in + * your system's **Audio Devices > Output**. + * @param deviceName - macOS: The device name of the virtual sound card. The default value is set to + * NULL, which means using AgoraALD for loopback audio capturing. + * - Windows: The device name of the sound card. The default is set to NULL, which means the SDK + * uses the sound card of your device for loopback audio capturing. * - * @param enabled Sets whether to enable/disable loopback recording. - * - true: Enable loopback recording. - * - false: (Default) Disable loopback recording. - * @param deviceName Pointer to the device name of the sound card. The default value is NULL (the default sound card). - * - This method is for macOS and Windows only. - * - macOS does not support loopback capturing of the default sound card. If you need to use this method, - * please use a virtual sound card and pass its name to the deviceName parameter. Agora has tested and recommends using soundflower. * @return * - 0: Success. * - < 0: Failure. @@ -6475,18 +9019,20 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableLoopbackRecording(bool enabled, const char* deviceName = NULL) = 0; - /** Adjusts the loopback recording volume. - - @param volume The loopback volume, which ranges from 0 to 100: - - - 0: Mute the recoridng volume. - - 100: The Original volume. - protection. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Adjusts the volume of the signal captured by the sound card. + * + * @details + * After calling `enableLoopbackRecording` to enable loopback audio capturing, you can call this + * method to adjust the volume of the signal captured by the sound card. + * + * @param volume Audio mixing volume. The value ranges between 0 and 100. The default value is 100, + * which means the original volume. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ virtual int adjustLoopbackSignalVolume(int volume) = 0; /** Retrieves the audio volume for recording loopback. @@ -6498,40 +9044,87 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int getLoopbackRecordingVolume() = 0; /** - * Enables in-ear monitoring. + * @brief Enables in-ear monitoring. + * + * @details + * This method enables or disables in-ear monitoring. + * Call timing: This method can be called either before or after joining the channel. + * + * @note Users must use earphones (wired or Bluetooth) to hear the in-ear monitoring effect. + * + * @param enabled Enables or disables in-ear monitoring. + * - `true`: Enables in-ear monitoring. + * - `false`: (Default) Disables in-ear monitoring. + * @param includeAudioFilters The audio filter types of in-ear monitoring. See + * `EAR_MONITORING_FILTER_TYPE`. * - * @param enabled Determines whether to enable in-ear monitoring. - * - true: Enable. - * - false: (Default) Disable. - * @param includeAudioFilters The type of the ear monitoring: #EAR_MONITORING_FILTER_TYPE * @return * - 0: Success. * - < 0: Failure. + * - - 8: Make sure the current audio routing is Bluetooth or headset. */ virtual int enableInEarMonitoring(bool enabled, int includeAudioFilters) = 0; /** - * Sets the volume of the in-ear monitor. + * @brief Sets the volume of the in-ear monitor. + * + * @details + * Call timing: This method can be called either before or after joining the channel. * - * @param volume Sets the volume of the in-ear monitor. The value ranges - * between 0 and 100 (default). + * @param volume The volume of the user. The value range is [0,400]. + * - 0: Mute. + * - 100: (Default) The original volume. + * - 400: Four times the original volume (amplifying the audio signals by four times). * * @return * - 0: Success. * - < 0: Failure. + * - -2: Invalid parameter settings, such as in-ear monitoring volume exceeding the valid range (< + * 0 or > 400). 
 */ virtual int setInEarMonitoringVolume(int volume) = 0; -#if defined (_WIN32) || defined(__linux__) || defined(__ANDROID__) +#if defined(_WIN32) || defined(__linux__) || defined(__ANDROID__) + /** + * @brief Loads an extension. + * + * @details + * This method is used to add extensions external to the SDK (such as those from Extensions + * Marketplace and SDK extensions) to the SDK. + * Call timing: Make sure the `IRtcEngine` is initialized before you call this method. + * + * @note + * - This method applies to Windows and Android only. + * - If you want to load multiple extensions, you need to call this method + * multiple times. + * + * @param path The extension library path and name. For example: + * `/library/libagora_segmentation_extension.dll`. + * @param unload_after_use Whether to uninstall the current extension when you are no longer using it: + * - `true`: Uninstall the extension when the `IRtcEngine` is destroyed. + * - `false`: (Recommended) Do not uninstall the extension until the process terminates. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int loadExtensionProvider(const char* path, bool unload_after_use = false) = 0; #endif /** - * Sets the provider property of an extension. + * @brief Sets the properties of the extension provider. * - * @param provider The name of the extension provider, e.g. agora.io. + * @details + * You can call this method to set the attributes of the extension provider and initialize the + * relevant parameters according to the type of the provider. + * Call timing: Call this method before `enableExtension` and after `registerExtension`. + * + * @note If you want to set the properties of the extension provider for multiple extensions, you + * need to call this method multiple times. + * + * @param provider The name of the extension provider. * @param key The key of the extension. - * @param value The JSON formatted value of the extension key. + * @param value The value of the extension key. 
* * @return * - 0: Success. @@ -6540,48 +9133,82 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setExtensionProviderProperty(const char* provider, const char* key, const char* value) = 0; /** - * Registers an extension. Normally you should call this function immediately after engine initialization. - * Once an extension is registered, the SDK will automatically create and add it to the pipeline. + * @brief Registers an extension. * - * @param provider The name of the extension provider, e.g. agora.io. - * @param extension The name of the extension, e.g. agora.beauty. - * @param type The source type of the extension, e.g. PRIMARY_CAMERA_SOURCE. The default is UNKNOWN_MEDIA_SOURCE. + * @details + * For extensions external to the SDK (such as those from Extensions Marketplace and SDK + * Extensions), you need to load them before calling this method. Extensions internal to the SDK + * (those included in the full SDK package) are automatically loaded and registered after the + * initialization of `IRtcEngine`. + * Call timing: - Agora recommends you call this method after the initialization of `IRtcEngine` and + * before joining a channel. + * - For video extensions (such as the image enhancement extension), you need to call this method + * after enabling the video module by calling `enableVideo` or `enableLocalVideo`. + * - Before calling this method, you need to call `loadExtensionProvider` to load the extension + * first. + * + * @note + * - If you want to register multiple extensions, you need to call this method multiple times. + * - The data processing order of different extensions in the SDK is determined by the order in + * which the extensions are registered. That is, the extension that is registered first will process + * the data first. + * + * @param provider The name of the extension provider. + * @param extension The name of the extension. + * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`. 
* * @return * - 0: Success. * - < 0: Failure. + * - -3: The extension library is not loaded. Agora recommends that you check the storage location + * or the name of the dynamic library. */ virtual int registerExtension(const char* provider, const char* extension, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0; /** - * Enable/Disable an extension. - * By calling this function, you can dynamically enable/disable the extension without changing the pipeline. - * For example, enabling/disabling Extension_A means the data will be adapted/bypassed by Extension_A. + * @brief Enables or disables extensions. * - * NOTE: For compatibility reasons, if you haven't call registerExtension, - * enableExtension will automatically register the specified extension. - * We suggest you call registerExtension explicitly. + * @details + * Call timing: Agora recommends that you call this method after joining a channel. + * Related callbacks: When this method is successfully called within the channel, it triggers + * `onExtensionStartedWithContext` or `onExtensionStoppedWithContext`. * - * @param provider The name of the extension provider, e.g. agora.io. - * @param extension The name of the extension, e.g. agora.beauty. + * @note + * - If you want to enable multiple extensions, you need to call this method multiple times. + * - After a successful call of this method, you cannot load other extensions. + * + * @param provider The name of the extension provider. + * @param extension The name of the extension. * @param enable Whether to enable the extension: - * - true: (Default) Enable the extension. - * - false: Disable the extension. - * @param type The source type of the extension, e.g. PRIMARY_CAMERA_SOURCE. The default is UNKNOWN_MEDIA_SOURCE. + * - `true`: Enable the extension. + * - `false`: Disable the extension. + * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`. * * @return * - 0: Success. * - < 0: Failure. 
 + * - -3: The extension library is not loaded. Agora recommends that you check the storage location + * or the name of the dynamic library. */ virtual int enableExtension(const char* provider, const char* extension, bool enable=true, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0; /** - * Sets the properties of an extension. + * @brief Sets the properties of the extension. * - * @param provider The name of the extension provider, e.g. agora.io. - * @param extension The name of the extension, e.g. agora.beauty. + * @details + * After enabling the extension, you can call this method to set the properties of the extension. + * Call timing: Call this method after calling `enableExtension`. + * Related callbacks: After calling this method, it may trigger the `onExtensionEventWithContext` + * callback, and the specific triggering logic is related to the extension itself. + * + * @note If you want to set properties for multiple extensions, you need to call this method + * multiple times. + * + * @param provider The name of the extension provider. + * @param extension The name of the extension. * @param key The key of the extension. - * @param value The JSON formatted value of the extension key. + * @param value The value of the extension key. + * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`. * * @return * - 0: Success. @@ -6592,13 +9219,18 @@ class IRtcEngine : public agora::base::IEngineBase { const char* key, const char* value, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0; /** - * Gets the properties of an extension. + * @brief Gets detailed information on the extensions. * - * @param provider The name of the extension provider, e.g. agora.io. - * @param extension The name of the extension, e.g. agora.beauty. - * @param key The key of the extension. - * @param value The value of the extension key. - * @param buf_len Maximum length of the JSON string indicating the extension property. 
+ * @details + * Call timing: This method can be called either before or after joining the channel. + * + * @param provider An output parameter. The name of the extension provider. + * @param extension An output parameter. The name of the extension. + * @param key An output parameter. The key of the extension. + * @param value An output parameter. The value of the extension key. + * @param type Source type of the extension. See `MEDIA_SOURCE_TYPE`. + * @param buf_len Maximum length of the JSON string indicating the extension property. The maximum + * value is 512 bytes. * * @return * - 0: Success. @@ -6608,13 +9240,24 @@ class IRtcEngine : public agora::base::IEngineBase { const char* provider, const char* extension, const char* key, char* value, int buf_len, agora::media::MEDIA_SOURCE_TYPE type = agora::media::UNKNOWN_MEDIA_SOURCE) = 0; - /** Sets the camera capture configuration. - * @note Call this method before enabling the local camera. - * That said, you can call this method before calling \ref IRtcEngine::joinChannel "joinChannel", - * \ref IRtcEngine::enableVideo "enableVideo", or \ref IRtcEngine::enableLocalVideo "enableLocalVideo", - * depending on which method you use to turn on your local camera. + /** + * @brief Sets the camera capture configuration. + * + * @details + * Call timing: Call this method before enabling local camera capture, such as before calling + * `startPreview(VIDEO_SOURCE_TYPE sourceType)` and `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)`. + * + * @note + * To adjust the camera focal length configuration, It is recommended to call + * `queryCameraFocalLengthCapability` first to check the device's focal length capabilities, and + * then configure based on the query results. + * Due to limitations on some Android devices, even if you set the focal length type according to + * the results returned in `queryCameraFocalLengthCapability`, the settings may not take effect. 
+ * + * @param config The camera capture configuration. See `CameraCapturerConfiguration`. Attention: In + * this method, you do not need to set the `deviceId` parameter. * - * @param config Sets the camera capturer configuration. See CameraCapturerConfiguration. * @return * - 0: Success. * - < 0: Failure. @@ -6622,11 +9265,22 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setCameraCapturerConfiguration(const CameraCapturerConfiguration& config) = 0; /** - * Get an custom video track id created by internal,which could used to publish or preview + * @brief Creates a custom video track. + * + * @details + * To publish a custom video source, see the following steps: 1. Call this method to create a video + * track and get the video track ID. + * 2. Call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to join the channel. In `ChannelMediaOptions`, set + * `customVideoTrackId` to the video track ID that you want to publish, and set + * `publishCustomVideoTrack` to `true`. + * 3. Call `pushVideoFrame` and specify `videoTrackId` as the video track ID set in step 2. You can + * then publish the corresponding custom video source in the channel. * * @return - * - > 0: the useable video track id. - * - < 0: Failure. + * - If the method call is successful, the video track ID is returned as the unique identifier of + * the video track. + * - If the method call fails, 0xffffffff is returned. */ virtual video_track_id_t createCustomVideoTrack() = 0; @@ -6640,9 +9294,10 @@ class IRtcEngine : public agora::base::IEngineBase { virtual video_track_id_t createCustomEncodedVideoTrack(const SenderOptions& sender_option) = 0; /** - * destroy a created custom video track id + * @brief Destroys the specified video track. + * + * @param video_track_id The video track ID returned by calling the `createCustomVideoTrack` method. 
* - * @param video_track_id The video track id which was created by createCustomVideoTrack * @return * - 0: Success. * - < 0: Failure. @@ -6659,11 +9314,23 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int destroyCustomEncodedVideoTrack(video_track_id_t video_track_id) = 0; -#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) +#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__) /** - * Switches between front and rear cameras. + * @brief Switches between front and rear cameras. + * + * @details + * You can call this method to dynamically switch cameras based on the actual camera availability + * during the app's runtime, without having to restart the video stream or reconfigure the video + * source. + * Call timing: This method must be called after the camera is successfully enabled, that is, after + * the SDK triggers the `onLocalVideoStateChanged` callback and returns the local video state as + * `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @note + * - This method only switches the camera for the video stream captured by the first camera, that is, + * the video source set to `VIDEO_SOURCE_CAMERA` (0) when calling `startCameraCapture`. + * - This method is for Android and iOS only. * - * @note This method applies to Android and iOS only. * @return * - 0: Success. * - < 0: Failure. @@ -6671,65 +9338,127 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int switchCamera() = 0; /** - * Checks whether the camera zoom function is supported. + * @brief Checks whether the device supports camera zoom. + * + * @details + * Call timing: This method must be called after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @note This method is for Android and iOS only. * * @return - * - true: The camera zoom function is supported. - * - false: The camera zoom function is not supported. 
+ * - `true`: The device supports camera zoom. + * - `false`: The device does not support camera zoom. */ virtual bool isCameraZoomSupported() = 0; /** - * Checks whether the camera face detect is supported. + * @brief Checks whether the device camera supports face detection. + * + * @note + * - This method is for Android and iOS only. + * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and + * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). * * @return - * - true: The camera face detect is supported. - * - false: The camera face detect is not supported. + * - `true`: The device camera supports face detection. + * - `false`: The device camera does not support face detection. */ virtual bool isCameraFaceDetectSupported() = 0; /** - * Checks whether the camera flash function is supported. + * @brief Checks whether the device supports camera flash. + * + * @note + * - This method is for Android and iOS only. + * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and + * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * - The app enables the front camera by default. If your front camera does not support flash, this + * method returns false. If you want to check whether the rear camera supports the flash function, + * call `switchCamera` before this method. + * - On iPads with system version 15, even if `isCameraTorchSupported` returns `true`, you might + * fail to successfully enable the flash by calling `setCameraTorchOn` due to system issues. * * @return - * - true: The camera flash function is supported. - * - false: The camera flash function is not supported. + * - `true`: The device supports camera flash. + * - `false`: The device does not support camera flash. */ virtual bool isCameraTorchSupported() = 0; /** - * Checks whether the camera manual focus function is supported. 
+ * @brief Check whether the device supports the manual focus function. + * + * @note + * - This method is for Android and iOS only. + * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and + * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). * * @return - * - true: The camera manual focus function is supported. - * - false: The camera manual focus function is not supported. + * - `true`: The device supports the manual focus function. + * - `false`: The device does not support the manual focus function. */ virtual bool isCameraFocusSupported() = 0; /** - * Checks whether the camera auto focus function is supported. + * @brief Checks whether the device supports the face auto-focus function. + * + * @note + * - This method is for Android and iOS only. + * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and + * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). * * @return - * - true: The camera auto focus function is supported. - * - false: The camera auto focus function is not supported. + * - `true`: The device supports the face auto-focus function. + * - `false`: The device does not support the face auto-focus function. */ virtual bool isCameraAutoFocusFaceModeSupported() = 0; /** - * Sets the camera zoom ratio. + * @brief Sets the camera zoom factor. + * + * @details + * For iOS devices equipped with multi-lens rear cameras, such as those featuring dual-camera + * (wide-angle and ultra-wide-angle) or triple-camera (wide-angle, ultra-wide-angle, and telephoto), + * you can call `setCameraCapturerConfiguration` first to set the `cameraFocalLengthType` as + * `CAMERA_FOCAL_LENGTH_DEFAULT` (0) (standard lens). Then, adjust the camera zoom factor to a value + * less than 1.0. This configuration allows you to capture video with an ultra-wide-angle + * perspective. + * + * @note + * - This method is for Android and iOS only. 
+ * - You must call this method after `enableVideo`. The setting result will take effect after the + * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @param factor Camera zoom factor. For devices that do not support ultra-wide-angle, the value + * ranges from 1.0 to the maximum zoom factor; for devices that support ultra-wide-angle, the value + * ranges from 0.5 to the maximum zoom factor. You can get the maximum zoom factor supported by the + * device by calling the `getCameraMaxZoomFactor` method. * - * @param factor The camera zoom factor. It ranges from 1.0 to the maximum zoom - * supported by the camera. * @return - * - 0: Success. - * - < 0: Failure. + * - The camera zoom `factor` value, if successful. + * - < 0: if the method fails. */ virtual int setCameraZoomFactor(float factor) = 0; /** - * Sets the camera face detection. + * @brief Enables or disables face detection for the local user. + * + * @details + * Call timing: This method needs to be called after the camera is started (for example, by calling + * `startPreview(VIDEO_SOURCE_TYPE sourceType)` or `enableVideo`). + * Related callbacks: Once face detection is enabled, the SDK triggers the `onFacePositionChanged` + * callback to report the face information of the local user, which includes the following: + * - The width and height of the local video. + * - The position of the human face in the local view. + * - The distance between the human face and the screen. + * + * @note This method is for Android and iOS only. + * + * @param enabled Whether to enable face detection for the local user: + * - `true`: Enable face detection. + * - `false`: (Default) Disable face detection. * - * @param enabled The camera face detection enabled. * @return * - 0: Success. * - < 0: Failure. 
@@ -6737,89 +9466,161 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableFaceDetection(bool enabled) = 0; /** - * Gets the maximum zoom ratio supported by the camera. - * @return The maximum zoom ratio supported by the camera. + * @brief Gets the maximum zoom ratio supported by the camera. + * + * @note + * - This method is for Android and iOS only. + * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and + * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @return + * The maximum zoom ratio supported by the camera. */ virtual float getCameraMaxZoomFactor() = 0; /** - * Sets the manual focus position. + * @brief Sets the camera manual focus position. + * + * @note + * - This method is for Android and iOS only. + * - You must call this method after `enableVideo`. The setting result will take effect after the + * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * - After a successful method call, the SDK triggers the `onCameraFocusAreaChanged` callback. + * + * @param positionX The horizontal coordinate of the touchpoint in the view. + * @param positionY The vertical coordinate of the touchpoint in the view. * - * @param positionX The horizontal coordinate of the touch point in the view. - * @param positionY The vertical coordinate of the touch point in the view. * @return * - 0: Success. * - < 0: Failure. - */ + */ virtual int setCameraFocusPositionInPreview(float positionX, float positionY) = 0; /** - * Enables the camera flash. + * @brief Enables the camera flash. + * + * @note + * - This method is for Android and iOS only. + * - You must call this method after `enableVideo`. 
The setting result will take effect after the + * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @param isOn Whether to turn on the camera flash: + * - `true`: Turn on the flash. + * - `false`: (Default) Turn off the flash. * - * @param isOn Determines whether to enable the camera flash. - * - true: Enable the flash. - * - false: Do not enable the flash. + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setCameraTorchOn(bool isOn) = 0; /** - * Enables the camera auto focus face function. + * @brief Enables the camera auto-face focus function. * - * @param enabled Determines whether to enable the camera auto focus face mode. - * - true: Enable the auto focus face function. - * - false: Do not enable the auto focus face function. + * @details + * By default, the SDK disables face autofocus on Android and enables face autofocus on iOS. To set + * face autofocus, call this method. + * Call timing: This method must be called after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @note This method is for Android and iOS only. + * + * @param enabled Whether to enable face autofocus: + * - `true`: Enable the camera auto-face focus function. + * - `false`: Disable face autofocus. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setCameraAutoFocusFaceModeEnabled(bool enabled) = 0; - /** Checks whether the camera exposure function is supported. - * - * Ensure that you call this method after the camera starts, for example, by calling `startPreview` or `joinChannel`. + /** + * @brief Checks whether the device supports manual exposure. * * @since v2.3.2. + * + * @note + * - This method is for Android and iOS only. 
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and + * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * * @return - *

    - *
  • true: The device supports the camera exposure function.
  • - *
  • false: The device does not support the camera exposure function.
  • - *
+ * - `true`: The device supports manual exposure. + * - `false`: The device does not support manual exposure. */ virtual bool isCameraExposurePositionSupported() = 0; - /** Sets the camera exposure position. - * - * Ensure that you call this method after the camera starts, for example, by calling `startPreview` or `joinChannel`. + /** + * @brief Sets the camera exposure position. * - * A successful setCameraExposurePosition method call triggers the {@link IRtcEngineEventHandler#onCameraExposureAreaChanged onCameraExposureAreaChanged} callback on the local client. * @since v2.3.2. - * @param positionXinView The horizontal coordinate of the touch point in the view. - * @param positionYinView The vertical coordinate of the touch point in the view. + * + * @note + * - This method is for Android and iOS only. + * - You must call this method after `enableVideo`. The setting result will take effect after the + * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * - After a successful method call, the SDK triggers the `onCameraExposureAreaChanged` callback. + * + * @param positionXinView The horizontal coordinate of the touchpoint in the view. + * @param positionYinView The vertical coordinate of the touchpoint in the view. * * @return - *
    - *
  • 0: Success.
  • - *
  • < 0: Failure.
  • - *
+ * - 0: Success.
+ * - < 0: Failure.
 */
 virtual int setCameraExposurePosition(float positionXinView, float positionYinView) = 0;

 /**
- * Returns whether exposure value adjusting is supported by the current device.
- * Exposure compensation is in auto exposure mode.
- * @since v4.2.2
- * @note
- * This method only supports Android and iOS.
- * This interface returns valid values only after the device is initialized.
- *
- * @return
- * - true: exposure value adjusting is supported.
- * - false: exposure value adjusting is not supported or device is not initialized.
- */
+ * @brief Queries whether the current camera supports adjusting exposure value.
+ *
+ * @since v4.2.2
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and
+ * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ * - Before calling `setCameraExposureFactor`, Agora recommends that you call this method to query
+ * whether the current camera supports adjusting the exposure value.
+ * - By calling this method, you adjust the exposure value of the currently active camera, that is,
+ * the camera specified when calling `setCameraCapturerConfiguration`.
+ *
+ * @return
+ * - `true`: The current camera supports adjusting the exposure value.
+ * - `false`: The current camera does not support adjusting the exposure value.
+ */
 virtual bool isCameraExposureSupported() = 0;

 /**
- * Sets the camera exposure ratio.
+ * @brief Sets the camera exposure value.
+ *
 * @since v4.2.2
- * @param factor The camera zoom factor. The recommended camera exposure factor ranging from -8.0 to 8.0 for iOS,
- * and -20.0 to 20.0 for Android.
+ *
+ * @details
+ * Insufficient or excessive lighting in the shooting environment can affect the image quality of
+ * video capture. To achieve optimal video quality, you can use this method to adjust the camera's
+ * exposure value.
+ *
+ * @note
+ * - This method is for Android and iOS only.
+ * - You must call this method after `enableVideo`. 
The setting result will take effect after the + * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * - Before calling this method, Agora recommends calling `isCameraExposureSupported` to check + * whether the current camera supports adjusting the exposure value. + * - By calling this method, you adjust the exposure value of the currently active camera, that is, + * the camera specified when calling `setCameraCapturerConfiguration`. + * + * @param factor The camera exposure value. The default value is 0, which means using the default + * exposure of the camera. The larger the value, the greater the exposure. When the video image is + * overexposed, you can reduce the exposure value; when the video image is underexposed and the dark + * details are lost, you can increase the exposure value. If the exposure value you specified is + * beyond the range supported by the device, the SDK will automatically adjust it to the actual + * supported range of the device. + * On Android, the value range is [-20.0, 20.0]. On iOS, the value range is [-8.0, 8.0]. * * @return * - 0: Success. @@ -6829,128 +9630,228 @@ class IRtcEngine : public agora::base::IEngineBase { #if defined(__APPLE__) /** - * Checks whether the camera auto exposure function is supported. + * @brief Checks whether the device supports auto exposure. + * + * @note + * - This method applies to iOS only. + * - This method must be called after the SDK triggers the `onLocalVideoStateChanged` callback and + * returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). * * @return - * - true: The camera auto exposure function is supported. - * - false: The camera auto exposure function is not supported. + * - `true`: The device supports auto exposure. + * - `false`: The device does not support auto exposure. 
*/ virtual bool isCameraAutoExposureFaceModeSupported() = 0; /** - * Enables the camera auto exposure face function. + * @brief Sets whether to enable auto exposure. + * + * @note + * - This method applies to iOS only. + * - You must call this method after `enableVideo`. The setting result will take effect after the + * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). * - * @param enabled Determines whether to enable the camera auto exposure face mode. - * - true: Enable the auto exposure face function. - * - false: Do not enable the auto exposure face function. + * @param enabled Whether to enable auto exposure: + * - `true`: Enable auto exposure. + * - `false`: Disable auto exposure. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setCameraAutoExposureFaceModeEnabled(bool enabled) = 0; /** - * set camera stabilization mode.If open stabilization mode, fov will be smaller and capture latency will be longer. + * @brief Sets the camera stabilization mode. + * + * @details + * The camera stabilization mode is off by default. You need to call this method to turn it on and + * set the appropriate stabilization mode. + * Applicable scenarios: When shooting on the move, in low light conditions, or with mobile devices, + * you can set the camera stabilization mode to reduce the impact of camera shake and get a more + * stable, clear picture. + * Call timing: This method must be called after the camera is successfully enabled, that is, after + * the SDK triggers the `onLocalVideoStateChanged` callback and returns the local video state as + * `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @note + * - Camera stabilization only works for scenarios with a video resolution greater than 1280 x 720. 
+ * - After enabling camera stabilization, the higher the camera stabilization level, the smaller the + * camera's field of view and the greater the camera's latency. To improve user experience, it is + * recommended that you set the `mode` parameter to `CAMERA_STABILIZATION_MODE_LEVEL_1`. + * This method applies to iOS only. * - * @param mode specifies the camera stabilization mode. + * @param mode Camera stabilization mode. See `CAMERA_STABILIZATION_MODE`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setCameraStabilizationMode(CAMERA_STABILIZATION_MODE mode) = 0; #endif - /** Sets the default audio route (for Android and iOS only). - - Most mobile phones have two audio routes: an earpiece at the top, and a - speakerphone at the bottom. The earpiece plays at a lower volume, and the - speakerphone at a higher volume. - - When setting the default audio route, you determine whether audio playback - comes through the earpiece or speakerphone when no external audio device is - connected. - - Depending on the scenario, Agora uses different default audio routes: - - Voice call: Earpiece - - Audio broadcast: Speakerphone - - Video call: Speakerphone - - Video broadcast: Speakerphone - - Call this method before, during, or after a call, to change the default - audio route. When the audio route changes, the SDK triggers the - \ref IRtcEngineEventHandler::onAudioRoutingChanged "onAudioRoutingChanged" - callback. - - @note The system audio route changes when an external audio device, such as - a headphone or a Bluetooth audio device, is connected. See *Principles for changing the audio route*. - - @param defaultToSpeaker Whether to set the speakerphone as the default audio - route: - - true: Set the speakerphone as the default audio route. - - false: Do not set the speakerphone as the default audio route. - - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the default audio playback route. 
+ * + * @details + * Most mobile phones have two audio routes: an earpiece at the top, and a speakerphone at the + * bottom. The earpiece plays at a lower volume, and the speakerphone at a higher volume. When + * setting the default audio route, you determine whether audio playback comes through the earpiece + * or speakerphone when no external audio device is connected. + * In different scenarios, the default audio routing of the system is also different. See the + * following: + * - Voice call: Earpiece. + * - Audio broadcast: Speakerphone. + * - Video call: Speakerphone. + * - Video broadcast: Speakerphone. + * You can call this method to change the default audio route. + * Call timing: Call this method before joining a channel. If you need to change the audio route + * after joining a channel, call `setEnableSpeakerphone`. + * Related callbacks: After successfully calling this method, the SDK triggers the + * `onAudioRoutingChanged` callback to report the current audio route. + * + * @note This method is for Android and iOS only. + * + * @param defaultToSpeaker Whether to set the speakerphone as the default audio route: + * - `true`: Set the speakerphone as the default audio route. + * - `false`: Set the earpiece as the default audio route. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setDefaultAudioRouteToSpeakerphone(bool defaultToSpeaker) = 0; - /** Enables/Disables the speakerphone temporarily (for Android and iOS only). - - When the audio route changes, the SDK triggers the - \ref IRtcEngineEventHandler::onAudioRoutingChanged "onAudioRoutingChanged" - callback. - - You can call this method before, during, or after a call. However, Agora - recommends calling this method only when you are in a channel to change - the audio route temporarily. - - @note This method sets the audio route temporarily. 
Plugging in or
- unplugging a headphone, or the SDK re-enabling the audio device module
- (ADM) to adjust the media volume in some scenarios relating to audio, leads
- to a change in the audio route. See *Principles for changing the audio
- route*.
-
- @param speakerOn Whether to set the speakerphone as the temporary audio
- route:
- - true: Set the speakerphone as the audio route temporarily. (For iOS only:
- calling setEnableSpeakerphone(true) does not change the audio route to the
- speakerphone if a headphone or a Bluetooth audio device is connected.)
- - false: Do not set the speakerphone as the audio route.
- @return
- - 0: Success.
- - < 0: Failure.
+ /**
+ * @brief Enables/Disables the audio route to the speakerphone.
+ *
+ * @details
+ * Applicable scenarios: If the default audio route of the SDK or the setting in
+ * `setDefaultAudioRouteToSpeakerphone` cannot meet your requirements, you can call this method to
+ * switch the current audio route.
+ * Call timing: Call this method after joining a channel.
+ * Related callbacks: After successfully calling this method, the SDK triggers the
+ * `onAudioRoutingChanged` callback to report the current audio route.
+ *
+ * @note
+ * - This method only sets the audio route in the current channel and does not influence the default
+ * audio route. If the user leaves the current channel and joins another channel, the default audio
+ * route is used.
+ * - If the user uses an external audio playback device such as a Bluetooth or wired headset, this
+ * method does not take effect, and the SDK plays audio through the external device. When the user
+ * uses multiple external devices, the SDK plays audio through the last connected device.
+ * This method is for Android and iOS only.
+ *
+ * @param speakerOn Sets whether to enable the speakerphone or earpiece:
+ * - `true`: Enable the speakerphone. The audio route is the speakerphone.
+ * - `false`: Disable the speakerphone. The audio route is the earpiece.
+ * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setEnableSpeakerphone(bool speakerOn) = 0; - /** Checks whether the speakerphone is enabled (for Android and iOS only). - - @return - - true: The speakerphone is enabled, and the audio plays from the speakerphone. - - false: The speakerphone is not enabled, and the audio plays from devices - other than the speakerphone. For example, the headset or earpiece. + /** + * @brief Checks whether the speakerphone is enabled. + * + * @details + * Call timing: You can call this method either before or after joining a channel. + * + * @note This method is for Android and iOS only. + * + * @return + * - `true`: The speakerphone is enabled, and the audio plays from the speakerphone. + * - `false`: The speakerphone is not enabled, and the audio plays from devices other than the + * speakerphone. For example, the headset or earpiece. */ virtual bool isSpeakerphoneEnabled() = 0; - /** Select preferred route for android communication mode - - @param route The preferred route. For example, when a Bluetooth headset is connected, - you can use this API to switch the route to a wired headset. - @return meanless, route switch result is pass through CallbackOnRoutingChanged + /** + * @brief Selects the audio playback route in communication audio mode. + * + * @details + * This method is used to switch the audio route from Bluetooth headphones to earpiece, wired + * headphones or speakers in communication audio mode ( `MODE_IN_COMMUNICATION` ). + * Call timing: This method can be called either before or after joining the channel. + * Related callbacks: After successfully calling this method, the SDK triggers the + * `onAudioRoutingChanged` callback to report the current audio route. + * + * @note + * Using this method and the `setEnableSpeakerphone` method at the same time may cause conflicts. + * Agora recommends that you use the `setRouteInCommunicationMode` method alone. + * This method is for Android only. 
+ *
+ * @param route The audio playback route you want to use:
+ * - -1: The default audio route.
+ * - 0: Headphones with microphone.
+ * - 1: Handset.
+ * - 2: Headphones without microphone.
+ * - 3: Device's built-in speaker.
+ * - 4: (Not supported yet) External speakers.
+ * - 5: Bluetooth headphones.
+ * - 6: USB device.
+ *
+ * @return
+ * Without practical meaning.
 */
 virtual int setRouteInCommunicationMode(int route) = 0;
-
-#endif // __ANDROID__ || (__APPLE__ && TARGET_OS_IOS)
+#endif // __ANDROID__ || (__APPLE__ && TARGET_OS_IOS) || __OHOS__

 #if defined(__APPLE__)
 /**
- * Checks whether the center stage is supported. Use this method after starting the camera.
+ * @brief Checks if the camera supports portrait center stage.
+ *
+ * @details
+ * Before calling `enableCameraCenterStage` to enable portrait center stage, it is recommended to
+ * call this method to check if the current device supports the feature.
+ * Call timing: This method must be called after the camera is successfully enabled, that is, after
+ * the SDK triggers the `onLocalVideoStateChanged` callback and returns the local video state as
+ * `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1).
+ *
+ * @note This method is for iOS and macOS only.
 *
 * @return
- * - true: The center stage is supported.
- * - false: The center stage is not supported.
+ * - `true`: The current camera supports the portrait center stage.
+ * - `false`: The current camera does not support the portrait center stage.
 */
 virtual bool isCameraCenterStageSupported() = 0;

- /** Enables the camera Center Stage.
- * @param enabled enable Center Stage:
- * - true: Enable Center Stage.
- * - false: Disable Center Stage.
+ /**
+ * @brief Enables or disables portrait center stage.
+ *
+ * @details
+ * The portrait center stage feature is off by default. You need to call this method to turn it on.
+ * If you need to disable this feature, you need to call this method again and set `enabled` to
+ * `false`. 
+ * Applicable scenarios: The portrait center stage feature can be widely used in scenarios such as + * online meetings, shows, online education, etc. The host can use this feature to ensure that they + * are always in the center of the screen, whether they move or not, in order to achieve a good + * display effect. + * Call timing: This method must be called after the camera is successfully enabled, that is, after + * the SDK triggers the `onLocalVideoStateChanged` callback and returns the local video state as + * `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * + * @note + * Due to the high performance requirements of this feature, you need to use it on the following + * types of devices or devices with higher performance: + * - iPad: + * - 12.9-inch iPad Pro (5th generation) + * - 11-inch iPad Pro (3rd generation) + * - iPad (9th generation) + * - iPad mini (6th generation) + * - iPad Air (5th generation) + * - 2020 M1 MacBook Pro 13-inch + iPhone 11 (using iPhone as external camera for the MacBook) + * Agora recommends that you call `isCameraCenterStageSupported` to check whether the current device + * supports portrait center stage before enabling this feature. + * This method is for iOS and macOS only. + * + * @param enabled Whether to enable the portrait center stage: + * - `true`: Enable portrait center stage. + * - `false`: Disable portrait center stage. + * * @return * - 0: Success. * - < 0: Failure. @@ -6958,39 +9859,60 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int enableCameraCenterStage(bool enabled) = 0; #endif -#if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) - /** Get \ref ScreenCaptureSourceInfo list including available windows and screens. - * - * @param thumbSize Set expected size for thumb, image will be scaled accordingly. For windows, SIZE is defined in windef.h. - * @param iconSize Set expected size for icon, image will be scaled accordingly. For windows, SIZE is defined in windef.h. 
- * @param includeScreen Determines whether to include screens info. - * - true: sources will have screens info - * - false: source will only have windows info - * @return - * - IScreenCaptureSourceList* a pointer to an instance of IScreenCaptureSourceList - */ +#if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE)|| (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__)) + /** + * @brief Gets a list of shareable screens and windows. + * + * @details + * You can call this method before sharing a screen or window to get a list of shareable screens and + * windows, which enables a user to use thumbnails in the list to easily choose a particular screen + * or window to share. This list also contains important information such as window ID and screen + * ID, with which you can call `startScreenCaptureByWindowId` or `startScreenCaptureByDisplayId` to + * start the sharing. + * + * @note This method applies to macOS and Windows only. + * + * @param thumbSize The target size of the screen or window thumbnail (the width and height are in + * pixels). See `SIZE`. The SDK scales the original image to make the length of the longest side of + * the image the same as that of the target size without distorting the original image. For example, + * if the original image is 400 × 300 and thumbSize is 100 × 100, the actual size of the `thumbnail` + * is 100 × 75. If the target size is larger than the original size, the thumbnail is the original + * image and the SDK does not scale it. + * @param iconSize The target size of the icon corresponding to the application program (the width + * and height are in pixels). See `SIZE`. The SDK scales the original image to make the length of + * the longest side of the image the same as that of the target size without distorting the original + * image. For example, if the original image is 400 × 300 and iconSize is 100 × 100, the actual size + * of the ` icon` is 100 × 75. 
If the target size is larger than the original size, the icon is the + * original image and the SDK does not scale it. + * @param includeScreen Whether the SDK returns the screen information in addition to the window + * information: + * - `true`: The SDK returns screen and window information. + * - `false`: The SDK returns window information only. + * + * @return + * `IScreenCaptureSourceList` + */ virtual IScreenCaptureSourceList* getScreenCaptureSources(const SIZE& thumbSize, const SIZE& iconSize, const bool includeScreen) = 0; #endif // _WIN32 || (__APPLE__ && !TARGET_OS_IPHONE && TARGET_OS_MAC) #if (defined(__APPLE__) && TARGET_OS_IOS) - /** Sets the operational permission of the SDK on the audio session. - * - * The SDK and the app can both configure the audio session by default. If - * you need to only use the app to configure the audio session, this method - * restricts the operational permission of the SDK on the audio session. + /** + * @brief Sets the operational permission of the SDK on the audio session. * - * You can call this method either before or after joining a channel. Once - * you call this method to restrict the operational permission of the SDK - * on the audio session, the restriction takes effect when the SDK needs to - * change the audio session. + * @details + * The SDK and the app can both configure the audio session by default. If you need to only use the + * app to configure the audio session, this method restricts the operational permission of the SDK + * on the audio session. + * You can call this method either before or after joining a channel. Once you call this method to + * restrict the operational permission of the SDK on the audio session, the restriction takes effect + * when the SDK needs to change the audio session. * * @note - * - This method is for iOS only. - * - This method does not restrict the operational permission of the app on - * the audio session. + * - This method is only available for iOS. 
+ * - This method does not restrict the operational permission of the app on the audio session. * - * @param restriction The operational permission of the SDK on the audio session. - * See #AUDIO_SESSION_OPERATION_RESTRICTION. This parameter is in bit mask - * format, and each bit corresponds to a permission. + * @param restriction The operational permission of the SDK on the audio session. See + * `AUDIO_SESSION_OPERATION_RESTRICTION`. This parameter is in bit mask format, and each bit + * corresponds to a permission. * * @return * - 0: Success. @@ -6999,57 +9921,89 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setAudioSessionOperationRestriction(AUDIO_SESSION_OPERATION_RESTRICTION restriction) = 0; #endif // __APPLE__ && TARGET_OS_IOS -#if defined(_WIN32) || (defined(__APPLE__) && !TARGET_OS_IPHONE && TARGET_OS_MAC) - - /** Shares the whole or part of a screen by specifying the display ID. - - @note This method applies to macOS only. - - @param displayId The display ID of the screen to be shared. This parameter - specifies which screen you want to share. For information on how to get the - displayId, see the advanced guide: Share the Screen. - @param regionRect (Optional) Sets the relative location of the region to the - screen. NIL means sharing the whole screen. See Rectangle. - If the specified region overruns the screen, the SDK shares only the region - within it; if you set width or height as 0, the SDK shares the whole screen. - @param captureParams Sets the screen sharing encoding parameters. See - ScreenCaptureParameters. +#if defined(_WIN32) || (defined(__APPLE__) && !TARGET_OS_IPHONE && TARGET_OS_MAC) || (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__)) - @return - - 0: Success. - - < 0: Failure: - - ERR_INVALID_ARGUMENT (2): The argument is invalid. - - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when try to start screen capture. 
- */ - virtual int startScreenCaptureByDisplayId(uint32_t displayId, const Rectangle& regionRect, + /** + * @brief Captures the screen by specifying the display ID. + * + * @details + * Captures the video stream of a screen or a part of the screen area. + * Applicable scenarios: In the screen sharing scenario, you need to call this method to start + * capturing the screen video stream. + * Call timing: You can call this method either before or after joining the channel, with the + * following differences: + * - Call this method before joining a channel, and then call `joinChannel(const char* token, const + * char* channelId, uid_t uid, const ChannelMediaOptions& options)` to join a channel + * and set `publishScreenTrack` or `publishSecondaryScreenTrack` to `true` to start screen sharing. + * - Call this method after joining a channel, and then call `updateChannelMediaOptions` to join a + * channel and set `publishScreenTrack` or `publishSecondaryScreenTrack` to `true` to start screen + * sharing. + * + * @note This method is for Windows and macOS only. + * + * @param displayId The display ID of the screen to be shared.Note: For the Windows platform, if you + * need to simultaneously share two screens (main screen and secondary screen), you can set + * `displayId` to `-1` when calling this method. + * @param regionRect (Optional) Sets the relative location of the region to the screen. Pass in + * `nil` to share the entire screen. See `Rectangle`. + * @param captureParams Screen sharing configurations. The default video dimension is 1920 x 1080, + * that is, 2,073,600 pixels. Agora uses the value of this parameter to calculate the charges. See + * `ScreenCaptureParameters`.Attention: The video properties of the screen sharing stream only need + * to be set through this parameter, and are unrelated to `setVideoEncoderConfiguration`. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -2: The parameter is invalid. + * - -8: The screen sharing state is invalid. 
Probably because you have shared other screens or + * windows. Try calling `stopScreenCapture()` to stop the current sharing and start sharing the + * screen again. + */ + virtual int startScreenCaptureByDisplayId(int64_t displayId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) = 0; #endif // __APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE #if defined(_WIN32) /** - * Shares the whole or part of a screen by specifying the screen rect. - * - * @deprecated This method is deprecated, use \ref IRtcEngine::startScreenCaptureByDisplayId "startScreenCaptureByDisplayId" instead. Agora strongly recommends using `startScreenCaptureByDisplayId` if you need to start screen sharing on a device connected to another display. + * @brief Captures the whole or part of a screen by specifying the screen rect. + * + * @deprecated This method is deprecated, use \ref IRtcEngine::startScreenCaptureByDisplayId + * "startScreenCaptureByDisplayId" instead. Agora strongly recommends using + * `startScreenCaptureByDisplayId` if you need to start screen sharing on a device connected to + * another display. + * + * @details + * This method shares a screen or part of the screen. You need to specify the area of the screen to + * be shared. + * You can call this method either before or after joining the channel, with the following + * differences: + * - Call this method before joining a channel, and then call `joinChannel(const char* token, const + * char* channelId, uid_t uid, const ChannelMediaOptions& options)` to join a channel + * and set `publishScreenTrack` or `publishSecondaryScreenTrack` to `true` to start screen sharing. + * - Call this method after joining a channel, and then call `updateChannelMediaOptions` to join a + * channel and set `publishScreenTrack` or `publishSecondaryScreenTrack` to `true` to start screen + * sharing. * * @note This method applies to Windows only. 
* - * @param screenRect Sets the relative location of the screen to the virtual - * screen. For information on how to get screenRect, see the advanced guide: - * Share the Screen. - * @param regionRect (Optional) Sets the relative location of the region to the - * screen. NULL means sharing the whole screen. See Rectangle. - * If the specified region overruns the screen, the SDK shares only the region - * within it; if you set width or height as 0, the SDK shares the whole screen. - * @param captureParams Sets the screen sharing encoding parameters. See - * ScreenCaptureParameters. + * @param screenRect Sets the relative location of the screen to the virtual screen. + * @param regionRect (Optional) Sets the relative location of the region to the screen. If you do + * not set this parameter, the SDK shares the whole screen. See `Rectangle`. If the specified region + * overruns the screen, the SDK shares only the region within it; if you set width or height as 0, + * the SDK shares the whole screen. + * @param captureParams The screen sharing encoding parameters. The default video resolution is 1920 + * × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to calculate the + * charges. See `ScreenCaptureParameters`. * * @return * - 0: Success. - * - < 0: Failure: - * - ERR_INVALID_ARGUMENT (2): The argument is invalid. - * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when try to start screen capture. - */ + * - < 0: Failure. + * - -2: The parameter is invalid. + * - -8: The screen sharing state is invalid. Probably because you have shared other screens or + * windows. Try calling `stopScreenCapture()` to stop the current sharing and start sharing the + * screen again. 
+ */ virtual int startScreenCaptureByScreenRect(const Rectangle& screenRect, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) __deprecated = 0; @@ -7057,300 +10011,716 @@ class IRtcEngine : public agora::base::IEngineBase { #if defined(__ANDROID__) /** - * Gets the the Audio device Info + * @brief Gets the audio device information. + * + * @details + * After calling this method, you can get whether the audio device supports ultra-low-latency + * capture and playback. + * + * @note + * - This method is for Android only. + * - You can call this method either before or after joining a channel. + * + * @param deviceInfo Input and output parameter. A `DeviceInfo` object that identifies the audio + * device information. + * - Input value: A `DeviceInfo` object. + * - Output value: A `DeviceInfo` object containing audio device information. + * * @return * - 0: Success. - * - < 0: Failure.. + * - < 0: Failure. */ virtual int getAudioDeviceInfo(DeviceInfo& deviceInfo) = 0; #endif // __ANDROID__ -#if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) - - /** Shares the whole or part of a window by specifying the window ID. - * - * @param windowId The ID of the window to be shared. For information on how to - * get the windowId, see the advanced guide *Share Screen*. - * @param regionRect (Optional) The relative location of the region to the - * window. NULL means sharing the whole window. See Rectangle. If the - * specified region overruns the window, the SDK shares only the region within - * it; if you set width or height as 0, the SDK shares the whole window. - * @param captureParams The window sharing encoding parameters. See - * ScreenCaptureParameters. - * - * @return - * - 0: Success. - * - < 0: Failure: - * - ERR_INVALID_ARGUMENT (2): The argument is invalid. - * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when try to start screen capture. 
- */
- virtual int startScreenCaptureByWindowId(view_t windowId, const Rectangle& regionRect,
+#if defined(_WIN32) || (defined(__APPLE__) && TARGET_OS_MAC && !TARGET_OS_IPHONE) || (defined(__linux__) && !defined(__ANDROID__) && !defined(__OHOS__))
+
+ /**
+ * @brief Captures the whole or part of a window by specifying the window ID.
+ *
+ * @details
+ * This method captures a window or part of the window. You need to specify the ID of the window to
+ * be captured.
+ * This method supports window sharing of UWP (Universal Windows Platform) applications. Agora tests
+ * the mainstream UWP applications by using the latest SDK, see details as follows:
+ * | **System version** | **Software** | **Compatible versions** | **Support** |
+ * | ------------------ | ------------------------------------ | ----------------------- | ----------- |
+ * | win10 | Chrome | 76.0.3809.100 | No |
+ * | | Office Word | 18.1903.1152.0 | Yes |
+ * | | Office Excel | | No |
+ * | | Office PPT | | Yes |
+ * | | WPS Word | 11.1.0.9145 | Yes |
+ * | | WPS Excel | | |
+ * | | WPS PPT | | |
+ * | | Media Player (comes with the system) | All | Yes |
+ * | win8 | Chrome | All | Yes |
+ * | | Office Word | All | Yes |
+ * | | Office Excel | | |
+ * | | Office PPT | | |
+ * | | WPS Word | 11.1.0.9098 | Yes |
+ * | | WPS Excel | | |
+ * | | WPS PPT | | |
+ * | | Media Player (comes with the system) | All | Yes |
+ * | win7 | Chrome | 73.0.3683.103 | No |
+ * | | Office Word | All | Yes |
+ * | | Office Excel | | |
+ * | | Office PPT | | |
+ * | | WPS Word | 11.1.0.9098 | No |
+ * | | WPS Excel | | |
+ * | | WPS PPT | 11.1.0.9098 | Yes |
+ * | | Media Player (comes with the system) | All | No |
+ * Applicable scenarios: In the screen sharing scenario, you need to call this method to start
+ * capturing the screen video stream.
+ * Call timing: You can call this method either before or after joining the channel, with the
+ * following differences:
+ * - Call this method before joining a channel, and then call `joinChannel(const char* token, const
+ * char* channelId, uid_t uid, const ChannelMediaOptions& options)` to join a channel
+ * and set `publishScreenTrack` or `publishSecondaryScreenTrack` to `true` to start screen sharing.
+ * - Call this method after joining a channel, and then call `updateChannelMediaOptions` to join a
+ * channel and set `publishScreenTrack` or `publishSecondaryScreenTrack` to `true` to start screen
+ * sharing.
+ *
+ * @note
+ * The window sharing feature of the Agora SDK relies on WGC (Windows Graphics Capture) or GDI
+ * (Graphics Device Interface) capture, and WGC cannot be set to disable mouse capture on systems
+ * earlier than Windows 10 2004. Therefore, `captureMouseCursor(false)` might not work when you
+ * start window sharing on a device with a system earlier than Windows 10 2004. See
+ * `ScreenCaptureParameters`.
+ * This method applies to macOS and Windows only.
+ *
+ * @param windowId The ID of the window to be shared.
+ * @param regionRect (Optional) Sets the relative location of the region to the window. If you do
+ * not set this parameter, the SDK shares the whole window. See `Rectangle`. If the specified region
+ * overruns the window, the SDK shares only the region within it; if you set width or height as 0,
+ * the SDK shares the whole window.
+ * @param captureParams Screen sharing configurations. The default video resolution is 1920 × 1080,
+ * that is, 2,073,600 pixels. Agora uses the value of this parameter to calculate the charges. See
+ * `ScreenCaptureParameters`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -2: The parameter is invalid.
+ * - -8: The screen sharing state is invalid. Probably because you have shared other screens or
+ * windows.
Try calling `stopScreenCapture()` to stop the current sharing and start sharing the + * screen again. + */ + virtual int startScreenCaptureByWindowId(int64_t windowId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) = 0; /** - * Sets the content hint for screen sharing. + * @brief Sets the content hint for screen sharing. * + * @details * A content hint suggests the type of the content being shared, so that the SDK applies different - * optimization algorithm to different types of content. + * optimization algorithms to different types of content. If you don't call this method, the default + * content hint is CONTENT_HINT_NONE. + * + * @note You can call this method either before or after you start screen sharing. * - * @param contentHint Sets the content hint for screen sharing: #VIDEO_CONTENT_HINT. + * @param contentHint The content hint for screen sharing. See `VIDEO_CONTENT_HINT`. * * @return * - 0: Success. - * - < 0: Failure: - * - ERR_NOT_SUPPORTED (4): unable to set screencapture content hint - * - ERR_FAILED (1): A general error occurs (no specified reason). - * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when set screen capture content hint. + * - < 0: Failure. + * - -2: The parameter is invalid. + * - -8: The screen sharing state is invalid. Probably because you have shared other screens or + * windows. Try calling `stopScreenCapture()` to stop the current sharing and start sharing the + * screen again. */ virtual int setScreenCaptureContentHint(VIDEO_CONTENT_HINT contentHint) = 0; /** - * Updates the screen sharing region. + * @brief Updates the screen capturing region. * - * @param regionRect Sets the relative location of the region to the screen or - * window. NULL means sharing the whole screen or window. See Rectangle. - * If the specified region overruns the screen or window, the SDK shares only - * the region within it; if you set width or height as 0, the SDK shares the - * whole screen or window. 
+ * @note Call this method after starting screen sharing or window sharing. + * + * @param regionRect The relative location of the screen-share area to the screen or window. If you + * do not set this parameter, the SDK shares the whole screen or window. See `Rectangle`. If the + * specified region overruns the screen or window, the SDK shares only the region within it; if you + * set width or height as 0, the SDK shares the whole screen or window. * * @return * - 0: Success. - * - < 0: Failure: - * - ERR_NOT_SUPPORTED (4): unable to update screen capture region - * - ERR_FAILED (1): A general error occurs (no specified reason). - * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when update screen capture regoin. + * - < 0: Failure. + * - -2: The parameter is invalid. + * - -8: The screen sharing state is invalid. Probably because you have shared other screens or + * windows. Try calling `stopScreenCapture()` to stop the current sharing and start sharing the + * screen again. */ virtual int updateScreenCaptureRegion(const Rectangle& regionRect) = 0; /** - * Updates the screen sharing parameters. + * @brief Updates the screen capturing parameters. + * + * @note + * - This method is for Windows and macOS only. + * - Call this method after starting screen sharing or window sharing. * - * @param captureParams Sets the screen sharing encoding parameters: ScreenCaptureParameters. + * @param captureParams The screen sharing encoding parameters. See + * `ScreenCaptureParameters`.Attention: The video properties of the screen sharing stream only need + * to be set through this parameter, and are unrelated to `setVideoEncoderConfiguration`. * * @return * - 0: Success. * - < 0: Failure. - * - ERR_NOT_SUPPORTED (4): unable to update screen capture parameters - * - ERR_INVALID_ARGUMENT (2): The argument is invalid. - * - ERR_FAILED (1): A general error occurs (no specified reason). 
- * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when update screen capture parameters. + * - -2: The parameter is invalid. + * - -8: The screen sharing state is invalid. Probably because you have shared other screens or + * windows. Try calling `stopScreenCapture()` to stop the current sharing and start sharing the + * screen again. */ virtual int updateScreenCaptureParameters(const ScreenCaptureParameters& captureParams) = 0; #endif // _WIN32 || (__APPLE__ && !TARGET_OS_IPHONE && TARGET_OS_MAC) -#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) +#if defined(__ANDROID__) || (defined(__APPLE__) && TARGET_OS_IOS) || defined(__OHOS__) + /** + * @brief Starts screen capture. + * + * @details + * Applicable scenarios: In the screen sharing scenario, you need to call this method to start + * capturing the screen video stream. + * Call timing: You can call this method either before or after joining the channel, with the + * following differences: + * - Call this method first and then call `joinChannel(const char* token, const char* channelId, + * uid_t uid, const ChannelMediaOptions& options)` to join channel and set + * `publishScreenCaptureVideo` to `true` to start screen sharing. + * - Call this method after joining a channel, then call `updateChannelMediaOptions` and set + * `publishScreenCaptureVideo` to `true` to start screen sharing. + * + * @note + * - On the iOS platform, screen sharing is only available on iOS 12.0 and later. + * - If you are using the custom audio source instead of the SDK to capture audio, Agora recommends + * you add the keep-alive processing logic to your application to avoid screen sharing stopping when + * the application goes to the background. + * - This feature requires high-performance device, and Agora recommends that you use it on iPhone X + * and later models. + * - This method relies on the iOS screen sharing dynamic library + * `AgoraReplayKitExtension.xcframework`. 
If the dynamic library is deleted, screen sharing cannot + * be enabled normally. + * - On the Android platform, if the user has not granted the app screen capture permission, the SDK + * reports the `onPermissionError` `(2)` callback. + * - On Android 9 and later, to avoid the application being killed by the system after going to the + * background, Agora recommends you add the foreground service + * `android.permission.FOREGROUND_SERVICE` to the `/app/Manifests/AndroidManifest.xml` file. + * - Due to performance limitations, screen sharing is not supported on Android TV. + * - Due to system limitations, if you are using Huawei phones, do not adjust the video encoding + * resolution of the screen sharing stream during the screen sharing, or you could experience + * crashes. + * - Due to system limitations, some Xiaomi devices do not support capturing system audio during + * screen sharing. + * - To avoid system audio capture failure when sharing screen, Agora recommends that you set the + * audio application scenario to `AUDIO_SCENARIO_GAME_STREAMING` by using the `setAudioScenario` + * method before joining the channel. + * - This method is for Android and iOS only. + * - The billing for the screen sharing stream is based on the `dimensions` in + * `ScreenVideoParameters`: + * - When you do not pass in a value, Agora bills you at 1280 × 720. + * - When you pass in a value, Agora bills you at that value. + * + * @param captureParams The screen sharing encoding parameters. See `ScreenCaptureParameters2`. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -2 (iOS platform): Empty parameter. + * - -2 (Android platform): The system version is too low. Ensure that the Android API level is + * not lower than 21. + * - -3 (Android platform): Unable to capture system audio. Ensure that the Android API level is + * not lower than 29. 
+ */ + virtual int startScreenCapture(const ScreenCaptureParameters2& captureParams) = 0; + + /** + * @brief Updates the screen capturing parameters. + * + * @details + * If the system audio is not captured when screen sharing is enabled, and then you want to update + * the parameter configuration and publish the system audio, you can refer to the following steps:1. + * Call this method, and set `captureAudio` to `true`. + * 2. Call `updateChannelMediaOptions`, and set `publishScreenCaptureAudio` to `true` to publish the + * audio captured by the screen. + * + * @note + * - This method is for Android and iOS only. + * - On the iOS platform, screen sharing is only available on iOS 12.0 and later. + * + * @param captureParams The screen sharing encoding parameters. See `ScreenCaptureParameters2`. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -2: The parameter is invalid. + * - -8: The screen sharing state is invalid. Probably because you have shared other screens or + * windows. Try calling `stopScreenCapture()` to stop the current sharing and start sharing the + * screen again. + */ + virtual int updateScreenCapture(const ScreenCaptureParameters2& captureParams) = 0; + + /** + * @brief Queries the highest frame rate supported by the device during screen sharing. + * + * @since v4.2.0 + * + * @details + * Applicable scenarios: This method is for Android and iOS only. + * To ensure optimal screen sharing performance, particularly in enabling high frame rates like 60 + * fps, Agora recommends you to query the device's maximum supported frame rate using this method + * beforehand. This way, if the device cannot support such a high frame rate, you can adjust the + * screen sharing stream accordingly to avoid any negative impact on the sharing quality. 
If the + * device does not support high frame rate, you can reduce the frame rate of the screen sharing + * stream appropriately when sharing the screen to ensure that the sharing effect meets your + * expectation. + * + * @return + * - The highest frame rate supported by the device, if the method is called successfully. See + * `SCREEN_CAPTURE_FRAMERATE_CAPABILITY`. + * - < 0: Failure. + */ + virtual int queryScreenCaptureCapability() = 0; + + /** + * @brief Queries the focal length capability supported by the camera. + * + * @details + * If you want to enable the wide-angle or ultra-wide-angle mode for camera capture, it is + * recommended to start by calling this method to check whether the device supports the required + * focal length capability. Then, adjust the camera's focal length configuration based on the query + * result by calling `setCameraCapturerConfiguration`, ensuring the best camera capture performance. + * + * @note This method is for Android and iOS only. + * + * @param focalLengthInfos Input and output parameter. The pointer to an array of `FocalLengthInfo` + * objects: + * - Input value: The pointer to an array of `FocalLengthInfo` objects, used to store focal length + * information. + * - Output value: After the method is executed, output the queried focal length information. + * @param size Input and output parameter. The number of focal length information items: + * - Input value: Specifies the maximum number of focal length information items that + * `focalLengthInfos` can hold. Ensure this value is not less than 8, meaning `focalLengthInfos` has + * space for at least 8 focal length information items. + * - Output value: After the method is executed, output the number of focal length information items + * retrieved. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+ */
+ virtual int queryCameraFocalLengthCapability(agora::rtc::FocalLengthInfo* focalLengthInfos, int& size) = 0;
+
+#if defined(__ANDROID__)
 /**
- * Starts screen sharing.
+ * @brief Configures `MediaProjection` outside of the SDK to capture screen video streams.
+ *
+ * @technical preview
+ *
+ * @details
+ * After successfully calling this method, the external `MediaProjection` you set will replace the
+ * `MediaProjection` requested by the SDK to capture the screen video stream.
+ * When the screen sharing is stopped or `IRtcEngine` is destroyed, the SDK will automatically
+ * release the MediaProjection.
+ * Applicable scenarios: If you are able to apply for `MediaProjection`, you can directly use your
+ * `MediaProjection` instead of the one applied for by the SDK. The following lists two applicable
+ * scenarios:
+ * - On custom system devices, it can avoid system pop-ups (such as requiring user permission to
+ * capture the screen) and directly start capturing the screen video stream.
+ * - In a screen sharing process that involves one or more sub-processes, it can help avoid errors
+ * that might occur when creating objects within these sub-processes, which could otherwise lead to
+ * failures in screen capturing.
+ * Call timing: Call this method after `startScreenCapture(const ScreenCaptureParameters2&
+ * captureParams)`.
+ *
+ * @note
+ * Before calling this method, you must first apply for `MediaProjection` permission.
+ * This method is for Android only.
+ *
+ * @param mediaProjection A `MediaProjection` object used to capture screen video streams.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
+ virtual int setExternalMediaProjection(void* mediaProjection) = 0;
+#endif
+#endif
+
+#if defined(_WIN32) || defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && !defined(__ANDROID__) && !defined (__OHOS__))
+ /**
+ * @brief Sets the screen sharing scenario.
+ * + * @details + * When you start screen sharing or window sharing, you can call this method to set the screen + * sharing scenario. The SDK adjusts the video quality and experience of the sharing according to + * the scenario. + * + * @note Agora recommends that you call this method before joining a channel. + * + * @param screenScenario The screen sharing scenario. See `SCREEN_SCENARIO_TYPE`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setScreenCaptureScenario(SCREEN_SCENARIO_TYPE screenScenario) = 0; + + /** + * @brief Stops screen capture. + * + * @details + * Applicable scenarios: If you start screen capture by calling `startScreenCapture(const + * ScreenCaptureParameters2& captureParams)`, + * `startScreenCaptureByWindowId`, or `startScreenCaptureByDisplayId`, you need to call this method + * to stop screen capture. + * Call timing: You can call this method either before or after joining a channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int stopScreenCapture() = 0; +#endif // _WIN32 || (__APPLE__ && !TARGET_OS_IPHONE && TARGET_OS_MAC) || __ANDROID__ + + /** + * @brief Retrieves the call ID. + * + * @details + * When a user joins a channel on a client, a `callId` is generated to identify the call from the + * client. You can call this method to get `callId`, and pass it in when calling methods such as + * `rate` and `complain`. + * Call timing: Call this method after joining a channel. + * + * @param callId Output parameter, the current call ID. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int getCallId(agora::util::AString& callId) = 0; + + /** + * @brief Allows a user to rate a call after the call ends. + * + * @note Ensure that you call this method after leaving a channel. + * + * @param callId The current call ID. You can get the call ID by calling `getCallId`. + * @param rating The value is between 1 (the lowest score) and 5 (the highest score). 
+ * @param description (Optional) A description of the call. The string length should be less than
+ * 800 bytes.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ */
+ virtual int rate(const char* callId, int rating, const char* description) = 0; // rating: 1~5
+
+ /**
+ * @brief Allows a user to complain about the call quality after a call ends.
+ *
+ * @details
+ * This method allows users to complain about the quality of the call. Call this method after the
+ * user leaves the channel.
+ *
+ * @param callId The current call ID. You can get the call ID by calling `getCallId`.
+ * @param description (Optional) A description of the call. The string length should be less than
+ * 800 bytes.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -1: A general error occurs (no specified reason).
+ * - -2: The parameter is invalid.
+ * - -7: The method is called before `IRtcEngine` is initialized.
+ */
+ virtual int complain(const char* callId, const char* description) = 0;
+
+ /**
+ * @brief Starts pushing media streams to a CDN without transcoding.
+ *
+ * @details
+ * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful
+ * API`.
+ * You can call this method to push an audio or video stream to the specified CDN address. This
+ * method can push media streams to only one CDN address at a time, so if you need to push streams
+ * to multiple addresses, call this method multiple times.
+ * After you call this method, the SDK triggers the `onRtmpStreamingStateChanged` callback on the
+ * local client to report the state of the streaming.
+ *
+ * @note
+ * - Call this method after joining a channel.
+ * - Only hosts in the LIVE_BROADCASTING profile can call this method.
+ * - If you want to retry pushing streams after a failed push, make sure to call `stopRtmpStream` + * first, then call this method to retry pushing streams; otherwise, the SDK returns the same error + * code as the last failed push. + * + * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot + * exceed 1024 bytes. Special characters such as Chinese characters are not supported. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -2: The URL or configuration of transcoding is invalid; check your URL and transcoding + * configurations. + * - -7: The SDK is not initialized before calling this method. + * - -19: The Media Push URL is already in use; use another URL instead. + */ + virtual int startRtmpStreamWithoutTranscoding(const char* url) = 0; + + /** + * @brief Starts Media Push and sets the transcoding configuration. + * + * @details + * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful + * API`. + * You can call this method to push a live audio-and-video stream to the specified CDN address and + * set the transcoding configuration. This method can push media streams to only one CDN address at + * a time, so if you need to push streams to multiple addresses, call this method multiple times. + * Under one Agora project, the maximum number of concurrent tasks to push media streams is 200 by + * default. If you need a higher quota, contact `technical support`. + * After you call this method, the SDK triggers the `onRtmpStreamingStateChanged` callback on the + * local client to report the state of the streaming. + * + * @note + * - Call this method after joining a channel. + * - Only hosts in the LIVE_BROADCASTING profile can call this method. 
+ * - If you want to retry pushing streams after a failed push, make sure to call `stopRtmpStream` + * first, then call this method to retry pushing streams; otherwise, the SDK returns the same error + * code as the last failed push. + * + * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot + * exceed 1024 bytes. Special characters such as Chinese characters are not supported. + * @param transcoding The transcoding configuration for Media Push. See `LiveTranscoding`. * - * @param captureParams The configuration of the screen sharing. See {@link - * ScreenCaptureParameters ScreenCaptureParameters}. * @return * - 0: Success. * - < 0: Failure. + * - -2: The URL or configuration of transcoding is invalid; check your URL and transcoding + * configurations. + * - -7: The SDK is not initialized before calling this method. + * - -19: The Media Push URL is already in use; use another URL instead. */ - virtual int startScreenCapture(const ScreenCaptureParameters2& captureParams) = 0; + virtual int startRtmpStreamWithTranscoding(const char* url, const LiveTranscoding& transcoding) = 0; /** - * Updates the screen sharing configuration. + * @brief Updates the transcoding configuration. + * + * @details + * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful + * API`. + * After you start pushing media streams to CDN with transcoding, you can dynamically update the + * transcoding configuration according to the scenario. The SDK triggers the `onTranscodingUpdated` + * callback after the transcoding configuration is updated. + * + * @param transcoding The transcoding configuration for Media Push. See `LiveTranscoding`. * - * @param captureParams The configuration of the screen sharing. See {@link - * ScreenCaptureParameters ScreenCaptureParameters}. * @return * - 0: Success. * - < 0: Failure. 
 */
- virtual int updateScreenCapture(const ScreenCaptureParameters2& captureParams) = 0;
-
- /**
- * Queries the ability of screen sharing to support the maximum frame rate.
+ virtual int updateRtmpTranscoding(const LiveTranscoding& transcoding) = 0;
+
+ /**
+ * @brief Starts the local video mixing.
+ *
+ * @details
+ * After calling this method, you can merge multiple video streams into one video stream locally.
+ * For example, you can merge the video streams captured by the camera, screen sharing, media
+ * player, remote video, video files, images, etc. into one video stream, and then publish the mixed
+ * video stream to the channel.
+ * Applicable scenarios: You can enable the local video mixing function in scenarios such as remote
+ * conferences, live streaming, and online education, which allows users to view and manage multiple
+ * videos more conveniently, and supports portrait-in-picture effect and other functions.
+ * The following is a typical use case for implementing the portrait-in-picture effect:1. Call
+ * `enableVirtualBackground`, and set the custom background image to `BACKGROUND_NONE`, that is,
+ * separate the portrait and the background in the video captured by the camera.
+ * 2. Call `startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const ScreenCaptureConfiguration&
+ * config)` to start capturing the screen sharing video stream.
+ * 3. Call this method and set the video source for capturing portraits as one of the video sources
+ * participating in the local video mixing, picture-in-picture of the portrait can be achieved in the
+ * mixed video.
+ * Call timing: - If you need to mix the locally collected video streams, you need to call this
+ * method after `startCameraCapture` or `startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const
+ * ScreenCaptureConfiguration& config)`.
+ * - If you want to publish the mixed video stream to the channel, you need to set + * `publishTranscodedVideoTrack` in `ChannelMediaOptions` to `true` when calling `joinChannel(const + * char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& options)` + * or `updateChannelMediaOptions`. + * Related callbacks: When you fail to call this method, the SDK triggers the + * `onLocalVideoTranscoderError` callback to report the reason. + * + * @note + * - Local video mixing requires more CPU resources. Therefore, Agora recommends enabling this + * function on devices with higher performance. + * - If you need to mix locally captured video streams, the SDK supports the following capture + * combinations: + * - On the Windows platform, it supports up to 4 video streams captured by cameras + 4 screen + * sharing streams. + * - On the macOS platform, it supports up to 4 video streams captured by cameras + 1 screen + * sharing stream. + * - On Android and iOS platforms, it supports video streams captured by up to 2 cameras (the + * device itself needs to support dual cameras or supports external cameras) + 1 screen sharing + * stream. + * - When configuring the local video mixing, it is necessary to ensure that the layer number of the + * video stream capturing the portrait is greater than the layer number of the screen sharing + * stream. Otherwise, the portrait will be covered by the screen sharing and will not be displayed + * in the final mixed video stream. + * + * @param config Configuration of the local video mixing, see + * `LocalTranscoderConfiguration`.Attention: + * - The maximum resolution of each video stream participating in the local video mixing is 4096 × + * 2160. If this limit is exceeded, video mixing does not take effect. + * - The maximum resolution of the mixed video stream is 4096 × 2160. * - * @since v4.2.0 - * * @return - * - 0: support 15 fps, Low devices. - * - 1: support 30 fps, Usually low - to mid-range devices. 
- * - 2: support 60 fps, Advanced devices. + * - 0: Success. * - < 0: Failure. */ - virtual int queryScreenCaptureCapability() = 0; - + virtual int startLocalVideoTranscoder(const LocalTranscoderConfiguration& config) = 0; /** - * Query all focal attributes supported by the camera. - * - * @param focalLengthInfos The camera supports the collection of focal segments.Ensure the size of array is not less than 8. - * - * @param size The camera supports the size of the focal segment set. Ensure the size is not less than 8. - * + * @brief Updates the local video mixing configuration. + * + * @details + * After calling `startLocalVideoTranscoder`, call this method if you want to update the local video + * mixing configuration. + * + * @note If you want to update the video source type used for local video mixing, such as adding a + * second camera or screen to capture video, you need to call this method after `startCameraCapture` + * or `startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const ScreenCaptureConfiguration& config)`. + * + * @param config Configuration of the local video mixing, see `LocalTranscoderConfiguration`. + * * @return * - 0: Success. - * - < 0: Failure.. + * - < 0: Failure. */ - virtual int queryCameraFocalLengthCapability(agora::rtc::FocalLengthInfo* focalLengthInfos, int& size) = 0; -#endif + virtual int updateLocalTranscoderConfiguration(const LocalTranscoderConfiguration& config) = 0; -#if defined(_WIN32) || defined(__APPLE__) || defined(__ANDROID__) /** - * Sets the screen sharing scenario. - * + * @brief Stops pushing media streams to a CDN. * - * When you start screen sharing or window sharing, you can call this method to set the screen sharing scenario. The SDK adjusts the video quality and experience of the sharing according to the scenario. + * @details + * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful + * API`. 
+ * You can call this method to stop the live stream on the specified CDN address. This method can + * stop pushing media streams to only one CDN address at a time, so if you need to stop pushing + * streams to multiple addresses, call this method multiple times. + * After you call this method, the SDK triggers the `onRtmpStreamingStateChanged` callback on the + * local client to report the state of the streaming. * - * - * @param screenScenario The screen sharing scenario. See #SCREEN_SCENARIO_TYPE. + * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot + * exceed 1024 bytes. Special characters such as Chinese characters are not supported. * * @return * - 0: Success. * - < 0: Failure. - * - ERR_NOT_SUPPORTED (4): unable to set screencapture scenario - * - ERR_FAILED (1): A general error occurs (no specified reason). - * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when set screencapture scenario. */ - virtual int setScreenCaptureScenario(SCREEN_SCENARIO_TYPE screenScenario) = 0; - + virtual int stopRtmpStream(const char* url) = 0; + /** - * Stops the screen sharing. + * @brief Stops the local video mixing. + * + * @details + * After calling `startLocalVideoTranscoder`, call this method if you want to stop the local video + * mixing. * * @return * - 0: Success. * - < 0: Failure. */ - virtual int stopScreenCapture() = 0; -#endif // _WIN32 || (__APPLE__ && !TARGET_OS_IPHONE && TARGET_OS_MAC) || __ANDROID__ + virtual int stopLocalVideoTranscoder() = 0; /** - * Gets the current call ID. + * @brief Starts local audio mixing. + * + * @details + * This method supports merging multiple audio streams into one audio stream locally. For example, + * merging the audio streams captured from the local microphone, and that from the media player, the + * sound card, and the remote users into one audio stream, and then publish the merged audio stream + * to the channel. 
+ * - If you want to mix the locally captured audio streams, you can set publishMixedAudioTrack in + * `ChannelMediaOptions` to `true`, and then publish the mixed audio stream to the channel. + * - If you want to mix the remote audio stream, ensure that the remote audio stream has been + * published in the channel and you have subscribed to the audio stream that you need to mix. + * Applicable scenarios: You can enable this function in the following scenarios: + * - By utilizing the local video mixing feature, the associated audio streams of the mixed video + * streams can be simultaneously captured and published. + * - In live streaming scenarios, users can receive audio streams within the channel, mix multiple + * audio streams locally, and then forward the mixed audio stream to other channels. + * - In online classes, teachers can mix the audio from interactions with students locally and then + * forward the mixed audio stream to other channels. + * Call timing: You can call this method either before or after joining a channel. * - * When a user joins a channel on a client, a `callId` is generated to identify - * the call. + * @note To ensure audio quality, it is recommended that the number of audio streams to be mixed + * does not exceed 10. * - * After a call ends, you can call `rate` or `complain` to gather feedback from the customer. - * These methods require a `callId` parameter. To use these feedback methods, call the this - * method first to retrieve the `callId` during the call, and then pass the value as an - * argument in the `rate` or `complain` method after the call ends. + * @param config The configurations for mixing the local audio. See `LocalAudioMixerConfiguration`. * - * @param callId The reference to the call ID. * @return - * - The call ID if the method call is successful. + * - 0: Success. * - < 0: Failure. - */ - virtual int getCallId(agora::util::AString& callId) = 0; + * - -7: The `IRtcEngine` object has not been initialized. 
You need to initialize the `IRtcEngine` + * object before calling this method. + */ + virtual int startLocalAudioMixer(const LocalAudioMixerConfiguration& config) = 0; /** - * Allows a user to rate the call. + * @brief Updates the configurations for mixing audio streams locally. * - * It is usually called after the call ends. + * @details + * After calling `startLocalAudioMixer`, call this method if you want to update the local audio + * mixing configuration. + * Call timing: Call this method after `startLocalAudioMixer`. * - * @param callId The call ID retrieved from the \ref getCallId "getCallId" method. - * @param rating The rating of the call between 1 (the lowest score) to 5 (the highest score). - * @param description (Optional) The description of the rating. The string length must be less than - * 800 bytes. + * @note To ensure audio quality, it is recommended that the number of audio streams to be mixed + * does not exceed 10. + * + * @param config The configurations for mixing the local audio. See `LocalAudioMixerConfiguration`. * * @return * - 0: Success. * - < 0: Failure. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. */ - virtual int rate(const char* callId, int rating, const char* description) = 0; // 0~10 + virtual int updateLocalAudioMixerConfiguration(const LocalAudioMixerConfiguration& config) = 0; /** - * Allows a user to complain about the call quality. - * - * This method is usually called after the call ends. + * @brief Stops the local audio mixing. * - * @param callId The call ID retrieved from the `getCallId` method. - * @param description (Optional) The description of the complaint. The string length must be less than - * 800 bytes. + * @details + * After calling `startLocalAudioMixer`, call this method if you want to stop the local audio + * mixing. + * Call timing: Call this method after `startLocalAudioMixer`. * * @return * - 0: Success. 
* - < 0: Failure. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. */ - virtual int complain(const char* callId, const char* description) = 0; - - /** Publishes the local stream without transcoding to a specified CDN live RTMP address. (CDN live only.) - - * The SDK returns the result of this method call in the \ref IRtcEngineEventHandler::onStreamPublished "onStreamPublished" callback. - - * The \ref agora::rtc::IRtcEngine::startRtmpStreamWithoutTranscoding "startRtmpStreamWithoutTranscoding" method call triggers the \ref agora::rtc::IRtcEngineEventHandler::onRtmpStreamingStateChanged "onRtmpStreamingStateChanged" callback on the local client to report the state of adding a local stream to the CDN. - * @note - * - Ensure that the user joins the channel before calling this method. - * - This method adds only one stream RTMP URL address each time it is called. - * - The RTMP URL address must not contain special characters, such as Chinese language characters. - * - This method applies to Live Broadcast only. - - * @param url The CDN streaming URL in the RTMP format. The maximum length of this parameter is 1024 bytes. - - * @return - * - 0: Success. - * - < 0: Failure. - * - #ERR_INVALID_ARGUMENT (2): The RTMP URL address is NULL or has a string length of 0. - * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the stream. - * - #ERR_ALREADY_IN_USE (19): This streaming URL is already in use. Use a new streaming URL for CDN streaming. - */ - virtual int startRtmpStreamWithoutTranscoding(const char* url) = 0; + virtual int stopLocalAudioMixer() = 0; - /** Publishes the local stream with transcoding to a specified CDN live RTMP address. (CDN live only.) - - * The SDK returns the result of this method call in the \ref IRtcEngineEventHandler::onStreamPublished "onStreamPublished" callback. 
- - * The \ref agora::rtc::IRtcEngine::startRtmpStreamWithTranscoding "startRtmpStreamWithTranscoding" method call triggers the \ref agora::rtc::IRtcEngineEventHandler::onRtmpStreamingStateChanged "onRtmpStreamingStateChanged" callback on the local client to report the state of adding a local stream to the CDN. - * @note - * - Ensure that the user joins the channel before calling this method. - * - This method adds only one stream RTMP URL address each time it is called. - * - The RTMP URL address must not contain special characters, such as Chinese language characters. - * - This method applies to Live Broadcast only. - - * @param url The CDN streaming URL in the RTMP format. The maximum length of this parameter is 1024 bytes. - * @param transcoding Sets the CDN live audio/video transcoding settings. See LiveTranscoding. - - * @return - * - 0: Success. - * - < 0: Failure. - * - #ERR_INVALID_ARGUMENT (2): The RTMP URL address is NULL or has a string length of 0. - * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the stream. - * - #ERR_ALREADY_IN_USE (19): This streaming URL is already in use. Use a new streaming URL for CDN streaming. - */ - virtual int startRtmpStreamWithTranscoding(const char* url, const LiveTranscoding& transcoding) = 0; - - /** Update the video layout and audio settings for CDN live. (CDN live only.) - * @note This method applies to Live Broadcast only. - - * @param transcoding Sets the CDN live audio/video transcoding settings. See LiveTranscoding. - - * @return - * - 0: Success. - * - < 0: Failure. - */ - virtual int updateRtmpTranscoding(const LiveTranscoding& transcoding) = 0; - - virtual int startLocalVideoTranscoder(const LocalTranscoderConfiguration& config) = 0; - virtual int updateLocalTranscoderConfiguration(const LocalTranscoderConfiguration& config) = 0; - - /** Stop an RTMP stream with transcoding or without transcoding from the CDN. (CDN live only.) 
- - * This method removes the RTMP URL address (added by the \ref IRtcEngine::startRtmpStreamWithoutTranscoding "startRtmpStreamWithoutTranscoding" method - * or IRtcEngine::startRtmpStreamWithTranscoding "startRtmpStreamWithTranscoding" method) from a CDN live stream. - * The SDK returns the result of this method call in the \ref IRtcEngineEventHandler::onStreamUnpublished "onStreamUnpublished" callback. - - * The \ref agora::rtc::IRtcEngine::stopRtmpStream "stopRtmpStream" method call triggers the \ref agora::rtc::IRtcEngineEventHandler::onRtmpStreamingStateChanged "onRtmpStreamingStateChanged" callback on the local client to report the state of removing an RTMP stream from the CDN. - * @note - * - This method removes only one RTMP URL address each time it is called. - * - The RTMP URL address must not contain special characters, such as Chinese language characters. - * - This method applies to Live Broadcast only. - - * @param url The RTMP URL address to be removed. The maximum length of this parameter is 1024 bytes. - - * @return - * - 0: Success. - * - < 0: Failure. - */ - virtual int stopRtmpStream(const char* url) = 0; - - virtual int stopLocalVideoTranscoder() = 0; /** - * Starts video capture with a camera. + * @brief Starts camera capture. + * + * @details + * You can call this method to start capturing video from one or more cameras by specifying + * `sourceType`. + * + * @note On the iOS platform, if you want to enable multi-camera capture, you need to call + * `enableMultiCamera` and set `enabled` to `true` before calling this method. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. + * Note: + * - On iOS devices, you can capture video from up to 2 cameras, provided the device has multiple + * cameras or supports external cameras. + * - On Android devices, you can capture video from up to 4 cameras, provided the device has + * multiple cameras or supports external cameras. 
+ * - On the desktop platforms, you can capture video from up to 4 cameras. + * @param config The configuration of the video capture. See `CameraCapturerConfiguration`. + * Note: On the iOS platform, this parameter has no practical function. Use the `config` parameter + * in `enableMultiCamera` instead to set the video capture configuration. * - * @param config The configuration of the video capture with a primary camera. For details, see CameraCaptureConfiguration. - * @param sourceType Source type of camera. See #VIDEO_SOURCE_TYPE. * @return * - 0: Success. * - < 0: Failure. @@ -7358,23 +10728,40 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int startCameraCapture(VIDEO_SOURCE_TYPE sourceType, const CameraCapturerConfiguration& config) = 0; /** - * Stops capturing video through camera. + * @brief Stops camera capture. + * + * @details + * After calling `startCameraCapture` to start capturing video through one or more cameras, you can + * call this method and set the `sourceType` parameter to stop the capture from the specified + * cameras. * - * You can call this method to stop capturing video through the first camera after calling `startCameraCapture`. + * @note + * If you are using the local video mixing function, calling this method can cause the local video + * mixing to be interrupted. + * On the iOS platform, if you want to disable multi-camera capture, you need to call + * `enableMultiCamera` after calling this method and set `enabled` to `false`. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. * - * @param sourceType Source type of camera. See #VIDEO_SOURCE_TYPE. * @return * - 0: Success. * - < 0: Failure. */ virtual int stopCameraCapture(VIDEO_SOURCE_TYPE sourceType) = 0; /** - * Sets the rotation angle of the video captured by the camera. + * @brief Sets the rotation angle of the captured video. 
* - * When the video capture device does not have the gravity sensing function, you can call this method to manually adjust the rotation angle of the captured video. + * @note + * - This method applies to Windows only. + * - You must call this method after `enableVideo`. The setting result will take effect after the + * camera is successfully turned on, that is, after the SDK triggers the `onLocalVideoStateChanged` + * callback and returns the local video state as `LOCAL_VIDEO_STREAM_STATE_CAPTURING` (1). + * - When the video capture device does not have the gravity sensing function, you can call this + * method to manually adjust the rotation angle of the captured video. + * + * @param type The video source type. See `VIDEO_SOURCE_TYPE`. + * @param orientation The clockwise rotation angle. See `VIDEO_ORIENTATION`. * - * @param type The video source type. See #VIDEO_SOURCE_TYPE. - * @param orientation The clockwise rotation angle. See #VIDEO_ORIENTATION. * @return * - 0: Success. * - < 0: Failure. @@ -7394,10 +10781,40 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setScreenCaptureOrientation(VIDEO_SOURCE_TYPE type, VIDEO_ORIENTATION orientation) = 0; /** - * Starts sharing a screen. + * @brief Starts screen capture from the specified video source. + * + * @details + * Applicable scenarios: In the screen sharing scenario, you need to call this method to start + * capturing the screen video stream. + * The SDK supports a series of methods for screen capturing, with the following distinctions + * between them. Please choose according to the actual scenario: + * - `startScreenCapture(const ScreenCaptureParameters2& captureParams)` / + * `startScreenCaptureByDisplayId` / `startScreenCaptureByWindowId`: + * Only supports capturing a single screen or window, suitable for scenarios where only a single + * screen is shared. 
+ * - `startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const ScreenCaptureConfiguration& config)`: + * Supports specifying multiple video sources to capture multiple + * screen sharing streams, used for local video mixing or multi-channel scenarios. + * Call timing: You can call this method either before or after joining the channel, with the + * following differences: + * - Call this method first and then call `joinChannel(const char* token, const char* channelId, + * uid_t uid, const ChannelMediaOptions& options)` to join channel and set + * `publishScreenCaptureVideo` to `true` to start screen sharing. + * - Call this method after joining a channel, then call `updateChannelMediaOptions` and set + * `publishScreenCaptureVideo` to `true` to start screen sharing. + * + * @note + * - If you start screen capture by calling this method, you need to call + * `stopScreenCapture(VIDEO_SOURCE_TYPE sourceType)` + * to stop screen capture. + * - On the Windows platform, it supports up to four screen capture video streams. + * - On the macOS platform, it supports only one screen capture video stream. + * This method applies to the macOS and Windows only. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`.Attention: On the macOS + * platform, this parameter can only be set to VIDEO_SOURCE_SCREEN (2). + * @param config The configuration of the captured screen. See `ScreenCaptureConfiguration`. * - * @param config The configuration of the captured screen. For details, see ScreenCaptureConfiguration. - * @param sourceType source type of screen. See #VIDEO_SOURCE_TYPE. * @return * - 0: Success. * - < 0: Failure. @@ -7405,20 +10822,33 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const ScreenCaptureConfiguration& config) = 0; /** - * Stop sharing the screen. + * @brief Stops screen capture from the specified video source. 
+ * + * @details + * Applicable scenarios: If you start screen capture from one or more screens by calling + * `startScreenCapture(VIDEO_SOURCE_TYPE sourceType, const ScreenCaptureConfiguration& config)`, you + * need to call this method to stop screen capture, specifying the + * screen through the `sourceType` parameter. + * Call timing: You can call this method either before or after joining a channel. + * + * @note This method applies to the macOS and Windows only. + * + * @param sourceType The type of the video source. See `VIDEO_SOURCE_TYPE`. * - * After calling `startScreenCapture`, you can call this method to stop sharing the first screen. - * - * @param sourceType source type of screen. See #VIDEO_SOURCE_TYPE. * @return * - 0: Success. * - < 0: Failure. */ virtual int stopScreenCapture(VIDEO_SOURCE_TYPE sourceType) = 0; - /** Gets the current connection state of the SDK. - - @return #CONNECTION_STATE_TYPE. + /** + * @brief Gets the current connection state of the SDK. + * + * @details + * Call timing: This method can be called either before or after joining the channel. + * + * @return + * The current connection state. See `CONNECTION_STATE_TYPE`. */ virtual CONNECTION_STATE_TYPE getConnectionState() = 0; @@ -7428,12 +10858,24 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int setRemoteUserPriority(uid_t uid, PRIORITY_TYPE userPriority) = 0; /** - * Registers a packet observer. + * @brief Registers a packet observer. * - * The Agora Native SDK allows your app to register a packet observer to - * receive events whenever a voice or video packet is transmitting. + * @details + * Call this method registers a packet observer. When the Agora SDK triggers `IPacketObserver` + * callbacks registered by for voice or video packet transmission, you can call this method to + * process the packets, such as encryption and decryption. 
+ * + * @note + * - The size of the packet sent to the network after processing should not exceed 1200 bytes, + * otherwise, the SDK may fail to send the packet. + * - Ensure that both receivers and senders call this method; otherwise, you may meet undefined + * behaviors such as no voice and black screen. + * - When you use the Media Push or recording functions, Agora doesn't recommend calling this + * method. + * - Call this method before joining a channel. + * + * @param observer `IPacketObserver`. * - * @param observer The IPacketObserver object. * @return * - 0: Success. * - < 0: Failure. @@ -7441,98 +10883,128 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int registerPacketObserver(IPacketObserver* observer) = 0; - /** Enables/Disables the built-in encryption. - * - * In scenarios requiring high security, Agora recommends calling this method to enable the built-in encryption before joining a channel. + /** + * @brief Enables or disables the built-in encryption. * - * All users in the same channel must use the same encryption mode and encryption key. Once all users leave the channel, the encryption key of this channel is automatically cleared. + * @details + * After the user leaves the channel, the SDK automatically disables the built-in encryption. To + * enable the built-in encryption, call this method before the user joins the channel again. + * Applicable scenarios: Scenarios with higher security requirements. + * Call timing: Call this method before joining a channel. * * @note - * - If you enable the built-in encryption, you cannot use the RTMP streaming function. + * - All users within the same channel must set the same encryption configurations when calling this + * method. + * - If you enable the built-in encryption, you cannot use the Media Push function. * - * @param enabled Whether to enable the built-in encryption: + * @param enabled Whether to enable built-in encryption: * - true: Enable the built-in encryption. 
- * - false: Disable the built-in encryption. - * @param config Configurations of built-in encryption schemas. See EncryptionConfig. + * - false: (Default) Disable the built-in encryption. + * @param config Built-in encryption configurations. See `EncryptionConfig`. * * @return * - 0: Success. * - < 0: Failure. - * - -2(ERR_INVALID_ARGUMENT): An invalid parameter is used. Set the parameter with a valid value. - * - -4(ERR_NOT_SUPPORTED): The encryption mode is incorrect or the SDK fails to load the external encryption library. Check the enumeration or reload the external encryption library. - * - -7(ERR_NOT_INITIALIZED): The SDK is not initialized. Initialize the `IRtcEngine` instance before calling this method. + * - -2: An invalid parameter is used. Set the parameter with a valid value. + * - -4: The built-in encryption mode is incorrect or the SDK fails to load the external + * encryption library. Check the enumeration or reload the external encryption library. + * - -7: The SDK is not initialized. Initialize the `IRtcEngine` instance before calling this + * method. */ virtual int enableEncryption(bool enabled, const EncryptionConfig& config) = 0; - /** Creates a data stream. + /** + * @brief Creates a data stream. * - * You can call this method to create a data stream and improve the - * reliability and ordering of data tranmission. + * @details + * You can call this method to create a data stream and improve the reliability and ordering of data + * transmission. + * Call timing: You can call this method either before or after joining a channel. + * Related callbacks: After setting `reliable` to `true`, if the recipient does not receive the data + * within five seconds, the SDK triggers the `onStreamMessageError` callback and returns an error + * code. * * @note - * - Ensure that you set the same value for `reliable` and `ordered`. - * - Each user can only create a maximum of 5 data streams during a RtcEngine - * lifecycle. 
- * - The data channel allows a data delay of up to 5 seconds. If the receiver - * does not receive the data stream within 5 seconds, the data channel reports - * an error. - * - * @param[out] streamId The ID of the stream data. - * @param reliable Sets whether the recipients are guaranteed to receive - * the data stream from the sender within five seconds: - * - true: The recipients receive the data stream from the sender within - * five seconds. If the recipient does not receive the data stream within - * five seconds, an error is reported to the application. - * - false: There is no guarantee that the recipients receive the data stream - * within five seconds and no error message is reported for any delay or - * missing data stream. - * @param ordered Sets whether the recipients receive the data stream - * in the sent order: - * - true: The recipients receive the data stream in the sent order. - * - false: The recipients do not receive the data stream in the sent order. - * - * @return - * - 0: Success. + * Each user can create up to five data streams during the lifecycle of `IRtcEngine`. The data + * stream will be destroyed when leaving the channel, and the data stream needs to be recreated if + * needed. + * If you need a more comprehensive solution for low-latency, high-concurrency, and scalable + * real-time messaging and status synchronization, it is recommended to use `Signaling`. + * + * @param streamId An output parameter; the ID of the data stream created. + * @param reliable Sets whether the recipients are guaranteed to receive the data stream within five + * seconds: + * - `true`: The recipients receive the data from the sender within five seconds. If the recipient + * does not receive the data within five seconds, the SDK triggers the `onStreamMessageError` + * callback and returns an error code. 
+ * - `false`: There is no guarantee that the recipients receive the data stream within five seconds + * and no error message is reported for any delay or missing data stream. + * Attention: Please ensure that `reliable` and `ordered` are either both set to`true` or both set + * to `false`. + * @param ordered Sets whether the recipients receive the data stream in the sent order: + * - `true`: The recipients receive the data in the sent order. + * - `false`: The recipients do not receive the data in the sent order. + * + * @return + * - 0: The data stream is successfully created. * - < 0: Failure. */ virtual int createDataStream(int* streamId, bool reliable, bool ordered) = 0; - /** Creates a data stream. + /** + * @brief Creates a data stream. * - * Each user can create up to five data streams during the lifecycle of the IChannel. - * @param streamId The ID of the created data stream. - * @param config The config of data stream. - * @return int - * - Returns 0: Success. + * @details + * Compared to `createDataStream(int* streamId, bool reliable, bool ordered)`, this method does not + * guarantee the reliability of data + * transmission. If a data packet is not received five seconds after it was sent, the SDK directly + * discards the data. + * Call timing: You can call this method either before or after joining a channel. + * + * @note + * Each user can create up to five data streams during the lifecycle of `IRtcEngine`. The data + * stream will be destroyed when leaving the channel, and the data stream needs to be recreated if + * needed. + * If you need a more comprehensive solution for low-latency, high-concurrency, and scalable + * real-time messaging and status synchronization, it is recommended to use `Signaling`. + * + * @param streamId An output parameter; the ID of the data stream created. + * @param config The configurations for the data stream. See `DataStreamConfig`. + * + * @return + * - 0: The data stream is successfully created. * - < 0: Failure. 
*/ virtual int createDataStream(int* streamId, const DataStreamConfig& config) = 0; - /** Sends a data stream. - * - * After calling \ref IRtcEngine::createDataStream "createDataStream", you can call - * this method to send a data stream to all users in the channel. + /** + * @brief Sends data stream messages. * + * @details + * After calling `createDataStream(int* streamId, const DataStreamConfig& config)`, you can call + * this method to send data stream messages to + * all users in the channel. * The SDK has the following restrictions on this method: - * - Up to 60 packets can be sent per second in a channel with each packet having a maximum size of 1 KB. - * - Each client can send up to 30 KB of data per second. - * - Each user can have up to five data streams simultaneously. - * - * After the remote user receives the data stream within 5 seconds, the SDK triggers the - * \ref IRtcEngineEventHandler::onStreamMessage "onStreamMessage" callback on - * the remote client. After the remote user does not receive the data stream within 5 seconds, - * the SDK triggers the \ref IRtcEngineEventHandler::onStreamMessageError "onStreamMessageError" + * - Each client within the channel can have up to 5 data channels simultaneously, with a total + * shared packet bitrate limit of 30 KB/s for all data channels. + * - Each data channel can send up to 60 packets per second, with each packet being a maximum of 1 + * KB. + * A successful method call triggers the `onStreamMessage` callback on the remote client, from which + * the remote user gets the stream message. A failed method call triggers the `onStreamMessageError` * callback on the remote client. * * @note - * - Call this method after calling \ref IRtcEngine::createDataStream "createDataStream". - * - This method applies only to the `COMMUNICATION` profile or to - * the hosts in the `LIVE_BROADCASTING` profile. If an audience in the - * `LIVE_BROADCASTING` profile calls this method, the audience may be switched to a host. 
+ * If you need a more comprehensive solution for low-latency, high-concurrency, and scalable + * real-time messaging and status synchronization, it is recommended to use `Signaling`. + * - This method needs to be called after `createDataStream(int* streamId, const DataStreamConfig& + * config)` and joining the channel. + * - This method applies to broadcasters only. * - * @param streamId The ID of the stream data. - * @param data The data stream. - * @param length The length (byte) of the data stream. + * @param streamId The data stream ID. You can get the data stream ID by calling + * `createDataStream(int* streamId, const DataStreamConfig& config)` + * . + * @param data The message to be sent. + * @param length The length of the data. * * @return * - 0: Success. @@ -7540,63 +11012,154 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int sendStreamMessage(int streamId, const char* data, size_t length) = 0; - /** **DEPRECATED** Adds a watermark image to the local video or CDN live stream. - - This method is not recommend, Use \ref agora::rtc::IRtcEngine::addVideoWatermark(const char* watermarkUrl, const WatermarkOptions& options) "addVideoWatermark"2 instead. - - This method adds a PNG watermark image to the local video stream for the recording device, channel audience, and CDN live audience to view and capture. - - To add the PNG file to the CDN live publishing stream, see the \ref IRtcEngine::setLiveTranscoding "setLiveTranscoding" method. - - @param watermark Pointer to the watermark image to be added to the local video stream. See RtcImage. + /** + * @brief Send Reliable message to remote uid in channel. + * + * @technical preview + * + * @param uid remote user id. + * @param type Reliable Data Transmission tunnel message type. See RdtStreamType + * @param data The pointer to the sent data. + * @param length The length of the sent data. + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ + virtual int sendRdtMessage(uid_t uid, RdtStreamType type, const char *data, size_t length) = 0; - @note - - The URL descriptions are different for the local video and CDN live streams: - - In a local video stream, `url` in RtcImage refers to the absolute path of the added watermark image file in the local video stream. - - In a CDN live stream, `url` in RtcImage refers to the URL address of the added watermark image in the CDN live broadcast. - - The source file of the watermark image must be in the PNG file format. If the width and height of the PNG file differ from your settings in this method, the PNG file will be cropped to conform to your settings. - - The Agora SDK supports adding only one watermark image onto a local video or CDN live stream. The newly added watermark image replaces the previous one. + /** + * @brief Send media control message + * + * @technical preview + * + * @param uid Remote user id. In particular, if the uid is set to 0, it means broadcasting the message to the entire channel. + * @param data The pointer to the sent data. + * @param length The length of the sent data, max 1024. + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int sendMediaControlMessage(uid_t uid, const char* data, size_t length) = 0; - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Adds a watermark image to the local video. + * + * @details + * This method adds a PNG watermark image to the local video stream in a live streaming session. + * Once the watermark image is added, all the users in the channel (CDN audience included) and the + * video capturing device can see and capture it. If you only want to add a watermark to the CDN + * live streaming, see `startRtmpStreamWithTranscoding`. + * + * @note + * - The URL descriptions are different for the local video and CDN live streaming: In a local video + * stream, URL refers to the absolute path of the added watermark image file in the local video + * stream. 
In a CDN live stream, URL refers to the URL address of the added watermark image in the + * CDN live streaming. + * - The source file of the watermark image must be in the PNG file format. If the width and height + * of the PNG file differ from your settings in this method, the PNG file will be cropped to conform + * to your settings. + * - The Agora SDK supports adding only one watermark image onto a local video or CDN live stream. + * The newly added watermark image replaces the previous one. + * + * @param watermark The watermark image to be added to the local live streaming: `RtcImage`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int addVideoWatermark(const RtcImage& watermark) __deprecated = 0; - /** Adds a watermark image to the local video. - - This method adds a PNG watermark image to the local video in a live broadcast. Once the watermark image is added, all the audience in the channel (CDN audience included), - and the recording device can see and capture it. Agora supports adding only one watermark image onto the local video, and the newly watermark image replaces the previous one. - - The watermark position depends on the settings in the \ref IRtcEngine::setVideoEncoderConfiguration "setVideoEncoderConfiguration" method: - - If the orientation mode of the encoding video is #ORIENTATION_MODE_FIXED_LANDSCAPE, or the landscape mode in #ORIENTATION_MODE_ADAPTIVE, the watermark uses the landscape orientation. - - If the orientation mode of the encoding video is #ORIENTATION_MODE_FIXED_PORTRAIT, or the portrait mode in #ORIENTATION_MODE_ADAPTIVE, the watermark uses the portrait orientation. - - When setting the watermark position, the region must be less than the dimensions set in the `setVideoEncoderConfiguration` method. Otherwise, the watermark image will be cropped. - - @note - - Ensure that you have called the \ref agora::rtc::IRtcEngine::enableVideo "enableVideo" method to enable the video module before calling this method. 
- - If you only want to add a watermark image to the local video for the audience in the CDN live broadcast channel to see and capture, you can call this method or the \ref agora::rtc::IRtcEngine::setLiveTranscoding "setLiveTranscoding" method. - - This method supports adding a watermark image in the PNG file format only. Supported pixel formats of the PNG image are RGBA, RGB, Palette, Gray, and Alpha_gray. - - If the dimensions of the PNG image differ from your settings in this method, the image will be cropped or zoomed to conform to your settings. - - If you have enabled the local video preview by calling the \ref agora::rtc::IRtcEngine::startPreview "startPreview" method, you can use the `visibleInPreview` member in the WatermarkOptions class to set whether or not the watermark is visible in preview. - - If you have enabled the mirror mode for the local video, the watermark on the local video is also mirrored. To avoid mirroring the watermark, Agora recommends that you do not use the mirror and watermark functions for the local video at the same time. You can implement the watermark function in your application layer. - - @param watermarkUrl The local file path of the watermark image to be added. This method supports adding a watermark image from the local absolute or relative file path. - @param options Pointer to the watermark's options to be added. See WatermarkOptions for more infomation. - - @return int - - 0: Success. - - < 0: Failure. + /** + * @brief Adds a watermark image to the local video. + * + * @deprecated Use addVideoWatermarkEx(const WatermarkConfig& config, const RtcConnection& + * connection) instead. + * + * @details + * This method adds a PNG watermark image to the local video in the live streaming. Once the + * watermark image is added, all the audience in the channel (CDN audience included), and the + * capturing device can see and capture it. The Agora SDK supports adding only one watermark image + * onto a live video stream. 
The newly added watermark image replaces the previous one. + * The watermark coordinates are dependent on the settings in the `setVideoEncoderConfiguration` + * method: + * - If the orientation mode of the encoding video ( `ORIENTATION_MODE` ) is fixed landscape mode or + * the adaptive landscape mode, the watermark uses the landscape orientation. + * - If the orientation mode of the encoding video ( `ORIENTATION_MODE` ) is fixed portrait mode or + * the adaptive portrait mode, the watermark uses the portrait orientation. + * - When setting the watermark position, the region must be less than the dimensions set in the + * `setVideoEncoderConfiguration` method; otherwise, the watermark image will be cropped. + * You can control the visibility of the watermark during preview by setting the `visibleInPreview` parameter when calling this method. However, whether it ultimately takes effect also depends on the position parameter you set when calling `setupLocalVideo` (the ` position` of the video frame in the video link). Refer to the table below for details.| Observation position | visibleInPreview value | Watermark visibility | + * | ---------------------------------- | ---------------------- | -------------------- | + * | (Default) `POSITION_POST_CAPTURER` | `true` | Yes | + * | | `false` | No | + * | `POSITION_PRE_ENCODER` | `true` | Yes | + * | | `false` | Yes | + * + * @note + * - Ensure that calling this method after `enableVideo`. + * - If you only want to add a watermark to the media push, you can call this method or the + * `startRtmpStreamWithTranscoding` method. + * - This method supports adding a watermark image in the PNG file format only. Supported pixel + * formats of the PNG image are RGBA, RGB, Palette, Gray, and Alpha_gray. + * - If the dimensions of the PNG image differ from your settings in this method, the image will be + * cropped or zoomed to conform to your settings. 
+ * - If you have enabled the mirror mode for the local video, the watermark on the local video is + * also mirrored. To avoid mirroring the watermark, Agora recommends that you do not use the mirror + * and watermark functions for the local video at the same time. You can implement the watermark + * function in your application layer. + * + * @param watermarkUrl The local file path of the watermark image to be added. This method supports + * adding a watermark image from the local absolute or relative file path. + * @param options The options of the watermark image to be added. See `WatermarkOptions`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int addVideoWatermark(const char* watermarkUrl, const WatermarkOptions& options) = 0; - /** Removes the watermark image on the video stream added by - addVideoWatermark(). + /** + * @brief Adds a watermark image to the local video. + * + * @since 4.6.0 + * + * @details + * You can use this method to overlay a watermark image on the local video stream, and configure the + * watermark's position, size, and visibility in the preview using `WatermarkConfig`. + * + * @param configs Watermark configuration. See `WatermarkConfig`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int addVideoWatermark(const WatermarkConfig& configs) = 0; - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Removes the watermark image from the local video. + * + * @since 4.6.0 + * + * @details + * This method removes a previously added watermark image from the local video stream using the + * specified unique ID. + * + * @param id The ID of the watermark to be removed. This value should match the ID used when the + * watermark was added. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int removeVideoWatermark(const char* id) = 0; + + /** + * @brief Removes the watermark image from the video stream. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ virtual int clearVideoWatermarks() = 0; // The following APIs are either deprecated and going to deleted. @@ -7621,66 +11184,74 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int resumeAudio() __deprecated = 0; /** - * Enables interoperability with the Agora Web SDK (Live Broadcast only). + * @brief Enables interoperability with the Agora Web SDK (applicable only in the live streaming * * @deprecated The Agora NG SDK enables the interoperablity with the Web SDK. + * scenarios). * - * Use this method when the channel profile is Live Broadcast. Interoperability - * with the Agora Web SDK is enabled by default when the channel profile is - * Communication. + * @details + * You can call this method to enable or disable interoperability with the Agora Web SDK. If a + * channel has Web SDK users, ensure that you call this method, or the video of the Native user will + * be a black screen for the Web user. + * This method is only applicable in live streaming scenarios, and interoperability is enabled by + * default in communication scenarios. * - * @param enabled Determines whether to enable interoperability with the Agora Web SDK. - * - true: (Default) Enable interoperability with the Agora Native SDK. - * - false: Disable interoperability with the Agora Native SDK. + * @param enabled Whether to enable interoperability: + * - `true`: Enable interoperability. + * - `false`: (Default) Disable interoperability. * - * @return int + * @return * - 0: Success. * - < 0: Failure. */ virtual int enableWebSdkInteroperability(bool enabled) __deprecated = 0; - /** Agora supports reporting and analyzing customized messages. + /** + * @brief Reports customized messages. * - * This function is in the beta stage with a free trial. The ability provided - * in its beta test version is reporting a maximum of 10 message pieces within - * 6 seconds, with each message piece not exceeding 256 bytes. 
+ * @details + * Agora supports reporting and analyzing customized messages. This function is in the beta stage + * with a free trial. The ability provided in its beta test version is reporting a maximum of 10 + * message pieces within 6 seconds, with each message piece not exceeding 256 bytes and each string + * not exceeding 100 bytes. To try out this function, contact `support@agora.io` and discuss the + * format of customized messages with us. * - * To try out this function, contact [support@agora.io](mailto:support@agora.io) - * and discuss the format of customized messages with us. */ virtual int sendCustomReportMessage(const char* id, const char* category, const char* event, const char* label, int value) = 0; - /** Registers the metadata observer. - - You need to implement the IMetadataObserver class and specify the metadata type - in this method. This method enables you to add synchronized metadata in the video - stream for more diversified live interactive streaming, such as sending - shopping links, digital coupons, and online quizzes. - - A successful call of this method triggers - the \ref agora::rtc::IMetadataObserver::getMaxMetadataSize "getMaxMetadataSize" callback. - - @note - - Call this method before the `joinChannel` method. - - This method applies to the `LIVE_BROADCASTING` channel profile. - - @param observer IMetadataObserver. - @param type The metadata type. See \ref IMetadataObserver::METADATA_TYPE "METADATA_TYPE". The SDK supports VIDEO_METADATA (0) only for now. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Registers the metadata observer. + * + * @details + * You need to implement the `IMetadataObserver` class and specify the metadata type in this method. + * This method enables you to add synchronized metadata in the video stream for more diversified + * live interactive streaming, such as sending shopping links, digital coupons, and online quizzes. 
+ * A successful call of this method triggers the `getMaxMetadataSize` callback. + * + * @note Call this method before `joinChannel(const char* token, const char* channelId, uid_t uid, + * const ChannelMediaOptions& options)`. + * + * @param observer The metadata observer. See `IMetadataObserver`. + * @param type The metadata type. The SDK currently only supports `VIDEO_METADATA`. See + * `METADATA_TYPE`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int registerMediaMetadataObserver(IMetadataObserver *observer, IMetadataObserver::METADATA_TYPE type) = 0; - /** Unregisters the metadata observer. - @param observer IMetadataObserver. - @param type The metadata type. See \ref IMetadataObserver::METADATA_TYPE "METADATA_TYPE". The SDK supports VIDEO_METADATA (0) only for now. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Unregisters the specified metadata observer. + * + * @param observer The metadata observer. See `IMetadataObserver`. + * @param type The metadata type. The SDK currently only supports `VIDEO_METADATA`. See + * `METADATA_TYPE`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int unregisterMediaMetadataObserver(IMetadataObserver* observer, IMetadataObserver::METADATA_TYPE type) = 0; /** Start audio frame dump. @@ -7698,16 +11269,38 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int stopAudioFrameDump(const char* channel_id, uid_t uid, const char* location) = 0; - /** - * Enables/Disables Agora AI Noise Suppression(AINS) with preset mode. + /** + * @brief Sets whether to enable the AI ​​noise suppression function and set the noise suppression + * mode. + * + * @details + * You can call this method to enable AI noise suppression function. Once enabled, the SDK + * automatically detects and reduces stationary and non-stationary noise from your audio on the + * premise of ensuring the quality of human voice. 
Stationary noise refers to noise signal with + * constant average statistical properties and negligibly small fluctuations of level within the + * period of observation. Common sources of stationary noises are: + * - Television; + * - Air conditioner; + * - Machinery, etc. + * Non-stationary noise refers to noise signal with huge fluctuations of level within the period of + * observation; common sources of non-stationary noises are: + * - Thunder; + * - Explosion; + * - Cracking, etc. + * Applicable scenarios: In scenarios such as co-streaming, online education and video meeting, this + * function can detect and reduce background noises to improve experience. + * Call timing: You can call this method either before or after joining a channel. + * + * @note + * - This method relies on the AI noise suppression dynamic library + * `libagora_ai_noise_suppression_extension.dll`. If the dynamic library is deleted, the function + * cannot be enabled. + * - Agora does not recommend enabling this function on devices running Android 6.0 and below. * - * @param enabled Sets whether or not to enable AINS. - * - true: Enables the AINS. - * - false: Disables the AINS. - * @param mode The preset AINS mode, range is [0,1,2]: - * 0: AINS mode with soft suppression level. - * 1: AINS mode with aggressive suppression level. - * 2: AINS mode with aggressive suppression level and low algorithm latency. + * @param enabled Whether to enable the AI noise suppression function: + * - `true`: Enable the AI noise suppression. + * - `false`: (Default) Disable the AI noise suppression. + * @param mode The AI noise suppression modes. See `AUDIO_AINS_MODE`. * * @return * - 0: Success. @@ -7715,32 +11308,54 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int setAINSMode(bool enabled, AUDIO_AINS_MODE mode) = 0; - /** Registers a user account. - * - * Once registered, the user account can be used to identify the local user when the user joins the channel. 
- * After the user successfully registers a user account, the SDK triggers the \ref agora::rtc::IRtcEngineEventHandler::onLocalUserRegistered "onLocalUserRegistered" callback on the local client, - * reporting the user ID and user account of the local user. - * - * To join a channel with a user account, you can choose either of the following: - * - * - Call the \ref agora::rtc::IRtcEngine::registerLocalUserAccount "registerLocalUserAccount" method to create a user account, and then the \ref agora::rtc::IRtcEngine::joinChannelWithUserAccount "joinChannelWithUserAccount" method to join the channel. - * - Call the \ref agora::rtc::IRtcEngine::joinChannelWithUserAccount "joinChannelWithUserAccount" method to join the channel. - * - * The difference between the two is that for the former, the time elapsed between calling the \ref agora::rtc::IRtcEngine::joinChannelWithUserAccount "joinChannelWithUserAccount" method - * and joining the channel is shorter than the latter. + /** + * @brief Registers a user account. + * + * @details + * Once registered, the user account can be used to identify the local user when the user joins the + * channel. After the registration is successful, the user account can identify the identity of the + * local user, and the user can use it to join the channel. + * This method is optional. If you want to join a channel using a user account, you can choose one + * of the following methods: + * - Call the `registerLocalUserAccount` method to register a user account, and then call the + * `joinChannelWithUserAccount(const char* token, const char* channelId, const char* userAccount)` + * or `joinChannelWithUserAccount(const char* token, const char* channelId, const char* userAccount, + * const ChannelMediaOptions& options)` method to join a + * channel, which can shorten the time it takes to enter the channel. 
+ * - Call the `joinChannelWithUserAccount(const char* token, const char* channelId, const char* + * userAccount)` or `joinChannelWithUserAccount(const char* token, const char* channelId, const + * char* userAccount, const ChannelMediaOptions& options)` method to + * join a channel. + * Related callbacks: After successfully calling this method, the `onLocalUserRegistered` callback + * will be triggered on the local client to report the local user's UID and user account. * * @note - * - Ensure that you set the `userAccount` parameter. Otherwise, this method does not take effect. - * - Ensure that the value of the `userAccount` parameter is unique in the channel. - * - To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a user ID, then ensure all the other users use the user ID too. The same applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the uid of the user is set to the same parameter type. - * - * @param appId The App ID of your project. - * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: - * - All lowercase English letters: a to z. - * - All uppercase English letters: A to Z. + * - Starting from v4.6.0, the SDK will no longer automatically map Int UID to the String + * `userAccount` used when registering a User Account. 
If you want to join a channel with the + * original String `userAccount` used during registration, call the + * `joinChannelWithUserAccount(const char* token, const char* channelId, const char* userAccount, + * const ChannelMediaOptions& options)` + * method to join the channel, instead of calling `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)` and pass in the Int UID + * obtained through this method + * - Ensure that the `userAccount` is unique in the channel. + * - To ensure smooth communication, use the same parameter type to identify the user. For example, + * if a user joins the channel with a UID, then ensure all the other users use the UID too. The same + * applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the + * ID of the user is set to the same parameter type. + * + * @param appId The App ID of your project on Agora Console. + * @param userAccount The user account. This parameter is used to identify the user in the channel + * for real-time audio and video engagement. You need to set and manage user accounts yourself and + * ensure that each user account in the same channel is unique. The maximum length of this parameter + * is 255 bytes. Ensure that you set this parameter and do not set it as NULL. Supported characters + * are as follow(89 in total): + * - The 26 lowercase English letters: a to z. + * - The 26 uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * - Space + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," * * @return * - 0: Success. 
@@ -7748,119 +11363,288 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int registerLocalUserAccount(const char* appId, const char* userAccount) = 0; - /** Joins the channel with a user account. - * - * After the user successfully joins the channel, the SDK triggers the following callbacks: - * - * - The local client: \ref agora::rtc::IRtcEngineEventHandler::onLocalUserRegistered "onLocalUserRegistered" and \ref agora::rtc::IRtcEngineEventHandler::onJoinChannelSuccess "onJoinChannelSuccess" . - * - The remote client: \ref agora::rtc::IRtcEngineEventHandler::onUserJoined "onUserJoined" and \ref agora::rtc::IRtcEngineEventHandler::onUserInfoUpdated "onUserInfoUpdated" , if the user joining the channel is in the `COMMUNICATION` profile, or is a host in the `LIVE_BROADCASTING` profile. - * - * @note To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a user ID, then ensure all the other users use the user ID too. The same applies to the user account. - * If a user joins the channel with the Agora Web SDK, ensure that the uid of the user is set to the same parameter type. + /** + * @brief Joins a channel with a User Account and Token. + * + * @details + * Before calling this method, if you have not called `registerLocalUserAccount` to register a user + * account, when you call this method to join a channel, the SDK automatically creates a user + * account for you. Calling the `registerLocalUserAccount` method to register a user account, and + * then calling this method to join a channel can shorten the time it takes to enter the channel. + * Once a user joins the channel, the user subscribes to the audio and video streams of all the + * other users in the channel by default, giving rise to usage and billings. To stop subscribing to + * a specified stream or all remote streams, call the corresponding `mute` methods. + * Call timing: Call this method after `initialize`. 
+ * Related callbacks: After the user successfully joins the channel, the SDK triggers the following + * callbacks: + * - The local client: `onLocalUserRegistered`, `onJoinChannelSuccess` and + * `onConnectionStateChanged` callbacks. + * - The remote client: The `onUserJoined` and `onUserInfoUpdated` callbacks if a user joins the + * channel in the COMMUNICATION profile, or if a host joins the channel in the LIVE_BROADCASTING + * profile. * - * @param token The token generated at your server: - * - For low-security requirements: You can use the temporary token generated at Console. For details, see [Get a temporary toke](https://docs.agora.io/en/Voice/token?platform=All%20Platforms#get-a-temporary-token). - * - For high-security requirements: Set it as the token generated at your server. For details, see [Get a token](https://docs.agora.io/en/Voice/token?platform=All%20Platforms#get-a-token). - * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * @note + * - This method only supports users joining one channel at a time. + * - Users with different App IDs cannot call each other. + * - Before joining a channel, ensure that the App ID you use to generate a token is the same as + * that you pass in the `initialize` method; otherwise, you may fail to join the channel with the + * token. + * To ensure smooth communication, use the same parameter type to identify the user. For example, if + * a user joins the channel with a UID, then ensure all the other users use the UID too. The same + * applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the + * ID of the user is set to the same parameter type. + * + * @param token The token generated on your server for authentication. See .Note: + * - (Recommended) If your project has enabled the security mode (using APP ID and Token for + * authentication), this parameter is required. 
+ * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is + * optional. You will automatically exit the channel 24 hours after successfully joining in. + * - If you need to join different channels at the same time or switch between channels, Agora + * recommends using a wildcard token so that you don't need to apply for a new token every time + * joining a channel. See `Secure authentication with tokens`. + * @param channelId The channel name. This parameter signifies the channel in which users engage in + * real-time audio and video interaction. Under the premise of the same App ID, users who fill in + * the same channel ID enter the same channel for audio and video interaction. The string length + * must be less than 64 bytes. Supported characters (89 characters in total): * - All lowercase English letters: a to z. * - All uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". - * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: - * - All lowercase English letters: a to z. - * - All uppercase English letters: A to Z. + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," + * @param userAccount The user account. This parameter is used to identify the user in the channel + * for real-time audio and video engagement. You need to set and manage user accounts yourself and + * ensure that each user account in the same channel is unique. The maximum length of this parameter + * is 255 bytes. Ensure that you set this parameter and do not set it as NULL. 
Supported characters + * are as follows(89 in total): + * - The 26 lowercase English letters: a to z. + * - The 26 uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * - Space + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," * * @return * - 0: Success. * - < 0: Failure. + * - -2: The parameter is invalid. For example, the token is invalid, the `uid` parameter is not + * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass + * in a valid parameter and join the channel again. + * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine` + * object. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. + * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after + * calling `startEchoTest` to start a call loop test, you call this method to join the channel + * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling + * this method. + * - -17: The request to join the channel is rejected. The typical cause is that the user is + * already in the channel. Agora recommends that you use the `onConnectionStateChanged` callback to + * see whether the user is in the channel. Do not call this method to join the channel unless you + * receive the `CONNECTION_STATE_DISCONNECTED` (1) state. + * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to + * rejoin the channel. + * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the + * channel. 
*/ virtual int joinChannelWithUserAccount(const char* token, const char* channelId, const char* userAccount) = 0; - /** Joins the channel with a user account. - * - * After the user successfully joins the channel, the SDK triggers the following callbacks: - * - * - The local client: \ref agora::rtc::IRtcEngineEventHandler::onLocalUserRegistered "onLocalUserRegistered" and \ref agora::rtc::IRtcEngineEventHandler::onJoinChannelSuccess "onJoinChannelSuccess" . - * - The remote client: \ref agora::rtc::IRtcEngineEventHandler::onUserJoined "onUserJoined" and \ref agora::rtc::IRtcEngineEventHandler::onUserInfoUpdated "onUserInfoUpdated" , if the user joining the channel is in the `COMMUNICATION` profile, or is a host in the `LIVE_BROADCASTING` profile. - * - * @note To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a user ID, then ensure all the other users use the user ID too. The same applies to the user account. - * If a user joins the channel with the Agora Web SDK, ensure that the uid of the user is set to the same parameter type. + /** + * @brief Join a channel using a user account and token, and set the media options. + * + * @details + * Before calling this method, if you have not called `registerLocalUserAccount` to register a user + * account, when you call this method to join a channel, the SDK automatically creates a user + * account for you. Calling the `registerLocalUserAccount` method to register a user account, and + * then calling this method to join a channel can shorten the time it takes to enter the channel. + * Compared to `joinChannelWithUserAccount(const char* token, const char* channelId, const char* + * userAccount)`, this method has the `options` parameter which is + * used to set media options, such as whether to publish audio and video streams within a channel. 
+ * By default, the user subscribes to the audio and video streams of all the other users in the + * channel, giving rise to usage and **billings**. To stop subscribing to other streams, set the + * `options` parameter or call the corresponding `mute` methods. + * Call timing: Call this method after `initialize`. + * Related callbacks: After the user successfully joins the channel, the SDK triggers the following + * callbacks: + * - The local client: `onLocalUserRegistered`, `onJoinChannelSuccess` and + * `onConnectionStateChanged` callbacks. + * - The remote client: The `onUserJoined` and `onUserInfoUpdated` callbacks if a user joins the + * channel in the COMMUNICATION profile, or if a host joins the channel in the LIVE_BROADCASTING + * profile. * - * @param token The token generated at your server: - * - For low-security requirements: You can use the temporary token generated at Console. For details, see [Get a temporary toke](https://docs.agora.io/en/Voice/token?platform=All%20Platforms#get-a-temporary-token). - * - For high-security requirements: Set it as the token generated at your server. For details, see [Get a token](https://docs.agora.io/en/Voice/token?platform=All%20Platforms#get-a-token). - * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * @note + * - This method only supports users joining one channel at a time. + * - Users with different App IDs cannot call each other. + * - Before joining a channel, ensure that the App ID you use to generate a token is the same as + * that you pass in the `initialize` method; otherwise, you may fail to join the channel with the + * token. + * To ensure smooth communication, use the same parameter type to identify the user. For example, if + * a user joins the channel with a UID, then ensure all the other users use the UID too. The same + * applies to the user account. 
If a user joins the channel with the Agora Web SDK, ensure that the + * ID of the user is set to the same parameter type. + * + * @param token The token generated on your server for authentication. See `Secure authentication with tokens`. Note: + * - (Recommended) If your project has enabled the security mode (using APP ID and Token for + * authentication), this parameter is required. + * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is + * optional. You will automatically exit the channel 24 hours after successfully joining in. + * - If you need to join different channels at the same time or switch between channels, Agora + * recommends using a wildcard token so that you don't need to apply for a new token every time + * joining a channel. See `Secure authentication with tokens`. + * @param channelId The channel name. This parameter signifies the channel in which users engage in + * real-time audio and video interaction. Under the premise of the same App ID, users who fill in + * the same channel ID enter the same channel for audio and video interaction. The string length + * must be less than 64 bytes. Supported characters (89 characters in total): * - All lowercase English letters: a to z. * - All uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". - * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: - * - All lowercase English letters: a to z. - * - All uppercase English letters: A to Z. + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," + * @param userAccount The user account. 
This parameter is used to identify the user in the channel + * for real-time audio and video engagement. You need to set and manage user accounts yourself and + * ensure that each user account in the same channel is unique. The maximum length of this parameter + * is 255 bytes. Ensure that you set this parameter and do not set it as NULL. Supported characters + * are as follows(89 in total): + * - The 26 lowercase English letters: a to z. + * - The 26 uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". - * @param options The channel media options: \ref agora::rtc::ChannelMediaOptions::ChannelMediaOptions "ChannelMediaOptions" + * - Space + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," + * @param options The channel media options. See `ChannelMediaOptions`. * * @return * - 0: Success. * - < 0: Failure. + * - -2: The parameter is invalid. For example, the token is invalid, the `uid` parameter is not + * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass + * in a valid parameter and join the channel again. + * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine` + * object. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. + * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after + * calling `startEchoTest` to start a call loop test, you call this method to join the channel + * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling + * this method. 
+ * - -17: The request to join the channel is rejected. The typical cause is that the user is + * already in the channel. Agora recommends that you use the `onConnectionStateChanged` callback to + * see whether the user is in the channel. Do not call this method to join the channel unless you + * receive the `CONNECTION_STATE_DISCONNECTED` (1) state. + * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to + * rejoin the channel. + * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the + * channel. */ virtual int joinChannelWithUserAccount(const char* token, const char* channelId, const char* userAccount, const ChannelMediaOptions& options) = 0; - /** Joins the channel with a user account. - * - * After the user successfully joins the channel, the SDK triggers the following callbacks: - * - * - The local client: \ref agora::rtc::IRtcEngineEventHandler::onLocalUserRegistered "onLocalUserRegistered" and \ref agora::rtc::IRtcEngineEventHandler::onJoinChannelSuccess "onJoinChannelSuccess" . - * - The remote client: \ref agora::rtc::IRtcEngineEventHandler::onUserJoined "onUserJoined" and \ref agora::rtc::IRtcEngineEventHandler::onUserInfoUpdated "onUserInfoUpdated" , if the user joining the channel is in the `COMMUNICATION` profile, or is a host in the `LIVE_BROADCASTING` profile. - * - * @note To ensure smooth communication, use the same parameter type to identify the user. For example, if a user joins the channel with a user ID, then ensure all the other users use the user ID too. The same applies to the user account. - * If a user joins the channel with the Agora Web SDK, ensure that the uid of the user is set to the same parameter type. + /** + * @brief Join a channel using a user account and token, and set the media options. 
+ * + * @details + * Before calling this method, if you have not called `registerLocalUserAccount` to register a user + * account, when you call this method to join a channel, the SDK automatically creates a user + * account for you. Calling the `registerLocalUserAccount` method to register a user account, and + * then calling this method to join a channel can shorten the time it takes to enter the channel. + * Once a user joins the channel, the user subscribes to the audio and video streams of all the + * other users in the channel by default, giving rise to usage and **billings**. If you want to stop + * subscribing to the media stream of other users, you can set the `options` parameter or call the + * corresponding `mute` method. + * Call timing: Call this method after `initialize`. + * Related callbacks: After the user successfully joins the channel, the SDK triggers the following + * callbacks: + * - The local client: `onLocalUserRegistered`, `onJoinChannelSuccess` and + * `onConnectionStateChanged` callbacks. + * - The remote client: The `onUserJoined` and `onUserInfoUpdated` callbacks if a user joins the + * channel in the COMMUNICATION profile, or if a host joins the channel in the LIVE_BROADCASTING + * profile. * - * @param token The token generated at your server: - * - For low-security requirements: You can use the temporary token generated at Console. For details, see [Get a temporary toke](https://docs.agora.io/en/Voice/token?platform=All%20Platforms#get-a-temporary-token). - * - For high-security requirements: Set it as the token generated at your server. For details, see [Get a token](https://docs.agora.io/en/Voice/token?platform=All%20Platforms#get-a-token). - * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * @note + * - This method only supports users joining one channel at a time. + * - Users with different App IDs cannot call each other. 
+ * - Before joining a channel, ensure that the App ID you use to generate a token is the same as + * that you pass in the `initialize` method; otherwise, you may fail to join the channel with the + * token. + * To ensure smooth communication, use the same parameter type to identify the user. For example, if + * a user joins the channel with a UID, then ensure all the other users use the UID too. The same + * applies to the user account. If a user joins the channel with the Agora Web SDK, ensure that the + * ID of the user is set to the same parameter type. + * + * @param token The token generated on your server for authentication. See `Secure authentication with tokens`. Note: + * - (Recommended) If your project has enabled the security mode (using APP ID and Token for + * authentication), this parameter is required. + * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is + * optional. You will automatically exit the channel 24 hours after successfully joining in. + * - If you need to join different channels at the same time or switch between channels, Agora + * recommends using a wildcard token so that you don't need to apply for a new token every time + * joining a channel. See `Secure authentication with tokens`. + * @param channelId The channel name. This parameter signifies the channel in which users engage in + * real-time audio and video interaction. Under the premise of the same App ID, users who fill in + * the same channel ID enter the same channel for audio and video interaction. The string length + * must be less than 64 bytes. Supported characters (89 characters in total): * - All lowercase English letters: a to z. * - All uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". 
- * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: - * - All lowercase English letters: a to z. - * - All uppercase English letters: A to Z. + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," + * @param userAccount The user account. This parameter is used to identify the user in the channel + * for real-time audio and video engagement. You need to set and manage user accounts yourself and + * ensure that each user account in the same channel is unique. The maximum length of this parameter + * is 255 bytes. Ensure that you set this parameter and do not set it as NULL. Supported characters + * are as follows(89 in total): + * - The 26 lowercase English letters: a to z. + * - The 26 uppercase English letters: A to Z. * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". - * @param options The channel media options: \ref agora::rtc::ChannelMediaOptions::ChannelMediaOptions "ChannelMediaOptions" - * @param eventHandler The pointer to the IRtcEngine event handler: IRtcEngineEventHandler. + * - Space + * - "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", + * "^", "_", "{", "}", "|", "~", "," + * @param options The channel media options. See `ChannelMediaOptions`. + * @param eventHandler The callback class of `IRtcEngineEx`. See `IRtcEngineEventHandler`. You can + * get the callback events of multiple channels through the `eventHandler` object passed in this + * parameter. * * @return * - 0: Success. * - < 0: Failure. + * - -2: The parameter is invalid. 
For example, the token is invalid, the `uid` parameter is not + * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass + * in a valid parameter and join the channel again. + * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine` + * object. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. + * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after + * calling `startEchoTest` to start a call loop test, you call this method to join the channel + * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling + * this method. + * - -17: The request to join the channel is rejected. The typical cause is that the user is + * already in the channel. Agora recommends that you use the `onConnectionStateChanged` callback to + * see whether the user is in the channel. Do not call this method to join the channel unless you + * receive the `CONNECTION_STATE_DISCONNECTED` (1) state. + * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to + * rejoin the channel. + * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the + * channel. */ virtual int joinChannelWithUserAccountEx(const char* token, const char* channelId, const char* userAccount, const ChannelMediaOptions& options, IRtcEngineEventHandler* eventHandler) = 0; - /** Gets the user information by passing in the user account. - * - * After a remote user joins the channel, the SDK gets the user ID and user account of the remote user, caches them - * in a mapping table object (`userInfo`), and triggers the \ref agora::rtc::IRtcEngineEventHandler::onUserInfoUpdated "onUserInfoUpdated" callback on the local client. + /** + * @brief Gets the user information by passing in the user account. 
* - * After receiving the o\ref agora::rtc::IRtcEngineEventHandler::onUserInfoUpdated "onUserInfoUpdated" callback, you can call this method to get the user ID of the - * remote user from the `userInfo` object by passing in the user account. + * @details + * After a remote user joins the channel, the SDK gets the UID and user account of the remote user, + * caches them in a mapping table object, and triggers the `onUserInfoUpdated` callback on the local + * client. After receiving the callback, you can call this method and pass in the user account to + * get the UID of the remote user from the `UserInfo` object. + * Call timing: Call this method after receiving the `onUserInfoUpdated` callback. + * Related callbacks: `onUserInfoUpdated` * - * @param userAccount The user account of the user. Ensure that you set this parameter. - * @param [in,out] userInfo A userInfo object that identifies the user: - * - Input: A userInfo object. - * - Output: A userInfo object that contains the user account and user ID of the user. + * @param userAccount The user account. + * @param userInfo Input and output parameter. The `UserInfo` object that identifies the user + * information. + * - Input value: A `UserInfo` object. + * - Output: A `UserInfo` object that contains both the user account and UID. * * @return * - 0: Success. @@ -7868,18 +11652,22 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int getUserInfoByUserAccount(const char* userAccount, rtc::UserInfo* userInfo) = 0; - /** Gets the user information by passing in the user ID. - * - * After a remote user joins the channel, the SDK gets the user ID and user account of the remote user, - * caches them in a mapping table object (`userInfo`), and triggers the \ref agora::rtc::IRtcEngineEventHandler::onUserInfoUpdated "onUserInfoUpdated" callback on the local client. + /** + * @brief Gets the user information by passing in the user ID. 
 * - * After receiving the \ref agora::rtc::IRtcEngineEventHandler::onUserInfoUpdated "onUserInfoUpdated" callback, you can call this method to get the user account of the remote user - * from the `userInfo` object by passing in the user ID. + * @details + * After a remote user joins the channel, the SDK gets the UID and user account of the remote user, + * caches them in a mapping table object, and triggers the `onUserInfoUpdated` callback on the local + * client. After receiving the callback, you can call this method and pass in the UID to get the + * user account of the specified user from the `UserInfo` object. + * Call timing: Call this method after receiving the `onUserInfoUpdated` callback. + * Related callbacks: `onUserInfoUpdated` * - * @param uid The user ID of the remote user. Ensure that you set this parameter. - * @param[in,out] userInfo A userInfo object that identifies the user: - * - Input: A userInfo object. - * - Output: A userInfo object that contains the user account and user ID of the user. + * @param uid The user ID. + * @param userInfo Input and output parameter. The `UserInfo` object that identifies the user + * information. + * - Input value: A `UserInfo` object. + * - Output: A `UserInfo` object that contains both the user account and UID. * * @return * - 0: Success. @@ -7887,101 +11675,110 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int getUserInfoByUid(uid_t uid, rtc::UserInfo* userInfo) = 0; - /** Starts relaying media streams across channels or updates the channels for media relay. - * - * After a successful method call, the SDK triggers the - * \ref agora::rtc::IRtcEngineEventHandler::onChannelMediaRelayStateChanged - * "onChannelMediaRelayStateChanged" callback, and this callback return the state of the media stream relay. 
- * - If the - * \ref agora::rtc::IRtcEngineEventHandler::onChannelMediaRelayStateChanged - * "onChannelMediaRelayStateChanged" callback returns - * #RELAY_STATE_RUNNING (2) and #RELAY_OK (0), the host starts sending data to the destination channel. - * - If the - * \ref agora::rtc::IRtcEngineEventHandler::onChannelMediaRelayStateChanged - * "onChannelMediaRelayStateChanged" callback returns - * #RELAY_STATE_FAILURE (3), an exception occurs during the media stream - * relay. - * - * @note - * - Call this method after the \ref joinChannel() "joinChannel" method. - * - This method takes effect only when you are a host in a - * `LIVE_BROADCASTING` channel. - * - Contact sales-us@agora.io before implementing this function. - * - We do not support string user accounts in this API. - * - * @since v4.2.0 - * @param configuration The configuration of the media stream relay: - * ChannelMediaRelayConfiguration. - * - * @return - * - 0: Success. - * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. - * - -8(ERR_INVALID_STATE): The current status is invalid, only allowed to be called when the role is the broadcaster. - **/ + /** + * @brief Starts relaying media streams across channels or updates channels for media relay. + * + * @since v4.2.0 + * + * @details + * The first successful call to this method starts relaying media streams from the source channel to + * the destination channels. To relay the media stream to other channels, or exit one of the current + * media relays, you can call this method again to update the destination channels. This feature + * supports relaying media streams to a maximum of six destination channels. + * After a successful method call, the SDK triggers the `onChannelMediaRelayStateChanged` callback, + * and this callback returns the state of the media stream relay. 
Common states are as follows: + * - If the `onChannelMediaRelayStateChanged` callback returns `RELAY_STATE_RUNNING` (2) and + * `RELAY_OK` (0), it means that the SDK starts relaying media streams from the source channel to + * the destination channel. + * - If the `onChannelMediaRelayStateChanged` callback returns `RELAY_STATE_FAILURE` (3), an + * exception occurs during the media stream relay. + * + * @note + * - Call this method after joining the channel. + * - This method takes effect only when you are a host in a live streaming channel. + * - The relaying media streams across channels function needs to be enabled by contacting + * `technical support`. + * - Agora does not support string user accounts in this API. + * + * @param configuration The configuration of the media stream relay. See + * `ChannelMediaRelayConfiguration`. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -1: A general error occurs (no specified reason). + * - -2: The parameter is invalid. + * - -8: Internal state error. Probably because the user is not a broadcaster. + */ virtual int startOrUpdateChannelMediaRelay(const ChannelMediaRelayConfiguration &configuration) = 0; - /** Stops the media stream relay. - * - * Once the relay stops, the host quits all the destination - * channels. - * - * After a successful method call, the SDK triggers the - * \ref agora::rtc::IRtcEngineEventHandler::onChannelMediaRelayStateChanged - * "onChannelMediaRelayStateChanged" callback. If the callback returns - * #RELAY_STATE_IDLE (0) and #RELAY_OK (0), the host successfully - * stops the relay. - * - * @note - * If the method call fails, the SDK triggers the - * \ref agora::rtc::IRtcEngineEventHandler::onChannelMediaRelayStateChanged - * "onChannelMediaRelayStateChanged" callback with the - * #RELAY_ERROR_SERVER_NO_RESPONSE (2) or - * #RELAY_ERROR_SERVER_CONNECTION_LOST (8) state code. 
You can leave the - * channel by calling the \ref leaveChannel() "leaveChannel" method, and - * the media stream relay automatically stops. - * - * @return - * - 0: Success. - * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. - * - -7(ERR_NOT_INITIALIZED): cross channel media streams are not relayed. - */ + /** + * @brief Stops the media stream relay. Once the relay stops, the host quits all the target + * channels. + * + * @details + * After a successful method call, the SDK triggers the `onChannelMediaRelayStateChanged` callback. + * If the callback reports `RELAY_STATE_IDLE` (0) and `RELAY_OK` (0), the host successfully stops + * the relay. + * + * @note If the method call fails, the SDK triggers the `onChannelMediaRelayStateChanged` callback + * with the `RELAY_ERROR_SERVER_NO_RESPONSE` (2) or `RELAY_ERROR_SERVER_CONNECTION_LOST` (8) status + * code. You can call the `leaveChannel(const LeaveChannelOptions& options)` method to leave the + * channel, and the media stream + * relay automatically stops. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -5: The method call was rejected. There is no ongoing channel media relay. + */ virtual int stopChannelMediaRelay() = 0; - /** pause the channels for media stream relay. + /** + * @brief Pauses the media stream relay to all target channels. + * + * @details + * After the cross-channel media stream relay starts, you can call this method to pause relaying + * media streams to all target channels; after the pause, if you want to resume the relay, call + * `resumeAllChannelMediaRelay`. + * + * @note Call this method after `startOrUpdateChannelMediaRelay`. + * * @return * - 0: Success. * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. 
- * - -7(ERR_NOT_INITIALIZED): cross channel media streams are not relayed. + * - -5: The method call was rejected. There is no ongoing channel media relay. */ virtual int pauseAllChannelMediaRelay() = 0; - /** resume the channels for media stream relay. + /** + * @brief Resumes the media stream relay to all target channels. + * + * @details + * After calling the `pauseAllChannelMediaRelay` method, you can call this method to resume relaying + * media streams to all destination channels. + * + * @note Call this method after `pauseAllChannelMediaRelay`. + * * @return * - 0: Success. * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. - * - -7(ERR_NOT_INITIALIZED): cross channel media streams are not relayed. + * - -5: The method call was rejected. There is no paused channel media relay. */ virtual int resumeAllChannelMediaRelay() = 0; - /** Set audio parameters for direct streaming to CDN + /** + * @brief Sets the audio profile of the audio streams directly pushed to the CDN by the host. * - * @note - * Must call this api before "startDirectCdnStreaming" + * @deprecated v4.6.0. * - * @param profile Sets the sample rate, bitrate, encoding mode, and the number of channels: - * #AUDIO_PROFILE_TYPE. + * @details + * When you set the `publishMicrophoneTrack` or `publishCustomAudioTrack` in the + * `DirectCdnStreamingMediaOptions` as `true` to capture audios, you can call this method to set the + * audio profile. + * + * @param profile The audio profile, including the sampling rate, bitrate, encoding mode, and the + * number of channels. See `AUDIO_PROFILE_TYPE`. * * @return * - 0: Success. 
@@ -7989,15 +11786,24 @@ class IRtcEngine : public agora::base::IEngineBase {
 */
 virtual int setDirectCdnStreamingAudioConfiguration(AUDIO_PROFILE_TYPE profile) = 0;

- /** Set video parameters for direct streaming to CDN
+ /**
+ * @brief Sets the video profile of the media streams directly pushed to the CDN by the host.
 *
- * Each configuration profile corresponds to a set of video parameters, including
- * the resolution, frame rate, and bitrate.
+ * @deprecated v4.6.0.
 *
- * @note
- * Must call this api before "startDirectCdnStreaming"
+ * @details
+ * This method only affects video streams captured by cameras or screens, or from custom video
+ * capture sources. That is, when you set `publishCameraTrack` or `publishCustomVideoTrack` in
+ * `DirectCdnStreamingMediaOptions` as `true` to capture videos, you can call this method to set the
+ * video profiles.
+ * If your local camera does not support the video resolution you set, the SDK automatically adjusts
+ * the video resolution to a value that is closest to your settings for capture, encoding or
+ * streaming, with the same aspect ratio as the resolution you set. You can get the actual
+ * resolution of the video streams through the `onDirectCdnStreamingStats` callback.
 *
- * @param config The local video encoder configuration: VideoEncoderConfiguration.
+ * @param config Video profile. See `VideoEncoderConfiguration`. Note: During CDN live streaming,
+ * Agora only supports setting `ORIENTATION_MODE` as `ORIENTATION_MODE_FIXED_LANDSCAPE` or
+ * `ORIENTATION_MODE_FIXED_PORTRAIT`.
 *
 * @return
 * - 0: Success.
@@ -8005,14 +11811,33 @@ class IRtcEngine : public agora::base::IEngineBase {
 */
 virtual int setDirectCdnStreamingVideoConfiguration(const VideoEncoderConfiguration& config) = 0;

- /** Start direct cdn streaming
+ /**
+ * @brief Starts pushing media streams to the CDN directly.
 *
- * @param eventHandler A pointer to the direct cdn streaming event handler: \ref agora::rtc::IDirectCdnStreamingEventHandler
- * "IDirectCdnStreamingEventHandler".
- * @param publishUrl The url of the cdn used to publish the stream.
- * @param options The direct cdn streaming media options: DirectCdnStreamingMediaOptions.
- * This API must pass an audio-related option, and temporarily cannot pass more than one.
- * For video-related options, you can either choose to not pass any, or only one.
+ * @deprecated v4.6.0.
+ *
+ * @details
+ * Agora does not support pushing media streams to one URL repeatedly.
+ * **Media options**
+ * Agora does not support setting the value of `publishCameraTrack` and `publishCustomVideoTrack` as
+ * `true`, or the value of `publishMicrophoneTrack` and `publishCustomAudioTrack` as `true` at the
+ * same time. When choosing media setting options ( `DirectCdnStreamingMediaOptions` ), you can
+ * refer to the following examples:
+ * If you want to push audio and video streams captured by the host from a custom source, the media
+ * setting options should be set as follows:
+ * - `publishCustomAudioTrack` is set as `true` and call the `pushAudioFrame` method
+ * - `publishCustomVideoTrack` is set as `true` and call the `pushVideoFrame` method
+ * - `publishCameraTrack` is set as `false` (the default value)
+ * - `publishMicrophoneTrack` is set as `false` (the default value)
+ * As of v4.2.0, Agora SDK supports audio-only live streaming. You can set `publishCustomAudioTrack`
+ * or `publishMicrophoneTrack` in `DirectCdnStreamingMediaOptions` as `true` and call
+ * `pushAudioFrame` to push audio streams.
+ *
+ * @note Agora only supports pushing one audio and video stream or one audio stream to CDN.
+ *
+ * @param eventHandler See `onDirectCdnStreamingStateChanged` and `onDirectCdnStreamingStats`.
+ * @param publishUrl The CDN live streaming URL.
+ * @param options The media setting options for the host.
See `DirectCdnStreamingMediaOptions`. * * @return * - 0: Success. @@ -8021,10 +11846,10 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int startDirectCdnStreaming(IDirectCdnStreamingEventHandler* eventHandler, const char* publishUrl, const DirectCdnStreamingMediaOptions& options) = 0; - /** Stop direct cdn streaming + /** + * @brief Stops pushing media streams to the CDN directly. * - * @note - * This method is synchronous. + * @deprecated v4.6.0. * * @return * - 0: Success. @@ -8033,6 +11858,8 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int stopDirectCdnStreaming() = 0; /** Change the media source during the pushing + * + * @deprecated v4.6.0. * * @note * This method is temporarily not supported. @@ -8045,19 +11872,48 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int updateDirectCdnStreamingMediaOptions(const DirectCdnStreamingMediaOptions& options) = 0; - /** Enables the rhythm player. + /** + * @brief Enables the virtual metronome. * - * @param sound1 The absolute path or URL address (including the filename extensions) of the file for the downbeat. - * @param sound2 The absolute path or URL address (including the filename extensions) of the file for the upbeats. - * @param config The configuration of rhythm player. + * @details + * - After enabling the virtual metronome, the SDK plays the specified audio effect file from the + * beginning, and controls the playback duration of each file according to `beatsPerMinute` you set + * in `AgoraRhythmPlayerConfig`. For example, if you set `beatsPerMinute` as `60`, the SDK plays one + * beat every second. If the file duration exceeds the beat duration, the SDK only plays the audio + * within the beat duration. + * - By default, the sound of the virtual metronome is published in the channel. If you want the + * sound to be heard by the remote users, you can set `publishRhythmPlayerTrack` in + * `ChannelMediaOptions` as `true`. 
+ * Applicable scenarios: In music education, physical education and other scenarios, teachers + * usually need to use a metronome so that students can practice with the correct beat. The meter is + * composed of a downbeat and upbeats. The first beat of each measure is called a downbeat, and the + * rest are called upbeats. + * Call timing: This method can be called either before or after joining the channel. + * Related callbacks: After successfully calling this method, the SDK triggers the + * `onRhythmPlayerStateChanged` callback locally to report the status of the virtual metronome. * - * @return int + * @param sound1 The absolute path or URL address (including the filename extensions) of the file + * for the downbeat. For example, `C:\music\audio.mp4`. For the audio file formats supported by this + * method, see `What formats of audio files does the Agora RTC SDK support`. + * @param sound2 The absolute path or URL address (including the filename extensions) of the file + * for the upbeats. For example, `C:\music\audio.mp4`. For the audio file formats supported by this + * method, see `What formats of audio files does the Agora RTC SDK support`. + * @param config The metronome configuration. See `AgoraRhythmPlayerConfig`. + * + * @return * - 0: Success. * - < 0: Failure. + * - -22: Cannot find audio effect files. Please set the correct paths for `sound1` and `sound2`. */ virtual int startRhythmPlayer(const char* sound1, const char* sound2, const AgoraRhythmPlayerConfig& config) = 0; - /** Disables the rhythm player. + /** + * @brief Disables the virtual metronome. + * + * @details + * After calling `startRhythmPlayer`, you can call this method to disable the virtual metronome. + * + * @note This method is for Android and iOS only. * * @return * - 0: Success. @@ -8065,32 +11921,50 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int stopRhythmPlayer() = 0; - /** Configures the rhythm player. + /** + * @brief Configures the virtual metronome. 
* - * @param config The configuration of rhythm player. + * @details + * - After calling `startRhythmPlayer`, you can call this method to reconfigure the virtual + * metronome. + * - After enabling the virtual metronome, the SDK plays the specified audio effect file from the + * beginning, and controls the playback duration of each file according to `beatsPerMinute` you set + * in `AgoraRhythmPlayerConfig`. For example, if you set `beatsPerMinute` as `60`, the SDK plays one + * beat every second. If the file duration exceeds the beat duration, the SDK only plays the audio + * within the beat duration. + * - By default, the sound of the virtual metronome is published in the channel. If you want the + * sound to be heard by the remote users, you can set `publishRhythmPlayerTrack` in + * `ChannelMediaOptions` as `true`. + * Call timing: This method can be called either before or after joining the channel. + * Related callbacks: After successfully calling this method, the SDK triggers the + * `onRhythmPlayerStateChanged` callback locally to report the status of the virtual metronome. * - * @return int + * @param config The metronome configuration. See `AgoraRhythmPlayerConfig`. + * + * @return * - 0: Success. * - < 0: Failure. */ virtual int configRhythmPlayer(const AgoraRhythmPlayerConfig& config) = 0; /** - * Takes a snapshot of a video stream. - * - * This method takes a snapshot of a video stream from the specified user, generates a JPG - * image, and saves it to the specified path. + * @brief Takes a snapshot of a video stream. * - * The method is asynchronous, and the SDK has not taken the snapshot when the method call - * returns. After a successful method call, the SDK triggers the `onSnapshotTaken` callback - * to report whether the snapshot is successfully taken, as well as the details for that + * @details + * This method takes a snapshot of a video stream from the specified user, generates a JPG image, + * and saves it to the specified path. 
+ * Call timing: Call this method after joining a channel. + * Related callbacks: After a successful call of this method, the SDK triggers the `onSnapshotTaken` + * callback to report whether the snapshot is successfully taken, as well as the details for that * snapshot. * * @note - * - Call this method after joining a channel. - * - This method takes a snapshot of the published video stream specified in `ChannelMediaOptions`. - * - If the user's video has been preprocessed, for example, watermarked or beautified, the resulting - * snapshot includes the pre-processing effect. + * - The method is asynchronous, and the SDK has not taken the snapshot when the method call + * returns. + * - When used for local video snapshots, this method takes a snapshot for the video streams + * specified in `ChannelMediaOptions`. + * - If the user's video has been preprocessed, for example, watermarked or beautified, the + * resulting snapshot includes the pre-processing effect. * * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. * @param filePath The local path (including filename extensions) of the snapshot. For example: @@ -8098,226 +11972,372 @@ class IRtcEngine : public agora::base::IEngineBase { * - iOS: `/App Sandbox/Library/Caches/example.jpg` * - macOS: `~/Library/Logs/example.jpg` * - Android: `/storage/emulated/0/Android/data//files/example.jpg` + * Attention: Ensure that the path you specify exists and is writable. * - * Ensure that the path you specify exists and is writable. * @return - * - 0 : Success. - * - < 0 : Failure. + * - 0: Success. + * - < 0: Failure. */ virtual int takeSnapshot(uid_t uid, const char* filePath) = 0; - /** Enables the content inspect. - @param enabled Whether to enable content inspect: - - `true`: Yes. - - `false`: No. - @param config The configuration for the content inspection. - @return - - 0: Success. - - < 0: Failure. 
- */
+ /**
+ * @brief Takes a screenshot of the video at the specified observation point.
+ *
+ * @details
+ * This method takes a snapshot of a video stream from the specified user, generates a JPG image,
+ * and saves it to the specified path.
+ * Call timing: Call this method after joining a channel.
+ * Related callbacks: After a successful call of this method, the SDK triggers the `onSnapshotTaken`
+ * callback to report whether the snapshot is successfully taken, as well as the details for that
+ * snapshot.
+ *
+ * @note
+ * - The method is asynchronous, and the SDK has not taken the snapshot when the method call
+ * returns.
+ * - When used for local video snapshots, this method takes a snapshot for the video streams
+ * specified in `ChannelMediaOptions`.
+ *
+ * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video.
+ * @param config The configuration of the snapshot. See `SnapshotConfig`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
+ virtual int takeSnapshot(uid_t uid, const media::SnapshotConfig& config) = 0;
+
+ /**
+ * @brief Enables or disables video screenshot and upload.
+ *
+ * @details
+ * When video screenshot and upload function is enabled, the SDK takes screenshots and uploads
+ * videos sent by local users based on the type and frequency of the module you set in
+ * `ContentInspectConfig`. After video screenshot and upload, the Agora server sends the callback
+ * notification to your app server in HTTPS requests and sends all screenshots to the third-party
+ * cloud storage service.
+ * Call timing: This method can be called either before or after joining the channel.
+ *
+ * @note
+ * - Before calling this method, make sure you have enabled video screenshot and upload on Agora
+ * console.
+ * - When the video moderation module is set to video moderation via Agora self-developed extension(
+ * `CONTENT_INSPECT_SUPERVISION` ), the video screenshot and upload dynamic library
+ * `libagora_content_inspect_extension.dll` is required. Deleting this library disables the
+ * screenshot and upload feature.
+ *
+ * @param enabled Whether to enable video screenshot and upload:
+ * - `true`: Enables video screenshot and upload.
+ * - `false`: Disables video screenshot and upload.
+ * @param config Screenshot and upload configuration. See `ContentInspectConfig`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
 virtual int enableContentInspect(bool enabled, const media::ContentInspectConfig &config) = 0;

- /*
- * Adjust the custom audio publish volume by track id.
- * @param trackId custom audio track id.
- * @param volume The volume, range is [0,100]:
- * 0: mute, 100: The original volume
+ /**
+ * @brief Adjusts the volume of the custom audio track played remotely.
+ *
+ * @details
+ * If you want to change the volume of the audio played remotely, you need to call this method
+ * again.
+ *
+ * @note Ensure you have called the `createCustomAudioTrack` method to create a custom audio track
+ * before calling this method.
+ *
+ * @param trackId The audio track ID. Set this parameter to the custom audio track ID returned in
+ * `createCustomAudioTrack`.
+ * @param volume The volume of the audio source. The value can range from 0 to 100. 0 means mute;
+ * 100 means the original volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
 virtual int adjustCustomAudioPublishVolume(track_id_t trackId, int volume) = 0;

- /*
- * Adjust the custom audio playout volume by track id.
- * @param trackId custom audio track id.
- * @param volume The volume, range is [0,100]:
- * 0: mute, 100: The original volume
+ /**
+ * @brief Adjusts the volume of the custom audio track played locally.
+ *
+ * @details
+ * If you want to change the volume of the audio to be played locally, you need to call this method
+ * again.
+ *
+ * @note Ensure you have called the `createCustomAudioTrack` method to create a custom audio track
+ * before calling this method.
+ *
+ * @param trackId The audio track ID. Set this parameter to the custom audio track ID returned in
+ * `createCustomAudioTrack`.
+ * @param volume The volume of the audio source. The value can range from 0 to 100. 0 means mute;
+ * 100 means the original volume.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ */
 virtual int adjustCustomAudioPlayoutVolume(track_id_t trackId, int volume) = 0;

- /** Sets the Agora cloud proxy service.
+ /**
+ * @brief Sets up cloud proxy service.
 *
 * @since v3.3.0
 *
- * When the user's firewall restricts the IP address and port, refer to *Use Cloud Proxy* to add the specific
- * IP addresses and ports to the firewall allowlist; then, call this method to enable the cloud proxy and set
- * the `proxyType` parameter as `UDP_PROXY(1)`, which is the cloud proxy for the UDP protocol.
- *
- * After a successfully cloud proxy connection, the SDK triggers
- * the \ref IRtcEngineEventHandler::onConnectionStateChanged "onConnectionStateChanged" (CONNECTION_STATE_CONNECTING, CONNECTION_CHANGED_SETTING_PROXY_SERVER) callback.
- *
- * To disable the cloud proxy that has been set, call `setCloudProxy(NONE_PROXY)`. To change the cloud proxy type that has been set,
- * call `setCloudProxy(NONE_PROXY)` first, and then call `setCloudProxy`, and pass the value that you expect in `proxyType`.
+ * @details
+ * When users' network access is restricted by a firewall, configure the firewall to allow specific
+ * IP addresses and ports provided by Agora; then, call this method to enable the cloud proxy
+ * and set the cloud proxy type with the `proxyType` parameter.
+ * After successfully connecting to the cloud proxy, the SDK triggers the `onConnectionStateChanged` + * ( CONNECTION_STATE_CONNECTING, CONNECTION_CHANGED_SETTING_PROXY_SERVER ) callback. + * To disable the cloud proxy that has been set, call the `setCloudProxy(NONE_PROXY)`. + * To change the cloud proxy type that has been set, call the `setCloudProxy` `(NONE_PROXY) ` first, + * and then call the `setCloudProxy` to set the `proxyType` you want. * * @note - * - Agora recommends that you call this method before joining the channel or after leaving the channel. - * - For the SDK v3.3.x, the services for pushing streams to CDN and co-hosting across channels are not available - * when you use the cloud proxy for the UDP protocol. For the SDK v3.4.0 and later, the services for pushing streams - * to CDN and co-hosting across channels are not available when the user is in a network environment with a firewall - * and uses the cloud proxy for the UDP protocol. + * - Agora recommends that you call this method before joining a channel. + * - When a user is behind a firewall and uses the Force UDP cloud proxy, the services for Media + * Push and cohosting across channels are not available. + * - When you use the Force TCP cloud proxy, note that an error would occur when calling the + * `startAudioMixing(const char* filePath, bool loopback, int cycle, int startPos)` method to play + * online music files in the HTTP protocol. The services for + * Media Push and cohosting across channels use the cloud proxy with the TCP protocol. * - * @param proxyType The cloud proxy type, see #CLOUD_PROXY_TYPE. This parameter is required, and the SDK reports an error if you do not pass in a value. + * @param proxyType The type of the cloud proxy. See `CLOUD_PROXY_TYPE`. + * This parameter is mandatory. The SDK reports an error if you do not pass in a value. * * @return * - 0: Success. * - < 0: Failure. - * - `-2(ERR_INVALID_ARGUMENT)`: The parameter is invalid. 
- * - `-7(ERR_NOT_INITIALIZED)`: The SDK is not initialized. + * - -2: The parameter is invalid. + * - -7: The SDK is not initialized. */ virtual int setCloudProxy(CLOUD_PROXY_TYPE proxyType) = 0; - /** set local access point addresses in local proxy mode. use this method before join channel. - - @param config The LocalAccessPointConfiguration class, See the definition of LocalAccessPointConfiguration for details. - - @return - - 0: Success - - < 0: Failure + /** + * @brief Configures the connection to Agora's Private Media Server access module. + * + * @details + * After successfully deploying the Agora Private Media Server and integrating the 4.x RTC SDK on + * intranet clients, you can call this method to specify the Local Access Point and assign the + * access module to the SDK. + * Call timing: This method must be called before joining a channel. + * + * @note This method takes effect only after deploying the Agora Hybrid Cloud solution. You can + * `contact sales` to learn more and deploy the Agora Hybrid Cloud. + * + * @param config Local Access Point configuration. See `LocalAccessPointConfiguration`. + * + * @return + * - 0: Success. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int setLocalAccessPoint(const LocalAccessPointConfiguration& config) = 0; - /** set advanced audio options. - @param options The AdvancedAudioOptions class, See the definition of AdvancedAudioOptions for details. - - @return - - 0: Success - - < 0: Failure + /** + * @brief Sets audio advanced options. + * + * @details + * If you have advanced audio processing requirements, such as capturing and sending stereo audio, + * you can call this method to set advanced audio options. + * + * @note Call this method after calling `joinChannel(const char* token, const char* channelId, uid_t + * uid, const ChannelMediaOptions& options)`, `enableAudio` and `enableLocalAudio`. + * + * @param options The advanced options for audio. 
See `AdvancedAudioOptions`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setAdvancedAudioOptions(AdvancedAudioOptions& options, int sourceType = 0) = 0; - /** Bind local user and a remote user as an audio&video sync group. The remote user is defined by cid and uid. - * There’s a usage limit that local user must be a video stream sender. On the receiver side, media streams from same sync group will be time-synced + /** + * @brief Sets audio-video synchronization for the sender. * - * @param channelId The channel id - * @param uid The user ID of the remote user to be bound with (local user) + * @details + * A user may use two separate devices to send audio and video streams. To ensure audio and video + * are synchronized on the receiving end, you can call this method on the video sender and pass in + * the channel name and user ID of the audio sender. The SDK + * will use the timestamp of the audio stream as the reference to automatically adjust the video + * stream, ensuring audio-video synchronization even if the two sending devices are on different + * uplink networks (e.g., Wi-Fi and 4G). + * + * @note Agora recommends that you call this method before joining a channel. + * + * @param channelId The channel name of the audio sender. + * @param uid The user ID of the audio sender. * * @return * - 0: Success. - * - < 0: Failure. + * - < 0: Failure. See `Error Codes` for details and resolution suggestions. */ virtual int setAVSyncSource(const char* channelId, uid_t uid) = 0; /** - * @brief enable or disable video image source to replace the current video source published or resume it + * @brief Sets whether to replace the current video feeds with images when publishing video streams. + * + * @details + * When publishing video streams, you can call this method to replace the current video feeds with + * custom images. 
+ * Once you enable this function, you can select images to replace the video feeds through the + * `ImageTrackOptions` parameter. If you disable this function, the remote users see the video feeds + * that you publish. + * Call timing: Call this method after joining a channel. * - * @param enable true for enable, false for disable - * @param options options for image track + * @param enable Whether to replace the current video feeds with custom images: + * - `true`: Replace the current video feeds with custom images. + * - `false`: (Default) Do not replace the current video feeds with custom images. + * @param options Image configurations. See `ImageTrackOptions`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int enableVideoImageSource(bool enable, const ImageTrackOptions& options) = 0; - /* - * Get monotonic time in ms which can be used by capture time, - * typical scenario is as follows: + /** + * @brief Gets the current Monotonic Time of the SDK. * - * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - * | // custom audio/video base capture time, e.g. the first audio/video capture time. | - * | int64_t custom_capture_time_base; | - * | | - * | int64_t agora_monotonic_time = getCurrentMonotonicTimeInMs(); | - * | | - * | // offset is fixed once calculated in the begining. | - * | const int64_t offset = agora_monotonic_time - custom_capture_time_base; | - * | | - * | // realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| - * | // actual_audio/video_capture_time is the actual capture time transfered to sdk. 
| - * | int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | - * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | - * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + * @details + * Monotonic Time refers to a monotonically increasing time series whose value increases over time. + * The unit is milliseconds. + * In custom video capture and custom audio capture scenarios, in order to ensure audio and video + * synchronization, Agora recommends that you call this method to obtain the current Monotonic Time + * of the SDK, and then pass this value into the timestamp parameter in the captured video frame ( + * `VideoFrame` ) and audio frame ( `AudioFrame` ). + * Call timing: This method can be called either before or after joining the channel. * * @return - * - >= 0: Success. + * - ≥0: The method call is successful, and returns the current Monotonic Time of the SDK (in + * milliseconds). * - < 0: Failure. */ virtual int64_t getCurrentMonotonicTimeInMs() = 0; /** - * Turns WIFI acceleration on or off. + * @brief Gets the type of the local network connection. * - * @note - * - This method is called before and after joining a channel. - * - Users check the WIFI router app for information about acceleration. Therefore, if this interface is invoked, the caller accepts that the caller's name will be displayed to the user in the WIFI router application on behalf of the caller. + * @details + * You can use this method to get the type of network in use at any stage. * - * @param enabled - * - true:Turn WIFI acceleration on. - * - false:Turn WIFI acceleration off. + * @note You can call this method either before or after joining a channel. * * @return - * - 0: Success. - * - < 0: Failure. + * - ≥ 0: The method call is successful, and the local network connection type is returned. + * - 0: The SDK disconnects from the network. + * - 1: The network type is LAN. 
+ * - 2: The network type is Wi-Fi (including hotspots). + * - 3: The network type is mobile 2G. + * - 4: The network type is mobile 3G. + * - 5: The network type is mobile 4G. + * - 6: The network type is mobile 5G. + * - < 0: The method call failed with an error code. + * - -1: The network type is unknown. */ - virtual int enableWirelessAccelerate(bool enabled) = 0; - - /** - * get network type value - * - * @return - * - 0: DISCONNECTED. - * - 1: LAN. - * - 2: WIFI. - * - 3: MOBILE_2G. - * - 4: MOBILE_3G. - * - 5: MOBILE_4G. - * - 6: MOBILE_5G. - * - -1: UNKNOWN. - */ - virtual int getNetworkType() = 0; - /** Provides the technical preview functionalities or special customizations by configuring the SDK with JSON options. - - @param parameters Pointer to the set parameters in a JSON string. - - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Provides technical preview functionalities or special customizations by configuring the + * SDK with JSON options. + * + * @details + * Contact `technical support` to get the JSON configuration method. + * + * @param parameters Pointer to the set parameters in a JSON string. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int setParameters(const char* parameters) = 0; /** - @brief Start tracing media rendering events. - @since v4.1.1 - @discussion - - SDK will trace media rendering events when this API is called. - - The tracing result can be obtained through callback `IRtcEngineEventHandler::onVideoRenderingTracingResult` - @note - - By default, SDK will trace media rendering events when `IRtcEngine::joinChannel` is called. - - The start point of event tracing will be reset after leaving channel. - @return - - 0: Success. - - < 0: Failure. - - -7(ERR_NOT_INITIALIZED): The SDK is not initialized. Initialize the `IRtcEngine` instance before calling this method. + * @brief Enables tracing the video frame rendering process. 
+ * + * @since v4.1.1 + * + * @details + * The SDK starts tracing the rendering status of the video frames in the channel from the moment + * this method is successfully called and reports information about the event through the + * `onVideoRenderingTracingResult` callback. + * Applicable scenarios: Agora recommends that you use this method in conjunction with the UI + * settings (such as buttons and sliders) in your app to improve the user experience. For example, + * call this method when the user clicks the Join Channel button, and then get the time spent during + * the video frame rendering process through the `onVideoRenderingTracingResult` callback, so as to + * optimize the indicators accordingly. + * + * @note + * - If you have not called this method, the SDK tracks the rendering events of the video frames + * from the moment you call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to join the channel. You can call this method at an + * appropriate time according to the actual application scenario to set the starting position for + * tracking video rendering events. + * - After the local user leaves the current channel, the SDK automatically tracks the video + * rendering events from the moment you join a channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -7: The method is called before `IRtcEngine` is initialized. */ virtual int startMediaRenderingTracing() = 0; /** - @brief Enable instant media rendering. - @since v4.1.1 - @discussion - - This method enable SDK to render video or playout audio faster. - @note - - Once enable this mode, we should destroy rtc engine to disable it. - - Enable this mode, will sacrifice some part of experience. - @return - - 0: Success. - - < 0: Failure. - - -7(ERR_NOT_INITIALIZED): The SDK is not initialized. Initialize the `IRtcEngine` instance before calling this method. + * @brief Enables audio and video frame instant rendering. 
+ * + * @since v4.1.1 + * + * @details + * After successfully calling this method, the SDK enables the instant frame rendering mode, which + * can speed up the first frame rendering after the user joins the channel. + * Applicable scenarios: Agora recommends that you enable this mode for the audience in a live + * streaming scenario. + * Call timing: Call this method before joining a channel. + * + * @note + * Both the host and audience member need to call this method in order to experience instant + * rendering of audio and video frames. + * Once the method is successfully called, you can only cancel instant rendering by calling + * `release` to destroy the `IRtcEngine` object. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -7: The method is called before `IRtcEngine` is initialized. */ virtual int enableInstantMediaRendering() = 0; /** - * Return current NTP(unix timestamp) time in milliseconds. + * @brief Gets the current NTP (Network Time Protocol) time. + * + * @details + * In the real-time chorus scenario, especially when the downlink connections are inconsistent due + * to network issues among multiple receiving ends, you can call this method to obtain the current + * NTP time as the reference time, in order to align the lyrics and music of multiple receiving ends + * and achieve chorus synchronization. + * + * @return + * The Unix timestamp (ms) of the current NTP time. */ virtual uint64_t getNtpWallTimeInMs() = 0; - /** - * @brief Whether the target feature is available for the device. + /** + * @brief Checks whether the device supports the specified advanced feature. + * * @since v4.3.0 - * @param type The feature type. See FeatureType. + * + * @details + * Checks whether the capabilities of the current device meet the requirements for advanced features + * such as virtual background and image enhancement. 
+ * Applicable scenarios: Before using advanced features, you can check whether the current device + * supports these features based on the call result. This helps to avoid performance degradation or + * unavailable features when enabling advanced features on low-end devices. Based on the return + * value of this method, you can decide whether to display or enable the corresponding feature + * button, or notify the user when the device's capabilities are insufficient. + * + * @param type The type of the advanced feature, see `FeatureType`. + * * @return - * - true: available. - * - false: not available. + * - `true`: The current device supports the specified feature. + * - `false`: The current device does not support the specified feature. */ virtual bool isFeatureAvailableOnDevice(FeatureType type) = 0; @@ -8332,6 +12352,18 @@ class IRtcEngine : public agora::base::IEngineBase { * @technical preview */ virtual int sendAudioMetadata(const char* metadata, size_t length) = 0; + + /** + * @brief Queries the HDR capability of the video module + * @since v4.6.0 + * @param videoModule The video module. See VIDEO_MODULE_TYPE + * @param capability HDR capability of video module. See HDR_CAPABILITY + * @return + * - 0: success + * - <0: failure + * @technical preview + */ + virtual int queryHDRCapability(VIDEO_MODULE_TYPE videoModule, HDR_CAPABILITY& capability) = 0; }; // The following types are either deprecated or not implmented yet. @@ -8344,21 +12376,34 @@ enum QUALITY_REPORT_FORMAT_TYPE { QUALITY_REPORT_HTML = 1, }; -/** Media device states. */ +/** + * @brief Media device states. + */ enum MEDIA_DEVICE_STATE_TYPE { - /** 0: The device is ready for use. + /** + * 0: The device is ready for use. */ MEDIA_DEVICE_STATE_IDLE = 0, - /** 1: The device is active. + /** + * 1: The device is in use. */ MEDIA_DEVICE_STATE_ACTIVE = 1, - /** 2: The device is disabled. + /** + * 2: The device is disabled. */ MEDIA_DEVICE_STATE_DISABLED = 2, - /** 4: The device is not present. 
+ + /** + * 3: The device is plugged in. + */ + MEDIA_DEVICE_STATE_PLUGGED_IN = 3, + + /** + * 4: The device is not found. */ MEDIA_DEVICE_STATE_NOT_PRESENT = 4, - /** 8: The device is unplugged. + /** + * 8: The device is unplugged. */ MEDIA_DEVICE_STATE_UNPLUGGED = 8 }; @@ -8538,10 +12583,15 @@ class AVideoDeviceManager : public agora::util::AutoPtr { */ //////////////////////////////////////////////////////// -/** Creates the RTC engine object and returns the pointer. - -* @return Pointer of the RTC engine object. -*/ +/** + * @brief Creates one `IRtcEngine` object. + * + * @details + * Currently, the Agora RTC SDK v4.x supports creating only one `IRtcEngine` object for each app. + * + * @return + * Pointer to the `IRtcEngine` object. + */ AGORA_API agora::rtc::IRtcEngine* AGORA_CALL createAgoraRtcEngine(); //////////////////////////////////////////////////////// diff --git a/include/IAgoraRtcEngineEx.h b/include/IAgoraRtcEngineEx.h index 099de84..cc2e358 100644 --- a/include/IAgoraRtcEngineEx.h +++ b/include/IAgoraRtcEngineEx.h @@ -15,22 +15,15 @@ namespace rtc { // OPTIONAL_ENUM_CLASS RTC_EVENT; /** - * Rtc Connection. + * @brief Contains connection information. */ struct RtcConnection { /** - * The unique channel name for the AgoraRTC session in the string format. The string - * length must be less than 64 bytes. Supported character scopes are: - * - All lowercase English letters: a to z. - * - All uppercase English letters: A to Z. - * - All numeric characters: 0 to 9. - * - The space character. - * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", - * ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * The channel name. */ const char* channelId; /** - * User ID: A 32-bit unsigned integer ranging from 1 to (2^32-1). It must be unique. + * The ID of the local user. 
*/ uid_t localUid; @@ -72,6 +65,9 @@ class IRtcEngineEventHandlerEx : public IRtcEngineEventHandler { using IRtcEngineEventHandler::onConnectionBanned; using IRtcEngineEventHandler::onStreamMessage; using IRtcEngineEventHandler::onStreamMessageError; + using IRtcEngineEventHandler::onRdtMessage; + using IRtcEngineEventHandler::onRdtStateChanged; + using IRtcEngineEventHandler::onMediaControlMessage; using IRtcEngineEventHandler::onRequestToken; using IRtcEngineEventHandler::onTokenPrivilegeWillExpire; using IRtcEngineEventHandler::onLicenseValidationFailure; @@ -86,8 +82,6 @@ class IRtcEngineEventHandlerEx : public IRtcEngineEventHandler { using IRtcEngineEventHandler::onRemoteAudioTransportStats; using IRtcEngineEventHandler::onRemoteVideoTransportStats; using IRtcEngineEventHandler::onConnectionStateChanged; - using IRtcEngineEventHandler::onWlAccMessage; - using IRtcEngineEventHandler::onWlAccStats; using IRtcEngineEventHandler::onNetworkTypeChanged; using IRtcEngineEventHandler::onEncryptionError; using IRtcEngineEventHandler::onUploadLogResult; @@ -102,6 +96,8 @@ class IRtcEngineEventHandlerEx : public IRtcEngineEventHandler { using IRtcEngineEventHandler::onSetRtmFlagResult; using IRtcEngineEventHandler::onTranscodedStreamLayoutInfo; using IRtcEngineEventHandler::onAudioMetadataReceived; + using IRtcEngineEventHandler::onMultipathStats; + using IRtcEngineEventHandler::onRenewTokenResult; virtual const char* eventHandlerType() const { return "event_handler_ex"; } @@ -524,9 +520,10 @@ class IRtcEngineEventHandlerEx : public IRtcEngineEventHandler { * stream (high bitrate, and high-resolution video stream). * * @param connection The RtcConnection object. + * @param sourceType The video source type: #VIDEO_SOURCE_TYPE. * @param stats Statistics of the local video stream. See LocalVideoStats. 
 */
-  virtual void onLocalVideoStats(const RtcConnection& connection, const LocalVideoStats& stats) {
+  virtual void onLocalVideoStats(const RtcConnection& connection, VIDEO_SOURCE_TYPE sourceType, const LocalVideoStats& stats) {
     (void)connection;
     (void)stats;
   }
@@ -627,6 +624,62 @@
     (void)cached;
   }
 
+  /**
+   * @brief Occurs when the local user receives data via Reliable Data Transmission (RDT) from a remote user.
+   *
+   * @technical preview
+   *
+   * @details The SDK triggers this callback when the user receives the data stream that another user sends
+   * by calling the \ref agora::rtc::IRtcEngine::sendRdtMessage "sendRdtMessage" method.
+   *
+   * @param connection The RtcConnection object.
+   * @param userId ID of the user who sends the data.
+   * @param type The RDT stream type. See RdtStreamType.
+   * @param data The data received.
+   * @param length The length (byte) of the data.
+   */
+  virtual void onRdtMessage(const RtcConnection& connection, uid_t userId, RdtStreamType type, const char *data, size_t length) {
+    (void)connection;
+    (void)userId;
+    (void)type;
+    (void)data;
+    (void)length;
+  }
+
+  /**
+   * @brief Occurs when the RDT tunnel state changed
+   *
+   * @technical preview
+   *
+   * @param connection The RtcConnection object.
+   * @param userId ID of the user who sends the data.
+   * @param state The RDT tunnel state. See RdtState.
+   */
+  virtual void onRdtStateChanged(const RtcConnection& connection, uid_t userId, RdtState state) {
+    (void)connection;
+    (void)userId;
+    (void)state;
+  }
+
+  /**
+   * @brief Occurs when the local user receives media control message sent by a remote user.
+   *
+   * @technical preview
+   *
+   * @details The SDK triggers this callback when the user receives data sent by a remote user using the sendMediaControlMessage method.
+   *
+   * @param connection The RtcConnection object.
+   * @param userId ID of the user who sends the data.
+   * @param data The data received.
+ * @param length The length (byte) of the data. + */ + virtual void onMediaControlMessage(const RtcConnection& connection, uid_t userId, const char* data, size_t length) { + (void)connection; + (void)userId; + (void)data; + (void)length; + } + /** * Occurs when the token expires. * @@ -882,32 +935,6 @@ class IRtcEngineEventHandlerEx : public IRtcEngineEventHandler { (void)reason; } - /** Occurs when the WIFI message need be sent to the user. - * - * @param connection The RtcConnection object. - * @param reason The reason of notifying the user of a message. - * @param action Suggest an action for the user. - * @param wlAccMsg The message content of notifying the user. - */ - virtual void onWlAccMessage(const RtcConnection& connection, WLACC_MESSAGE_REASON reason, WLACC_SUGGEST_ACTION action, const char* wlAccMsg) { - (void)connection; - (void)reason; - (void)action; - (void)wlAccMsg; - } - - /** Occurs when SDK statistics wifi acceleration optimization effect. - * - * @param connection The RtcConnection object. - * @param currentStats Instantaneous value of optimization effect. - * @param averageStats Average value of cumulative optimization effect. - */ - virtual void onWlAccStats(const RtcConnection& connection, WlAccStats currentStats, WlAccStats averageStats) { - (void)connection; - (void)currentStats; - (void)averageStats; - } - /** Occurs when the local network type changes. * * This callback occurs when the connection state of the local user changes. You can get the @@ -987,7 +1014,7 @@ class IRtcEngineEventHandlerEx : public IRtcEngineEventHandler { /** * Reports the tracing result of video rendering event of the user. - * + * * @param connection The RtcConnection object. * @param uid The user ID. * @param currentEvent The current event of the tracing result: #MEDIA_TRACE_EVENT. @@ -1034,161 +1061,323 @@ class IRtcEngineEventHandlerEx : public IRtcEngineEventHandler { * @param uid ID of the remote user. 
* @param metadata The pointer of metadata * @param length Size of metadata - * @technical preview + * @technical preview */ virtual void onAudioMetadataReceived(const RtcConnection& connection, uid_t uid, const char* metadata, size_t length) { (void)metadata; (void)length; } + + /** + * @brief Callback for multipath transmission statistics. + * + * @since 4.6.0 + * + * @details + * Call timing: This callback is triggered after you set `enableMultipath` to `true` to enable + * multipath transmission. + * + * @param stats Multipath transmission statistics. See `MultipathStats`. + * + */ + virtual void onMultipathStats(const RtcConnection& connection, const MultipathStats& stats) { + (void)stats; + (void)connection; + } + + /** + * Occurs when a user renews the token. + * + * This callback notifies the app that the user renews the token by calling `renewToken`. From this callback, + * the app can get the result of `renewToken`. + * + * @param connection The RtcConnection object. + * @param token The token. + * @param code The error code. + */ + virtual void onRenewTokenResult(const RtcConnection& connection, const char* token, RENEW_TOKEN_ERROR_CODE code) { + (void)token; + (void)code; + } }; class IRtcEngineEx : public IRtcEngine { public: - /** - * Joins a channel with media options. - * - * This method enables users to join a channel. Users in the same channel can talk to each other, - * and multiple users in the same channel can start a group chat. Users with different App IDs - * cannot call each other. - * - * A successful call of this method triggers the following callbacks: - * - The local client: The `onJoinChannelSuccess` and `onConnectionStateChanged` callbacks. - * - The remote client: `onUserJoined`, if the user joining the channel is in the Communication - * profile or is a host in the Live-broadcasting profile. 
- * - * When the connection between the client and Agora's server is interrupted due to poor network - * conditions, the SDK tries reconnecting to the server. When the local client successfully rejoins - * the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client. - * - * Compared to `joinChannel`, this method adds the options parameter to configure whether to - * automatically subscribe to all remote audio and video streams in the channel when the user - * joins the channel. By default, the user subscribes to the audio and video streams of all - * the other users in the channel, giving rise to usage and billings. To unsubscribe, set the - * `options` parameter or call the `mute` methods accordingly. - * - * @note - * - This method allows users to join only one channel at a time. - * - Ensure that the app ID you use to generate the token is the same app ID that you pass in the - * `initialize` method; otherwise, you may fail to join the channel by token. - * - * @param connection The RtcConnection object. - * @param token The token generated on your server for authentication. - * @param options The channel media options: ChannelMediaOptions. - * @param eventHandler The event handler: IRtcEngineEventHandler. - * - * @return - * - 0: Success. - * - < 0: Failure. - * - -2: The parameter is invalid. For example, the token is invalid, the uid parameter is not set - * to an integer, or the value of a member in the `ChannelMediaOptions` structure is invalid. You need - * to pass in a valid parameter and join the channel again. - * - -3: Failes to initialize the `IRtcEngine` object. You need to reinitialize the IRtcEngine object. - * - -7: The IRtcEngine object has not been initialized. You need to initialize the IRtcEngine - * object before calling this method. - * - -8: The internal state of the IRtcEngine object is wrong. 
The typical cause is that you call - * this method to join the channel without calling `stopEchoTest` to stop the test after calling - * `startEchoTest` to start a call loop test. You need to call `stopEchoTest` before calling this method. - * - -17: The request to join the channel is rejected. The typical cause is that the user is in the - * channel. Agora recommends using the `onConnectionStateChanged` callback to get whether the user is - * in the channel. Do not call this method to join the channel unless you receive the - * `CONNECTION_STATE_DISCONNECTED(1)` state. - * - -102: The channel name is invalid. You need to pass in a valid channel name in channelId to - * rejoin the channel. - * - -121: The user ID is invalid. You need to pass in a valid user ID in uid to rejoin the channel. - */ + /** + * @brief Joins a channel. + * + * @details + * You can call this method multiple times to join more than one channel. If you want to join the + * same channel from different devices, ensure that the user IDs are different for all devices. + * Applicable scenarios: This method can be called in scenarios involving multiple channels. + * Call timing: Call this method after `initialize`. + * In a multi-camera capture scenario, you need to call the `startPreview(VIDEO_SOURCE_TYPE + * sourceType)` method after + * calling this method to set the `sourceType` to `VIDEO_SOURCE_CAMERA_SECONDARY`, to ensure that + * the second camera captures normally. + * Related callbacks: A successful call of this method triggers the following callbacks: + * - The local client: The `onJoinChannelSuccess` and `onConnectionStateChanged` callbacks. + * - The remote client: The `onUserJoined` callback, if a user joining the channel in the + * COMMUNICATION profile, or a host joining a channel in the LIVE_BROADCASTING profile. + * When the connection between the local client and Agora's server is interrupted due to poor + * network conditions, the SDK tries reconnecting to the server. 
When the local client successfully
+   * rejoins the channel, the SDK triggers the `onRejoinChannelSuccess` callback on the local client.
+   *
+   * @note
+   * If you are already in a channel, you cannot rejoin the channel with the same user ID.
+   * Before joining a channel, ensure that the App ID you use to generate a token is the same as that
+   * you pass in the `initialize` method; otherwise, you may fail to join the channel with the token.
+   *
+   * @param token The token generated on your server for authentication. See `Secure authentication with tokens`. Note:
+   * - (Recommended) If your project has enabled the security mode (using APP ID and Token for
+   * authentication), this parameter is required.
+   * - If you have only enabled the testing mode (using APP ID for authentication), this parameter is
+   * optional. You will automatically exit the channel 24 hours after successfully joining in.
+   * - If you need to join different channels at the same time or switch between channels, Agora
+   * recommends using a wildcard token so that you don't need to apply for a new token every time
+   * joining a channel. See `Secure authentication with tokens`.
+   * @param connection The connection information. See `RtcConnection`.
+   * @param options The channel media options. See `ChannelMediaOptions`.
+   * @param eventHandler The callback class of `IRtcEngineEx`. See `IRtcEngineEventHandler`. You can
+   * get the callback events of multiple channels through the `eventHandler` object passed in this
+   * parameter.
+   *
+   * @return
+   * - 0: Success.
+   * - < 0: Failure.
+   * - -2: The parameter is invalid. For example, the token is invalid, the `uid` parameter is not
+   * set to an integer, or the value of a member in `ChannelMediaOptions` is invalid. You need to pass
+   * in a valid parameter and join the channel again.
+   * - -3: Fails to initialize the `IRtcEngine` object. You need to reinitialize the `IRtcEngine`
+   * object.
+   * - -7: The `IRtcEngine` object has not been initialized. 
You need to initialize the `IRtcEngine` + * object before calling this method. + * - -8: The internal state of the `IRtcEngine` object is wrong. The typical cause is that after + * calling `startEchoTest` to start a call loop test, you call this method to join the channel + * without calling `stopEchoTest` to stop the test. You need to call `stopEchoTest` before calling + * this method. + * - -17: The request to join the channel is rejected. The typical cause is that the user is + * already in the channel. Agora recommends that you use the `onConnectionStateChanged` callback to + * see whether the user is in the channel. Do not call this method to join the channel unless you + * receive the `CONNECTION_STATE_DISCONNECTED` (1) state. + * - -102: The channel name is invalid. You need to pass in a valid channel name in `channelId` to + * rejoin the channel. + * - -121: The user ID is invalid. You need to pass in a valid user ID in `uid` to rejoin the + * channel. + */ virtual int joinChannelEx(const char* token, const RtcConnection& connection, const ChannelMediaOptions& options, IRtcEngineEventHandler* eventHandler) = 0; - /** - * Leaves the channel. - * - * This method allows a user to leave the channel, for example, by hanging up or exiting a call. - * - * This method is an asynchronous call, which means that the result of this method returns even before - * the user has not actually left the channel. Once the user successfully leaves the channel, the - * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback. - * - * @param connection The RtcConnection object. - * @return - * - 0: Success. - * - < 0: Failure. - */ + /** + * @brief Leaves a channel. + * + * @details + * After calling this method, the SDK terminates the audio and video interaction, leaves the current + * channel, and releases all resources related to the session. 
+ * After calling `joinChannelEx` to join a channel, you must call this method or + * `leaveChannelEx(const RtcConnection& connection, const LeaveChannelOptions& options)` + * to end the call, otherwise, the next call cannot be started. + * Applicable scenarios: This method can be called in scenarios involving multiple channels. + * Call timing: Call this method after `joinChannelEx`. + * Related callbacks: A successful call of this method triggers the following callbacks: + * - The local client: The `onLeaveChannel` callback will be triggered. + * - The remote client: The `onUserOffline` callback will be triggered after the remote host leaves + * the channel. + * + * @note + * If you call `release` immediately after calling this method, the SDK does not trigger the + * `onLeaveChannel` callback. + * - This method call is asynchronous. When this method returns, it does not necessarily mean that + * the user has left the channel. + * - If you call `leaveChannel()` or `leaveChannel(const LeaveChannelOptions& options)`, you will + * leave all the channels you + * have joined by calling `joinChannel(const char* token, const char* channelId, const char* info, + * uid_t uid)`, `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)`, or `joinChannelEx`. + * + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int leaveChannelEx(const RtcConnection& connection) = 0; /** - * Leaves the channel with the connection ID. + * @brief Sets channel options and leaves the channel. + * + * @details + * After calling this method, the SDK terminates the audio and video interaction, leaves the current + * channel, and releases all resources related to the session. 
+ * After calling `joinChannelEx` to join a channel, you must call this method or + * `leaveChannelEx(const RtcConnection& connection)` + * to end the call, otherwise, the next call cannot be started. + * Applicable scenarios: This method can be called in scenarios involving multiple channels. + * Call timing: Call this method after `joinChannelEx`. + * Related callbacks: A successful call of this method triggers the following callbacks: + * - The local client: The `onLeaveChannel` callback will be triggered. + * - The remote client: The `onUserOffline` callback will be triggered after the remote host leaves + * the channel. * - * @param connection connection. - * @param options The options for leaving the channel. See #LeaveChannelOptions. - * @return int + * @note + * If you call `release` immediately after calling this method, the SDK does not trigger the + * `onLeaveChannel` callback. + * - This method call is asynchronous. When this method returns, it does not necessarily mean that + * the user has left the channel. + * - If you call `leaveChannel()` or `leaveChannel(const LeaveChannelOptions& options)`, you will + * leave all the channels you + * have joined by calling `joinChannel(const char* token, const char* channelId, const char* info, + * uid_t uid)`, `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)`, or `joinChannelEx`. + * + * @param connection The connection information. See `RtcConnection`. + * @param options Since + * v4.1.0 + * The options for leaving the channel. See `LeaveChannelOptions`. + * Note: This parameter only supports the `stopMicrophoneRecording` member in the + * `LeaveChannelOptions` settings; setting other members does not take effect. + * + * @return * - 0: Success. * - < 0: Failure. */ virtual int leaveChannelEx(const RtcConnection& connection, const LeaveChannelOptions& options) = 0; + /** + * Leaves a channel with the channel ID and user account. 
+ * + * This method allows a user to leave the channel, for example, by hanging up or exiting a call. + * + * This method is an asynchronous call, which means that the result of this method returns even before + * the user has not actually left the channel. Once the user successfully leaves the channel, the + * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback. + * + * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount) = 0; + + /** + * Leaves a channel with the channel ID and user account and sets the options for leaving. + * + * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. 
+ * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param options The options for leaving the channel. See #LeaveChannelOptions. + * @return int + * - 0: Success. + * - < 0: Failure. + */ + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount, const LeaveChannelOptions& options) = 0; + /** - * Updates the channel media options after joining the channel. + * @brief Updates the channel media options after joining the channel. * - * @param options The channel media options: ChannelMediaOptions. - * @param connection The RtcConnection object. - * @return int + * @param options The channel media options. See `ChannelMediaOptions`. + * @param connection The connection information. See `RtcConnection`. + * + * @return * - 0: Success. * - < 0: Failure. + * - -2: The value of a member in `ChannelMediaOptions` is invalid. For example, the token or the + * user ID is invalid. You need to fill in a valid parameter. + * - -7: The `IRtcEngine` object has not been initialized. You need to initialize the `IRtcEngine` + * object before calling this method. + * - -8: The internal state of the `IRtcEngine` object is wrong. The possible reason is that the + * user is not in the channel. 
Agora recommends that you use the `onConnectionStateChanged` callback + * to see whether the user is in the channel. If you receive the `CONNECTION_STATE_DISCONNECTED` (1) + * or `CONNECTION_STATE_FAILED` (5) state, the user is not in the channel. You need to call + * `joinChannel(const char* token, const char* channelId, uid_t uid, const ChannelMediaOptions& + * options)` to join a channel before calling this method. */ virtual int updateChannelMediaOptionsEx(const ChannelMediaOptions& options, const RtcConnection& connection) = 0; /** - * Sets the video encoder configuration. + * @brief Sets the video encoder configuration. * - * Each configuration profile corresponds to a set of video parameters, including - * the resolution, frame rate, and bitrate. + * @details + * Sets the encoder configuration for the local video. Each configuration profile corresponds to a + * set of video parameters, including the resolution, frame rate, and bitrate. + * Call timing: Call this method after `joinChannelEx`. * - * The parameters specified in this method are the maximum values under ideal network conditions. - * If the video engine cannot render the video using the specified parameters due - * to poor network conditions, the parameters further down the list are considered - * until a successful configuration is found. + * @note The `config` specified in this method is the maximum value under ideal network conditions. + * If the video engine cannot render the video using the specified `config` due to unreliable + * network conditions, the parameters further down the list are considered until a successful + * configuration is found. + * + * @param config Video profile. See `VideoEncoderConfiguration`. + * @param connection The connection information. See `RtcConnection`. * - * @param config The local video encoder configuration: VideoEncoderConfiguration. - * @param connection The RtcConnection object. * @return * - 0: Success. * - < 0: Failure. 
*/ virtual int setVideoEncoderConfigurationEx(const VideoEncoderConfiguration& config, const RtcConnection& connection) = 0; /** - * Initializes the video view of a remote user. - * - * This method initializes the video view of a remote stream on the local device. It affects only the - * video view that the local user sees. - * - * Usually the app should specify the `uid` of the remote video in the method call before the - * remote user joins the channel. If the remote `uid` is unknown to the app, set it later when the - * app receives the \ref IRtcEngineEventHandler::onUserJoined "onUserJoined" callback. - * - * To unbind the remote user from the view, set `view` in VideoCanvas as `null`. + * @brief Initializes the video view of a remote user. + * + * @details + * This method initializes the video view of a remote stream on the local device. It affects only + * the video view that the local user sees. Call this method to bind the remote video stream to a + * video view and to set the rendering and mirror modes of the video view. + * The application specifies the uid of the remote video in the `VideoCanvas` method before the + * remote user joins the channel. + * If the remote uid is unknown to the application, set it after the application receives the + * `onUserJoined` callback. If the Video Recording function is enabled, the Video Recording Service + * joins the channel as a dummy client, causing other clients to also receive the `onUserJoined` + * callback. Do not bind the dummy client to the application view because the dummy client does not + * send any video streams. + * To unbind the remote user from the view, set the `view` parameter to NULL. + * Once the remote user leaves the channel, the SDK unbinds the remote user. * * @note - * Ensure that you call this method in the UI thread. + * - Call this method after `joinChannelEx`. + * - To update the rendering or mirror mode of the remote video view during a call, use the + * `setRemoteRenderModeEx` method. 
+ * + * @param canvas The remote video view settings. See `VideoCanvas`. + * @param connection The connection information. See `RtcConnection`. * - * @param canvas The remote video view settings: VideoCanvas. - * @param connection The RtcConnection object. * @return * - 0: Success. * - < 0: Failure. */ virtual int setupRemoteVideoEx(const VideoCanvas& canvas, const RtcConnection& connection) = 0; /** - * Stops or resumes receiving the audio stream of a specified user. + * @brief Stops or resumes receiving the audio stream of a specified user. * - * @note - * You can call this method before or after joining a channel. If a user - * leaves a channel, the settings in this method become invalid. + * @details + * This method is used to stops or resumes receiving the audio stream of a specified + * user. You can call this method before or after joining a channel. If a user leaves a channel, the + * settings in this method become invalid. * * @param uid The ID of the specified user. * @param mute Whether to stop receiving the audio stream of the specified user: - * - true: Stop receiving the audio stream of the specified user. - * - false: (Default) Resume receiving the audio stream of the specified user. - * @param connection The RtcConnection object. + * - `true`: Stop receiving the audio stream of the specified user. + * - `false`: (Default) Resume receiving the audio stream of the specified user. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1196,17 +1385,18 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int muteRemoteAudioStreamEx(uid_t uid, bool mute, const RtcConnection& connection) = 0; /** - * Stops or resumes receiving the video stream of a specified user. + * @brief Stops or resumes receiving the video stream of a specified user. * - * @note - * You can call this method before or after joining a channel. If a user - * leaves a channel, the settings in this method become invalid. 
+ * @details + * This method is used to stop or resume receiving the video stream of a specified user. You can + * call this method before or after joining a channel. If a user leaves a channel, the settings in + * this method become invalid. * - * @param uid The ID of the specified user. + * @param uid The user ID of the remote user. * @param mute Whether to stop receiving the video stream of the specified user: - * - true: Stop receiving the video stream of the specified user. - * - false: (Default) Resume receiving the video stream of the specified user. - * @param connection The RtcConnetion object. + * - `true`: Stop receiving the video stream of the specified user. + * - `false`: (Default) Resume receiving the video stream of the specified user. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1214,18 +1404,39 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int muteRemoteVideoStreamEx(uid_t uid, bool mute, const RtcConnection& connection) = 0; /** - * Sets the remote video stream type. - * - * If the remote user has enabled the dual-stream mode, by default the SDK receives the high-stream video by - * Call this method to switch to the low-stream video. - * - * @note - * This method applies to scenarios where the remote user has enabled the dual-stream mode using - * \ref enableDualStreamMode "enableDualStreamMode"(true) before joining the channel. - * - * @param uid ID of the remote user sending the video stream. - * @param streamType Sets the video stream type: #VIDEO_STREAM_TYPE. - * @param connection The RtcConnection object. + * @brief Sets the video stream type to subscribe to. 
+ * + * @details + * Depending on the default behavior of the sender and the specific settings when calling + * `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig)`, the + * scenarios for the receiver calling this method are as follows: + * - The SDK enables low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) on the + * sender side by default, meaning only the high-quality video stream is transmitted. Only the + * receiver with the role of the **host** can call this method to initiate a low-quality video stream + * request. Once the sender receives the request, it starts automatically sending the low-quality + * video stream. At this point, all users in the channel can call this method to switch to + * low-quality video stream subscription mode. + * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& + * streamConfig)` and sets `mode` to `DISABLE_SIMULCAST_STREAM` + * (never send low-quality video stream), then calling this method will have no effect. + * - If the sender calls `setDualStreamMode(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& + * streamConfig)` and sets `mode` to `ENABLE_SIMULCAST_STREAM` + * (always send low-quality video stream), both the host and audience receivers can call this method + * to switch to low-quality video stream subscription mode. + * The SDK will dynamically adjust the size of the corresponding video stream based on the size of + * the video window to save bandwidth and computing resources. The default aspect ratio of the + * low-quality video stream is the same as that of the high-quality video stream. According to the + * current aspect ratio of the high-quality video stream, the system will automatically allocate the + * resolution, frame rate, and bitrate of the low-quality video stream. 
+ * + * @note If the publisher has already called `setDualStreamModeEx` and set `mode` to + * `DISABLE_SIMULCAST_STREAM` (never send low-quality video stream), calling this method will not + * take effect; you should call `setDualStreamModeEx` again on the sending end and adjust the + * settings. + * + * @param uid The user ID. + * @param streamType The video stream type, see `VIDEO_STREAM_TYPE`. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1233,76 +1444,112 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int setRemoteVideoStreamTypeEx(uid_t uid, VIDEO_STREAM_TYPE streamType, const RtcConnection& connection) = 0; /** - *Stops or resumes sending the local audio stream with connection. + * @brief Stops or resumes publishing the local audio stream. * - *@param mute Determines whether to send or stop sending the local audio stream: - *- true: Stop sending the local audio stream. - *- false: Send the local audio stream. + * @details + * A successful call of this method triggers the `onUserMuteAudio` and `onRemoteAudioStateChanged` + * callbacks on the remote client. * - *@param connection The connection of the user ID. + * @note This method does not affect any ongoing audio recording, because it does not disable the + * audio capture device. * - *@return - *- 0: Success. - *- < 0: Failure. + * @param mute Whether to stop publishing the local audio stream: + * - `true`: Stops publishing the local audio stream. + * - `false`: (Default) Resumes publishing the local audio stream. + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int muteLocalAudioStreamEx(bool mute, const RtcConnection& connection) = 0; - + /** - *Stops or resumes sending the local video stream with connection. + * @brief Stops or resumes publishing the local video stream. 
* - *@param mute Determines whether to send or stop sending the local video stream: - *- true: Stop sending the local video stream. - *- false: Send the local video stream. + * @details + * A successful call of this method triggers the `onUserMuteVideo` callback on the remote client. * - *@param connection The connection of the user ID. + * @note This method does not affect any ongoing video recording, because it does not disable the + * camera. * - *@return - *- 0: Success. - *- < 0: Failure. + * @param mute Whether to stop publishing the local video stream. + * - `true`: Stop publishing the local video stream. + * - `false`: (Default) Publish the local video stream. + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int muteLocalVideoStreamEx(bool mute, const RtcConnection& connection) = 0; - + /** - *Stops or resumes receiving all remote audio stream with connection. + * @brief Stops or resumes subscribing to the audio streams of all remote users. + * + * @details + * After successfully calling this method, the local user stops or resumes subscribing to the audio + * streams of all remote users, including the ones who join the channel subsequent to this call. * - *@param mute Whether to stop receiving remote audio streams: - *- true: Stop receiving any remote audio stream. - *- false: Resume receiving all remote audio streams. + * @note + * - Call this method after joining a channel. + * - If you do not want to subscribe to the audio streams of remote users before joining a channel, you + * can set `autoSubscribeAudio` as `false` when calling `joinChannel(const char* token, const char* + * channelId, uid_t uid, const ChannelMediaOptions& options)`. * - *@param connection The connection of the user ID. + * @param mute Whether to stop subscribing to the audio streams of all remote users: + * - `true`: Stops subscribing to the audio streams of all remote users. 
+ * - `false`: (Default) Subscribes to the audio streams of all remote users by default. + * @param connection The connection information. See `RtcConnection`. * - *@return - *- 0: Success. - *- < 0: Failure. + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int muteAllRemoteAudioStreamsEx(bool mute, const RtcConnection& connection) = 0; - + /** - *Stops or resumes receiving all remote video stream with connection. + * @brief Stops or resumes subscribing to the video streams of all remote users. * - *@param mute Whether to stop receiving remote audio streams: - *- true: Stop receiving any remote audio stream. - *- false: Resume receiving all remote audio streams. + * @details + * After successfully calling this method, the local user stops or resumes subscribing to the video + * streams of all remote users, including all subsequent users. * - *@param connection The connection of the user ID. + * @param mute Whether to stop subscribing to the video streams of all remote users. + * - `true`: Stop subscribing to the video streams of all remote users. + * - `false`: (Default) Subscribe to the video streams of all remote users by default. + * @param connection The connection information. See `RtcConnection`. * - *@return - *- 0: Success. - *- < 0: Failure. + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int muteAllRemoteVideoStreamsEx(bool mute, const RtcConnection& connection) = 0; /** - * Sets the blocklist of subscribe remote stream audio. + * @brief Sets the blocklist of subscriptions for audio streams. * - * @note - * If uid is in uidList, the remote user's audio will not be subscribed, - * even if muteRemoteAudioStream(uid, false) and muteAllRemoteAudioStreams(false) are operated. + * @details + * You can call this method to specify the audio streams of a user that you do not want to subscribe + * to. * - * @param uidList The id list of users who do not subscribe to audio. - * @param uidNumber The number of uid in uidList. 
- * @param connection The RtcConnection object. + * @note + * - You can call this method either before or after joining a channel. + * - The blocklist is not affected by the setting in `muteRemoteAudioStream`, + * `muteAllRemoteAudioStreams`, and `autoSubscribeAudio` in `ChannelMediaOptions`. + * - Once the blocklist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect. + * + * @param uidList The user ID list of users that you do not want to subscribe to. + * If you want to specify the audio streams of a user that you do not want to subscribe to, add the + * user ID in this list. If you want to remove a user from the blocklist, you need to call the + * `setSubscribeAudioBlocklist` method to update the user ID list; this means you only add the `uid` + * of users that you do not want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1311,16 +1558,27 @@ class IRtcEngineEx : public IRtcEngine { virtual int setSubscribeAudioBlocklistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) = 0; /** - * Sets the allowlist of subscribe remote stream audio. + * @brief Sets the allowlist of subscriptions for audio streams. * - * @note - * - If uid is in uidList, the remote user's audio will be subscribed, - * even if muteRemoteAudioStream(uid, true) and muteAllRemoteAudioStreams(true) are operated. - * - If a user is in the blacklist and whitelist at the same time, the user will not subscribe to audio. + * @details + * You can call this method to specify the audio streams of a user that you want to subscribe to. * - * @param uidList The id list of users who do subscribe to audio. - * @param uidNumber The number of uid in uidList. 
- * @param connection The RtcConnection object. + * @note + * - You can call this method either before or after joining a channel. + * - The allowlist is not affected by the setting in `muteRemoteAudioStream`, + * `muteAllRemoteAudioStreams` and `autoSubscribeAudio` in `ChannelMediaOptions`. + * - Once the allowlist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect. + * + * @param uidList The user ID list of users that you want to subscribe to. + * If you want to specify the audio streams of a user for subscription, add the user ID in this + * list. If you want to remove a user from the allowlist, you need to call the + * `setSubscribeAudioAllowlist` method to update the user ID list; this means you only add the `uid` + * of users that you want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1329,15 +1587,28 @@ class IRtcEngineEx : public IRtcEngine { virtual int setSubscribeAudioAllowlistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) = 0; /** - * Sets the blocklist of subscribe remote stream video. + * @brief Sets the blocklist of subscriptions for video streams. * - * @note - * If uid is in uidList, the remote user's video will not be subscribed, - * even if muteRemoteVideoStream(uid, false) and muteAllRemoteVideoStreams(false) are operated. + * @details + * You can call this method to specify the video streams of a user that you do not want to subscribe + * to. * - * @param uidList The id list of users who do not subscribe to video. - * @param uidNumber The number of uid in uidList. - * @param connection The RtcConnection object. + * @note + * - You can call this method either before or after joining a channel. 
+ * - The blocklist is not affected by the setting in `muteRemoteVideoStream`, + * `muteAllRemoteVideoStreams` and `autoSubscribeAudio` in `ChannelMediaOptions`. + * - Once the blocklist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect. + * + * @param uidList The user ID list of users that you do not want to subscribe to. + * If you want to specify the video streams of a user that you do not want to subscribe to, add the + * user ID of that user in this list. If you want to remove a user from the blocklist, you need to + * call the `setSubscribeVideoBlocklist` method to update the user ID list; this means you only add + * the `uid` of users that you do not want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1346,16 +1617,27 @@ class IRtcEngineEx : public IRtcEngine { virtual int setSubscribeVideoBlocklistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) = 0; /** - * Sets the allowlist of subscribe remote stream video. + * @brief Sets the allowlist of subscriptions for video streams. * - * @note - * - If uid is in uidList, the remote user's video will be subscribed, - * even if muteRemoteVideoStream(uid, true) and muteAllRemoteVideoStreams(true) are operated. - * - If a user is in the blacklist and whitelist at the same time, the user will not subscribe to video. + * @details + * You can call this method to specify the video streams of a user that you want to subscribe to. * - * @param uidList The id list of users who do subscribe to video. - * @param uidNumber The number of uid in uidList. - * @param connection The RtcConnection object. + * @note + * - You can call this method either before or after joining a channel. 
+ * - The allowlist is not affected by the setting in `muteRemoteVideoStream`, + * `muteAllRemoteVideoStreams` and `autoSubscribeAudio` in `ChannelMediaOptions`. + * - Once the allowlist of subscriptions is set, it is effective even if you leave the current + * channel and rejoin the channel. + * - If a user is added in the allowlist and blocklist at the same time, only the blocklist takes + * effect. + * + * @param uidList The user ID list of users that you want to subscribe to. + * If you want to specify the video streams of a user for subscription, add the user ID of that user + * in this list. If you want to remove a user from the allowlist, you need to call the + * `setSubscribeVideoAllowlist` method to update the user ID list; this means you only add the `uid` + * of users that you want to subscribe to in the new user ID list. + * @param uidNumber The number of users in the user ID list. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1363,38 +1645,48 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int setSubscribeVideoAllowlistEx(uid_t* uidList, int uidNumber, const RtcConnection& connection) = 0; /** - * Sets the remote video subscription options + * @brief Sets options for subscribing to remote video streams. * + * @details + * When a remote user has enabled dual-stream mode, you can call this method to choose the option + * for subscribing to the video streams sent by the remote user. + * + * @param uid The user ID of the remote user. + * @param options The video subscription options. See `VideoSubscriptionOptions`. + * @param connection The connection information. See `RtcConnection`. * - * @param uid ID of the remote user sending the video stream. - * @param options Sets the video subscription options: VideoSubscriptionOptions. - * @param connection The RtcConnection object. * @return * - 0: Success. * - < 0: Failure. 
*/ virtual int setRemoteVideoSubscriptionOptionsEx(uid_t uid, const VideoSubscriptionOptions& options, const RtcConnection& connection) = 0; - /** Sets the sound position and gain of a remote user. - - When the local user calls this method to set the sound position of a remote user, the sound difference between the left and right channels allows the local user to track the real-time position of the remote user, creating a real sense of space. This method applies to massively multiplayer online games, such as Battle Royale games. - - @note - - For this method to work, enable stereo panning for remote users by calling the \ref agora::rtc::IRtcEngine::enableSoundPositionIndication "enableSoundPositionIndication" method before joining a channel. - - This method requires hardware support. For the best sound positioning, we recommend using a wired headset. - - Ensure that you call this method after joining a channel. - - @param uid The ID of the remote user. - @param pan The sound position of the remote user. The value ranges from -1.0 to 1.0: - - 0.0: the remote sound comes from the front. - - -1.0: the remote sound comes from the left. - - 1.0: the remote sound comes from the right. - @param gain Gain of the remote user. The value ranges from 0.0 to 100.0. The default value is 100.0 (the original gain of the remote user). The smaller the value, the less the gain. - @param connection The RtcConnection object. - - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Sets the 2D position (the position on the horizontal plane) of the remote user's voice. + * + * @details + * This method sets the voice position and volume of a remote user. + * When the local user calls this method to set the voice position of a remote user, the voice + * difference between the left and right channels allows the local user to track the real-time + * position of the remote user, creating a sense of space. 
This method applies to massive + * multiplayer online games, such as Battle Royale games. + * + * @note + * - For the best voice positioning, Agora recommends using a wired headset. + * - Call this method after joining a channel. + * + * @param uid The user ID of the remote user. + * @param pan The voice position of the remote user. The value ranges from -1.0 to 1.0: + * - -1.0: The remote voice comes from the left. + * - 0.0: (Default) The remote voice comes from the front. + * - 1.0: The remote voice comes from the right. + * @param gain The volume of the remote user. The value ranges from 0.0 to 100.0. The default value + * is 100.0 (the original volume of the remote user). The smaller the value, the lower the volume. + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int setRemoteVoicePositionEx(uid_t uid, double pan, double gain, const RtcConnection& connection) = 0; /** Sets remote user parameters for spatial audio @@ -1408,21 +1700,21 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int setRemoteUserSpatialAudioParamsEx(uid_t uid, const agora::SpatialAudioParams& params, const RtcConnection& connection) = 0; /** - * Updates the display mode of the video view of a remote user. + * @brief Sets the video display mode of a specified remote user. * + * @details * After initializing the video view of a remote user, you can call this method to update its * rendering and mirror modes. This method affects only the video view that the local user sees. * * @note - * - Ensure that you have called \ref setupRemoteVideo "setupRemoteVideo" to initialize the remote video - * view before calling this method. + * - Call this method after initializing the remote view by calling the `setupRemoteVideo` method. * - During a call, you can call this method as many times as necessary to update the display mode * of the video view of a remote user. * - * @param uid ID of the remote user. 
- * @param renderMode Sets the remote display mode. See #RENDER_MODE_TYPE. - * @param mirrorMode Sets the mirror type. See #VIDEO_MIRROR_MODE_TYPE. - * @param connection The RtcConnection object. + * @param uid The user ID of the remote user. + * @param renderMode The video display mode of the remote user. See `RENDER_MODE_TYPE`. + * @param mirrorMode The mirror mode of the remote user view. See `VIDEO_MIRROR_MODE_TYPE`. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. @@ -1430,27 +1722,35 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int setRemoteRenderModeEx(uid_t uid, media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode, const RtcConnection& connection) = 0; - /** Enables loopback recording. + /** + * @brief Enables loopback audio capturing. * - * If you enable loopback recording, the output of the default sound card is mixed into - * the audio stream sent to the other end. + * @details + * If you enable loopback audio capturing, the output of the sound card is mixed into the audio + * stream sent to the other end. * - * @note This method is for Windows only. + * @note + * - This method applies to the macOS and Windows only. + * - macOS does not support loopback audio capture of the default sound card. If you need to use + * this function, use a virtual sound card and pass its name to the `deviceName` parameter. Agora + * recommends using AgoraALD as the virtual sound card for audio capturing. + * - This method only supports using one sound card for audio capturing. + * + * @param connection The connection information. See `RtcConnection`. + * @param enabled Sets whether to enable loopback audio capture: + * - `true`: Enable loopback audio capturing. + * - `false`: (Default) Disable loopback audio capturing. + * @param deviceName - macOS: The device name of the virtual sound card. The default value is set to + * NULL, which means using AgoraALD for loopback audio capturing. 
+ * - Windows: The device name of the sound card. The default is set to NULL, which means the SDK + * uses the sound card of your device for loopback audio capturing. * - * @param connection The RtcConnection object. - * @param enabled Sets whether to enable/disable loopback recording. - * - true: Enable loopback recording. - * - false: (Default) Disable loopback recording. - * @param deviceName Pointer to the device name of the sound card. The default value is NULL (the default sound card). - * - This method is for macOS and Windows only. - * - macOS does not support loopback capturing of the default sound card. If you need to use this method, - * please use a virtual sound card and pass its name to the deviceName parameter. Agora has tested and recommends using soundflower. * @return * - 0: Success. * - < 0: Failure. */ virtual int enableLoopbackRecordingEx(const RtcConnection& connection, bool enabled, const char* deviceName = NULL) = 0; - + /** * Adjusts the recording volume. * @@ -1466,7 +1766,7 @@ class IRtcEngineEx : public IRtcEngine { * - < 0: Failure. */ virtual int adjustRecordingSignalVolumeEx(int volume, const RtcConnection& connection) = 0; - + /** * Mute or resume recording signal volume. * @@ -1483,304 +1783,537 @@ class IRtcEngineEx : public IRtcEngine { virtual int muteRecordingSignalEx(bool mute, const RtcConnection& connection) = 0; /** - * Adjust the playback signal volume of a specified remote user. - * You can call this method as many times as necessary to adjust the playback volume of different remote users, or to repeatedly adjust the playback volume of the same remote user. - * - * @note - * The playback volume here refers to the mixed volume of a specified remote user. - * This method can only adjust the playback volume of one specified remote user at a time. To adjust the playback volume of different remote users, call the method as many times, once for each remote user. - * - * @param uid The ID of the remote user. 
- * @param volume The playback volume of the specified remote user. The value ranges between 0 and 400, including the following: - * + * @brief Adjusts the playback signal volume of a specified remote user. + * + * @details + * You can call this method to adjust the playback volume of a specified remote user. To adjust the + * playback volume of different remote users, call the method as many times, once for each remote + * user. + * Call timing: Call this method after `joinChannelEx`. + * + * @param uid The user ID of the remote user. + * @param volume The volume of the user. The value range is [0,400]. * - 0: Mute. - * - 100: (Default) Original volume. - * @param connection RtcConnection - * + * - 100: (Default) The original volume. + * - 400: Four times the original volume (amplifying the audio signals by four times). + * @param connection The connection information. See `RtcConnection`. + * * @return * - 0: Success. * - < 0: Failure. - */ + */ virtual int adjustUserPlaybackSignalVolumeEx(uid_t uid, int volume, const RtcConnection& connection) = 0; - /** Gets the current connection state of the SDK. - @param connection The RtcConnection object. - @return #CONNECTION_STATE_TYPE. + /** + * @brief Gets the current connection state of the SDK. + * + * @details + * Call timing: This method can be called either before or after joining the channel. + * + * @param connection The connection information. See `RtcConnection`. + * + * @return + * The current connection state. See `CONNECTION_STATE_TYPE`. */ virtual CONNECTION_STATE_TYPE getConnectionStateEx(const RtcConnection& connection) = 0; - /** Enables/Disables the built-in encryption. - * - * In scenarios requiring high security, Agora recommends calling this method to enable the built-in encryption before joining a channel. + /** + * @brief Enables or disables the built-in encryption. * - * All users in the same channel must use the same encryption mode and encryption key. 
Once all users leave the channel, the encryption key of this channel is automatically cleared. + * @details + * After the user leaves the channel, the SDK automatically disables the built-in encryption. To + * enable the built-in encryption, call this method before the user joins the channel again. + * Applicable scenarios: Scenarios with higher security requirements. + * Call timing: Call this method before joining a channel. * * @note - * - If you enable the built-in encryption, you cannot use the RTMP streaming function. + * - All users within the same channel must set the same encryption configurations when calling this + * method. + * - If you enable the built-in encryption, you cannot use the Media Push function. * - * @param connection The RtcConnection object. - * @param enabled Whether to enable the built-in encryption: + * @param enabled Whether to enable built-in encryption: * - true: Enable the built-in encryption. - * - false: Disable the built-in encryption. - * @param config Configurations of built-in encryption schemas. See EncryptionConfig. + * - false: (Default) Disable the built-in encryption. + * @param config Built-in encryption configurations. See `EncryptionConfig`. + * @param connection The connection information. See `RtcConnection`. * * @return * - 0: Success. * - < 0: Failure. - * - -2(ERR_INVALID_ARGUMENT): An invalid parameter is used. Set the parameter with a valid value. - * - -4(ERR_NOT_SUPPORTED): The encryption mode is incorrect or the SDK fails to load the external encryption library. Check the enumeration or reload the external encryption library. - * - -7(ERR_NOT_INITIALIZED): The SDK is not initialized. Initialize the `IRtcEngine` instance before calling this method. */ virtual int enableEncryptionEx(const RtcConnection& connection, bool enabled, const EncryptionConfig& config) = 0; - /** Creates a data stream. 
- * - * You can call this method to create a data stream and improve the - * reliability and ordering of data tranmission. - * - * @note - * - Ensure that you set the same value for `reliable` and `ordered`. - * - Each user can only create a maximum of 5 data streams during a RtcEngine - * lifecycle. - * - The data channel allows a data delay of up to 5 seconds. If the receiver - * does not receive the data stream within 5 seconds, the data channel reports - * an error. - * - * @param[out] streamId The ID of the stream data. - * @param reliable Sets whether the recipients are guaranteed to receive - * the data stream from the sender within five seconds: - * - true: The recipients receive the data stream from the sender within - * five seconds. If the recipient does not receive the data stream within - * five seconds, an error is reported to the application. - * - false: There is no guarantee that the recipients receive the data stream - * within five seconds and no error message is reported for any delay or - * missing data stream. - * @param ordered Sets whether the recipients receive the data stream - * in the sent order: - * - true: The recipients receive the data stream in the sent order. - * - false: The recipients do not receive the data stream in the sent order. - * @param connection The RtcConnection object. + /** + * @brief Creates a data stream. + * + * @details + * You can call this method to create a data stream and improve the reliability and ordering of data + * transmission. + * Call timing: Call this method after `joinChannelEx`. + * Related callbacks: After setting `reliable` to `true`, if the recipient does not receive the data + * within five seconds, the SDK triggers the `onStreamMessageError` callback and returns an error + * code. + * + * @note Each user can create up to five data streams during the lifecycle of `IRtcEngine`. 
The data + * stream will be destroyed when leaving the channel, and the data stream needs to be recreated if + * needed. + * + * @param streamId An output parameter; the ID of the data stream created. + * @param reliable Sets whether the recipients are guaranteed to receive the data stream within five + * seconds: + * - `true`: The recipients receive the data from the sender within five seconds. If the recipient + * does not receive the data within five seconds, the SDK triggers the `onStreamMessageError` + * callback and returns an error code. + * - `false`: There is no guarantee that the recipients receive the data stream within five seconds + * and no error message is reported for any delay or missing data stream. + * Attention: Please ensure that `reliable` and `ordered` are either both set to `true` or both set + * to `false`. + * @param ordered Sets whether the recipients receive the data stream in the sent order: + * - `true`: The recipients receive the data in the sent order. + * - `false`: The recipients do not receive the data in the sent order. + * @param connection The connection information. See `RtcConnection`. * * @return - * - 0: Success. + * - 0: The data stream is successfully created. * - < 0: Failure. */ virtual int createDataStreamEx(int* streamId, bool reliable, bool ordered, const RtcConnection& connection) = 0; - /** Creates a data stream. + /** + * @brief Creates a data stream. * - * Each user can create up to five data streams during the lifecycle of the IChannel. - * @param streamId The ID of the created data stream. - * @param config The config of data stream. - * @param connection The RtcConnection object. - * @return int - * - Returns 0: Success. + * @details + * Compared to `createDataStreamEx(int* streamId, bool reliable, bool ordered, const RtcConnection& + * connection)`, this method does not guarantee the reliability of data + * transmission. 
If a data packet is not received five seconds after it was sent, the SDK directly + * discards the data. + * Call timing: Call this method after `joinChannelEx`. + * + * @note + * Each user can create up to five data streams during the lifecycle of `IRtcEngine`. The data + * stream will be destroyed when leaving the channel, and the data stream needs to be recreated if + * needed. + * If you need a more comprehensive solution for low-latency, high-concurrency, and scalable + * real-time messaging and status synchronization, it is recommended to use `Signaling`. + * + * @param streamId An output parameter; the ID of the data stream created. + * @param config The configurations for the data stream. See `DataStreamConfig`. + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: The data stream is successfully created. * - < 0: Failure. */ virtual int createDataStreamEx(int* streamId, const DataStreamConfig& config, const RtcConnection& connection) = 0; - /** Sends a data stream. - * - * After calling \ref IRtcEngine::createDataStream "createDataStream", you can call - * this method to send a data stream to all users in the channel. + /** + * @brief Sends data stream messages. * + * @details + * After calling `createDataStreamEx(int* streamId, const DataStreamConfig& config, const + * RtcConnection& connection)`, you can call this method to send data stream messages + * to all users in the channel. * The SDK has the following restrictions on this method: - * - Up to 60 packets can be sent per second in a channel with each packet having a maximum size of 1 KB. - * - Each client can send up to 30 KB of data per second. - * - Each user can have up to five data streams simultaneously. - * - * After the remote user receives the data stream within 5 seconds, the SDK triggers the - * \ref IRtcEngineEventHandler::onStreamMessage "onStreamMessage" callback on - * the remote client. 
After the remote user does not receive the data stream within 5 seconds, - * the SDK triggers the \ref IRtcEngineEventHandler::onStreamMessageError "onStreamMessageError" + * - Each client within the channel can have up to 5 data channels simultaneously, with a total + * shared packet bitrate limit of 30 KB/s for all data channels. + * - Each data channel can send up to 60 packets per second, with each packet being a maximum of 1 + * KB. + * A successful method call triggers the `onStreamMessage` callback on the remote client, from which + * the remote user gets the stream message. A failed method call triggers the `onStreamMessageError` * callback on the remote client. * * @note - * - Call this method after calling \ref IRtcEngine::createDataStream "createDataStream". - * - This method applies only to the `COMMUNICATION` profile or to - * the hosts in the `LIVE_BROADCASTING` profile. If an audience in the - * `LIVE_BROADCASTING` profile calls this method, the audience may be switched to a host. - * - * @param streamId The ID of the stream data. - * @param data The data stream. - * @param length The length (byte) of the data stream. - * @param connection The RtcConnection object. + * - If you need a more comprehensive solution for low-latency, high-concurrency, and scalable + * real-time messaging and status synchronization, it is recommended to use `Signaling`. + * - Call this method after `joinChannelEx`. + * - Ensure that you call `createDataStreamEx(int* streamId, const DataStreamConfig& config, const + * RtcConnection& connection)` to create a data channel before calling this + * method. + * + * @param streamId The data stream ID. You can get the data stream ID by calling + * `createDataStreamEx(int* streamId, const DataStreamConfig& config, const RtcConnection& + * connection)` + * . + * @param data The message to be sent. + * @param length The length of the data. + * @param connection The connection information. See `RtcConnection`. 
* * @return * - 0: Success. * - < 0: Failure. */ virtual int sendStreamMessageEx(int streamId, const char* data, size_t length, const RtcConnection& connection) = 0; - /** Adds a watermark image to the local video. - - This method adds a PNG watermark image to the local video in a live broadcast. Once the watermark image is added, all the audience in the channel (CDN audience included), - and the recording device can see and capture it. Agora supports adding only one watermark image onto the local video, and the newly watermark image replaces the previous one. - - The watermark position depends on the settings in the \ref IRtcEngine::setVideoEncoderConfiguration "setVideoEncoderConfiguration" method: - - If the orientation mode of the encoding video is #ORIENTATION_MODE_FIXED_LANDSCAPE, or the landscape mode in #ORIENTATION_MODE_ADAPTIVE, the watermark uses the landscape orientation. - - If the orientation mode of the encoding video is #ORIENTATION_MODE_FIXED_PORTRAIT, or the portrait mode in #ORIENTATION_MODE_ADAPTIVE, the watermark uses the portrait orientation. - - When setting the watermark position, the region must be less than the dimensions set in the `setVideoEncoderConfiguration` method. Otherwise, the watermark image will be cropped. - - @note - - Ensure that you have called the \ref agora::rtc::IRtcEngine::enableVideo "enableVideo" method to enable the video module before calling this method. - - If you only want to add a watermark image to the local video for the audience in the CDN live broadcast channel to see and capture, you can call this method or the \ref agora::rtc::IRtcEngine::setLiveTranscoding "setLiveTranscoding" method. - - This method supports adding a watermark image in the PNG file format only. Supported pixel formats of the PNG image are RGBA, RGB, Palette, Gray, and Alpha_gray. - - If the dimensions of the PNG image differ from your settings in this method, the image will be cropped or zoomed to conform to your settings. 
- - If you have enabled the local video preview by calling the \ref agora::rtc::IRtcEngine::startPreview "startPreview" method, you can use the `visibleInPreview` member in the WatermarkOptions class to set whether or not the watermark is visible in preview. - - If you have enabled the mirror mode for the local video, the watermark on the local video is also mirrored. To avoid mirroring the watermark, Agora recommends that you do not use the mirror and watermark functions for the local video at the same time. You can implement the watermark function in your application layer. - - @param watermarkUrl The local file path of the watermark image to be added. This method supports adding a watermark image from the local absolute or relative file path. - @param options Pointer to the watermark's options to be added. See WatermarkOptions for more infomation. - @param connection The RtcConnection object. - @return int - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Send Reliable message to remote uid in channel. + * + * @technical preview + * + * @param uid Remote user id. + * @param type Reliable Data Transmission tunnel message type. See RdtStreamType + * @param data The pointer to the sent data. + * @param length The length of the sent data. + * @param connection The RtcConnection object. + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int sendRdtMessageEx(uid_t uid, RdtStreamType type, const char *data, size_t length, const RtcConnection& connection) = 0; + + /** + * @brief Send media control message + * + * @technical preview + * + * @param uid Remote user id. In particular, if the uid is set to 0, it means broadcasting the message to the entire channel. + * @param data The pointer to the sent data. + * @param length The length of the sent data, max 1024. + * @param connection The RtcConnection object. + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ + virtual int sendMediaControlMessageEx(uid_t uid, const char *data, size_t length, const RtcConnection& connection) = 0; + + /** + * @brief Adds a watermark image to the local video. + * + * @deprecated v4.6.0. This method is deprecated. Use addVideoWatermarkEx(const WatermarkConfig& + * config, const RtcConnection& connection) instead. + * + * @details + * This method adds a PNG watermark image to the local video in the live streaming. Once the + * watermark image is added, all the audience in the channel (CDN audience included), and the + * capturing device can see and capture it. The Agora SDK supports adding only one watermark image + * onto a live video stream. The newly added watermark image replaces the previous one. + * The watermark coordinates are dependent on the settings in the `setVideoEncoderConfigurationEx` + * method: + * - If the orientation mode of the encoding video ( `ORIENTATION_MODE` ) is fixed landscape mode or + * the adaptive landscape mode, the watermark uses the landscape orientation. + * - If the orientation mode of the encoding video ( `ORIENTATION_MODE` ) is fixed portrait mode or + * the adaptive portrait mode, the watermark uses the portrait orientation. + * - When setting the watermark position, the region must be less than the dimensions set in the + * `setVideoEncoderConfigurationEx` method; otherwise, the watermark image will be cropped. + * + * @note + * - Ensure that you have called `enableVideo` before calling this method. + * - This method supports adding a watermark image in the PNG file format only. Supported pixel + * formats of the PNG image are RGBA, RGB, Palette, Gray, and Alpha_gray. + * - If the dimensions of the PNG image differ from your settings in this method, the image will be + * cropped or zoomed to conform to your settings. 
+ * - If you have enabled the local video preview by calling the `startPreview(VIDEO_SOURCE_TYPE + * sourceType)` method, you can + * use the `visibleInPreview` member to set whether or not the watermark is visible in the preview. + * - If you have enabled the mirror mode for the local video, the watermark on the local video is + * also mirrored. To avoid mirroring the watermark, Agora recommends that you do not use the mirror + * and watermark functions for the local video at the same time. You can implement the watermark + * function in your application layer. + * + * @param watermarkUrl The local file path of the watermark image to be added. This method supports + * adding a watermark image from the local absolute or relative file path. + * @param options The options of the watermark image to be added. See `WatermarkOptions`. + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int addVideoWatermarkEx(const char* watermarkUrl, const WatermarkOptions& options, const RtcConnection& connection) = 0; - /** Removes the watermark image on the video stream added by - addVideoWatermark(). - @param connection The RtcConnection object. - @return - - 0: Success. - - < 0: Failure. - */ + /** + * @brief Adds a watermark image to the local video. + * + * @since 4.6.0 + * + * @details + * Applicable scenarios: This method applies to multi-channel scenarios. + * + * @param config Watermark configuration. See `WatermarkConfig`. + * @param connection `RtcConnection` object. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int addVideoWatermarkEx(const WatermarkConfig& config, const RtcConnection& connection) = 0; + + /** + * @brief Removes the specified watermark image from the local or remote video stream. + * + * @since 4.6.0 + * + * @details + * Applicable scenarios: This method applies to multi-channel scenarios. + * + * @param id Watermark ID. 
+ * @param connection `RtcConnection` object. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int removeVideoWatermarkEx(const char* id, const RtcConnection& connection) = 0; + + /** + * @brief Removes the watermark image from the video stream. + * + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int clearVideoWatermarkEx(const RtcConnection& connection) = 0; - /** Agora supports reporting and analyzing customized messages. + /** + * @brief Agora supports reporting and analyzing customized messages. * - * This function is in the beta stage with a free trial. The ability provided - * in its beta test version is reporting a maximum of 10 message pieces within - * 6 seconds, with each message piece not exceeding 256 bytes. + * @details + * Agora supports reporting and analyzing customized messages. This function is in the beta stage + * with a free trial. The ability provided in its beta test version is reporting a maximum of 10 + * message pieces within 6 seconds, with each message piece not exceeding 256 bytes and each string + * not exceeding 100 bytes. To try out this function, contact `support@agora.io` and discuss the + * format of customized messages with us. * - * To try out this function, contact [support@agora.io](mailto:support@agora.io) - * and discuss the format of customized messages with us. */ virtual int sendCustomReportMessageEx(const char* id, const char* category, const char* event, const char* label, int value, const RtcConnection& connection) = 0; /** - * Enables the `onAudioVolumeIndication` callback to report on which users are speaking - * and the speakers' volume. + * @brief Enables the reporting of users' volume indication. 
* - * Once the \ref IRtcEngineEventHandler::onAudioVolumeIndication "onAudioVolumeIndication" - * callback is enabled, the SDK returns the volume indication in the at the time interval set - * in `enableAudioVolumeIndication`, regardless of whether any user is speaking in the channel. + * @details + * This method enables the SDK to regularly report the volume information to the app of the local + * user who sends a stream and remote users (three users at most) whose instantaneous volumes are + * the highest. + * Call timing: Call this method after `joinChannelEx`. + * Related callbacks: The SDK triggers the `onAudioVolumeIndication` callback according to the + * interval you set if this method is successfully called and there are users publishing streams in + * the channel. * * @param interval Sets the time interval between two consecutive volume indications: - * - <= 0: Disables the volume indication. - * - > 0: Time interval (ms) between two consecutive volume indications, - * and should be integral multiple of 200 (less than 200 will be set to 200). - * @param smooth The smoothing factor that sets the sensitivity of the audio volume - * indicator. The value range is [0, 10]. The greater the value, the more sensitive the - * indicator. The recommended value is 3. - * @param reportVad - * - `true`: Enable the voice activity detection of the local user. Once it is enabled, the `vad` parameter of the - * `onAudioVolumeIndication` callback reports the voice activity status of the local user. - * - `false`: (Default) Disable the voice activity detection of the local user. Once it is disabled, the `vad` parameter - * of the `onAudioVolumeIndication` callback does not report the voice activity status of the local user, except for - * the scenario where the engine automatically detects the voice activity of the local user. - * @param connection The RtcConnection object. + * - ≤ 0: Disables the volume indication. 
+ * - > 0: Time interval (ms) between two consecutive volume indications. Ensure this parameter is + * set to a value greater than 10, otherwise you will not receive the `onAudioVolumeIndication` + * callback. Agora recommends that this value is set as greater than 100. + * @param smooth The smoothing factor that sets the sensitivity of the audio volume indicator. The + * value ranges between 0 and 10. The recommended value is 3. The greater the value, the more + * sensitive the indicator. + * @param reportVad - `true`: Enables the voice activity detection of the local user. Once it is + * enabled, the `vad` parameter of the `onAudioVolumeIndication` callback reports the voice activity + * status of the local user. + * - `false`: (Default) Disables the voice activity detection of the local user. Once it is + * disabled, the `vad` parameter of the `onAudioVolumeIndication` callback does not report the voice + * activity status of the local user, except for the scenario where the engine automatically detects + * the voice activity of the local user. + * @param connection The connection information. See `RtcConnection`. + * * @return * - 0: Success. * - < 0: Failure. */ virtual int enableAudioVolumeIndicationEx(int interval, int smooth, bool reportVad, const RtcConnection& connection) = 0; - - /** Publishes the local stream without transcoding to a specified CDN live RTMP address. (CDN live only.) - * - * @param url The CDN streaming URL in the RTMP format. The maximum length of this parameter is 1024 bytes. - * @param connection RtcConnection. - * - * @return - * - 0: Success. - * - < 0: Failure. - */ + + /** + * @brief Starts pushing media streams to a CDN without transcoding. + * + * @details + * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful + * API`. + * You can call this method to push an audio or video stream to the specified CDN address. 
This
+ * method can push media streams to only one CDN address at a time, so if you need to push streams
+ * to multiple addresses, call this method multiple times.
+ * After you call this method, the SDK triggers the `onRtmpStreamingStateChanged` callback on the
+ * local client to report the state of the streaming.
+ *
+ * @note
+ * - Call this method after joining a channel.
+ * - Only hosts in the LIVE_BROADCASTING profile can call this method.
+ * - If you want to retry pushing streams after a failed push, make sure to call `stopRtmpStreamEx`
+ * first, then call this method to retry pushing streams; otherwise, the SDK returns the same error
+ * code as the last failed push.
+ *
+ * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot
+ * exceed 1024 bytes. Special characters such as Chinese characters are not supported.
+ * @param connection The connection information. See `RtcConnection`.
+ *
+ * @return
+ * - 0: Success.
+ * - < 0: Failure.
+ * - -2: The URL or configuration of transcoding is invalid; check your URL and transcoding
+ * configurations.
+ * - -7: The SDK is not initialized before calling this method.
+ * - -19: The Media Push URL is already in use; use another URL instead.
+ */
 virtual int startRtmpStreamWithoutTranscodingEx(const char* url, const RtcConnection& connection) = 0;
-
- /** Publishes the local stream with transcoding to a specified CDN live RTMP address. (CDN live only.)
- *
- * @param url The CDN streaming URL in the RTMP format. The maximum length of this parameter is 1024 bytes.
- * @param transcoding Sets the CDN live audio/video transcoding settings. See LiveTranscoding.
- * @param connection RtcConnection.
- *
- * @return
- * - 0: Success.
- * - < 0: Failure.
- */
+
+ /**
+ * @brief Starts Media Push and sets the transcoding configuration.
+ *
+ * @details
+ * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful
+ * API`. 
+ * You can call this method to push a live audio-and-video stream to the specified CDN address and + * set the transcoding configuration. This method can push media streams to only one CDN address at + * a time, so if you need to push streams to multiple addresses, call this method multiple times. + * After you call this method, the SDK triggers the `onRtmpStreamingStateChanged` callback on the + * local client to report the state of the streaming. + * + * @note + * - Ensure that you enable the Media Push service before using this function. + * - Call this method after joining a channel. + * - Only hosts in the LIVE_BROADCASTING profile can call this method. + * - If you want to retry pushing streams after a failed push, make sure to call `stopRtmpStreamEx` + * first, then call this method to retry pushing streams; otherwise, the SDK returns the same error + * code as the last failed push. + * + * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot + * exceed 1024 bytes. Special characters such as Chinese characters are not supported. + * @param transcoding The transcoding configuration for Media Push. See `LiveTranscoding`. + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - -2: The URL or configuration of transcoding is invalid; check your URL and transcoding + * configurations. + * - -7: The SDK is not initialized before calling this method. + * - -19: The Media Push URL is already in use; use another URL instead. + */ virtual int startRtmpStreamWithTranscodingEx(const char* url, const LiveTranscoding& transcoding, const RtcConnection& connection) = 0; - - /** Update the video layout and audio settings for CDN live. (CDN live only.) - * @note This method applies to Live Broadcast only. - * - * @param transcoding Sets the CDN live audio/video transcoding settings. See LiveTranscoding. - * @param connection RtcConnection. 
- * - * @return - * - 0: Success. - * - < 0: Failure. - */ + + /** + * @brief Updates the transcoding configuration. + * + * @details + * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful + * API`. + * After you start pushing media streams to CDN with transcoding, you can dynamically update the + * transcoding configuration according to the scenario. The SDK triggers the `onTranscodingUpdated` + * callback after the transcoding configuration is updated. + * + * @param transcoding The transcoding configuration for Media Push. See `LiveTranscoding`. + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int updateRtmpTranscodingEx(const LiveTranscoding& transcoding, const RtcConnection& connection) = 0; - - /** Stop an RTMP stream with transcoding or without transcoding from the CDN. (CDN live only.) - * @param url The RTMP URL address to be removed. The maximum length of this parameter is 1024 bytes. - * @param connection RtcConnection. - * @return - * - 0: Success. - * - < 0: Failure. - */ + + /** + * @brief Stops pushing media streams to a CDN. + * + * @details + * Agora recommends that you use the server-side Media Push function. For details, see `Use RESTful + * API`. + * You can call this method to stop the live stream on the specified CDN address. This method can + * stop pushing media streams to only one CDN address at a time, so if you need to stop pushing + * streams to multiple addresses, call this method multiple times. + * After you call this method, the SDK triggers the `onRtmpStreamingStateChanged` callback on the + * local client to report the state of the streaming. + * + * @param url The address of Media Push. The format is RTMP or RTMPS. The character length cannot + * exceed 1024 bytes. Special characters such as Chinese characters are not supported. + * @param connection The connection information. See `RtcConnection`. 
+ * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int stopRtmpStreamEx(const char* url, const RtcConnection& connection) = 0; - - /** Starts relaying media streams across channels or updates the channels for media relay. + + /** + * @brief Starts relaying media streams across channels or updates channels for media relay. * * @since v4.2.0 - * @param configuration The configuration of the media stream relay:ChannelMediaRelayConfiguration. - * @param connection RtcConnection. + * + * @details + * The first successful call to this method starts relaying media streams from the source channel to + * the destination channels. To relay the media stream to other channels, or exit one of the current + * media relays, you can call this method again to update the destination channels. This feature + * supports relaying media streams to a maximum of six destination channels. + * After a successful method call, the SDK triggers the `onChannelMediaRelayStateChanged` callback, + * and this callback returns the state of the media stream relay. Common states are as follows: + * - If the `onChannelMediaRelayStateChanged` callback returns `RELAY_STATE_RUNNING` (2) and + * `RELAY_OK` (0), it means that the SDK starts relaying media streams from the source channel to + * the destination channel. + * - If the `onChannelMediaRelayStateChanged` callback returns `RELAY_STATE_FAILURE` (3), an + * exception occurs during the media stream relay. + * + * @note + * - Call this method after joining the channel. + * - This method takes effect only when you are a host in a live streaming channel. + * - The relaying media streams across channels function needs to be enabled by contacting + * `technical support`. + * - Agora does not support string user accounts in this API. + * + * @param configuration The configuration of the media stream relay. See + * `ChannelMediaRelayConfiguration`. + * @param connection The connection information. See `RtcConnection`. 
+ * * @return * - 0: Success. * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. - * - -8(ERR_INVALID_STATE): The current status is invalid, only allowed to be called when the role is the broadcaster. + * - -1: A general error occurs (no specified reason). + * - -2: The parameter is invalid. + * - -8: Internal state error. Probably because the user is not a broadcaster. */ virtual int startOrUpdateChannelMediaRelayEx(const ChannelMediaRelayConfiguration& configuration, const RtcConnection& connection) = 0; - - /** Stops the media stream relay. - * - * Once the relay stops, the host quits all the destination + + /** + * @brief Stops the media stream relay. Once the relay stops, the host quits all the target * channels. * - * @param connection RtcConnection. + * @details + * After a successful method call, the SDK triggers the `onChannelMediaRelayStateChanged` callback. + * If the callback reports `RELAY_STATE_IDLE` (0) and `RELAY_OK` (0), the host successfully stops + * the relay. + * + * @note If the method call fails, the SDK triggers the `onChannelMediaRelayStateChanged` callback + * with the `RELAY_ERROR_SERVER_NO_RESPONSE` (2) or `RELAY_ERROR_SERVER_CONNECTION_LOST` (8) status + * code. You can call the `leaveChannel(const LeaveChannelOptions& options)` method to leave the + * channel, and the media stream + * relay automatically stops. + * + * @param connection The connection information. See `RtcConnection`. + * * @return * - 0: Success. * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. - * - -7(ERR_NOT_INITIALIZED): cross channel media streams are not relayed. + * - -5: The method call was rejected. There is no ongoing channel media relay. 
*/ virtual int stopChannelMediaRelayEx(const RtcConnection& connection) = 0; - - /** pause the channels for media stream relay. + + /** + * @brief Pauses the media stream relay to all target channels. + * + * @details + * After the cross-channel media stream relay starts, you can call this method to pause relaying + * media streams to all target channels; after the pause, if you want to resume the relay, call + * `resumeAllChannelMediaRelay`. + * + * @note Call this method after `startOrUpdateChannelMediaRelayEx`. + * + * @param connection The connection information. See `RtcConnection`. * - * @param connection RtcConnection. * @return * - 0: Success. * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. - * - -7(ERR_NOT_INITIALIZED): cross channel media streams are not relayed. + * - -5: The method call was rejected. There is no ongoing channel media relay. */ virtual int pauseAllChannelMediaRelayEx(const RtcConnection& connection) = 0; - /** resume the channels for media stream relay. + /** + * @brief Resumes the media stream relay to all target channels. + * + * @details + * After calling the `pauseAllChannelMediaRelayEx` method, you can call this method to resume + * relaying media streams to all destination channels. + * + * @note Call this method after `pauseAllChannelMediaRelayEx`. + * + * @param connection The connection information. See `RtcConnection`. * - * @param connection RtcConnection. * @return * - 0: Success. * - < 0: Failure. - * - -1(ERR_FAILED): A general error occurs (no specified reason). - * - -2(ERR_INVALID_ARGUMENT): The argument is invalid. - * - -5(ERR_REFUSED): The request is rejected. - * - -7(ERR_NOT_INITIALIZED): cross channel media streams are not relayed. + * - -5: The method call was rejected. There is no paused channel media relay. 
*/ virtual int resumeAllChannelMediaRelayEx(const RtcConnection& connection) = 0; @@ -1814,56 +2347,99 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int getUserInfoByUidEx(uid_t uid, rtc::UserInfo* userInfo, const RtcConnection& connection) = 0; - /** - * Enables or disables the dual video stream mode. + /** + * @brief Enables or disables dual-stream mode on the sender side. * * @deprecated v4.2.0. This method is deprecated. Use setDualStreamModeEx instead * - * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream - * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video - * stream) video using {@link setRemoteVideoStreamType setRemoteVideoStreamType}. + * @details + * You can call this method to enable or disable the dual-stream mode on the publisher side. Dual + * streams are a pairing of a high-quality video stream and a low-quality video stream: + * - High-quality video stream: High bitrate, high resolution. + * - Low-quality video stream: Low bitrate, low resolution. + * After you enable dual-stream mode, you can call `setRemoteVideoStreamType` to choose to receive + * either the high-quality video stream or the low-quality video stream on the subscriber side. + * + * @note This method is applicable to all types of streams from the sender, including but not + * limited to video streams collected from cameras, screen sharing streams, and custom-collected + * video streams. + * + * @param enabled Whether to enable dual-stream mode: + * - `true`: Enable dual-stream mode. + * - `false`: (Default) Disable dual-stream mode. + * @param streamConfig The configuration of the low-quality video stream. See + * `SimulcastStreamConfig`.Note: When setting `mode` to `DISABLE_SIMULCAST_STREAM`, setting + * `streamConfig` will not take effect. + * @param connection The connection information. See `RtcConnection`. * - * @param enabled - * - true: Enable the dual-stream mode. 
- * - false: (default) Disable the dual-stream mode. - * @param streamConfig The minor stream config - * @param connection The RtcConnection object. + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int enableDualStreamModeEx(bool enabled, const SimulcastStreamConfig& streamConfig, const RtcConnection& connection) = 0; /** - * Enables, disables or auto enable the dual video stream mode. + * @brief Sets the dual-stream mode on the sender side. + * + * @details + * The SDK defaults to enabling low-quality video stream adaptive mode ( `AUTO_SIMULCAST_STREAM` ) + * on the sender side, which means the sender does not actively send low-quality video stream. The + * receiving end with the role of the **host** can initiate a low-quality video stream request by + * calling `setRemoteVideoStreamTypeEx`, and upon receiving the request, the sending end + * automatically starts sending low-quality stream. + * - If you want to modify this behavior, you can call this method and set `mode` to + * `DISABLE_SIMULCAST_STREAM` (never send low-quality video streams) or `ENABLE_SIMULCAST_STREAM` + * (always send low-quality video streams). + * - If you want to restore the default behavior after making changes, you can call this method + * again with `mode` set to `AUTO_SIMULCAST_STREAM`. * - * If dual-stream mode is enabled, the subscriber can choose to receive the high-stream - * (high-resolution high-bitrate video stream) or low-stream (low-resolution low-bitrate video - * stream) video using {@link setRemoteVideoStreamType setRemoteVideoStreamType}. + * @note + * The difference and connection between this method and `enableDualStreamModeEx` is as follows: + * - When calling this method and setting `mode` to DISABLE_SIMULCAST_STREAM, it has the same effect + * as `enableDualStreamModeEx` `(false)`. + * - When calling this method and setting `mode` to ENABLE_SIMULCAST_STREAM, it has the same effect + * as `enableDualStreamModeEx` `(true)`. 
+ * - Both methods can be called before and after joining a channel. If both methods are used, the
+ * settings in the method called later take precedence.
+ *
+ * @param mode The mode in which the video stream is sent. See `SIMULCAST_STREAM_MODE`.
+ * @param streamConfig The configuration of the low-quality video stream. See
+ * `SimulcastStreamConfig`. Note: When setting `mode` to `DISABLE_SIMULCAST_STREAM`, setting
+ * `streamConfig` will not take effect.
+ * @param connection The connection information. See `RtcConnection`.
   *
-  * @param mode The dual stream mode: #SIMULCAST_STREAM_MODE.
-  * @param streamConfig The configuration of the low stream: SimulcastStreamConfig.
-  * @param connection The RtcConnection object.
+  * @return
+  * - 0: Success.
+  * - < 0: Failure.
   */
  virtual int setDualStreamModeEx(SIMULCAST_STREAM_MODE mode, const SimulcastStreamConfig& streamConfig,
                                  const RtcConnection& connection) = 0;

  /**
-  * Set the multi-layer video stream configuration.
+  * @brief Sets the simulcast video stream configuration.
   *
-  * If multi-layer is configed, the subscriber can choose to receive the coresponding layer
-  * of video stream using {@link setRemoteVideoStreamType setRemoteVideoStreamType}.
+  * @since v4.6.0
+  *
+  * @details
+  * This method can be called in scenarios involving multiple channels. You can call the
+  * `setSimulcastConfig` method to set video streams with different resolutions for the same video
+  * source. The subscribers can call `setRemoteVideoStreamType` to select which stream layer to
+  * receive. The broadcaster can publish up to four layers of video streams: one main stream
+  * (highest resolution) and three additional streams of different quality levels.
+  * Applicable scenarios: This method can be called in scenarios involving multiple channels.
+  *
+  * @param simulcastConfig This configuration includes seven layers, from STREAM_LAYER_1 to
+  * STREAM_LOW, with a maximum of three layers enabled simultaneously. See `SimulcastConfig`.
+ * @param connection Connection information. See `RtcConnection`. * - * @param simulcastConfig - * - The configuration for multi-layer video stream. It includes seven layers, ranging from - * STREAM_LAYER_1 to STREAM_LOW. A maximum of 3 layers can be enabled simultaneously. - * @param connection The RtcConnection object. * @return * - 0: Success. * - < 0: Failure. - * @technical preview */ virtual int setSimulcastConfigEx(const SimulcastConfig& simulcastConfig, const RtcConnection& connection) = 0; - + /** * Set the high priority user list and their fallback level in weak network condition. * @@ -1884,64 +2460,124 @@ class IRtcEngineEx : public IRtcEngine { STREAM_FALLBACK_OPTIONS option, const RtcConnection& connection) = 0; - /** - * Takes a snapshot of a video stream. - * - * This method takes a snapshot of a video stream from the specified user, generates a JPG - * image, and saves it to the specified path. - * - * The method is asynchronous, and the SDK has not taken the snapshot when the method call - * returns. After a successful method call, the SDK triggers the `onSnapshotTaken` callback - * to report whether the snapshot is successfully taken, as well as the details for that - * snapshot. - * - * @note - * - Call this method after joining a channel. - * - This method takes a snapshot of the published video stream specified in `ChannelMediaOptions`. - * - If the user's video has been preprocessed, for example, watermarked or beautified, the resulting - * snapshot includes the pre-processing effect. - * @param connection The RtcConnection object. - * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. - * @param filePath The local path (including filename extensions) of the snapshot. 
For example: - * - Windows: `C:\Users\\AppData\Local\Agora\\example.jpg` - * - iOS: `/App Sandbox/Library/Caches/example.jpg` - * - macOS: `~/Library/Logs/example.jpg` - * - Android: `/storage/emulated/0/Android/data//files/example.jpg` - * - * Ensure that the path you specify exists and is writable. - * @return - * - 0 : Success. - * - < 0 : Failure. - */ + /** + * @brief Takes a snapshot of a video stream using connection ID. + * + * @details + * This method takes a snapshot of a video stream from the specified user, generates a JPG image, + * and saves it to the specified path. + * Call timing: Call this method after `joinChannelEx`. + * Related callbacks: After a successful call of this method, the SDK triggers the `onSnapshotTaken` + * callback to report whether the snapshot is successfully taken, as well as the details for that + * snapshot. + * + * @note + * - The method is asynchronous, and the SDK has not taken the snapshot when the method call + * returns. + * - When used for local video snapshots, this method takes a snapshot for the video streams + * specified in `ChannelMediaOptions`. + * - If the user's video has been preprocessed, for example, watermarked or beautified, the + * resulting snapshot includes the pre-processing effect. + * + * @param connection The connection information. See `RtcConnection`. + * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. + * @param filePath The local path (including filename extensions) of the snapshot. For example: + * - Windows: `C:\Users\\AppData\Local\Agora\\example.jpg` + * - iOS: `/App Sandbox/Library/Caches/example.jpg` + * - macOS: `~/Library/Logs/example.jpg` + * - Android: `/storage/emulated/0/Android/data//files/example.jpg` + * Attention: Ensure that the path you specify exists and is writable. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+   */
  virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const char* filePath) = 0;

-  /** Enables video screenshot and upload with the connection ID.
-    @param enabled Whether to enable video screenshot and upload:
-    - `true`: Yes.
-    - `false`: No.
-    @param config The configuration for video screenshot and upload.
-    @param connection The connection information. See RtcConnection.
-    @return
-    - 0: Success.
-    - < 0: Failure.
-    */
+  /**
+   * @brief Gets a video screenshot of the specified observation point using the connection ID.
+   *
+   * @details
+   * This method takes a snapshot of a video stream from the specified user, generates a JPG image,
+   * and saves it to the specified path.
+   * Call timing: Call this method after `joinChannelEx`.
+   * Related callbacks: After a successful call of this method, the SDK triggers the `onSnapshotTaken`
+   * callback to report whether the snapshot is successfully taken, as well as the details for that
+   * snapshot.
+   *
+   * @note
+   * - The method is asynchronous, and the SDK has not taken the snapshot when the method call
+   * returns.
+   * - When used for local video snapshots, this method takes a snapshot for the video streams
+   * specified in `ChannelMediaOptions`.
+   * - If the user's video has been preprocessed, for example, watermarked or beautified, the
+   * resulting snapshot includes the pre-processing effect.
+   *
+   * @param connection The connection information. See `RtcConnection`.
+   * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video.
+   * @param config The configuration of the snapshot. See `SnapshotConfig`.
+   *
+   * @return
+   * - 0: Success.
+   * - < 0: Failure.
+   */
+  virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const media::SnapshotConfig& config) = 0;
+
+  /**
+   * @brief Enables or disables video screenshot and upload.
+   *
+   * @details
+   * This method can take screenshots for multiple video streams and upload them.
When video
+   * screenshot and upload function is enabled, the SDK takes screenshots and uploads videos sent by
+   * local users based on the type and frequency of the module you set in `ContentInspectConfig`.
+   * After video screenshot and upload, the Agora server sends the callback notification to your app
+   * server in HTTPS requests and sends all screenshots to the third-party cloud storage service.
+   * Call timing: This method can be called either before or after joining the channel.
+   *
+   * @note Before calling this method, ensure that you have contacted `technical support` to activate
+   * the video screenshot upload service.
+   *
+   * @param enabled Whether to enable video screenshot and upload:
+   * - `true`: Enables video screenshot and upload.
+   * - `false`: Disables video screenshot and upload.
+   * @param config Screenshot and upload configuration. See `ContentInspectConfig`.
+   * @param connection The connection information. See `RtcConnection`.
+   *
+   * @return
+   * - 0: Success.
+   * - < 0: Failure.
+   */
  virtual int enableContentInspectEx(bool enabled, const media::ContentInspectConfig &config,
                                     const RtcConnection& connection) = 0;

  /**
-   @brief Start tracing media rendering events.
-   @since v4.1.1
-   @discussion
-   - SDK will trace media rendering events when this API is called.
-   - The tracing result can be obtained through callback `IRtcEngineEventHandler(Ex)::onVideoRenderingTracingResult`
-   @param connection The RtcConnection object.
-   @note
-   - By default, SDK will trace media rendering events when `IRtcEngineEx::joinChannelEx` is called.
-   - The start point of event tracing will be reset after leaving channel.
-   @return
-   - 0: Success.
-   - < 0: Failure.
-   - -2(ERR_INVALID_ARGUMENT): The parameter is invalid. Check the channel ID and local uid set by parameter `connection`.
-   - -7(ERR_NOT_INITIALIZED): The SDK is not initialized. Initialize the `IRtcEngine` instance before calling this method.
+   * @brief Enables tracing the video frame rendering process.
+ * + * @since v4.1.1 + * + * @details + * The SDK starts tracing the rendering status of the video frames in the channel from the moment + * this method is successfully called and reports information about the event through the + * `onVideoRenderingTracingResult` callback. + * Applicable scenarios: Agora recommends that you use this method in conjunction with the UI + * settings (such as buttons and sliders) in your app to improve the user experience. For example, + * call this method when the user clicks the Join Channel button, and then get the time spent during + * the video frame rendering process through the `onVideoRenderingTracingResult` callback, so as to + * optimize the indicators accordingly. + * + * @note + * - If you have not called this method, the SDK tracks the rendering events of the video frames + * from the moment you call `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` to join the channel. You can call this method at an + * appropriate time according to the actual application scenario to set the starting position for + * tracking video rendering events. + * - After the local user leaves the current channel, the SDK automatically tracks the video + * rendering events from the moment you join a channel. + * + * @param connection The connection information. See `RtcConnection`. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int startMediaRenderingTracingEx(const RtcConnection& connection) = 0; @@ -1956,22 +2592,18 @@ class IRtcEngineEx : public IRtcEngine { virtual int setParametersEx(const RtcConnection& connection, const char* parameters) = 0; /** - * Gets the current call ID. + * @brief Gets the call ID with the connection ID. * - * When a user joins a channel on a client, a `callId` is generated to identify - * the call. + * @details + * When a user joins a channel on a client, a `callId` is generated to identify the call from the + * client. 
You can call this method to get `callId`, and pass it in when calling methods such as + * `rate` and `complain`. + * Call timing: Call this method after joining a channel. * - * After a call ends, you can call `rate` or `complain` to gather feedback from the customer. - * These methods require a `callId` parameter. To use these feedback methods, call the this - * method first to retrieve the `callId` during the call, and then pass the value as an - * argument in the `rate` or `complain` method after the call ends. + * @param callId Output parameter, the current call ID. + * @param connection The connection information. See `RtcConnection`. * - * @param callId The reference to the call ID. - * @param connection The RtcConnection object. - * @return - * - The call ID if the method call is successful. - * - < 0: Failure. - */ + */ virtual int getCallIdEx(agora::util::AString& callId, const RtcConnection& connection) = 0; /** @@ -1986,6 +2618,87 @@ class IRtcEngineEx : public IRtcEngine { * @technical preview */ virtual int sendAudioMetadataEx(const RtcConnection& connection, const char* metadata, size_t length) = 0; + + /** + * @brief Preloads a specified sound effect to a channel. + * + * @since v4.6.0 + * + * @details + * Each time you call this method, you can only preload one sound effect file into memory. If you + * need to preload multiple sound files, please call this method multiple times. After preloading is + * complete, you can call `playEffect` to play the preloaded sound effects, or call `playAllEffects` + * to play all preloaded sound effects. + * Applicable scenarios: This method can be called in scenarios involving multiple channels. + * + * @note + * - To ensure a smooth experience, the size of sound effect files should not exceed the limit. + * - Agora recommends that you call this method before joining a channel. + * - If preloadEffectEx is called before playEffectEx is executed, the file resource will not be closed after playEffectEx. 
+ * The next time playEffectEx is executed, it will directly seek to play at the beginning. + * - If preloadEffectEx is not called before playEffectEx is executed, the resource will be destroyed after playEffectEx. + * The next time playEffectEx is executed, it will try to reopen the file and play it from the beginning. + * + * @param connection One `RtcConnection` object. See `RtcConnection`. + * @param soundId The audio effect ID. + * @param filePath The absolute path of the local file or the URL of the online file. Supported + * audio formats include: mp3, mp4, m4a, aac, 3gp, mkv and wav. + * @param startPos The playback position (ms) of the audio effect file. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int preloadEffectEx(const RtcConnection& connection, int soundId, const char* filePath, int startPos = 0) = 0; + + /** + * @brief Plays a specified sound effect in a channel. + * + * @since v4.6.0 + * + * @details + * You can call this method to play a specified sound effect to all users in the channel. Each call + * to this method can only play one sound effect. To play multiple sound effects simultaneously, + * please call this method multiple times. This method allows you to set whether to publish sound + * effects in a channel. In order to play multiple sound files simultaneously, simply call the + * method multiple times with different `soundId` and `filePath` parameters. + * Applicable scenarios: This method can be called in scenarios involving multiple channels. + * + * @note + * - Agora recommends not playing more than three sound effects at the same time. + * - The sound effect ID and file path in this method must be consistent with those in the + * `preloadEffectEx` method. + * - If preloadEffectEx is called before playEffectEx is executed, the file resource will not be closed after playEffectEx. + * The next time playEffectEx is executed, it will directly seek to play at the beginning. 
+ * - If preloadEffectEx is not called before playEffectEx is executed, the resource will be destroyed after playEffectEx. + * The next time playEffectEx is executed, it will try to reopen the file and play it from the beginning. + * + * @param connection One `RtcConnection` object. See `RtcConnection`. + * @param soundId The audio effect ID. + * @param filePath The absolute path of the local file or the URL of the online file. Supported + * audio formats: mp3, mp4, m4a, aac, 3gp, mkv and wav. + * @param loopCount Number of times the sound effect to be looped: + * - `-1`: Loop infinitely until calling `stopEffect` or `stopAllEffects`. + * - `0`: Play once. + * - `1`: Play twice. + * @param pitch The pitch of the audio effect. The range is from 0.5 to 2.0, with a default value of + * 1.0 (original pitch). The lower the value, the lower the pitch. + * @param pan The spatial position of the audio effect. The range of values is from -1.0 to 1.0: + * - `-1.0`: The audio effect is heard on the left of the user. + * - `0.0`: The audio effect is heard in front of the user. + * - `1.0`: The audio effect is heard on the right of the user. + * @param gain The volume of the audio effect. The value range is from 0 to 100, with a default + * value of 100 (original volume). The smaller the value, the lower the volume. + * @param publish Whether to publish the audio effect in the channel: + * - `true`: Publish the audio effect in the channel. + * - `false`: (Default) Do not publish the audio effect in the channel. + * @param startPos The playback position (ms) of the audio effect file. + * + * @return + * - 0: Success. + * - < 0: Failure. 
+ */ + virtual int playEffectEx(const RtcConnection& connection, int soundId, const char* filePath, int loopCount, double pitch, double pan, int gain, bool publish = false, int startPos = 0) = 0; }; } // namespace rtc diff --git a/include/IAgoraSpatialAudio.h b/include/IAgoraSpatialAudio.h index 1c96632..2286aae 100644 --- a/include/IAgoraSpatialAudio.h +++ b/include/IAgoraSpatialAudio.h @@ -15,40 +15,86 @@ namespace agora { namespace rtc { -// The information of remote voice position +/** + * @brief The spatial position of the remote user or the media player. + */ struct RemoteVoicePositionInfo { - // The coordnate of remote voice source, (x, y, z) + /** + * The coordinates in the world coordinate system. This parameter is an array of length 3, and the + * three values represent the front, right, and top coordinates in turn. + */ float position[3]; - // The forward vector of remote voice, (x, y, z). When it's not set, the vector is forward to listner. + /** + * The unit vector of the x axis in the coordinate system. This parameter is an array of length 3, + * and the three values represent the front, right, and top coordinates in turn. + */ float forward[3]; }; +/** + * @brief Sound insulation area settings. + */ struct SpatialAudioZone { - //the zone id + /** + * The ID of the sound insulation area. + */ int zoneSetId; - //zone center point + /** + * The spatial center point of the sound insulation area. This parameter is an array of length 3, + * and the three values represent the front, right, and top coordinates in turn. + */ float position[3]; - //forward direction + /** + * Starting at `position`, the forward unit vector. This parameter is an array of length 3, and the + * three values represent the front, right, and top coordinates in turn. + */ float forward[3]; - //right direction + /** + * Starting at `position`, the right unit vector. 
This parameter is an array of length 3, and the + * three values represent the front, right, and top coordinates in turn. + */ float right[3]; - //up direction + /** + * Starting at `position`, the up unit vector. This parameter is an array of length 3, and the three + * values represent the front, right, and top coordinates in turn. + */ float up[3]; - //the forward side length of the zone + /** + * The entire sound insulation area is regarded as a cube; this represents the length of the forward + * side in the unit length of the game engine. + */ float forwardLength; - //tehe right side length of the zone + /** + * The entire sound insulation area is regarded as a cube; this represents the length of the right + * side in the unit length of the game engine. + */ float rightLength; - //the up side length of the zone + /** + * The entire sound insulation area is regarded as a cube; this represents the length of the up side + * in the unit length of the game engine. + */ float upLength; - //the audio attenuation of zone + /** + * The sound attenuation coefficient when users within the sound insulation area communicate with + * external users. The value range is [0,1]. The values are as follows: + * - 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume + * and timbre heard by local users do not change regardless of distance. + * - (0,0.5): Weak attenuation mode, that is, the volume and timbre are only weakly attenuated + * during the propagation process, and the sound can travel farther than the real environment. + * - 0.5: (Default) simulates the attenuation of the volume in the real environment; the effect is + * equivalent to not setting the `audioAttenuation` parameter. + * - (0.5,1]: Strong attenuation mode (default value is 1), that is, the volume and timbre attenuate + * rapidly during propagation. 
+ */ float audioAttenuation; }; -/** The definition of LocalSpatialAudioConfig +/** + * @brief The configuration of `ILocalSpatialAudioEngine`. */ struct LocalSpatialAudioConfig { - /*The reference to \ref IRtcEngine, which is the base interface class of the Agora RTC SDK and provides - * the real-time audio and video communication functionality. + /** + * `IRtcEngine`. */ agora::rtc::IRtcEngine* rtcEngine; @@ -63,30 +109,53 @@ class ILocalSpatialAudioEngine: public RefCountInterface { public: /** - * Releases all the resources occupied by spatial audio engine. + * @brief Destroys `IBaseSpatialAudioEngine`. + * + * @details + * This method releases all resources under `IBaseSpatialAudioEngine`. When the user does not need + * to use the spatial audio effect, you can call this method to release resources for other + * operations. + * After calling this method, you can no longer use any of the APIs under `IBaseSpatialAudioEngine`. + * + * @note Call this method before the `release` method under `IRtcEngine`. + * */ virtual void release() = 0; /** - * Initializes the ILocalSpatialAudioEngine object and allocates the internal resources. + * @brief Initializes `ILocalSpatialAudioEngine`. * - * @note Ensure that you call IRtcEngine::queryInterface and initialize before calling any other ILocalSpatialAudioEngine APIs. + * @note + * - Call this method after calling `queryInterface` `(AGORA_IID_LOCAL_SPATIAL_AUDIO)`. + * - Before calling other methods of the `ILocalSpatialAudioEngine` class, you need to call this + * method to initialize `ILocalSpatialAudioEngine`. + * - The SDK supports creating only one `ILocalSpatialAudioEngine` instance for an app. * - * @param config The pointer to the LocalSpatialAudioConfig. See #LocalSpatialAudioConfig. + * @param config The configuration of `ILocalSpatialAudioEngine`. See `LocalSpatialAudioConfig`. * * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. 
*/ virtual int initialize(const LocalSpatialAudioConfig& config) = 0; /** - * Updates the position information of remote user. You should call it when remote user whose role is broadcaster moves. + * @brief Updates the spatial position of the specified remote user. + * + * @details + * After successfully calling this method, the SDK calculates the spatial audio parameters based on + * the relative position of the local and remote user. + * + * @note Call this method after the `joinChannel(const char* token, const char* channelId, const + * char* info, uid_t uid)` or `joinChannel(const char* token, const char* channelId, uid_t uid, + * const ChannelMediaOptions& options)` method. + * + * @param uid The user ID. This parameter must be the same as the user ID passed in when the user + * joined the channel. + * @param posInfo The spatial position of the remote user. See `RemoteVoicePositionInfo`. * - * @param uid The remote user ID. It should be the same as RTC channel remote user id. - * @param posInfo The position information of remote user. See #RemoteVoicePositionInfo. * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int updateRemotePosition(uid_t uid, const RemoteVoicePositionInfo &posInfo) = 0; /** @@ -101,12 +170,22 @@ class ILocalSpatialAudioEngine: public RefCountInterface { */ virtual int updateRemotePositionEx(uid_t uid, const RemoteVoicePositionInfo &posInfo, const RtcConnection& connection) = 0; /** - * Remove the position information of remote user. You should call it when remote user called IRtcEngine::leaveChannel. + * @brief Removes the spatial position of the specified remote user. + * + * @details + * After successfully calling this method, the local user no longer hears the specified remote user. + * After leaving the channel, to avoid wasting computing resources, call this method to delete the + * spatial position information of the specified remote user. 
Otherwise, the user's spatial position + * information will be saved continuously. When the number of remote users exceeds the number of + * audio streams that can be received as set in `setMaxAudioRecvCount`, the system automatically + * unsubscribes from the audio stream of the user who is furthest away based on relative distance. + * + * @param uid The user ID. This parameter must be the same as the user ID passed in when the user + * joined the channel. * - * @param uid The remote user ID. It should be the same as RTC channel remote user id. * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int removeRemotePosition(uid_t uid) = 0; /** @@ -145,84 +224,96 @@ class ILocalSpatialAudioEngine: public RefCountInterface { virtual int updateSelfPositionEx(const float position[3], const float axisForward[3], const float axisRight[3], const float axisUp[3], const RtcConnection& connection) = 0; /** - * This method sets the maximum number of streams that a player can receive in a - * specified audio reception range. + * @brief Sets the maximum number of streams that a user can receive in a specified audio reception + * range. * - * @note You can call this method either before or after calling enterRoom: - * - Calling this method before enterRoom affects the maximum number of received streams - * the next time the player enters a room. - * - Calling this method after entering a room changes the current maximum number of - * received streams of the player. + * @details + * If the number of receivable streams exceeds the set value, the local user receives the `maxCount` + * streams that are closest to the local user. * - * @param maxCount The maximum number of streams that a player can receive within - * a specified audio reception range. If the number of receivable streams exceeds - * the set value, the SDK receives the set number of streams closest to the player. 
+ * @param maxCount The maximum number of streams that a user can receive within a specified audio + * reception range. The value of this parameter should be ≤ 16, and the default value is 10. * * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int setMaxAudioRecvCount(int maxCount) = 0; /** - * This method sets the audio reception range. The unit of the audio reception range - * is the same as the unit of distance in the game engine. + * @brief Sets the audio reception range of the local user. * - * @note You can call this method either before or after calling enterRoom. - * During the game, you can call it multiple times to update the audio reception range. + * @details + * After the setting is successful, the local user can only hear the remote users within the setting + * range or belonging to the same team. You can call this method at any time to update the audio + * reception range. * - * @param range The maximum audio reception range, in the unit of game engine distance. + * @param range The maximum audio reception range. The unit is meters. The value of this parameter + * must be greater than 0, and the default value is 20. * * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int setAudioRecvRange(float range) = 0; /** - * This method sets distance unit of game engine. The smaller the unit is, the sound fades slower - * with distance. + * @brief Sets the length (in meters) of the game engine distance per unit. * - * @note You can call this method either before or after calling enterRoom. - * During the game, you can call it multiple times to update the distance unit. + * @details + * In a game engine, the unit of distance is customized, while in the Agora spatial audio algorithm, + * distance is measured in meters. By default, the SDK converts the game engine distance per unit to + * one meter. You can call this method to convert the game engine distance per unit to a specified + * number of meters. 
* - * @param unit The number of meters that the game engine distance per unit is equal to. For example, setting unit as 2 means the game engine distance per unit equals 2 meters. + * @param unit The number of meters that the game engine distance per unit is equal to. The value of + * this parameter must be greater than 0.00, and the default value is 1.00. For example, setting + * unit as 2.00 means the game engine distance per unit equals 2 meters.The larger the value is, the + * faster the sound heard by the local user attenuates when the remote user moves far away from the + * local user. * * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int setDistanceUnit(float unit) = 0; /** - * Updates the position of local user. - * When calling it in ICloudSpatialAudioEngine, it triggers the SDK to update the user position to the Agora spatial audio server. The Agora spatial audio server uses the users' world coordinates and audio reception range to determine whether they are within each other's specified audio reception range. - * When calling it in ILocalSpatialAudioEngine, it triggers the SDK to calculate the relative position between the local and remote users and updates spatial audio parameters. + * @brief Updates the spatial position of the local user. * - * when calling it in ICloudSpatialAudioEngine, you should notice: - * @note - * - Call the method after calling enterRoom. - * - The call frequency is determined by the app. Agora recommends calling this method every - * 120 to 7000 ms. Otherwise, the SDK may lose synchronization with the server. + * @details + * - Under the `ILocalSpatialAudioEngine` class, this method needs to be used with + * `updateRemotePosition`. The SDK calculates the relative position between the local and remote + * users according to this method and the parameter settings in `updateRemotePosition`, and then + * calculates the user's spatial audio effect parameters. 
+ * + * @param position The coordinates in the world coordinate system. This parameter is an array of + * length 3, and the three values represent the front, right, and top coordinates in turn. + * @param axisForward The unit vector of the x axis in the coordinate system. This parameter is an + * array of length 3, and the three values represent the front, right, and top coordinates in turn. + * @param axisRight The unit vector of the y axis in the coordinate system. This parameter is an + * array of length 3, and the three values represent the front, right, and top coordinates in turn. + * @param axisUp The unit vector of the z axis in the coordinate system. This parameter is an array + * of length 3, and the three values represent the front, right, and top coordinates in turn. * - * @param position The sound position of the user. The coordinate order is forward, right, and up. - * @param axisForward The vector in the direction of the forward axis in the coordinate system. - * @param axisRight The vector in the direction of the right axis in the coordinate system. - * @param axisUp The vector in the direction of the up axis in the coordinate system. * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int updateSelfPosition(const float position[3], const float axisForward[3], const float axisRight[3], const float axisUp[3]) = 0; /** - * Updates the position of a media player in scene. This method has same behavior both in ICloudSpatialAudioEngine and ILocalSpatialAudioEngine. + * @brief Updates the spatial position of the media player. * - * @note - * - This method is suggested to be called once if you don't move media player in the virtual space. + * @details + * After a successful update, the local user can hear the change in the spatial position of the + * media player. + * Call timing: This method can be called either before or after joining the channel. + * + * @param playerId The ID of the media player. 
You can get the player ID by calling
+ * `getMediaPlayerId`.
+ * @param positionInfo The spatial position of the media player. See `RemoteVoicePositionInfo`.
 *
- * @param playerId The ID of the media player. You can get it by IMediaPlayer::getMediaPlayerId.
- * @param positionInfo The position information of media player in the virtual space. For details inforamtion, see the declaration of RemoteVoicePositionInfo.
 * @return
 * - 0: Success.
- * - <0: Failure.
+ * - < 0: Failure.
 */
 virtual int updatePlayerPositionInfo(int playerId, const RemoteVoicePositionInfo& positionInfo) = 0;
@@ -237,63 +328,190 @@ class ILocalSpatialAudioEngine: public RefCountInterface {
 virtual int setParameters(const char* params) = 0;

 /**
- * Mute or unmute local audio stream.
+ * @brief Stops or resumes publishing the local audio stream.
+ *
+ * @note
+ * - This method does not affect any ongoing audio recording, because it does not disable the audio
+ * capture device.
+ * - Call this method after the `joinChannel(const char* token, const char* channelId, const char*
+ * info, uid_t uid)` or `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` method.
+ * - When using the spatial audio effect, if you need to set whether to stop publishing the local
+ * audio stream, Agora recommends calling this method instead of the
+ * `muteLocalAudioStream` method in `IRtcEngine`.
+ * - A successful call of this method triggers the `onUserMuteAudio` and `onRemoteAudioStateChanged`
+ * callbacks on the remote client.
+ *
+ * @param mute Whether to stop publishing the local audio stream:
+ * - `true`: Stop publishing the local audio stream.
+ * - `false`: Publish the local audio stream.
 *
- * @param mute When it's false, it will send local audio stream, otherwise it will not send local audio stream.
 * @return
 * - 0: Success.
- * - <0: Failure.
+ * - < 0: Failure.
*/ virtual int muteLocalAudioStream(bool mute) = 0; /** - * Mute all remote audio streams. It determines wether SDK receves remote audio streams or not. + * @brief Stops or resumes subscribing to the audio streams of all remote users. + * + * @details + * After successfully calling this method, the local user stops or resumes subscribing to the audio + * streams of all remote users, including all subsequent users. + * + * @note + * - Call this method after the `joinChannel(const char* token, const char* channelId, const char* + * info, uid_t uid)` or `joinChannel(const char* token, const char* channelId, uid_t uid, const + * ChannelMediaOptions& options)` method. + * - When using the spatial audio effect, if you need to set whether to stop subscribing to the + * audio streams of all remote users, Agora recommends calling this method instead of the + * `muteAllRemoteAudioStreams` method in `IRtcEngine`. + * - After calling this method, you need to call `updateSelfPosition` and `updateRemotePosition` to + * update the spatial location of the local user and the remote user; otherwise, the settings in + * this method do not take effect. + * + * @param mute Whether to stop subscribing to the audio streams of all remote users: + * - `true`: Stop subscribing to the audio streams of all remote users. + * - `false`: Subscribe to the audio streams of all remote users. * - * @param mute When it's false, SDK will receive remote audio streams, otherwise SDK will not receive remote audio streams. * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int muteAllRemoteAudioStreams(bool mute) = 0; /** - * Mute or unmute remote user audio stream. + * @brief Stops or resumes subscribing to the audio stream of a specified user. 
+ *
+ * @note
+ * - Call this method after the `joinChannel(const char* token, const char* channelId, const char*
+ * info, uid_t uid)` or `joinChannel(const char* token, const char* channelId, uid_t uid, const
+ * ChannelMediaOptions& options)` method.
+ * - When using the spatial audio effect, if you need to set whether to stop subscribing to the
+ * audio stream of a specified user, Agora recommends calling this method instead of the
+ * `muteRemoteAudioStream` method in `IRtcEngine`.
+ *
+ * @param uid The user ID. This parameter must be the same as the user ID passed in when the user
+ * joined the channel.
+ * @param mute Whether to stop subscribing to the specified remote user's audio stream:
+ * - `true`: Stop subscribing to the audio stream of the specified user.
+ * - `false`: (Default) Subscribe to the audio stream of the specified user. The SDK decides whether
+ * to subscribe according to the distance between the local user and the remote user.
 *
- * @param uid The ID of the remote user.
- * @param mute When it's false, SDK will receive remote user audio streams, otherwise SDK will not receive remote user audio streams.
 * @return
 * - 0: Success.
- * - <0: Failure.
+ * - < 0: Failure.
 */
 virtual int muteRemoteAudioStream(uid_t uid, bool mute) = 0;

+ /**
+ * @brief Sets the sound attenuation effect for the specified user.
+ *
+ * @param uid The user ID. This parameter must be the same as the user ID passed in when the user
+ * joined the channel.
+ * @param attenuation For the user's sound attenuation coefficient, the value range is [0,1]. The
+ * values are as follows:
+ * - 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume
+ * and timbre heard by local users do not change regardless of distance.
+ * - (0,0.5): Weak attenuation mode, that is, the volume and timbre are only weakly attenuated
+ * during the propagation process, and the sound can travel farther than the real environment.
+ * - 0.5: (Default) simulates the attenuation of the volume in the real environment; the effect is + * equivalent to not setting the `speaker_attenuation` parameter. + * - (0.5,1]: Strong attenuation mode, that is, the volume and timbre attenuate rapidly during the + * propagation process. + * @param forceSet Whether to force the user's sound attenuation effect: + * - `true`: Force `attenuation` to set the sound attenuation of the user. At this time, the + * `attenuation` coefficient of the sound insulation area set in the `audioAttenuation` of the + * `SpatialAudioZone` does not take effect for the user. + * - `false`: Do not force `attenuation` to set the user's sound attenuation effect, as shown in the + * following two cases. + * - If the sound source and listener are inside and outside the sound isolation area, the sound + * attenuation effect is determined by the `audioAttenuation` in `SpatialAudioZone`. + * - If the sound source and the listener are in the same sound insulation area or outside the + * same sound insulation area, the sound attenuation effect is determined by `attenuation` in this + * method. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ virtual int setRemoteAudioAttenuation(uid_t uid, double attenuation, bool forceSet) = 0; /** - * Setting up sound Space + * @brief Sets the sound insulation area. + * + * @details + * In virtual interactive scenarios, you can use this method to set the sound insulation area and + * sound attenuation coefficient. When the sound source (which can be the user or the media player) + * and the listener belong to the inside and outside of the sound insulation area, they can + * experience the attenuation effect of sound similar to the real environment when it encounters a + * building partition. 
+ * - When the sound source and the listener belong to the inside and outside of the sound insulation
+ * area, the sound attenuation effect is determined by the sound attenuation coefficient in
+ * `SpatialAudioZone`.
+ * - If the user or media player is in the same sound insulation area, it is not affected by
+ * `SpatialAudioZone`, and the sound attenuation effect is determined by the `attenuation` parameter
+ * in `setPlayerAttenuation` or `setRemoteAudioAttenuation`. If you do not call
+ * `setPlayerAttenuation` or `setRemoteAudioAttenuation`, the default sound attenuation coefficient
+ * of the SDK is 0.5, which simulates the attenuation of the sound in the real environment.
+ * - If the sound source and the receiver belong to two sound insulation areas, the receiver cannot
+ * hear the sound source.
+ *
+ * @note If this method is called multiple times, the last sound insulation area set takes effect.
+ *
+ * @param zones Sound insulation area settings. See `SpatialAudioZone`. When you set this parameter
+ * to `NULL`, it means clearing all sound insulation zones. Attention: On the Windows platform, it is
+ * necessary to ensure that the number of members in the `zones` array is equal to the value of
+ * `zoneCount`; otherwise, it may cause a crash.
+ * @param zoneCount The number of sound insulation areas.
 *
- * @param zones The Sound space array
- * @param zoneCount the sound Space count of array
 * @return
 * - 0: Success.
- * - <0: Failure.
+ * - < 0: Failure.
 */
 virtual int setZones(const SpatialAudioZone *zones, unsigned int zoneCount) = 0;

 /**
- * Set the audio attenuation coefficient of the player
- * @param playerId The ID of the media player. You can get it by IMediaPlayer::getMediaPlayerId.
- * @param attenuation The audio attenuation of the media player.
- * @param forceSet Whether to force the setting of audio attenuation coefficient.
+ * @brief Sets the sound attenuation properties of the media player.
+ *
+ * @param playerId The ID of the media player. You can get the player ID by calling
+ * `getMediaPlayerId`.
+ * @param attenuation The sound attenuation coefficient of the remote user or media player. The
+ * value range is [0,1]. The values are as follows:
+ * - 0: Broadcast mode, where the volume and timbre are not attenuated with distance, and the volume
+ * and timbre heard by local users do not change regardless of distance.
+ * - (0,0.5): Weak attenuation mode, that is, the volume and timbre are only weakly attenuated
+ * during the propagation process, and the sound can travel farther than the real environment.
+ * - 0.5: (Default) simulates the attenuation of the volume in the real environment; the effect is
+ * equivalent to not setting the `speaker_attenuation` parameter.
+ * - (0.5,1]: Strong attenuation mode, that is, the volume and timbre attenuate rapidly during the
+ * propagation process.
+ * @param forceSet Whether to force the sound attenuation effect of the media player:
+ * - `true`: Force `attenuation` to set the attenuation of the media player. At this time, the
+ * attenuation coefficient of the sound insulation area set in the `audioAttenuation` in the
+ * `SpatialAudioZone` does not take effect for the media player.
+ * - `false`: Do not force `attenuation` to set the sound attenuation effect of the media player, as
+ * shown in the following two cases.
+ * - If the sound source and listener are inside and outside the sound isolation area, the sound
+ * attenuation effect is determined by the `audioAttenuation` in `SpatialAudioZone`.
+ * - If the sound source and the listener are in the same sound insulation area or outside the
+ * same sound insulation area, the sound attenuation effect is determined by `attenuation` in this
+ * method.
+ *
 * @return
 * - 0: Success.
- * - <0: Failure.
+ * - < 0: Failure.
*/ virtual int setPlayerAttenuation(int playerId, double attenuation, bool forceSet) = 0; /** - * Clear the position informations of remote users. + * @brief Removes the spatial positions of all remote users. + * + * @details + * After successfully calling this method, the local user no longer hears any remote users. + * After leaving the channel, to avoid wasting resources, you can also call this method to delete + * the spatial positions of all remote users. * * @return * - 0: Success. - * - <0: Failure. + * - < 0: Failure. */ virtual int clearRemotePositions() = 0; }; diff --git a/include/IAudioDeviceManager.h b/include/IAudioDeviceManager.h index ab3cfe5..c0aa1c2 100644 --- a/include/IAudioDeviceManager.h +++ b/include/IAudioDeviceManager.h @@ -9,11 +9,11 @@ namespace agora { namespace rtc { /** - * The maximum device ID length. + * @brief The maximum length of the device ID. */ enum MAX_DEVICE_ID_LENGTH_TYPE { /** - * The maximum device ID length is 512. + * The maximum length of the device ID is 512 bytes. */ MAX_DEVICE_ID_LENGTH = 512 }; @@ -26,23 +26,27 @@ class IAudioDeviceCollection { virtual ~IAudioDeviceCollection() {} /** - * Gets the total number of the playback or recording devices. + * @brief Gets the total number of audio playback or audio capture devices. * - * Call \ref IAudioDeviceManager::enumeratePlaybackDevices - * "enumeratePlaybackDevices" first, and then call this method to return the - * number of the audio playback devices. + * @details + * If you call `enumeratePlaybackDevices` before this method, the SDK returns the number of audio + * playback devices. If you call `enumerateRecordingDevices` before this method, the SDK returns the + * number of audio capture devices. * * @return - * - The number of the audio devices, if the method call succeeds. - * - < 0, if the method call fails. + * The number of audio playback or audio capture devices. */ virtual int getCount() = 0; /** - * Gets the information of a specified audio device. 
- * @param index An input parameter that specifies the audio device.
- * @param deviceName An output parameter that indicates the device name.
- * @param deviceId An output parameter that indicates the device ID.
+ * @brief Gets a specified piece of information about an indexed audio device.
+ *
+ * @param index The index value of the audio device. The value of this parameter must be less than
+ * the value returned in `getCount`.
+ * @param deviceName The device name. The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @param deviceId The device ID of the audio device. The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -51,12 +55,21 @@ class IAudioDeviceCollection {
 char deviceId[MAX_DEVICE_ID_LENGTH]) = 0;

 /**
- * Gets the information of a specified audio device.
- * @note
- * @param index An input parameter that specifies the audio device.
- * @param deviceName An output parameter that indicates the device name.
- * @param deviceTypeName An output parameter that indicates the device type name. such as Built-in, USB, HDMI, etc. (MacOS only)
- * @param deviceId An output parameter that indicates the device ID.
+ * @brief Gets the audio device information and device type by index.
+ *
+ * @details
+ * You can call this method to get the name, type and ID of a specified audio device.
+ *
+ * @note This method is for macOS only.
+ *
+ * @param index An input parameter. The index of the device.
+ * @param deviceId An output parameter. The device ID. The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @param deviceName An output parameter. The device name. The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @param deviceTypeName Output parameter; indicates the type of audio devices, such as built-in,
+ * USB and HDMI. The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`.
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -65,8 +78,15 @@ class IAudioDeviceCollection {
 char deviceId[MAX_DEVICE_ID_LENGTH]) = 0;

 /**
- * Specifies a device with the device ID.
- * @param deviceId The device ID.
+ * @brief Specifies the audio device with the device ID.
+ *
+ * @note
+ * - Plugging or unplugging a device does not change its device ID.
+ * - This method is for Windows and macOS only.
+ *
+ * @param deviceId The device ID. You can get the device ID by calling `enumeratePlaybackDevices` or
+ * `enumerateRecordingDevices`.
+ * The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`.
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -74,10 +94,17 @@ class IAudioDeviceCollection {
 virtual int setDevice(const char deviceId[MAX_DEVICE_ID_LENGTH]) = 0;

 /**
- * Gets the default audio device of the system (for macOS and Windows only).
+ * @brief Gets the default audio device of the system.
 *
- * @param deviceName The name of the system default audio device.
- * @param deviceId The device ID of the the system default audio device.
+ * @details
+ * - This method is for Windows and macOS only.
+ * - You need to call `enumeratePlaybackDevices` or `enumerateRecordingDevices` to get the device
+ * list before calling this method.
+ *
+ * @param deviceName Output parameter; the name of the system's default audio device. The maximum
+ * length is `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @param deviceId Output parameter; the device ID of the system default audio device. The maximum
+ * length is `MAX_DEVICE_ID_LENGTH_TYPE`.
 *
 * @return
 * - 0: Success.
@@ -85,12 +112,20 @@ class IAudioDeviceCollection {
 */
 virtual int getDefaultDevice(char deviceName[MAX_DEVICE_ID_LENGTH], char deviceId[MAX_DEVICE_ID_LENGTH]) = 0;
- /**
- * Gets the default audio device of the system (for macOS and Windows only).
+ /**
+ * @brief Gets the system's default audio device and its type.
+ *
+ * @details
+ * - This method applies to macOS only.
+ * - You need to call `enumeratePlaybackDevices` or `enumerateRecordingDevices` to get the device + * list before calling this method. * - * @param deviceName The name of the system default audio device. - * @param deviceTypeName The device type name of the the system default audio device, such as Built-in, USB, HDMI, etc. (MacOS only) - * @param deviceId The device ID of the the system default audio device. + * @param deviceName Output parameter; the name of the system's default audio device. The maximum + * length is `MAX_DEVICE_ID_LENGTH_TYPE`. + * @param deviceTypeName Output parameter; indicates the type of audio devices, such as built-in, + * USB and HDMI. The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`. + * @param deviceId Output parameter; the device ID of the system default audio device. The maximum + * length is `MAX_DEVICE_ID_LENGTH_TYPE`. * * @return * - 0: Success. @@ -99,9 +134,9 @@ class IAudioDeviceCollection { virtual int getDefaultDevice(char deviceName[MAX_DEVICE_ID_LENGTH], char deviceTypeName[MAX_DEVICE_ID_LENGTH], char deviceId[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Sets the volume of the app. + * @brief Sets the volume of the app. * - * @param volume The volume of the app. The value range is [0, 255]. + * @param volume The volume of the app. The value range is [0,255]. * * @return * - 0: Success. @@ -110,9 +145,9 @@ class IAudioDeviceCollection { virtual int setApplicationVolume(int volume) = 0; /** - * Gets the volume of the app. + * @brief Retrieves the volume of the app. * - * @param volume The volume of the app. The value range is [0, 255] + * @param volume The volume of the app. The value range is [0,255]. * * @return * - 0: Success. @@ -120,11 +155,12 @@ class IAudioDeviceCollection { */ virtual int getApplicationVolume(int &volume) = 0; - /** Mutes or unmutes the app. + /** + * @brief Mutes/Unmutes the app. * - * @param mute Determines whether to mute the app: - * - true: Mute the app. - * - false: Unmute the app. 
+ * @param mute Whether to mute the app:
+ * - `true`: Mute the app.
+ * - `false`: Unmute the app.
 *
 * @return
 * - 0: Success.
@@ -133,11 +169,11 @@ class IAudioDeviceCollection {
 virtual int setApplicationMute(bool mute) = 0;

 /**
- * Gets the mute state of the app.
+ * @brief Checks whether the app is muted.
 *
- * @param mute A reference to the mute state of the app:
- * - true: The app is muted.
- * - false: The app is not muted.
+ * @param mute Whether the app is muted:
+ * - `true`: The app is muted.
+ * - `false`: The app is not muted.
 *
 * @return
 * - 0: Success.
@@ -146,7 +182,7 @@ class IAudioDeviceCollection {
 virtual int isApplicationMute(bool &mute) = 0;

 /**
- * Releases all IAudioDeviceCollection resources.
+ * @brief Releases all the resources occupied by the `IAudioDeviceCollection` object.
 */
 virtual void release() = 0;
};
@@ -159,44 +195,57 @@ class IAudioDeviceManager : public RefCountInterface {
 virtual ~IAudioDeviceManager() {}

 /**
- * Enumerates the audio playback devices.
+ * @brief Enumerates the audio playback devices.
+ *
+ * @details
+ * This method returns an `IAudioDeviceCollection` object that includes all audio playback devices
+ * in the system. With the `IAudioDeviceCollection` object, the application can enumerate the audio
+ * playback devices. The application must call the `release` method to release the returned object
+ * after using it.
 *
- * This method returns an IAudioDeviceCollection object that includes all the
- * audio playback devices in the system. With the IAudioDeviceCollection
- * object, the app can enumerate the audio playback devices. The app must call
- * the \ref IAudioDeviceCollection::release "IAudioDeviceCollection::release"
- * method to release the returned object after using it.
+ * @note This method is for Windows and macOS only.
 *
 * @return
- * - A pointer to the IAudioDeviceCollection object that includes all the
- * audio playback devices in the system, if the method call succeeds.
- * - The empty pointer NULL, if the method call fails.
+ * - Success: Returns an `IAudioDeviceCollection` object that includes all audio playback devices in
+ * the system.
+ * - Failure: NULL.
 */
 virtual IAudioDeviceCollection *enumeratePlaybackDevices() = 0;

 /**
- * Enumerates the audio recording devices.
+ * @brief Enumerates the audio capture devices.
+ *
+ * @details
+ * This method returns an `IAudioDeviceCollection` object that includes all audio capture devices in
+ * the system. With the `IAudioDeviceCollection` object, the application can enumerate the audio
+ * capture devices. The application must call the `release` method to release the returned object
+ * after using it.
 *
- * This method returns an IAudioDeviceCollection object that includes all the
- * audio recording devices in the system. With the IAudioDeviceCollection
- * object, the app can enumerate the audio recording devices. The app needs to
- * call the \ref IAudioDeviceCollection::release
- * "IAudioDeviceCollection::release" method to release the returned object
- * after using it.
+ * @note This method is for Windows and macOS only.
 *
 * @return
- * - A pointer to the IAudioDeviceCollection object that includes all the
- * audio recording devices in the system, if the method call succeeds.
- * - The empty pointer NULL, if the method call fails.
+ * - Success: An `IAudioDeviceCollection` object including all audio capture devices.
+ * - Failure: NULL.
 */
 virtual IAudioDeviceCollection *enumerateRecordingDevices() = 0;

 /**
- * Specifies an audio playback device with the device ID.
+ * @brief Sets the audio playback device.
+ *
+ * @details
+ * You can call this method to change the audio route currently being used, but this does not change
+ * the default audio route. For example, if the default audio route is speaker 1, you call this
+ * method to set the audio route as speaker 2 before joining a channel and then start a device
+ * test, the SDK conducts device test on speaker 2.
After the device test is completed and you join
+ * a channel, the SDK still uses speaker 1, the default audio route.
+ *
+ * @note This method is for Windows and macOS only.
+ *
+ * @param deviceId The ID of the specified audio playback device. You can get the device ID by
+ * calling `enumeratePlaybackDevices`. Connecting or disconnecting the audio device does not change
+ * the value of `deviceId`.
+ * The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`.
 *
- * @param deviceId ID of the audio playback device. It can be retrieved by the
- * \ref enumeratePlaybackDevices "enumeratePlaybackDevices" method. Plugging
- * or unplugging the audio device does not change the device ID.
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -204,9 +253,13 @@ class IAudioDeviceManager : public RefCountInterface {
 virtual int setPlaybackDevice(const char deviceId[MAX_DEVICE_ID_LENGTH]) = 0;

 /**
- * Gets the ID of the audio playback device.
- * @param deviceId An output parameter that specifies the ID of the audio
- * playback device.
+ * @brief Retrieves the audio playback device associated with the device ID.
+ *
+ * @note This method is for Windows and macOS only.
+ *
+ * @param deviceId Output parameter. The device ID of the audio playback device. The maximum length
+ * is `MAX_DEVICE_ID_LENGTH_TYPE`.
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -214,11 +267,15 @@ class IAudioDeviceManager : public RefCountInterface {
 virtual int getPlaybackDevice(char deviceId[MAX_DEVICE_ID_LENGTH]) = 0;

 /**
- * Gets the device ID and device name of the audio playback device.
- * @param deviceId An output parameter that specifies the ID of the audio
- * playback device.
- * @param deviceName An output parameter that specifies the name of the audio
- * playback device.
+ * @brief Retrieves the information of the audio playback device.
+ *
+ * @note This method is for Windows and macOS only.
+ *
+ * @param deviceId The ID of the audio playback device.
The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @param deviceName Output parameter; the name of the playback device. The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -227,10 +284,18 @@ class IAudioDeviceManager : public RefCountInterface {
 char deviceName[MAX_DEVICE_ID_LENGTH]) = 0;

 /**
- * Gets the device ID and device name and device type name of the audio playback device.
- * @param deviceId An output parameter that specifies the ID of the audio playback device.
- * @param deviceName An output parameter that specifies the name of the audio playback device.
- * @param deviceTypeName An output parameter that specifies the device type name. such as Built-in, USB, HDMI, etc. (MacOS only)
+ * @brief Gets the information and type of the audio playback device.
+ *
+ * @details
+ * This method applies to macOS only.
+ *
+ * @param deviceId The ID of the audio playback device. The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @param deviceName Output parameter; the name of the playback device. The maximum length is
+ * `MAX_DEVICE_ID_LENGTH_TYPE`.
+ * @param deviceTypeName Output parameter; indicates the type of audio playback devices, such as
+ * built-in, USB and HDMI. The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`.
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -238,9 +303,12 @@ class IAudioDeviceManager : public RefCountInterface {
 virtual int getPlaybackDeviceInfo(char deviceId[MAX_DEVICE_ID_LENGTH], char deviceName[MAX_DEVICE_ID_LENGTH], char deviceTypeName[MAX_DEVICE_ID_LENGTH]) = 0;
 /**
- * Sets the volume of the audio playback device.
- * @param volume The volume of the audio playing device. The value range is
- * [0, 255].
+ * @brief Sets the volume of the audio playback device.
+ *
+ * @note This method applies to Windows only.
+ *
+ * @param volume The volume of the audio playback device. The value range is [0,255].
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -248,9 +316,11 @@ class IAudioDeviceManager : public RefCountInterface {
 virtual int setPlaybackDeviceVolume(int volume) = 0;

 /**
- * Gets the volume of the audio playback device.
- * @param volume The volume of the audio playback device. The value range is
- * [0, 255].
+ * @brief Retrieves the volume of the audio playback device.
+ *
+ * @param volume An output parameter. The volume of the audio playback device. The value range is
+ * [0,255].
+ *
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -258,11 +328,22 @@ class IAudioDeviceManager : public RefCountInterface {
 virtual int getPlaybackDeviceVolume(int *volume) = 0;

 /**
- * Specifies an audio recording device with the device ID.
+ * @brief Sets the audio capture device.
+ *
+ * @details
+ * You can call this method to change the audio route currently being used, but this does not change
+ * the default audio route. For example, if the default audio route is microphone, you call this
+ * method to set the audio route as bluetooth earphones before joining a channel and then start a
+ * device test, the SDK conducts device test on the bluetooth earphones. After the device test is
+ * completed and you join a channel, the SDK still uses the microphone for audio capturing.
+ *
+ * @note This method is for Windows and macOS only.
+ *
+ * @param deviceId The ID of the audio capture device. You can get the device ID by calling
+ * `enumerateRecordingDevices`. Connecting or disconnecting the audio device does not change the
+ * value of `deviceId`.
+ * The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`.
 *
- * @param deviceId ID of the audio recording device. It can be retrieved by
- * the \ref enumerateRecordingDevices "enumerateRecordingDevices" method.
- * Plugging or unplugging the audio device does not change the device ID.
 * @return
 * - 0: Success.
 * - < 0: Failure.
@@ -270,9 +351,13 @@ class IAudioDeviceManager : public RefCountInterface { virtual int setRecordingDevice(const char deviceId[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Gets the audio recording device by the device ID. + * @brief Gets the current audio recording device. + * + * @note This method is for Windows and macOS only. + * + * @param deviceId An output parameter. The device ID of the recording device. The maximum length is + * `MAX_DEVICE_ID_LENGTH_TYPE`. * - * @param deviceId ID of the audio recording device. * @return * - 0: Success. * - < 0: Failure. @@ -280,11 +365,15 @@ class IAudioDeviceManager : public RefCountInterface { virtual int getRecordingDevice(char deviceId[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Gets the information of the audio recording device by the device ID and - * device name. + * @brief Retrieves the information of the audio recording device. + * + * @note This method is for Windows and macOS only. + * + * @param deviceId The ID of the audio recording device. The maximum length is + * `MAX_DEVICE_ID_LENGTH_TYPE`. + * @param deviceName Output parameter; the name of the recording device. The maximum length is + * `MAX_DEVICE_ID_LENGTH_TYPE`. * - * @param deviceId ID of the audio recording device. - * @param deviceName The name of the audio recording device. * @return * - 0: Success. * - < 0: Failure. @@ -293,11 +382,18 @@ class IAudioDeviceManager : public RefCountInterface { char deviceName[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Gets the device ID and device name and device type name of the audio recording device. + * @brief Gets the information and type of the audio capturing device. + * + * @details + * This method applies to macOS only. + * + * @param deviceId The ID of the audio recording device. The maximum length is + * `MAX_DEVICE_ID_LENGTH_TYPE`. + * @param deviceName Output parameter; the name of the recording device. The maximum length is + * `MAX_DEVICE_ID_LENGTH_TYPE`. 
+ * @param deviceTypeName Output parameter; indicates the type of audio capturing devices, such as + * built-in, USB and HDMI. The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`. * - * @param deviceId An output parameter that indicates the device id. - * @param deviceName An output parameter that indicates the device name. - * @param deviceTypeName An output parameter that indicates the device type name. such as Built-in, USB, HDMI, etc. (MacOS only) * @return * - 0: Success. * - < 0: Failure. @@ -305,9 +401,14 @@ class IAudioDeviceManager : public RefCountInterface { virtual int getRecordingDeviceInfo(char deviceId[MAX_DEVICE_ID_LENGTH], char deviceName[MAX_DEVICE_ID_LENGTH], char deviceTypeName[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Sets the volume of the recording device. - * @param volume The volume of the recording device. The value range is [0, - * 255]. + * @brief Sets the volume of the audio capture device. + * + * @details + * This method is for Windows and macOS only. + * + * @param volume The volume of the audio recording device. The value range is [0,255]. 0 means no + * sound, 255 means maximum volume. + * * @return * - 0: Success. * - < 0: Failure. @@ -315,8 +416,13 @@ class IAudioDeviceManager : public RefCountInterface { virtual int setRecordingDeviceVolume(int volume) = 0; /** - * Gets the volume of the recording device. - * @param volume The volume of the microphone, ranging from 0 to 255. + * @brief Retrieves the volume of the audio recording device. + * + * @note This method applies to Windows only. + * + * @param volume An output parameter. It indicates the volume of the audio recording device. The + * value range is [0,255]. + * * @return * - 0: Success. * - < 0: Failure. @@ -324,11 +430,36 @@ class IAudioDeviceManager : public RefCountInterface { virtual int getRecordingDeviceVolume(int *volume) = 0; /** - * Specifies an audio loopback recording device with the device ID. + * @brief Sets the loopback device. 
+ * + * @details + * The SDK uses the current playback device as the loopback device by default. If you want to + * specify another audio device as the loopback device, call this method, and set `deviceId` to the + * loopback device you want to specify. + * You can call this method to change the audio route currently being used, but this does not change + * the default audio route. For example, if the default audio route is microphone, you call this + * method to set the audio route as a sound card before joining a channel and then start a device + * test, the SDK conducts device test on the sound card. After the device test is completed and you + * join a channel, the SDK still uses the microphone for audio capturing. + * + * @note + * This method is for Windows and macOS only. + * The scenarios where this method is applicable are as follows: + * Use app A to play music through a Bluetooth headset; when using app B for a video conference, + * play through the speakers. + * - If the loopback device is set as the Bluetooth headset, the SDK publishes the music in app A to + * the remote end. + * - If the loopback device is set as the speaker, the SDK does not publish the music in app A to + * the remote end. + * - If you set the loopback device as the Bluetooth headset, and then use a wired headset to play + * the music in app A, you need to call this method again, set the loopback device as the wired + * headset, and the SDK continues to publish the music in app A to remote end. + * + * @param deviceId Specifies the loopback device of the SDK. You can get the device ID by calling + * `enumeratePlaybackDevices`. Connecting or disconnecting the audio device does not change the + * value of `deviceId`. + * The maximum length is `MAX_DEVICE_ID_LENGTH_TYPE`. * - * @param deviceId ID of the audio loopback recording device. It can be retrieved by - * the \ref enumeratePlaybackDevices "enumeratePlaybackDevices" method. 
- * Plugging or unplugging the audio device does not change the device ID. * @return * - 0: Success. * - < 0: Failure. @@ -336,9 +467,13 @@ class IAudioDeviceManager : public RefCountInterface { virtual int setLoopbackDevice(const char deviceId[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Gets the audio loopback recording device by the device ID. + * @brief Gets the current loopback device. + * + * @details + * This method is for Windows and macOS only. + * + * @param deviceId Output parameter, the ID of the current loopback device. * - * @param deviceId ID of the audio loopback recording device. * @return * - 0: Success. * - < 0: Failure. @@ -346,11 +481,11 @@ class IAudioDeviceManager : public RefCountInterface { virtual int getLoopbackDevice(char deviceId[MAX_DEVICE_ID_LENGTH]) = 0; /** - * Mutes or unmutes the audio playback device. + * @brief Mutes the audio playback device. * - * @param mute Determines whether to mute the audio playback device. - * - true: Mute the device. - * - false: Unmute the device. + * @param mute Whether to mute the audio playback device: + * - `true`: Mute the audio playback device. + * - `false`: Unmute the audio playback device. * * @return * - 0: Success. @@ -359,11 +494,11 @@ class IAudioDeviceManager : public RefCountInterface { virtual int setPlaybackDeviceMute(bool mute) = 0; /** - * Gets the mute state of the playback device. + * @brief Retrieves whether the audio playback device is muted. * - * @param mute A pointer to the mute state of the playback device. - * - true: The playback device is muted. - * - false: The playback device is unmuted. + * @param mute Whether the audio playback device is muted. + * - `true`: The audio playback device is muted. + * - `false`: The audio playback device is unmuted. * * @return * - 0: Success. @@ -372,11 +507,11 @@ class IAudioDeviceManager : public RefCountInterface { virtual int getPlaybackDeviceMute(bool *mute) = 0; /** - * Mutes or unmutes the audio recording device. 
+ * @brief Sets the mute status of the audio capture device. * - * @param mute Determines whether to mute the recording device. - * - true: Mute the microphone. - * - false: Unmute the microphone. + * @param mute Whether to mute the audio recording device: + * - `true`: Mute the audio capture device. + * - `false`: Unmute the audio capture device. * * @return * - 0: Success. @@ -385,11 +520,11 @@ class IAudioDeviceManager : public RefCountInterface { virtual int setRecordingDeviceMute(bool mute) = 0; /** - * Gets the mute state of the audio recording device. + * @brief Gets whether the audio capture device is muted. * - * @param mute A pointer to the mute state of the recording device. - * - true: The microphone is muted. - * - false: The microphone is unmuted. + * @param mute Whether the audio capture device is muted. + * - `true`: The microphone is muted. + * - `false`: The microphone is unmuted. * * @return * - 0: Success. @@ -398,26 +533,39 @@ class IAudioDeviceManager : public RefCountInterface { virtual int getRecordingDeviceMute(bool *mute) = 0; /** - * Starts the audio playback device test. + * @brief Starts the audio playback device test. + * + * @details + * This method tests whether the audio device for local playback works properly. Once a user starts + * the test, the SDK plays an audio file specified by the user. If the user can hear the audio, the + * playback device works properly. + * After calling this method, the SDK triggers the `onAudioVolumeIndication` callback every 100 ms, + * reporting `uid` = 1 and the volume information of the playback device. + * The difference between this method and the `startEchoTest` method is that the former checks if + * the local audio playback device is working properly, while the latter can check the audio and + * video devices and network conditions. * - * This method tests if the playback device works properly. In the test, the - * SDK plays an audio file specified by the user. 
If the user hears the audio, - * the playback device works properly. + * @note Call this method before joining a channel. After the test is completed, call + * `stopPlaybackDeviceTest` to stop the test before joining a channel. * - * @param testAudioFilePath The file path of the audio file for the test, - * which is an absolute path in UTF8: - * - Supported file format: wav, mp3, m4a, and aac. - * - Supported file sampling rate: 8000, 16000, 32000, 44100, and 48000. + * @param testAudioFilePath The path of the audio file. The data format is string in UTF-8. + * - Supported file formats: wav, mp3, m4a, and aac. + * - Supported file sample rates: 8000, 16000, 32000, 44100, and 48000 Hz. * * @return - * - 0, if the method call succeeds and you can hear the sound of the - * specified audio file. - * - An error code, if the method call fails. + * - 0: Success. + * - < 0: Failure. */ virtual int startPlaybackDeviceTest(const char *testAudioFilePath) = 0; /** - * Stops the audio playback device test. + * @brief Stops the audio playback device test. + * + * @details + * This method stops the audio playback device test. You must call this method to stop the test + * after calling the `startPlaybackDeviceTest` method. + * + * @note Call this method before joining a channel. * * @return * - 0: Success. @@ -426,24 +574,39 @@ class IAudioDeviceManager : public RefCountInterface { virtual int stopPlaybackDeviceTest() = 0; /** - * Starts the recording device test. + * @brief Starts the audio capturing device test. * - * This method tests whether the recording device works properly. Once the - * test starts, the SDK uses the \ref - * IRtcEngineEventHandler::onAudioVolumeIndication "onAudioVolumeIndication" - * callback to notify the app on the volume information. + * @details + * This method tests whether the audio capturing device works properly. 
After calling this method, + * the SDK triggers the `onAudioVolumeIndication` callback at the time interval set in this method, + * which reports `uid` = 0 and the volume information of the capturing device. + * The difference between this method and the `startEchoTest` method is that the former checks if + * the local audio capturing device is working properly, while the latter can check the audio and + * video devices and network conditions. * - * @param indicationInterval The time interval (ms) between which the SDK - * triggers the `onAudioVolumeIndication` callback. + * @note Call this method before joining a channel. After the test is completed, call + * `stopRecordingDeviceTest` to stop the test before joining a channel. + * + * @param indicationInterval The interval (ms) for triggering the `onAudioVolumeIndication` + * callback. This value should be set to greater than 10, otherwise, you will not receive the + * `onAudioVolumeIndication` callback and the SDK returns the error code `-2`. Agora recommends that + * you set this value to 100. * * @return * - 0: Success. * - < 0: Failure. + * - -2: Invalid parameters. Check your parameter settings. */ virtual int startRecordingDeviceTest(int indicationInterval) = 0; /** - * Stops the recording device test. + * @brief Stops the audio capturing device test. + * + * @details + * This method stops the audio capturing device test. You must call this method to stop the test + * after calling the `startRecordingDeviceTest` method. + * + * @note Call this method before joining a channel. * * @return * - 0: Success. @@ -452,19 +615,29 @@ class IAudioDeviceManager : public RefCountInterface { virtual int stopRecordingDeviceTest() = 0; /** - * Starts the audio device loopback test. + * @brief Starts an audio device loopback test. * - * This method tests whether the local audio devices are working properly. 
- * After calling this method, the microphone captures the local audio and - * plays it through the speaker, and the \ref - * IRtcEngineEventHandler::onAudioVolumeIndication "onAudioVolumeIndication" - * callback returns the local audio volume information at the set interval. + * @details + * This method tests whether the local audio capture device and playback device are working + * properly. After starting the test, the audio capture device records the local audio, and the + * audio playback device plays the captured audio. The SDK triggers two independent + * `onAudioVolumeIndication` callbacks at the time interval set in this method, which reports the + * volume information of the capture device ( `uid` = 0) and the volume information of the playback + * device ( `uid` = 1) respectively. + * + * @note + * - This method is for Windows and macOS only. + * - You can call this method either before or after joining a channel. + * - This method only takes effect when called by the host. + * - This method tests local audio devices and does not report the network conditions. + * - When you finish testing, call `stopAudioDeviceLoopbackTest` to stop the audio device loopback + * test. + * + * @param indicationInterval The time interval (ms) at which the SDK triggers the + * `onAudioVolumeIndication` callback. Agora recommends setting a value greater than 200 ms. This + * value must not be less than 10 ms; otherwise, you cannot receive the `onAudioVolumeIndication` + * callback. * - * @note This method tests the local audio devices and does not report the - * network conditions. - * @param indicationInterval The time interval (ms) at which the \ref - * IRtcEngineEventHandler::onAudioVolumeIndication "onAudioVolumeIndication" - * callback returns. * @return * - 0: Success. * - < 0: Failure. 
@@ -472,58 +645,80 @@ class IAudioDeviceManager : public RefCountInterface { virtual int startAudioDeviceLoopbackTest(int indicationInterval) = 0; /** - * Stops the audio device loopback test. + * @brief Stops the audio device loopback test. + * + * @note + * - This method is for Windows and macOS only. + * - You can call this method either before or after joining a channel. + * - This method only takes effect when called by the host. + * - Ensure that you call this method to stop the loopback test after calling the + * `startAudioDeviceLoopbackTest` method. * - * @note Ensure that you call this method to stop the loopback test after - * calling the \ref IAudioDeviceManager::startAudioDeviceLoopbackTest - * "startAudioDeviceLoopbackTest" method. * @return * - 0: Success. * - < 0: Failure. */ virtual int stopAudioDeviceLoopbackTest() = 0; - /** The status of following system default playback device. - - @note The status of following system default playback device. - - @param enable Variable to whether the current device follow system default playback device or not. - - true: The current device will change when the system default playback device changed. - - false: The current device will change only current device is removed. - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the audio playback device used by the SDK to follow the system default audio playback + * device. + * + * @note This method is for Windows and macOS only. + * + * @param enable Whether to follow the system default audio playback device: + * - `true`: Follow the system default audio playback device. The SDK immediately switches the audio + * playback device when the system default audio playback device changes. + * - `false`: Do not follow the system default audio playback device. The SDK switches the audio + * playback device to the system default audio playback device only when the currently used audio + * playback device is disconnected. + * + * @return + * - 0: Success. 
+ * - < 0: Failure. */ virtual int followSystemPlaybackDevice(bool enable) = 0; - /** The status of following system default recording device. - - @note The status of following system default recording device. - - @param enable Variable to whether the current device follow system default recording device or not. - - true: The current device will change when the system default recording device changed. - - false: The current device will change only current device is removed. - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets the audio recording device used by the SDK to follow the system default audio + * recording device. + * + * @note This method is for Windows and macOS only. + * + * @param enable Whether to follow the system default audio recording device: + * - `true`: Follow the system default audio recording device. The SDK immediately switches the audio + * recording device when the system default audio recording device changes. + * - `false`: Do not follow the system default audio recording device. The SDK switches the audio + * recording device to the system default audio recording device only when the currently used audio + * recording device is disconnected. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int followSystemRecordingDevice(bool enable) = 0; - /** The status of following system default loopback device. - - @note The status of following system default loopback device. - - @param enable Variable to whether the current device follow system default loopback device or not. - - true: The current device will change when the system default loopback device changed. - - false: The current device will change only current device is removed. - @return - - 0: Success. - - < 0: Failure. + /** + * @brief Sets whether the loopback device follows the system default playback device. + * + * @details + * This method is for Windows and macOS only. 
+ * + * @param enable Whether to follow the system default audio playback device: + * - `true`: Follow the system default audio playback device. When the default playback device of + * the system is changed, the SDK immediately switches to the loopback device. + * - `false`: Do not follow the system default audio playback device. The SDK switches the audio + * loopback device to the system default audio playback device only when the current audio playback + * device is disconnected. + * + * @return + * - 0: Success. + * - < 0: Failure. */ virtual int followSystemLoopbackDevice(bool enable) = 0; /** - * Releases all IAudioDeviceManager resources. + * @brief Releases all the resources occupied by the IAudioDeviceManager object. */ virtual void release() = 0; }; diff --git a/include/internal/c/bridge.h b/include/rte_base/c/bridge.h similarity index 89% rename from include/internal/c/bridge.h rename to include/rte_base/c/bridge.h index ab8b563..a72e5a7 100644 --- a/include/internal/c/bridge.h +++ b/include/rte_base/c/bridge.h @@ -8,7 +8,7 @@ #include "handle.h" #include "common.h" -#include "stream/stream.h" +#include "./stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/c_error.h b/include/rte_base/c/c_error.h similarity index 54% rename from include/internal/c/c_error.h rename to include/rte_base/c/c_error.h index a2b9e94..1392a63 100644 --- a/include/internal/c/c_error.h +++ b/include/rte_base/c/c_error.h @@ -16,13 +16,41 @@ extern "C" { typedef struct RteString RteString; +/** + * @brief RTE-related status and error codes. + */ typedef enum RteErrorCode { + /** + * 0: Operation succeeded. + */ kRteOk, + /** + * 1: General error (no specific cause). + */ kRteErrorDefault, + /** + * 2: Invalid parameter in the method. For example, the parameter contains illegal characters or the + * passed object is null. Please reset the parameter. 
+ */ kRteErrorInvalidArgument, + /** + * 3: The current state does not support the operation, such as the object being destroyed or + * invalid. + */ kRteErrorInvalidOperation, + /** + * 4: Network error. Please check your network. + */ kRteErrorNetworkError, + /** + * 5: Token authentication failed. Possible reasons include an invalid or expired token. Please + * check your token. + */ kRteErrorAuthenticationFailed, + /** + * 6: Media stream not found. For example, no media stream is received from the host within 10 + * seconds after joining the channel. + */ kRteErrorStreamNotFound, } RteErrorCode; diff --git a/include/internal/c/c_player.h b/include/rte_base/c/c_player.h similarity index 53% rename from include/internal/c/c_player.h rename to include/rte_base/c/c_player.h index 0551eb2..b03fe3b 100644 --- a/include/internal/c/c_player.h +++ b/include/rte_base/c/c_player.h @@ -20,58 +20,351 @@ typedef struct Rte Rte; typedef struct RteStream RteStream; typedef struct RtePlayerInternal RtePlayerInternal; +/** + * @brief Player state. + * + * @since v4.4.0 + */ typedef enum RtePlayerState { + /** + * 0: Idle state. + */ kRtePlayerStateIdle, + /** + * 1: Opening URL resource. This state is reported after calling `OpenWithUrl`. + */ kRtePlayerStateOpening, + /** + * 2: URL resource opened successfully. This state is reported after `OpenWithUrl` successfully + * opens the resource. + */ kRtePlayerStateOpenCompleted, + /** + * 3: Playing. + */ kRtePlayerStatePlaying, + /** + * 4: Playback paused. This state is reported after successfully calling `Pause`. + */ kRtePlayerStatePaused, + /** + * 5: Playback completed. + */ kRtePlayerStatePlaybackCompleted, + /** + * 6: Playback stopped. This state is reported after successfully calling `Stop`. + */ kRtePlayerStateStopped, + /** + * 7: Failed state. This state is reported when an internal error occurs. If you receive this state, + * call `Stop` first and then `OpenWithUrl` to reopen the resource. 
+ */ kRtePlayerStateFailed } RtePlayerState; +/** + * @brief Player event types. + * + * @since v4.4.0 + */ typedef enum RtePlayerEvent { - kRtePlayerEventSeekBegin, - kRtePlayerEventSeekComplete, - kRtePlayerEventSeekError, - kRtePlayerEventBufferLow, - kRtePlayerEventBufferRecover, - kRtePlayerEventFreezeStart, - kRtePlayerEventFreezeStop, - kRtePlayerEventOneLoopPlaybackCompleted, - kRtePlayerEventAuthenticationWillExpire + /** + * 0: Start seeking to a specified position for playback. + */ + kRtePlayerEventSeekBegin = 0, + /** + * 1: Seek to the specified position completed. + */ + kRtePlayerEventSeekComplete = 1, + /** + * 2: Error occurred while seeking to the specified position. + */ + kRtePlayerEventSeekError = 2, + /** + * 3: The current buffer is insufficient for playback. + */ + kRtePlayerEventBufferLow = 3, + /** + * 4: The current buffer is just enough to support playback. + */ + kRtePlayerEventBufferRecover = 4, + /** + * 5: Audio or video stutter detected. + */ + kRtePlayerEventFreezeStart = 5, + /** + * 6: Audio and video stutter stopped. + */ + kRtePlayerEventFreezeStop = 6, + /** + * 7: One loop of playback completed. + */ + kRtePlayerEventOneLoopPlaybackCompleted = 7, + /** + * 8: Token is about to expire. You need to regenerate a new token and update the URL via + * `OpenWithUrl`. + */ + kRtePlayerEventAuthenticationWillExpire = 8, + /** + * 9: Due to network issues, fallback from receiving audio and video to receiving audio only. + */ + kRtePlayerEventAbrFallbackToAudioOnlyLayer = 9, + /** + * 10: After network recovers, resume from receiving audio only to receiving audio and video. + */ + kRtePlayerEventAbrRecoverFromAudioOnlyLayer = 10, + /** + * 11: Start switching to a new URL. + */ + kRtePlayerEventSwitchBegin = 11, + /** + * 12: Switch to the new URL completed. + */ + kRtePlayerEventSwitchComplete = 12, + /** + * 13: Error occurred while switching to the new URL. 
+ */ + kRtePlayerEventSwitchError = 13, + /** + * 14: First video frame displayed. + */ + kRtePlayerEventFirstDisplayed = 14, + /** + * 15: Maximum number of cache files reached. + */ + kRtePlayerEventReachCacheFileMaxCount = 15, + /** + * 16: Maximum cache file size reached. + */ + kRtePlayerEventReachCacheFileMaxSize = 16, + /** + * 17: Start trying to open a new URL. + */ + kRtePlayerEventTryOpenStart = 17, + /** + * 18: Successfully opened the new URL. + */ + kRtePlayerEventTryOpenSucceed = 18, + /** + * 19: Failed to open the new URL. + */ + kRtePlayerEventTryOpenFailed = 19, + /** + * 20: The current audio track has changed. + */ + kRtePlayerEventAudioTrackChanged = 20, } RtePlayerEvent; +/** + * @brief Quality layer of the subscribed video stream. + * + * @since v4.4.0 + * + * @note To customize the resolution for layers `kRteAbrSubscriptionLayer1` to + * `kRteAbrSubscriptionLayer6` and subscribe to them, please `contact technical support` to enable + * the ABR feature. + */ +typedef enum RteAbrSubscriptionLayer { + /** + * 0: Highest quality video stream. This layer has the highest resolution. + */ + kRteAbrSubscriptionHigh = 0, + /** + * 1: (Default) Lowest quality video stream. This layer has the lowest resolution. + */ + kRteAbrSubscriptionLow = 1, + /** + * 2: Video quality layer 1. This layer has resolution just below `kRteAbrSubscriptionHigh`. + */ + kRteAbrSubscriptionLayer1 = 2, + /** + * 3: Video quality layer 2. This layer has resolution lower than `kRteAbrSubscriptionLayer1`. + */ + kRteAbrSubscriptionLayer2 = 3, + /** + * 4: Video quality layer 3. This layer has resolution lower than `kRteAbrSubscriptionLayer2`. + */ + kRteAbrSubscriptionLayer3 = 4, + /** + * 5: Video quality layer 4. This layer has resolution lower than `kRteAbrSubscriptionLayer3`. + */ + kRteAbrSubscriptionLayer4 = 5, + /** + * 6: Video quality layer 5. This layer has resolution lower than `kRteAbrSubscriptionLayer4`. 
+ */ + kRteAbrSubscriptionLayer5 = 6, + /** + * 7: Video quality layer 6. This layer has resolution lower than `kRteAbrSubscriptionLayer5`. + */ + kRteAbrSubscriptionLayer6 = 7, +} RteAbrSubscriptionLayer; + + +/** + * @brief Fallback quality layer for video streams. + * + * @since v4.4.0 + * + * @note + * - To use `kRteAbrFallbackLayer1` to `kRteAbrFallbackLayer6`, you must `contact technical support` + * to enable the ABR feature. + * Once enabled, you can customize the resolution for each layer and select any layer as the + * **lowest resolution** for video fallback. When the network condition is poor, the SDK dynamically + * adjusts the resolution within this range, using the selected layer as the lower limit. + * - When customizing resolutions, be sure to sort the video quality layers from highest to lowest + * resolution. If resolutions are the same, sort by frame rate from highest to lowest. + */ +typedef enum RteAbrFallbackLayer { + /** + * 0: No fallback for audio and video streams, but quality is not guaranteed. + */ + kRteAbrFallbackDisabled = 0, + /** + * 1: (Default) Fallback to the lowest quality video stream. This layer has the lowest resolution. + */ + kRteAbrFallbackLow = 1, + /** + * 2: First try to receive the lowest quality video stream; if the network is too poor to display + * video, fallback to receiving only the subscribed audio stream. + */ + kRteAbrFallbackAudioOnly = 2, + /** + * 3: Fallback to video quality layer 1. This layer has resolution and bitrate just below the + * highest subscribed video quality. + */ + kRteAbrFallbackLayer1 = 3, + /** + * 4: Fallback to video quality layer 2. This layer has resolution just below layer 1. + */ + kRteAbrFallbackLayer2 = 4, + /** + * 5: Fallback to video quality layer 3. This layer has resolution just below layer 2. + */ + kRteAbrFallbackLayer3 = 5, + /** + * 6: Fallback to video quality layer 4. This layer has resolution just below layer 3. 
+ */ + kRteAbrFallbackLayer4 = 6, + /** + * 7: Fallback to video quality layer 5. This layer has resolution just below layer 4. + */ + kRteAbrFallbackLayer5 = 7, + /** + * 8: Fallback to video quality layer 6. This layer has resolution just below layer 5. + */ + kRteAbrFallbackLayer6 = 8, +} RteAbrFallbackLayer; + +/** + * Player information. + * When playerInfo changes, it will be notified through the PlayerObserver::onPlayerInfoUpdated callback interface. + * It can also be actively obtained through the Player::GetInfo interface. + * @since v4.4.0 + */ typedef struct RtePlayerInfo { + /** + * Current player state + */ RtePlayerState state; + /** + * Duration time of the current media source. This is valid when playing local media files or on-demand streams + */ size_t duration; + /** + * Stream count. This field is only valid when opening a non-RTE URL. + */ size_t stream_count; + /** + * Whether there is an audio stream. Indicates whether the url source contains the audio stream. + * - true: The url source contains the audio stream. + * - false: The url source does not contain the audio stream. + */ bool has_audio; + /** + * Whether there is a video stream. Indicates whether the url source contains the video stream. + * - true: The url source contains the video stream. + * - false: The url source does not contain the video stream. + */ bool has_video; + /** + * Whether the audio is muted. Indicates whether the receiver end stops receiving the audio stream. + * - true: Stop receiving the audio stream. + * - false: Continue receiving the audio stream. + */ bool is_audio_muted; + /** + * Whether the video is muted. Indicates whether the receiver end stops receiving the video stream. This field is only valid when you open an RTE URL. + * - true: Stop receiving the video stream. + * - false: Continue receiving the video stream. 
+ */ bool is_video_muted; + /** + * Video resolution height + */ int video_height; + /** + * Video resolution width + */ int video_width; + /** + * The currently subscribed video layer. This field is only valid when you open an RTE URL. + */ + RteAbrSubscriptionLayer abr_subscription_layer; + /** + * Audio sample rate + */ int audio_sample_rate; + /** + * Number of audio channels. + */ int audio_channels; + /** + * Audio bits per sample. This field is only valid when opening a non-RTE URL. + */ int audio_bits_per_sample; -} RtePlayerInfo; + /** + * The URL being played. + */ + RteString *current_url; +} RtePlayerInfo; +/** + * @brief Statistics of the media resource being played. + * + * @since v4.4.0 + */ typedef struct RtePlayerStats { - int video_decode_frame_rate; - int video_render_frame_rate; - int video_bitrate; - - int audio_bitrate; + /** + * Video decode frame rate (fps). + */ + int video_decode_frame_rate; + /** + * Video render frame rate (fps). + */ + int video_render_frame_rate; + /** + * Video bitrate (Kbps). + */ + int video_bitrate; + + /** + * Audio bitrate (Kbps). + */ + int audio_bitrate; } RtePlayerStats; typedef struct RteMediaTrackInfo { void *placeholder; } RteMediaTrackInfo; +/** + * @brief Type of media stream metadata. + */ typedef enum RtePlayerMetadataType { + /** + * 0: SEI (Supplemental Enhancement Information) type. 
+ */ kRtePlayerMetadataTypeSei } RtePlayerMetadataType; @@ -135,6 +428,14 @@ typedef struct RtePlayerConfig { RteString *json_parameter; bool _json_parameter_is_set; + + // live player options + RteAbrSubscriptionLayer abr_subscription_layer; + bool _abr_subscription_layer_is_set; + + RteAbrFallbackLayer abr_fallback_layer; + bool _abr_fallback_layer_is_set; + } RtePlayerConfig; typedef struct RtePlayerCustomSourceProvider RtePlayerCustomSourceProvider; @@ -167,6 +468,7 @@ struct RtePlayerObserver { }; AGORA_RTE_API_C void RtePlayerInfoInit(RtePlayerInfo *info, RteError *err); +AGORA_RTE_API_C void RtePlayerInfoCopy(RtePlayerInfo *dest, const RtePlayerInfo *src, RteError *err); AGORA_RTE_API_C void RtePlayerInfoDeinit(RtePlayerInfo *info, RteError *err); AGORA_RTE_API_C void RtePlayerStatsInit(RtePlayerStats *stats, RteError *err); @@ -317,11 +619,27 @@ AGORA_RTE_API_C void RtePlayerConfigGetJsonParameter(RtePlayerConfig *config, RteString *json_parameter, RteError *err); +AGORA_RTE_API_C void RtePlayerConfigSetAbrSubscriptionLayer(RtePlayerConfig *config, + RteAbrSubscriptionLayer abr_subscription_layer, + RteError *err); + +AGORA_RTE_API_C void RtePlayerConfigGetAbrSubscriptionLayer(RtePlayerConfig *config, + RteAbrSubscriptionLayer *abr_subscription_layer, + RteError *err); + +AGORA_RTE_API_C void RtePlayerConfigSetAbrFallbackLayer(RtePlayerConfig *config, + RteAbrFallbackLayer abr_fallback_layer, + RteError *err); + +AGORA_RTE_API_C void RtePlayerConfigGetAbrFallbackLayer(RtePlayerConfig *config, + RteAbrFallbackLayer *abr_fallback_layer, + RteError *err); + AGORA_RTE_API_C RtePlayer RtePlayerCreate(Rte *self, RtePlayerInitialConfig *config, RteError *err); AGORA_RTE_API_C void RtePlayerDestroy(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerPreloadWithUrl(RtePlayer *self, const char *url, +AGORA_RTE_API_C bool RtePlayerPreloadWithUrl(RtePlayer *self, const char *url, RteError *err); AGORA_RTE_API_C void RtePlayerOpenWithUrl( @@ -341,26 +659,26 
@@ AGORA_RTE_API_C void RtePlayerOpenWithStream(RtePlayer *self, RteStream *stream, AGORA_RTE_API_C void RtePlayerGetStats(RtePlayer *self, void (*cb)(RtePlayer *player, RtePlayerStats *stats, void *cb_data, RteError *err), void *cb_data); -AGORA_RTE_API_C void RtePlayerSetCanvas(RtePlayer *self, RteCanvas *canvas, RteError *err); +AGORA_RTE_API_C bool RtePlayerSetCanvas(RtePlayer *self, RteCanvas *canvas, RteError *err); -AGORA_RTE_API_C void RtePlayerPlay(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerStop(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerPause(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerSeek(RtePlayer *self, uint64_t new_time, +AGORA_RTE_API_C bool RtePlayerPlay(RtePlayer *self, RteError *err); +AGORA_RTE_API_C bool RtePlayerStop(RtePlayer *self, RteError *err); +AGORA_RTE_API_C bool RtePlayerPause(RtePlayer *self, RteError *err); +AGORA_RTE_API_C bool RtePlayerSeek(RtePlayer *self, uint64_t new_time, RteError *err); + +AGORA_RTE_API_C void RtePlayerSwitchWithUrl(RtePlayer *self, const char* url, bool sync_pts, void (*cb)(RtePlayer *self, void *cb_data, RteError *err), void *cb_data); -AGORA_RTE_API_C void RtePlayerMuteAudio(RtePlayer *self, bool mute, RteError *err); -AGORA_RTE_API_C void RtePlayerMuteVideo(RtePlayer *self, bool mute, RteError *err); +AGORA_RTE_API_C bool RtePlayerMuteAudio(RtePlayer *self, bool mute, RteError *err); +AGORA_RTE_API_C bool RtePlayerMuteVideo(RtePlayer *self, bool mute, RteError *err); AGORA_RTE_API_C uint64_t RtePlayerGetPosition(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerGetInfo(RtePlayer *self, RtePlayerInfo *info, RteError *err); +AGORA_RTE_API_C bool RtePlayerGetInfo(RtePlayer *self, RtePlayerInfo *info, RteError *err); -AGORA_RTE_API_C void RtePlayerGetConfigs(RtePlayer *self, +AGORA_RTE_API_C bool RtePlayerGetConfigs(RtePlayer *self, RtePlayerConfig *config, RteError *err); -AGORA_RTE_API_C void RtePlayerSetConfigs( - RtePlayer *self, 
RtePlayerConfig *config, - void (*cb)(RtePlayer *self, void *cb_data, RteError *err), void *cb_data); +AGORA_RTE_API_C bool RtePlayerSetConfigs(RtePlayer *self, RtePlayerConfig *config, RteError *err); AGORA_RTE_API_C bool RtePlayerRegisterObserver( diff --git a/include/internal/c/c_rte.h b/include/rte_base/c/c_rte.h similarity index 91% rename from include/internal/c/c_rte.h rename to include/rte_base/c/c_rte.h index 747a746..8160b32 100644 --- a/include/internal/c/c_rte.h +++ b/include/rte_base/c/c_rte.h @@ -93,18 +93,15 @@ AGORA_RTE_API_C void RteConfigGetJsonParameter(RteConfig *config, RteError *err); AGORA_RTE_API_C Rte RteCreate(RteInitialConfig *config, RteError *err); -AGORA_RTE_API_C void RteDestroy(Rte *self, RteError *err); +AGORA_RTE_API_C bool RteDestroy(Rte *self, RteError *err); AGORA_RTE_API_C bool RteInitMediaEngine(Rte *self, void (*cb)(Rte *self, void *cb_data, RteError *err), void *cb_data, RteError *err); -AGORA_RTE_API_C void RteGetConfigs(Rte *self, RteConfig *config, RteError *err); -AGORA_RTE_API_C bool RteSetConfigs(Rte *self, RteConfig *config, - void (*cb)(Rte *self, void *cb_data, - RteError *err), - void *cb_data, RteError *err); +AGORA_RTE_API_C bool RteGetConfigs(Rte *self, RteConfig *config, RteError *err); +AGORA_RTE_API_C bool RteSetConfigs(Rte *self, RteConfig *config, RteError *err); AGORA_RTE_API_C void RteRelayStream(RteChannel *src_channel, RteRemoteStream *src_stream, diff --git a/include/internal/c/channel.h b/include/rte_base/c/channel.h similarity index 100% rename from include/internal/c/channel.h rename to include/rte_base/c/channel.h diff --git a/include/internal/c/common.h b/include/rte_base/c/common.h similarity index 100% rename from include/internal/c/common.h rename to include/rte_base/c/common.h diff --git a/include/internal/c/device/audio.h b/include/rte_base/c/device/audio.h similarity index 87% rename from include/internal/c/device/audio.h rename to include/rte_base/c/device/audio.h index 80e18f4..a37b9c2 
100644 --- a/include/internal/c/device/audio.h +++ b/include/rte_base/c/device/audio.h @@ -6,9 +6,9 @@ */ #pragma once -#include "device/device.h" -#include "handle.h" -#include "../common.h" +#include "rte_base/c/device/device.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/device/audio_device_manager.h b/include/rte_base/c/device/audio_device_manager.h similarity index 93% rename from include/internal/c/device/audio_device_manager.h rename to include/rte_base/c/device/audio_device_manager.h index 7b1a7bd..4223520 100644 --- a/include/internal/c/device/audio_device_manager.h +++ b/include/rte_base/c/device/audio_device_manager.h @@ -8,10 +8,10 @@ #include -#include "../common.h" -#include "c_error.h" -#include "device/audio.h" -#include "handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/device/audio.h" +#include "rte_base/c/handle.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/device/device.h b/include/rte_base/c/device/device.h similarity index 83% rename from include/internal/c/device/device.h rename to include/rte_base/c/device/device.h index 5f31532..3b8931a 100644 --- a/include/internal/c/device/device.h +++ b/include/rte_base/c/device/device.h @@ -6,8 +6,8 @@ */ #pragma once -#include "../common.h" -#include "utils/string.h" +#include "rte_base/c/common.h" +#include "rte_base/c/utils/string.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/device/video.h b/include/rte_base/c/device/video.h similarity index 86% rename from include/internal/c/device/video.h rename to include/rte_base/c/device/video.h index 35f6bf8..9debb5f 100644 --- a/include/internal/c/device/video.h +++ b/include/rte_base/c/device/video.h @@ -6,9 +6,9 @@ */ #pragma once -#include "device/device.h" -#include "../common.h" -#include "handle.h" +#include "rte_base/c/device/device.h" +#include "rte_base/c/common.h" +#include 
"rte_base/c/handle.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/device/video_device_manager.h b/include/rte_base/c/device/video_device_manager.h similarity index 92% rename from include/internal/c/device/video_device_manager.h rename to include/rte_base/c/device/video_device_manager.h index 4fd6914..a780a63 100644 --- a/include/internal/c/device/video_device_manager.h +++ b/include/rte_base/c/device/video_device_manager.h @@ -8,10 +8,10 @@ #include -#include "c_error.h" -#include "device/video.h" -#include "handle.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/device/video.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/handle.h b/include/rte_base/c/handle.h similarity index 100% rename from include/internal/c/handle.h rename to include/rte_base/c/handle.h diff --git a/include/internal/c/info.h b/include/rte_base/c/info.h similarity index 100% rename from include/internal/c/info.h rename to include/rte_base/c/info.h diff --git a/include/internal/c/log.h b/include/rte_base/c/log.h similarity index 100% rename from include/internal/c/log.h rename to include/rte_base/c/log.h diff --git a/include/internal/c/metadata.h b/include/rte_base/c/metadata.h similarity index 100% rename from include/internal/c/metadata.h rename to include/rte_base/c/metadata.h diff --git a/include/internal/c/observer.h b/include/rte_base/c/observer.h similarity index 100% rename from include/internal/c/observer.h rename to include/rte_base/c/observer.h diff --git a/include/internal/c/old.h b/include/rte_base/c/old.h similarity index 100% rename from include/internal/c/old.h rename to include/rte_base/c/old.h diff --git a/include/internal/c/options.h b/include/rte_base/c/options.h similarity index 100% rename from include/internal/c/options.h rename to include/rte_base/c/options.h diff --git a/include/internal/c/stream/cdn_stream.h 
b/include/rte_base/c/stream/cdn_stream.h similarity index 84% rename from include/internal/c/stream/cdn_stream.h rename to include/rte_base/c/stream/cdn_stream.h index 06bade9..4138167 100644 --- a/include/internal/c/stream/cdn_stream.h +++ b/include/rte_base/c/stream/cdn_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/local_cdn_stream.h b/include/rte_base/c/stream/local_cdn_stream.h similarity index 92% rename from include/internal/c/stream/local_cdn_stream.h rename to include/rte_base/c/stream/local_cdn_stream.h index b1bc421..f98bfd9 100644 --- a/include/internal/c/stream/local_cdn_stream.h +++ b/include/rte_base/c/stream/local_cdn_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/cdn_stream.h" -#include "stream/local_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/cdn_stream.h" +#include "rte_base/c/stream/local_stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/local_realtime_stream.h b/include/rte_base/c/stream/local_realtime_stream.h similarity index 82% rename from include/internal/c/stream/local_realtime_stream.h rename to include/rte_base/c/stream/local_realtime_stream.h index daeb6f1..303a9c8 100644 --- a/include/internal/c/stream/local_realtime_stream.h +++ b/include/rte_base/c/stream/local_realtime_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/local_stream.h" -#include "stream/realtime_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/local_stream.h" +#include "rte_base/c/stream/realtime_stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/local_stream.h b/include/rte_base/c/stream/local_stream.h similarity index 84% rename from 
include/internal/c/stream/local_stream.h rename to include/rte_base/c/stream/local_stream.h index 5167158..8f476af 100644 --- a/include/internal/c/stream/local_stream.h +++ b/include/rte_base/c/stream/local_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/realtime_stream.h b/include/rte_base/c/stream/realtime_stream.h similarity index 85% rename from include/internal/c/stream/realtime_stream.h rename to include/rte_base/c/stream/realtime_stream.h index b7958b1..b3d1c82 100644 --- a/include/internal/c/stream/realtime_stream.h +++ b/include/rte_base/c/stream/realtime_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/remote_cdn_stream.h b/include/rte_base/c/stream/remote_cdn_stream.h similarity index 91% rename from include/internal/c/stream/remote_cdn_stream.h rename to include/rte_base/c/stream/remote_cdn_stream.h index 6b659e3..47182ea 100644 --- a/include/internal/c/stream/remote_cdn_stream.h +++ b/include/rte_base/c/stream/remote_cdn_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/cdn_stream.h" -#include "stream/remote_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/cdn_stream.h" +#include "rte_base/c/stream/remote_stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/remote_realtime_stream.h b/include/rte_base/c/stream/remote_realtime_stream.h similarity index 81% rename from include/internal/c/stream/remote_realtime_stream.h rename to include/rte_base/c/stream/remote_realtime_stream.h index 
bcd8826..060d9a8 100644 --- a/include/internal/c/stream/remote_realtime_stream.h +++ b/include/rte_base/c/stream/remote_realtime_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/realtime_stream.h" -#include "stream/remote_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/realtime_stream.h" +#include "rte_base/c/stream/remote_stream.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/remote_stream.h b/include/rte_base/c/stream/remote_stream.h similarity index 84% rename from include/internal/c/stream/remote_stream.h rename to include/rte_base/c/stream/remote_stream.h index 07ccd4f..f7827cb 100644 --- a/include/internal/c/stream/remote_stream.h +++ b/include/rte_base/c/stream/remote_stream.h @@ -8,10 +8,10 @@ #include -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" -#include "track/track.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" +#include "rte_base/c/track/track.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/stream/stream.h b/include/rte_base/c/stream/stream.h similarity index 93% rename from include/internal/c/stream/stream.h rename to include/rte_base/c/stream/stream.h index e9baf3e..ceac673 100644 --- a/include/internal/c/stream/stream.h +++ b/include/rte_base/c/stream/stream.h @@ -8,10 +8,10 @@ #include -#include "c_error.h" -#include "handle.h" -#include "observer.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/observer.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { @@ -65,9 +65,24 @@ typedef enum RteVideoDegradationPreference { kRteVideoDegradationPreferenceDisabled, } RteVideoDegradationPreference; +/** + * @brief Video mirror mode. + */ typedef enum RteVideoMirrorMode { + /** + * 0: Mirror mode is decided by the SDK. 
+ * - Local view mirror mode: If you use the front camera, local view mirror mode is enabled by + * default; if you use the rear camera, it is disabled by default. + * - Remote user view mirror mode: Disabled by default. + */ kRteVideoMirrorModeAuto, + /** + * 1: Enable mirror mode. + */ kRteVideoMirrorModeEnabled, + /** + * 2: Disable mirror mode. + */ kRteVideoMirrorModeDisabled, } RteVideoMirrorMode; diff --git a/include/internal/c/track/camera_video_track.h b/include/rte_base/c/track/camera_video_track.h similarity index 84% rename from include/internal/c/track/camera_video_track.h rename to include/rte_base/c/track/camera_video_track.h index 4e2c2c4..88e8b5b 100644 --- a/include/internal/c/track/camera_video_track.h +++ b/include/rte_base/c/track/camera_video_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/local_video_track.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_video_track.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/canvas.h b/include/rte_base/c/track/canvas.h similarity index 68% rename from include/internal/c/track/canvas.h rename to include/rte_base/c/track/canvas.h index 8b2f5c1..bac34b1 100644 --- a/include/internal/c/track/canvas.h +++ b/include/rte_base/c/track/canvas.h @@ -6,10 +6,10 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/view.h" -#include "stream/stream.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/view.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { @@ -18,9 +18,20 @@ extern "C" { typedef struct RteViewConfig RteViewConfig; +/** + * @brief Video render mode. + */ typedef enum RteVideoRenderMode { - kRteVideoRenderModeHidden, - kRteVideoRenderModeFit + /** + * 0: Video is scaled proportionally to fill the view. 
Extra parts of the video that exceed the view + * due to aspect ratio differences will be cropped. + */ + kRteVideoRenderModeHidden = 0, + /** + * 1: Video is scaled proportionally to fit within the view. Black bars are added to fill any unused + * space due to aspect ratio differences. + */ + kRteVideoRenderModeFit = 1 } RteVideoRenderMode; typedef struct RteCanvasInitialConfig { @@ -73,18 +84,14 @@ AGORA_RTE_API_C RteCanvas RteCanvasCreate(::Rte *rte, RteCanvasInitialConfig *co RteError *err); AGORA_RTE_API_C void RteCanvasDestroy(RteCanvas *self, RteError *err); -AGORA_RTE_API_C void RteCanvasGetConfigs(RteCanvas *self, +AGORA_RTE_API_C bool RteCanvasGetConfigs(RteCanvas *self, RteCanvasConfig *config, RteError *err); -AGORA_RTE_API_C void RteCanvasSetConfigs( - RteCanvas *self, RteCanvasConfig *config, - void (*cb)(RteCanvas *canvas, void *cb_data, RteError *err), void *cb_data); +AGORA_RTE_API_C bool RteCanvasSetConfigs(RteCanvas *self, RteCanvasConfig *config, RteError *err); -AGORA_RTE_API_C void RteCanvasAddView( - RteCanvas *self, RteView *view, RteViewConfig *config, - void (*cb)(RteCanvas *canvas, RteView *view, void *cb_data, RteError *err), - void *cb_data); +AGORA_RTE_API_C bool RteCanvasAddView( + RteCanvas *self, RteView *view, RteViewConfig *config, RteError *err); -AGORA_RTE_API_C void RteCanvasRemoveView(RteCanvas *self, RteView *view, RteViewConfig *config, RteError *err); +AGORA_RTE_API_C bool RteCanvasRemoveView(RteCanvas *self, RteView *view, RteViewConfig *config, RteError *err); #ifdef __cplusplus } diff --git a/include/internal/c/track/layout.h b/include/rte_base/c/track/layout.h similarity index 100% rename from include/internal/c/track/layout.h rename to include/rte_base/c/track/layout.h diff --git a/include/internal/c/track/local_audio_track.h b/include/rte_base/c/track/local_audio_track.h similarity index 93% rename from include/internal/c/track/local_audio_track.h rename to include/rte_base/c/track/local_audio_track.h index 
e24264a..8ca03e7 100644 --- a/include/internal/c/track/local_audio_track.h +++ b/include/rte_base/c/track/local_audio_track.h @@ -6,10 +6,10 @@ */ #pragma once -#include "../common.h" -#include "track/local_track.h" -#include "utils/frame.h" -#include "utils/string.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_track.h" +#include "rte_base/c/utils/frame.h" +#include "rte_base/c/utils/string.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/local_track.h b/include/rte_base/c/track/local_track.h similarity index 91% rename from include/internal/c/track/local_track.h rename to include/rte_base/c/track/local_track.h index 6a47509..f4513d5 100644 --- a/include/internal/c/track/local_track.h +++ b/include/rte_base/c/track/local_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "handle.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/handle.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/local_video_track.h b/include/rte_base/c/track/local_video_track.h similarity index 87% rename from include/internal/c/track/local_video_track.h rename to include/rte_base/c/track/local_video_track.h index eb0d0f9..9184000 100644 --- a/include/internal/c/track/local_video_track.h +++ b/include/rte_base/c/track/local_video_track.h @@ -6,7 +6,7 @@ */ #pragma once -#include "track/local_track.h" +#include "rte_base/c/track/local_track.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/mic_audio_track.h b/include/rte_base/c/track/mic_audio_track.h similarity index 95% rename from include/internal/c/track/mic_audio_track.h rename to include/rte_base/c/track/mic_audio_track.h index fb33983..5c77bd6 100644 --- a/include/internal/c/track/mic_audio_track.h +++ b/include/rte_base/c/track/mic_audio_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/local_audio_track.h" 
+#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_audio_track.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/mixed_video_track.h b/include/rte_base/c/track/mixed_video_track.h similarity index 85% rename from include/internal/c/track/mixed_video_track.h rename to include/rte_base/c/track/mixed_video_track.h index aecfd57..4dd7871 100644 --- a/include/internal/c/track/mixed_video_track.h +++ b/include/rte_base/c/track/mixed_video_track.h @@ -8,9 +8,9 @@ */ #include -#include "handle.h" -#include "../common.h" -#include "track/local_video_track.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_video_track.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/remote_audio_track.h b/include/rte_base/c/track/remote_audio_track.h similarity index 93% rename from include/internal/c/track/remote_audio_track.h rename to include/rte_base/c/track/remote_audio_track.h index 13f011f..421d3f1 100644 --- a/include/internal/c/track/remote_audio_track.h +++ b/include/rte_base/c/track/remote_audio_track.h @@ -8,9 +8,9 @@ #include -#include "../common.h" -#include "track/remote_track.h" -#include "utils/frame.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/remote_track.h" +#include "rte_base/c/utils/frame.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/remote_track.h b/include/rte_base/c/track/remote_track.h similarity index 87% rename from include/internal/c/track/remote_track.h rename to include/rte_base/c/track/remote_track.h index 41847e1..54844af 100644 --- a/include/internal/c/track/remote_track.h +++ b/include/rte_base/c/track/remote_track.h @@ -6,9 +6,9 @@ * Copyright (c) 2024 Agora IO. All rights reserved. 
* */ -#include "handle.h" -#include "../common.h" -#include "c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/c_error.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/remote_video_track.h b/include/rte_base/c/track/remote_video_track.h similarity index 83% rename from include/internal/c/track/remote_video_track.h rename to include/rte_base/c/track/remote_video_track.h index 0594736..ef15bc8 100644 --- a/include/internal/c/track/remote_video_track.h +++ b/include/rte_base/c/track/remote_video_track.h @@ -6,7 +6,7 @@ */ #pragma once -#include "track/remote_track.h" +#include "rte_base/c/track/remote_track.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/screen_video_track.h b/include/rte_base/c/track/screen_video_track.h similarity index 84% rename from include/internal/c/track/screen_video_track.h rename to include/rte_base/c/track/screen_video_track.h index 6c8d850..36075d6 100644 --- a/include/internal/c/track/screen_video_track.h +++ b/include/rte_base/c/track/screen_video_track.h @@ -6,11 +6,11 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/local_video_track.h" -#include "utils/rect.h" -#include "utils/string.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_video_track.h" +#include "rte_base/c/utils/rect.h" +#include "rte_base/c/utils/string.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/track.h b/include/rte_base/c/track/track.h similarity index 87% rename from include/internal/c/track/track.h rename to include/rte_base/c/track/track.h index 7da6c37..d7f7e90 100644 --- a/include/internal/c/track/track.h +++ b/include/rte_base/c/track/track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/c_error.h" #ifdef __cplusplus 
extern "C" { diff --git a/include/internal/c/track/video_track.h b/include/rte_base/c/track/video_track.h similarity index 87% rename from include/internal/c/track/video_track.h rename to include/rte_base/c/track/video_track.h index 98916e4..06c08c5 100644 --- a/include/internal/c/track/video_track.h +++ b/include/rte_base/c/track/video_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/track.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/track.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/track/view.h b/include/rte_base/c/track/view.h similarity index 90% rename from include/internal/c/track/view.h rename to include/rte_base/c/track/view.h index fe46f97..916062d 100644 --- a/include/internal/c/track/view.h +++ b/include/rte_base/c/track/view.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "utils/rect.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/utils/rect.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/user/local_user.h b/include/rte_base/c/user/local_user.h similarity index 94% rename from include/internal/c/user/local_user.h rename to include/rte_base/c/user/local_user.h index c256171..fe02d17 100644 --- a/include/internal/c/user/local_user.h +++ b/include/rte_base/c/user/local_user.h @@ -6,10 +6,10 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "user/user.h" -#include "utils/buf.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/user/user.h" +#include "rte_base/c/utils/buf.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/user/remote_user.h b/include/rte_base/c/user/remote_user.h similarity index 93% rename from include/internal/c/user/remote_user.h rename to include/rte_base/c/user/remote_user.h index 3c487bc..f02ea2b 100644 --- 
a/include/internal/c/user/remote_user.h +++ b/include/rte_base/c/user/remote_user.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "user/user.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/user/user.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/user/user.h b/include/rte_base/c/user/user.h similarity index 91% rename from include/internal/c/user/user.h rename to include/rte_base/c/user/user.h index 455f141..854a5e8 100644 --- a/include/internal/c/user/user.h +++ b/include/rte_base/c/user/user.h @@ -6,12 +6,12 @@ */ #pragma once -#include "c_error.h" -#include "handle.h" -#include "info.h" -#include "metadata.h" -#include "observer.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/info.h" +#include "rte_base/c/metadata.h" +#include "rte_base/c/observer.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/utils/buf.h b/include/rte_base/c/utils/buf.h similarity index 94% rename from include/internal/c/utils/buf.h rename to include/rte_base/c/utils/buf.h index dc85c5e..aa87806 100644 --- a/include/internal/c/utils/buf.h +++ b/include/rte_base/c/utils/buf.h @@ -9,8 +9,8 @@ #include #include -#include "c_error.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/include/internal/c/utils/frame.h b/include/rte_base/c/utils/frame.h similarity index 100% rename from include/internal/c/utils/frame.h rename to include/rte_base/c/utils/frame.h diff --git a/include/internal/c/utils/rect.h b/include/rte_base/c/utils/rect.h similarity index 100% rename from include/internal/c/utils/rect.h rename to include/rte_base/c/utils/rect.h diff --git a/include/internal/c/utils/string.h b/include/rte_base/c/utils/string.h similarity index 95% rename from include/internal/c/utils/string.h rename to 
include/rte_base/c/utils/string.h index 8382650..5ab5639 100644 --- a/include/internal/c/utils/string.h +++ b/include/rte_base/c/utils/string.h @@ -10,7 +10,7 @@ #include #include -#include "../common.h" +#include "rte_base/c/common.h" #define RTE_STRING_PRE_BUF_SIZE 256 diff --git a/include/internal/c/utils/uuid.h b/include/rte_base/c/utils/uuid.h similarity index 100% rename from include/internal/c/utils/uuid.h rename to include/rte_base/c/utils/uuid.h diff --git a/include/rte_cpp_callback_utils.h b/include/rte_base/rte_cpp_callback_utils.h similarity index 56% rename from include/rte_cpp_callback_utils.h rename to include/rte_base/rte_cpp_callback_utils.h index 48d9d54..077d452 100644 --- a/include/rte_cpp_callback_utils.h +++ b/include/rte_base/rte_cpp_callback_utils.h @@ -1,17 +1,51 @@ #pragma once #include #include "rte_cpp_error.h" -#include "internal/c/handle.h" +#include "rte_base/c/handle.h" +/** + * @technical preview + */ namespace rte { template class SingleUseCallback { - public: + public: using CallbackType = std::function; - SingleUseCallback(){}; + SingleUseCallback(){ + cb_ = nullptr; + cb_data_ = nullptr; + self_ = nullptr; + }; + + SingleUseCallback(SingleUseCallback& other){ + cb_ = other.cb_; + cb_data_ = other.cb_data_; + self_ = other.self_; + + other.Clear(); + } + + + SingleUseCallback(SingleUseCallback&& other){ + cb_ = other.cb_; + cb_data_ = other.cb_data_; + self_ = other.self_; + + other.Clear(); + } + + SingleUseCallback &operator=(SingleUseCallback&& other){ + cb_ = other.cb_; + cb_data_ = other.cb_data_; + self_ = other.self_; + + other.Clear(); + + return *this; + } void Store(T* self, CallbackType cb, void* cb_data){ self_ = self; @@ -33,6 +67,12 @@ class SingleUseCallback { return cb_ == nullptr; } + void Clear(){ + self_ = nullptr; + cb_ = nullptr; + cb_data_ = nullptr; + } + CallbackType cb_; void* cb_data_; T* self_; @@ -42,17 +82,30 @@ template class CallbackContext { public: + using CallbackTypeOnlyError = std::function; + 
using CallbackTypeOnlyErrorWithCppError = std::function; + using CallbackType = std::function; using CallbackTypeWithCppError = std::function; + CallbackContext(T* self, CallbackTypeOnlyError cb) + :self_(self), cb_only_error_(cb) {} + + CallbackContext(T* self, CallbackTypeOnlyErrorWithCppError cb) + :self_(self), cb_only_error_with_cpp_error_(cb) {} + CallbackContext(T* self, CallbackType cb, void* cb_data) :self_(self), cb_(cb), cb_data_(cb_data) {} CallbackContext(T* self, CallbackTypeWithCppError cb, void* cb_data) :self_(self), cb_with_cpp_error_(cb), cb_data_(cb_data) {} + CallbackTypeOnlyError cb_only_error_; + CallbackTypeOnlyErrorWithCppError cb_only_error_with_cpp_error_; + CallbackType cb_; CallbackTypeWithCppError cb_with_cpp_error_; + void* cb_data_; T* self_; }; @@ -61,6 +114,15 @@ template void CallbackFunc(FromeType* self, void* cb_data, RteError* err){ auto *ctx = static_cast*>(cb_data); + if(ctx->cb_only_error_ != nullptr){ + ctx->cb_only_error_(err); + } + + if(ctx->cb_only_error_with_cpp_error_ != nullptr){ + rte::Error cpp_err(err); + ctx->cb_only_error_with_cpp_error_(&cpp_err); + } + if(ctx->cb_with_cpp_error_ != nullptr){ rte::Error cpp_err(err); ctx->cb_with_cpp_error_( self != nullptr ? 
ctx->self_ : nullptr, ctx->cb_data_, &cpp_err); @@ -77,15 +139,29 @@ template class CallbackContextWithArgs { public: + + using CallbackTypeOnlyError = std::function; + using CallbackTypeOnlyErrorWithCppError = std::function; + using CallbackType = std::function; using CallbackTypeWithCppError = std::function; + CallbackContextWithArgs(T* self, CallbackTypeOnlyError cb) + :self_(self), cb_only_error_(cb) {} + + CallbackContextWithArgs(T* self, CallbackTypeOnlyErrorWithCppError cb) + :self_(self), cb_only_error_with_cpp_error_(cb) {} + CallbackContextWithArgs(T* self, CallbackType cb, void* cb_data) :self_(self), cb_(cb), cb_data_(cb_data) {} CallbackContextWithArgs(T* self, CallbackTypeWithCppError cb, void* cb_data) :self_(self), cb_with_cpp_error_(cb), cb_data_(cb_data) {} + + CallbackTypeOnlyError cb_only_error_; + CallbackTypeOnlyErrorWithCppError cb_only_error_with_cpp_error_; + CallbackType cb_; CallbackTypeWithCppError cb_with_cpp_error_; void* cb_data_; @@ -96,13 +172,22 @@ template void CallbackFuncWithArgs(FromeType* self, Args... args, void* cb_data, RteError* err){ auto *ctx = static_cast*>(cb_data); + if(ctx->cb_only_error_ != nullptr){ + ctx->cb_only_error_(args..., err); + } + + if(ctx->cb_only_error_with_cpp_error_ != nullptr){ + rte::Error cpp_err(err); + ctx->cb_only_error_with_cpp_error_(args..., &cpp_err); + } + if(ctx->cb_with_cpp_error_ != nullptr){ Error cpp_err(err); - ctx->cb_with_cpp_error_(ctx->self_, args..., ctx->cb_data_, &cpp_err); + ctx->cb_with_cpp_error_( self != nullptr ? ctx->self_ : nullptr, args..., ctx->cb_data_, &cpp_err); } if(ctx->cb_ != nullptr){ - ctx->cb_(ctx->self_, args..., ctx->cb_data_, err); + ctx->cb_(self != nullptr ? 
ctx->self_ : nullptr, args..., ctx->cb_data_, err); } delete ctx; } diff --git a/include/rte_base/rte_cpp_canvas.h b/include/rte_base/rte_cpp_canvas.h new file mode 100644 index 0000000..ac4cf80 --- /dev/null +++ b/include/rte_base/rte_cpp_canvas.h @@ -0,0 +1,278 @@ +#pragma once + +#include "rte_base/c/c_player.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/track/canvas.h" + +#include "rte_cpp_error.h" +#include "rte_cpp_rte.h" +#include "rte_cpp_callback_utils.h" + + +namespace rte { + +using VideoRenderMode = ::RteVideoRenderMode; +using VideoMirrorMode = ::RteVideoMirrorMode; +using ViewConfig = ::RteViewConfig; +using View = ::RteView; +using Rect = ::RteRect; + +/** + * The CanvasInitialConfig class is used to initialize the Canvas object. + * @since v4.4.0 + * @technical preview + */ +class CanvasInitialConfig { + public: + CanvasInitialConfig() {RteCanvasInitialConfigInit(&c_canvas_initial_config, nullptr);} + ~CanvasInitialConfig() {RteCanvasInitialConfigDeinit(&c_canvas_initial_config, nullptr);} + + private: + friend class Canvas; + ::RteCanvasInitialConfig c_canvas_initial_config; +}; + +/** + * @brief This class provides methods for configuring video rendering for the player. + * + * @since v4.4.0 + */ +class CanvasConfig { + public: + CanvasConfig() {RteCanvasConfigInit(&c_canvas_config, nullptr);} + ~CanvasConfig() {RteCanvasConfigDeinit(&c_canvas_config, nullptr);} + + /** + * @brief Sets the video render mode. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called before `SetConfigs(CanvasConfig *config, Error *err)`. + * + * @param mode Render mode. See `RteVideoRenderMode`. The default render mode is + * `kRteVideoRenderModeHidden`, + * which scales the video proportionally and prioritizes filling the view window. + * @param err Status or error information. See `Error`. 
+ * + */ + void SetRenderMode(VideoRenderMode mode, Error *err = nullptr) { + RteCanvasConfigSetVideoRenderMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the currently set video render mode. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called after `GetConfigs(CanvasConfig *config, Error *err)`. + * + * @param err Status or error information. See `Error`. + * + * @return + * The currently set video render mode. See `RteVideoRenderMode`. + */ + VideoRenderMode GetRenderMode(Error *err = nullptr) { + VideoRenderMode mode; + RteCanvasConfigGetVideoRenderMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); + return mode; + } + + /** + * @brief Sets the mirror mode for the video. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called before `SetConfigs(CanvasConfig *config, Error *err)`. + * + * @param mode Mirror mode. See `RteVideoMirrorMode`. The default is `kRteVideoMirrorModeAuto`, + * where the SDK + * determines the mirror mode. By default, the mirror mode for remote users is disabled. + * @param err Status or error information. See `Error`. + * + */ + void SetMirrorMode(VideoMirrorMode mode, Error *err = nullptr) { + RteCanvasConfigSetVideoMirrorMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the currently set mirror mode. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called after `GetConfigs(CanvasConfig *config, Error *err)`. + * + * @param err Status or error information. See `Error`. + * + * @return + * The currently set mirror mode. See `RteVideoMirrorMode`. + */ + VideoMirrorMode GetMirrorMode(Error *err = nullptr) { + VideoMirrorMode mode; + RteCanvasConfigGetVideoMirrorMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); + return mode; + } + + /** + * Set the Crop Area. 
+ * @since v4.4.0 + * @param crop_area + * @param err + * @return void + * @technical preview + */ + void SetCropArea(RteRect &crop_area, Error *err = nullptr) { + RteCanvasConfigSetCropArea(&c_canvas_config, crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Crop Area. + * @since v4.4.0 + * @param err + * @return RteRect + * @technical preview + */ + RteRect GetCropArea(Error *err = nullptr) { + RteRect crop_area; + RteCanvasConfigGetCropArea(&c_canvas_config, &crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); + return crop_area; + } + + private: + friend class Canvas; + ::RteCanvasConfig c_canvas_config; +}; + +/** + * The Canvas class is used to render the video stream. + * @since v4.4.0 + */ +class Canvas { + public: + /** + * @brief Constructs a `Canvas` object. + * + * @details + * Call timing: Call this method after `InitMediaEngine`. + * + * @param Rte An `Rte` object. + * @param initial_config The configuration object for the `Canvas`. Currently, you can pass in null. + * + */ + Canvas(Rte *rte, CanvasInitialConfig *initial_config = nullptr) { + c_canvas = ::RteCanvasCreate(&rte->c_rte, initial_config != nullptr ? &initial_config->c_canvas_initial_config : nullptr, nullptr); + }; + ~Canvas() { RteCanvasDestroy(&c_canvas, nullptr); }; + + Canvas(Canvas&& other) : c_canvas(other.c_canvas) { + other.c_canvas = {}; + } + + //@{ + Canvas(const Canvas& other) = delete; + Canvas& operator=(const Canvas& other) = delete; + Canvas& operator=(Canvas&& other) = delete; + //@} + + + /** + * @brief Gets the current video rendering configuration of the player. + * + * @since v4.4.0 + * + * @details + * Call timing: Call this method after `Canvas`. + * + * @param config The settings of the `Canvas` object. See `CanvasConfig`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the configuration is retrieved successfully: + * - `true`: Retrieved successfully. 
+ * - `false`: Failed to retrieve. + */ + bool GetConfigs(CanvasConfig *config, Error *err = nullptr) { + return RteCanvasGetConfigs(&c_canvas, &config->c_canvas_config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Sets the player video rendering configuration. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called before `OpenWithUrl`. + * + * @param config Settings for the `Canvas` object. See `CanvasConfig`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the configuration is successful: + * - `true`: Configuration succeeded. + * - `false`: Configuration failed. + */ + bool SetConfigs(CanvasConfig *config, Error *err = nullptr) { + return RteCanvasSetConfigs(&c_canvas, &config->c_canvas_config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Adds a rendering view. + * + * @since v4.4.0 + * + * @details + * Applicable scenarios: When you need to use the player to play videos, you can call this method to + * add a view to the video renderer so that the video content can be displayed. + * Call timing: This method must be called before `SetCanvas`. + * + * @note Currently, only one view is supported. + * + * @param view The HWND window handle. + * @param config The settings of the `View` object. Currently, pass in null. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the view is added successfully: + * - `true`: The view is added successfully. + * - `false`: Failed to add the view. + */ + bool AddView(View *view, ViewConfig *config, rte::Error *err = nullptr) { + return RteCanvasAddView(&c_canvas, view, config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Removes a rendering view. + * + * @since v4.4.0 + * + * @details + * After you call `AddView` to add a view, you can call this method to remove the rendering view if + * needed. 
+ * Call timing: This method must be called after `AddView`. + * + * @note Only one view can be removed at a time. + * + * @param view The view object to be removed. + * @param config Settings for the `View` object. Currently, pass null. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the view is successfully removed: + * - `true`: View removed successfully. + * - `false`: Failed to remove view. + */ + bool RemoveView(View *view, ViewConfig *config, rte::Error *err = nullptr) { + return RteCanvasRemoveView(&c_canvas, view, config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + private: + + friend class Player; + + ::RteCanvas c_canvas; +}; + +} // namespace rte \ No newline at end of file diff --git a/include/rte_cpp_error.h b/include/rte_base/rte_cpp_error.h similarity index 55% rename from include/rte_cpp_error.h rename to include/rte_base/rte_cpp_error.h index 4016f9c..d1a7b6a 100644 --- a/include/rte_cpp_error.h +++ b/include/rte_base/rte_cpp_error.h @@ -9,8 +9,8 @@ #include #include -#include "internal/c/c_error.h" -#include "internal/c/utils/string.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/utils/string.h" namespace rte { @@ -22,11 +22,16 @@ class Config; class PlayerConfig; class CanvasConfig; +using ErrorCode = ::RteErrorCode; + +/** + * @brief This class provides methods to retrieve error codes and error messages. + * + * @since v4.4.0 + */ class Error { public: - using ErrorCode = ::RteErrorCode; - Error() : c_error(RteErrorCreate()) {} explicit Error(::RteError *error) : c_error(error), c_error_owned(false) {} @@ -44,11 +49,36 @@ class Error { // @} void Set(ErrorCode code, const char *message) { - RteErrorSet(c_error, code, "%s", message); + if(c_error != nullptr){ + RteErrorSet(c_error, code, "%s", message ? message : ""); + } } + /** + * @brief Gets the error code returned by the API call. 
+ * + * @since v4.4.0 + * + * @details + * Call timing: When an API call fails, you can call this method to get the error code. + * + * @return + * The error code. See `RteErrorCode`. + */ ErrorCode Code() const { return c_error != nullptr ? c_error->code : kRteErrorDefault; } + /** + * @brief Gets detailed error information. + * + * @since v4.4.0 + * + * @details + * Call timing: When an API call fails, you can call this method to get error information to help + * troubleshoot the issue. + * + * @return + * Error information. + */ const char *Message() const { if(c_error != nullptr && c_error->message != nullptr){ return RteStringCStr(c_error->message, nullptr); diff --git a/include/rte_base/rte_cpp_player.h b/include/rte_base/rte_cpp_player.h new file mode 100644 index 0000000..bc61928 --- /dev/null +++ b/include/rte_base/rte_cpp_player.h @@ -0,0 +1,1583 @@ +/** + * + * Agora Real Time Engagement + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +#pragma once +#include +#include + +#include "rte_base/c/c_rte.h" +#include "rte_base/c/c_player.h" + +#include "rte_cpp_error.h" +#include "rte_cpp_callback_utils.h" +#include "rte_cpp_canvas.h" +#include "rte_cpp_string.h" +#include "rte_cpp_stream.h" + +namespace rte { + +using PlayerState = ::RtePlayerState; +using PlayerEvent = ::RtePlayerEvent; +using PlayerMetadataType = ::RtePlayerMetadataType; +using PlayerStats = ::RtePlayerStats; +using PlayerCustomSourceProvider = ::RtePlayerCustomSourceProvider; +using AbrSubscriptionLayer = ::RteAbrSubscriptionLayer; +using AbrFallbackLayer = ::RteAbrFallbackLayer; + +class PlayerInitialConfig {}; + +/** + * @brief Information about the player and media stream. 
+ * + * @since v4.5.1 + */ +class PlayerInfo { + public: + PlayerInfo() { RtePlayerInfoInit(&c_player_info, nullptr); } + ~PlayerInfo() { RtePlayerInfoDeinit(&c_player_info, nullptr); } + + PlayerInfo(const RtePlayerInfo* other) { + RtePlayerInfoInit(&c_player_info, nullptr); + RtePlayerInfoCopy(&c_player_info, other, nullptr); + } + + PlayerInfo(const PlayerInfo& other) { + RtePlayerInfoInit(&c_player_info, nullptr); + RtePlayerInfoCopy(&c_player_info, &other.c_player_info, nullptr); + } + + PlayerInfo& operator=(const PlayerInfo& other) { + RtePlayerInfoCopy(&c_player_info, &other.c_player_info, nullptr); + return *this; + } + + PlayerInfo& operator=(const RtePlayerInfo* other) { + RtePlayerInfoCopy(&c_player_info, other, nullptr); + return *this; + } + + /** + * @brief Gets the current player state. + * + * @since v4.5.1 + * + * @return + * - If the method call succeeds, returns the current player state. See `RtePlayerState`. + */ + RtePlayerState State() const { + return c_player_info.state; + } + + /** + * @brief Gets the duration of the current media resource. + * + * @since v4.5.1 + * + * @note + * Valid only when playing local media files or on-demand streams. + * + * @return + * If the method call succeeds, returns the duration of the current media resource in milliseconds. + */ + size_t Duration() const { + return c_player_info.duration; + } + + /** + * @brief Gets the number of streams in the current playback source. + * + * @since v4.5.1 + * + * @note + * This method is valid when opening non-RTE URLs. + * + * @return + * If the method call succeeds, returns the number of streams in the current playback source. + */ + size_t StreamCount() const { + return c_player_info.stream_count; + } + + /** + * @brief Determines whether the media resource contains an audio stream. + * + * @since v4.5.1 + * + * @details + * Indicates whether the URL source contains an audio stream. + * + * @return + * - `true`: The media resource contains an audio stream. 
+ * - `false`: The media resource does not contain an audio stream. + */ + bool HasAudio() const { + return c_player_info.has_audio; + } + + /** + * @brief Determines whether a video stream is included. + * + * @since v4.5.1 + * + * @return + * - `true`: The URL source contains a video stream. + * - `false`: The URL source does not contain a video stream. + */ + bool HasVideo() const { + return c_player_info.has_video; + } + + /** + * @brief Determines whether the player has stopped receiving the audio stream. + * + * @since v4.5.1 + * + * @details + * This method is used to determine whether the player has stopped receiving the audio stream. + * + * @return + * - `true`: The player has stopped receiving the audio stream. + * - `false`: The player is still receiving the audio stream. + */ + bool IsAudioMuted() const { + return c_player_info.is_audio_muted; + } + + /** + * @brief Determines whether the player has stopped receiving the video stream. + * + * @since v4.5.1 + * + * @note + * The `IsVideoMuted` API is only valid when opening an RTE URL. + * + * @return + * - `true`: The player has stopped receiving the video stream. + * - `false`: The player is still receiving the video stream. + */ + bool IsVideoMuted() const { + return c_player_info.is_video_muted; + } + + /** + * @brief Gets the height of the video resolution. + * + * @since v4.5.1 + * + * @return + * If the method call succeeds, returns the height of the video resolution in pixels. + */ + int VideoHeight() const { + return c_player_info.video_height; + } + + /** + * @brief Gets the width of the video resolution. + * + * @since v4.5.1 + * + * @return + * If the method call succeeds, returns the width of the video resolution in pixels. + */ + int VideoWidth() const { + return c_player_info.video_width; + } + + /** + * @brief Gets the currently subscribed video layer. + * + * @since v4.5.1 + * + * @note + * This field is only valid when you open an RTE URL. 
+ * + * @return + * - If the method call succeeds, returns the currently subscribed video layer. See + * `RteAbrSubscriptionLayer`. + */ + AbrSubscriptionLayer AbrSubscriptionLayer() const { + return c_player_info.abr_subscription_layer; + } + + /** + * @brief Gets the audio sample rate. + * + * @since v4.5.1 + * + * @return + * If the method call succeeds, returns the audio sample rate in Hz. + */ + int AudioSampleRate() const { + return c_player_info.audio_sample_rate; + } + + /** + * @brief Gets the number of audio channels. + * + * @since v4.5.1 + * + * @return + * If the method call succeeds, returns the number of audio channels. + */ + int AudioChannels() const { + return c_player_info.audio_channels; + } + + /** + * @brief Gets the number of bits per audio sample. + * + * @since v4.5.1 + * + * @note + * This field is only valid when opening a non-RTE URL. + * + * @return + * If the method call succeeds, returns the number of bits per audio sample, in bits. + */ + int AudioBitsPerSample() const { + return c_player_info.audio_bits_per_sample; + } + + /** + * @brief Gets the currently playing URL. + * + * @since v4.5.1 + * + * @return + * - If the method call succeeds, returns the currently playing URL. + * - If the method call fails, returns an empty string. + */ + std::string CurrentUrl() const { + String str(c_player_info.current_url); + return std::string(str.CStr()); + } + + /** + * @brief Sets the current URL. + * @technical preview + * @param url The current URL. 
+ * @return void + */ + void SetCurrentUrl(const std::string& url) { + if(c_player_info.current_url != nullptr){ + RteStringDestroy(c_player_info.current_url, nullptr); + c_player_info.current_url = nullptr; + } + + c_player_info.current_url = RteStringCreate(nullptr); + RteStringInitWithCStr(c_player_info.current_url, url.c_str(), nullptr); + } + + ::RtePlayerInfo *get_underlying_impl() { return &c_player_info; } + + private: + ::RtePlayerInfo c_player_info; +}; + +static void onStateChanged(::RtePlayerObserver *observer, + RtePlayerState old_state, RtePlayerState new_state, + RteError *err); + +static void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, + uint64_t utc_time); + +static void onResolutionChanged(::RtePlayerObserver *observer, int width, int height); + +static void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event); + +static void onMetadata(::RtePlayerObserver *observer, ::RtePlayerMetadataType type, + const uint8_t *data, size_t length); + +static void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info); + +static void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume); + + +/** + * @brief The PlayerObserver class is used to observe the event of Player object. 
+ * @since v4.4.0 + */ +class PlayerObserver { + public: + PlayerObserver() : c_player_observer(::RtePlayerObserverCreate(nullptr)) { + + c_player_observer->base_observer.me_in_target_lang = this; + + c_player_observer->on_state_changed = rte::onStateChanged; + c_player_observer->on_position_changed = rte::onPositionChanged; + c_player_observer->on_resolution_changed = rte::onResolutionChanged; + c_player_observer->on_event = rte::onEvent; + c_player_observer->on_metadata = rte::onMetadata; + c_player_observer->on_player_info_updated = rte::onPlayerInfoUpdated; + c_player_observer->on_audio_volume_indication = rte::onAudioVolumeIndication; + } + virtual ~PlayerObserver(){ RtePlayerObserverDestroy(c_player_observer, nullptr); } + + // @{ + PlayerObserver(PlayerObserver &other) = delete; + PlayerObserver(PlayerObserver &&other) = delete; + PlayerObserver &operator=(const PlayerObserver &cmd) = delete; + PlayerObserver &operator=(PlayerObserver &&cmd) = delete; + // @} + + /** + * @brief Occurs when the player state changes. + * + * @since v4.4.0 + * + * @details + * If you need to monitor changes in the player state, you must first call `RegisterObserver` to + * register the player observer object. + * Call timing: When the player state changes, the SDK triggers this callback to report the current + * and previous states. + * + * @param old_state The previous state of the player. See `RtePlayerState`. + * @param new_state The current state of the player. See `RtePlayerState`. If the current state is + * `kRtePlayerStateFailed`, you can get detailed error information from the `err` parameter. + * @param err Status or error information. See `Error`. + * + */ + virtual void onStateChanged(PlayerState old_state, PlayerState new_state, + rte::Error *err) {}; + + /** + * @brief Reports the current playback progress of the media resource. + * + * @since v4.4.0 + * + * @details + * Call timing: This callback is triggered once per second during media playback. 
+ * + * @param curr_time Current playback progress in milliseconds. + * @param utc_time Current NTP (Network Time Protocol) time in milliseconds. + * + */ + virtual void onPositionChanged(uint64_t curr_time, + uint64_t utc_time) {}; + + /** + * @brief Occurs when the video resolution changes. + * + * @since v4.4.0 + * + * @details + * If you need to monitor changes in the resolution of the video stream played by the player, you + * must first call `RegisterObserver` to register the player observer object. + * Call timing: When the resolution of the video stream changes, the SDK triggers this callback to + * report the current width and height of the video. + * + * @param width Width of the video frame (px). + * @param height Height of the video frame (px). + * + */ + virtual void onResolutionChanged(int width, int height) {}; + + /** + * @brief Callback for player events. + * + * @since v4.4.0 + * + * @details + * If you need to monitor player events, you must first call `RegisterObserver` to register the + * player observer object. + * Call timing: The SDK triggers this callback when a player event occurs. + * + * @param event Player event. See `RtePlayerEvent`. + * + */ + virtual void onEvent(PlayerEvent event) {}; + + /** + * @brief Callback for received media metadata. + * + * @since v4.4.0 + * + * @details + * If you need to obtain metadata from the media stream, you must first call `RegisterObserver` to + * register the player observer object. + * Call timing: After the SDK parses the metadata from the media stream being played, it triggers + * this callback to report the metadata type and its content. + * + * @param type Metadata type. See `RtePlayerMetadataType`. + * @param data Parsed metadata. + * @param length Size of the data in bytes. + * + */ + virtual void onMetadata(PlayerMetadataType type, + const uint8_t *data, size_t length) {}; + + /** + * @brief Occurs when player and media stream information changes. 
+ * + * @since v4.4.0 + * + * @details + * If you need to get information about the player and media stream, you must first call + * `RegisterObserver` to register the player observer object. You can also directly retrieve it + * using the `GetInfo` method. + * Call timing: The SDK triggers this callback when information about the player or media stream + * changes. + * + * @param info Information about the player and media stream. See `PlayerInfo`. + * + */ + virtual void onPlayerInfoUpdated(const PlayerInfo *info) {}; + + /** + * @brief Callback for player audio volume indication. + * + * @since v4.4.0 + * + * @details + * Call timing: The SDK triggers this callback when the player's volume changes. + * + * @param volume The current volume of the player, ranging from [0, 225]. + * + */ + virtual void onAudioVolumeIndication(int32_t volume) {}; + + private: + friend class Player; + + ::RtePlayerObserver *c_player_observer; +}; + +void onStateChanged(::RtePlayerObserver *observer, + RtePlayerState old_state, RtePlayerState new_state, + RteError *err){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + Error cpp_err(err); + player_observer->onStateChanged(old_state, new_state, &cpp_err); + } +} +void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, + uint64_t utc_time){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onPositionChanged(curr_time, utc_time); + } +} + +void onResolutionChanged(::RtePlayerObserver *observer, int width, int height){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onResolutionChanged(width, height); + } +} + +void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != 
nullptr){ + player_observer->onEvent(event); + } +} + +void onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, + const uint8_t *data, size_t length){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onMetadata(type, data, length); + } +} + +void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + PlayerInfo cpp_info(info); + player_observer->onPlayerInfoUpdated(&cpp_info); + } +} + +void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onAudioVolumeIndication(volume); + } +} + +/** + * @brief This class provides methods for configuring the player. + * + * @since v4.4.0 + */ +class PlayerConfig { + public: + PlayerConfig() { RtePlayerConfigInit(&c_player_config, nullptr); } + ~PlayerConfig() { RtePlayerConfigDeinit(&c_player_config, nullptr); } + + // @{ + PlayerConfig(PlayerConfig &other) = delete; + PlayerConfig(PlayerConfig &&other) = delete; + PlayerConfig &operator=(PlayerConfig &&cmd) = delete; + + PlayerConfig &operator=(const PlayerConfig &other) { + RtePlayerConfigCopy(&c_player_config, &other.c_player_config, nullptr); + return *this; + }; + + PlayerConfig &operator=(const RtePlayerConfig* other) { + RtePlayerConfigCopy(&c_player_config, other, nullptr); + return *this; + }; + // @} + + /** + * @brief Sets whether to autoplay. + * + * @since v4.4.0 + * + * @details + * Before you call `OpenWithUrl` to open a media stream, you can call this method to set whether to + * autoplay. If not set, autoplay is enabled by default. + * Call timing: This method must be called before `OpenWithUrl`. 
+ * + * @param auto_play Whether to enable autoplay: + * - `true`: (Default) Enable autoplay. + * - `false`: Disable autoplay. + * @param err Status or error information. See `Error`. + * + */ + void SetAutoPlay(bool auto_play, Error *err = nullptr) { + RtePlayerConfigSetAutoPlay(&c_player_config, auto_play, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the auto-play setting. + * + * @since v4.4.0 + * + * @details + * You can call this method to check the current auto-play setting of the player. + * Call timing: Call this method after `GetConfigs(PlayerConfig* config, Error* err)`. + * + * @param err Status or error information. See `Error`. + * + * @return + * Whether the auto-play setting is retrieved successfully: + * - `true`: Retrieved successfully. + * - `false`: Failed to retrieve. + */ + bool GetAutoPlay(Error *err = nullptr) { + bool auto_play; + RtePlayerConfigGetAutoPlay(&c_player_config, &auto_play, + err != nullptr ? err->get_underlying_impl() : nullptr); + return auto_play; + } + + /** + * @brief Sets the playback speed parameter. + * + * @since v4.5.1 + * + * @note + * You can call this method after calling the `OpenWithUrl` method. + * + * @param speed Playback speed. The valid range is [50, 400]. + * @param err Status or error information. See `Error`. + * - kRteOk: Call succeeded. + * - kRteErrorInvalidArgument: The `speed` parameter is set to an invalid value. + * + */ + void SetPlaybackSpeed(int32_t speed, Error *err = nullptr) { + RtePlayerConfigSetPlaybackSpeed(&c_player_config, speed, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the playback speed parameter. + * + * @since v4.5.1 + * + * @param err Status or error information. See `Error`. + * - kRteOk: Indicates success. + * + * @return + * If the method call succeeds, returns the playback speed value. 
+ */ + int32_t GetPlaybackSpeed(Error *err = nullptr) { + int32_t speed; + RtePlayerConfigGetPlaybackSpeed(&c_player_config, &speed, + err != nullptr ? err->get_underlying_impl() : nullptr); + return speed; + } + + /** + * Set the playout audio track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetPlayoutAudioTrackIdx(int idx, Error *err = nullptr) { + RtePlayerConfigSetPlayoutAudioTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the playout audio track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPlayoutAudioTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetPlayoutAudioTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the publish audio track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetPublishAudioTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetPublishAudioTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the publish audio track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPublishAudioTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetPublishAudioTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the audio track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetAudioTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetAudioTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio track index parameter. 
+ * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAudioTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetAudioTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the subtitle track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetSubtitleTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetSubtitleTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the subtitle track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetSubtitleTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetSubtitleTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the external subtitle track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetExternalSubtitleTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetExternalSubtitleTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the external subtitle track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetExternalSubtitleTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetExternalSubtitleTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the audio pitch parameter. + * @since v4.4.0 + * @param audio_pitch + * @param err + * @return void + * @technical preview + */ + void SetAudioPitch(int32_t audio_pitch, Error *err = nullptr) { + RtePlayerConfigSetAudioPitch(&c_player_config, audio_pitch, + err != nullptr ? 
err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio pitch parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAudioPitch(Error *err = nullptr) { + int32_t audio_pitch; + RtePlayerConfigGetAudioPitch(&c_player_config, &audio_pitch, + err != nullptr ? err->get_underlying_impl() : nullptr); + return audio_pitch; + } + + /** + * @brief Sets the playback volume parameter. + * + * @since v4.5.1 + * + * @details + * You can use this method to set the playback volume. The valid range is [0, 400]. + * + * @param volume The volume value to set. The valid range is [0, 400]. + * @param err Status or error information. See `Error`. + * Possible error codes include: + * - kRteOk: Setting succeeded. + * - kRteErrorInvalidArgument: The volume parameter is set to an invalid value. + * + */ + void SetPlayoutVolume(int32_t volume, Error *err = nullptr) { + RtePlayerConfigSetPlayoutVolume(&c_player_config, volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the playback volume of the player. + * + * @since v4.5.1 + * + * @param err Status or error information. See `Error`. + * - kRteOk: Indicates success. + * + * @return + * If the method call succeeds, returns the volume value of the player. + */ + int32_t GetPlayoutVolume(Error *err = nullptr) { + int32_t volume; + RtePlayerConfigGetPlayoutVolume(&c_player_config, &volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + return volume; + } + + /** + * Set the audio playback delay parameter. + * @since v4.4.0 + * @param volume + * @param err + * @return void + * @technical preview + */ + void SetAudioPlaybackDelay(int32_t delay, Error *err = nullptr) { + RtePlayerConfigSetAudioPlaybackDelay(&c_player_config, delay, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio playback delay parameter. 
+ * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAudioPlaybackDelay(Error *err = nullptr) { + int32_t delay; + RtePlayerConfigGetAudioPlaybackDelay(&c_player_config, &delay, + err != nullptr ? err->get_underlying_impl() : nullptr); + return delay; + } + + /** + * Set the audio dual mono mode parameter. + * @since v4.4.0 + * @param mode + * @param err + * @return void + * @technical preview + */ + void SetAudioDualMonoMode(RteAudioDualMonoMode mode, Error *err = nullptr) { + RtePlayerConfigSetAudioDualMonoMode(&c_player_config, mode, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio dual mono mode parameter. + * @since v4.4.0 + * @param err + * @return RteAudioDualMonoMode + * @technical preview + */ + RteAudioDualMonoMode GetAudioDualMonoMode(Error *err = nullptr) { + RteAudioDualMonoMode mode; + RtePlayerConfigGetAudioDualMonoMode(&c_player_config, &mode, + err != nullptr ? err->get_underlying_impl() : nullptr); + return mode; + } + + /** + * Set the publish volume parameter. + * @since v4.4.0 + * @param volume + * @param err + * @return void + * @technical preview + */ + void SetPublishVolume(int32_t volume, Error *err = nullptr) { + RtePlayerConfigSetPublishVolume(&c_player_config, volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the publish volume parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPublishVolume(Error *err = nullptr) { + int32_t volume; + RtePlayerConfigGetPublishVolume(&c_player_config, &volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + return volume; + } + + /** + * @brief Sets the loop count for media file playback. + * + * @since v4.5.1 + * + * @param count The number of times the media file should loop. + * - `1`: Play once. + * - `2`: Play twice. + * - `-1`: Loop indefinitely until `Stop` is called. + * @param err Status or error information. 
See `Error`. + * - kRteOk: Call succeeded. + * - kRteErrorInvalidArgument: The `count` parameter is set to an invalid value. + * + */ + void SetLoopCount(int32_t count, Error *err = nullptr) { + RtePlayerConfigSetLoopCount(&c_player_config, count, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the loop count parameter of the media file. + * + * @since v4.5.1 + * + * @param err Output parameter, status or error information. See `Error`. + * - kRteOk: Indicates success. + * + * @return + * If the method call succeeds, returns the loop count of the media file. + */ + int32_t GetLoopCount(Error *err = nullptr) { + int32_t count; + RtePlayerConfigGetLoopCount(&c_player_config, &count, + err != nullptr ? err->get_underlying_impl() : nullptr); + return count; + } + + /** + * Set player private parameters. This parameter setting can be done according to actual needs, referring to the suggestions of Agora SA. + * @since v4.4.0 + * @param json_parameter JSON formatted string + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: Indicates that the json_parameter parameter is empty. + * @return void + */ + void SetJsonParameter(const char *json_parameter, Error *err = nullptr) { + String str(json_parameter); + RtePlayerConfigSetJsonParameter(&c_player_config, str.get_underlying_impl(), + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the currently configured private parameters of the player. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return std::string + */ + std::string GetJsonParameter(Error *err = nullptr) { + String str; + RtePlayerConfigGetJsonParameter(&c_player_config, str.get_underlying_impl(), + err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.CStr()); + } + + /** + * @brief Sets the quality layer of the subscribed media stream. 
+ * + * @since v4.4.0 + * + * @details + * You can switch between different video stream quality layers based on actual network conditions, + * each corresponding to a different resolution. + * By default, after calling this method, the audience can only switch between high and low video + * stream quality, i.e., between `kRteAbrSubscriptionHigh` and `kRteAbrSubscriptionLow`. + * If you have higher requirements for video resolution switching, you can `contact technical + * support` to enable + * the ABR (Adaptive Bitrate) feature. Once enabled, you can customize the resolution for each video + * quality layer, and the audience can switch between all video quality layers as needed. For + * detailed implementation, see `Audience-side URL streaming`. + * Applicable scenarios: In unstable network environments, the audience can choose an appropriate + * video quality level based on actual network conditions to ensure a smooth viewing experience. + * Call timing: This method must be called before `SetConfigs(PlayerConfig* config, Error* err)`. + * + * @note If you have not enabled the ABR feature when calling this method, the subscribed video + * quality layer can only be `kRteAbrSubscriptionHigh` or `kRteAbrSubscriptionLow`. + * + * @param abr_subscription_layer The video quality layer to subscribe to. See + * `RteAbrSubscriptionLayer`. + * When customizing the resolution of `kRteAbrSubscriptionLayer1` to + * `kRteAbrSubscriptionLayer6`, you can refer to the table below or set it according to your actual + * needs: + * Note: When customizing resolutions, be sure to sort the video quality + * layers from highest to lowest resolution, and by frame rate from highest to lowest when + * resolutions are the same. 
+ * | Video Quality Layer | Resolution | + * | --------------------------- | --------- | + * | `kRteAbrSubscriptionLayer1` | 2160p (4K) | + * | `kRteAbrSubscriptionLayer2` | 1440p (2K) | + * | `kRteAbrSubscriptionLayer3` | 1080p (HD) | + * | `kRteAbrSubscriptionLayer4` | 720p (HD) | + * | `kRteAbrSubscriptionLayer5` | 540p (SD) | + * | `kRteAbrSubscriptionLayer6` | 480p (SD) | + * @param err Status or error information. See `Error`. + * + */ + void SetAbrSubscriptionLayer(AbrSubscriptionLayer abr_subscription_layer, Error *err = nullptr) { + RtePlayerConfigSetAbrSubscriptionLayer(&c_player_config, abr_subscription_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the quality layer of the subscribed video stream. + * + * @since v4.4.0 + * + * @details + * Call timing: Call this method after `GetConfigs(PlayerConfig* config, Error* err)`. + * + * @param err Status or error information. See `Error`. + * + * @return + * The currently set video quality layer. See `RteAbrSubscriptionLayer`. + */ + AbrSubscriptionLayer GetAbrSubscriptionLayer(Error *err = nullptr) { + AbrSubscriptionLayer abr_subscription_layer; + RtePlayerConfigGetAbrSubscriptionLayer(&c_player_config, &abr_subscription_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + return abr_subscription_layer; + } + + /** + * @brief Sets the fallback option for the subscribed video stream. + * + * @since v4.4.0 + * + * @details + * In poor network conditions, the quality of real-time audio and video may degrade. After calling + * this method, the SDK will downgrade the resolution of the video stream to the specified fallback + * quality layer when the network condition is poor. Different quality layers correspond to + * different resolutions and bitrates. At the same time, the SDK + * continuously monitors the network quality and restores the video stream to the subscribed quality + * layer when the network improves. 
+ * By default, after calling this method, you can only choose to fallback to `kRteAbrFallbackLow` or + * `kRteAbrFallbackAudioOnly`. If you have higher requirements for video experience, you can + * `contact technical support` + * to enable the ABR feature. Once enabled, you can fallback to video streams of all quality layers + * and customize the resolution for each layer. + * Applicable scenarios: - In general scenarios, you can call this method and set the fallback + * quality layer to `kRteAbrFallbackLow` or + * `kRteAbrFallbackAudioOnly`, and the SDK will fallback to a low-quality video stream or receive + * only the audio stream when the network is poor. + * - If you have higher resolution requirements for fallback video streams, you can `contact + * technical support` to enable the ABR feature. After enabling, you can customize the resolution + * for each layer. The SDK + * will use the resolution of the lowest quality layer you specify as the lower limit and + * dynamically adjust the resolution within this range based on network conditions. For detailed + * implementation, see `Audience-side URL streaming`. + * Call timing: This method must be called before `SetConfigs(PlayerConfig* config, Error* err)`. + * + * @note If you have not enabled the ABR feature when calling this method, the fallback quality + * layer for the video stream can only be `kRteAbrFallbackLow` or `kRteAbrFallbackAudioOnly`. + * + * @param abr_fallback_layer The fallback quality layer of the video stream. See `RteAbrFallbackLayer`. 
When customizing the resolution for `kRteAbrFallbackLayer1` to `kRteAbrFallbackLayer6`, you can refer to the table below, or set it according to your actual needs: | Video Quality Layer | Resolution | + * | --------------------------- | --------- | + * | `kRteAbrSubscriptionLayer1` | 2160p (4K) | + * | `kRteAbrSubscriptionLayer2` | 1440p (2K) | + * | `kRteAbrSubscriptionLayer3` | 1080p (HD) | + * | `kRteAbrSubscriptionLayer4` | 720p (HD) | + * | `kRteAbrSubscriptionLayer5` | 540p (SD) | + * | `kRteAbrSubscriptionLayer6` | 480p (SD) | + * Note: When customizing resolutions, be sure to sort the video quality layers from highest to + * lowest resolution, and by frame rate from highest to lowest when resolutions are the same. + * @param err Status or error information. See `Error`. + * + */ + void SetAbrFallbackLayer(AbrFallbackLayer abr_fallback_layer, Error *err = nullptr) { + RtePlayerConfigSetAbrFallbackLayer(&c_player_config, abr_fallback_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + + /** + * @brief Gets the configured video stream fallback option. + * + * @since v4.4.0 + * + * @details + * Call timing: Call this method after `GetConfigs(PlayerConfig* config, Error* err)`. + * + * @param err Status or error information. See `Error`. + * + * @return + * The configured video stream fallback option. See `RteAbrFallbackLayer`. + */ + AbrFallbackLayer GetAbrFallbackLayer(Error *err = nullptr) { + AbrFallbackLayer abr_fallback_layer; + RtePlayerConfigGetAbrFallbackLayer(&c_player_config, &abr_fallback_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + return abr_fallback_layer; + } + + + ::RtePlayerConfig* get_underlying_impl() { return &c_player_config; } + + private: + friend class Player; + + ::RtePlayerConfig c_player_config; +}; + +/** + * The Player class can be used to play URL resources. + * @since v4.4.0 + */ +class Player { + public: + /** + * @brief Constructs a player object. 
+ * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called after `InitMediaEngine`. + * + * @param self An `Rte` object. + * @param config Initial configuration for the player object. You can pass a null pointer. + * + */ + explicit Player(Rte *self, PlayerInitialConfig *config = nullptr) + : c_player(::RtePlayerCreate(&self->c_rte, nullptr, nullptr)) {}; + ~Player() { + RtePlayerDestroy(&c_player, nullptr); + }; + + Player(Player &other) = default; + Player(Player &&other) = default; + + // @{ + Player &operator=(const Player &cmd) = delete; + Player &operator=(Player &&cmd) = delete; + // @} + + /** + * @brief Preloads a URL resource. + * + * @since v4.4.0 + * + * @details + * After successful preloading, the speed of opening the URL resource via `OpenWithUrl` can be + * improved. When these resources are needed, they can be accessed more quickly, reducing waiting + * time. + * Applicable scenarios: Preloading media resources before opening them can reduce user wait time + * and provide a smoother audiovisual experience. + * Call timing: This method must be called before `OpenWithUrl`. + * + * @note This method currently only works for URLs prefixed with `rte://`. A maximum of 20 URLs can + * be preloaded. If the limit is exceeded, the earliest URL will be replaced by the newly preloaded + * one. + * + * @param url A URL prefixed with `rte://`. For details on the fields, see `Audience-side URL + * streaming`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the preload is successful: + * - `true`: Preload succeeded. + * - `false`: Preload failed. + */ + static bool PreloadWithUrl(const char* url, Error *err = nullptr) { + return RtePlayerPreloadWithUrl(nullptr, url, err != nullptr ? err->get_underlying_impl() : nullptr); + }; + + /** + * @brief Opens a URL resource. + * + * @since v4.4.0 + * + * @details + * This method supports opening real-time streaming media via URL. 
If you want to speed up the URL + * opening process, you can call `PreloadWithUrl` to preload the resource before calling this + * method. + * If the resource fails to open, you will receive the `onStateChanged` callback with the state + * reported as `kRtePlayerStateFailed`. In this case, you need to call `Stop` first and then call + * `OpenWithUrl` again to reopen the URL resource. If you have disabled autoplay, you can call + * `Play` to start playback after the resource is opened. + * Call timing: This method must be called after `Player`. + * Related callbacks: After calling this method, the `onStateChanged` callback is triggered to + * report the player state as `kRtePlayerStateOpening`, indicating the URL is being opened. + * Upon successful opening, the player state is reported as `kRtePlayerStateOpenCompleted`. + * + * @note This method currently only supports URLs that start with `rte://`, CDN URLs, and local + * media files. + * + * @param url The URL to open. It can be a CDN URL, local media file, or a URL prefixed with + * `rte://`. For details on the fields of an RTE URL, see `Audience-side URL streaming`. + * @param start_time The start playback position, in milliseconds. + * @param cb An asynchronous callback function used to notify the result of opening the URL + * resource. If an error occurs during the opening process, you can get the specific error + * information through the `err` parameter in the callback. See `Error`. + * + */ + void OpenWithUrl(const char* url, uint64_t start_time, std::function cb) { + CallbackContext* callbackCtx = new CallbackContext(this, cb); + RtePlayerOpenWithUrl(&c_player, url, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); + }; + + /** + * Open a custom source provider. 
+ * @since v4.4.0 + * @param provider + * @param start_time + * @param cb + * @return void + * @technical preview + */ + void OpenWithCustomSourceProvider(PlayerCustomSourceProvider* provider, uint64_t start_time, + std::function cb) { + CallbackContext* callbackCtx = new CallbackContext(this, cb); + RtePlayerOpenWithCustomSourceProvider(&c_player, provider, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); + }; + + /** + * Open a stream. + * @since v4.4.0 + * @param stream + * @param cb + * @return void + * @technical preview + */ + void OpenWithStream(Stream* stream, std::function cb) { + CallbackContext* callbackCtx = new CallbackContext(this, cb); + RtePlayerOpenWithStream(&c_player, stream != nullptr ? &stream->c_rte_stream : nullptr, &CallbackFunc<::RtePlayer, Player>, callbackCtx); + }; + + + /** + * @brief Switches to a new URL during playback. + * + * @since v4.5.1 + * + * @note + * This method is only valid when the player has opened a non-RTE URL. Call this method when the SDK + * reports the player state as `kRtePlayerStateOpenCompleted`. + * + * @param url The new URL to switch to. + * @param sync_pts - `true`: Synchronize playback position. + * - `false`: (Default) Do not synchronize playback position. + * @param cb Callback for asynchronously notifying the result of the switch operation. You can get + * the result or error code through the `err` parameter in the callback. See `Error`. + * Possible values for `err` include: + * - kRteOk: Switch succeeded. + * - kRteErrorDefault: Switch failed. + * - kRteErrorInvalidArgument: The provided URL is empty or has an invalid format. + * - kRteErrorInvalidOperation: + * - The corresponding internal `Player` object has been destroyed or is invalid. + * - The currently opened URL is an `Rte` URL, which does not support switching. 
+ * + */ + void SwitchWithUrl(const char* url, bool sync_pts, std::function cb){ + CallbackContext* callbackCtx = new CallbackContext(this, cb); + RtePlayerSwitchWithUrl(&c_player, url, sync_pts, &CallbackFunc<::RtePlayer, Player>, callbackCtx); + } + + /** + * @brief Gets statistics of the media resource currently being played by the player. + * + * @since v4.4.0 + * + * @details + * This method is used to obtain player statistics, including decoding, rendering frame rate, audio + * and video bitrates, etc., and returns the result asynchronously via a callback function. + * Call timing: This method must be called after `OpenWithUrl`. + * @param cb An asynchronous callback function used to notify the result and error information when + * retrieving player media resource statistics. The function includes the following parameters: + * - `stats`: Statistical data of the media resource currently being played by the player. See + * `RtePlayerStats`. + * - `err`: Error information. See `Error`. + * + */ + void GetStats(std::function cb){ + CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb); + RtePlayerGetStats(&c_player, &CallbackFuncWithArgs<::RtePlayer, Player, rte::PlayerStats*>, ctx); + } + + /** + * @brief Sets the view window for displaying video. + * + * @since v4.4.0 + * + * @details + * This method is used to specify a `Canvas` object to display video. Once the video stream is + * successfully played, the video will be shown on the specified `Canvas`. + * Call timing: This method must be called after `Player`. + * + * @param canvas The `Canvas` object used to render video frames. See `Canvas`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the setting is successful: + * - `true`: Setting succeeded. + * - `false`: Setting failed. You can check the error code returned in the `err` parameter for + * details. 
+ */ + bool SetCanvas(Canvas *canvas, Error *err = nullptr) { + return RtePlayerSetCanvas(&c_player, canvas != nullptr ? &canvas->c_canvas : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + }; + + /** + * @brief Plays a URL resource. + * + * @since v4.4.0 + * + * @details + * If you have disabled autoplay, after successfully calling `OpenWithUrl` to open a real-time + * stream, you can call this method to start playback. + * Call timing: Call this method after receiving the `onStateChanged` callback reporting the state + * as `kRtePlayerStateOpenCompleted`. + * Related callbacks: After this method is successfully called, the `onStateChanged` callback is + * triggered to report the player state. + * + * @note This method currently only supports URLs prefixed with `rte://`. + * + * @param err Status or error information. See `Error`. + * + * @return + * Whether playback is successful: + * - `true`: Playback succeeded. + * - `false`: Playback failed. + */ + bool Play(Error *err = nullptr) { + return RtePlayerPlay(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Stops media playback. + * + * @since v4.4.0 + * + * @details + * After successfully opening a URL stream and calling `Play`, you can call this method to stop + * playback. If you want to pause playback, call `Pause` instead. + * If you fail to open the URL stream using `OpenWithUrl`, you need to call this method first before + * calling `OpenWithUrl` again to reopen the URL. + * Call timing: This method must be called after `OpenWithUrl`. + * Related callbacks: After this method is successfully called, the `onStateChanged` callback is + * triggered and reports the player state as `kRtePlayerStateStopped`. + * + * @param err Status or error information. See `Error`. + * + * @return + * Whether the playback is successfully stopped: + * - `true`: Playback stopped successfully. + * - `false`: Failed to stop playback. 
+ */ + bool Stop(Error *err = nullptr) { + return RtePlayerStop(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Pauses playback. + * + * @since v4.4.0 + * + * @details + * After you call `Play` to play a URL resource, you can call this method to pause playback. If you + * want to stop playback, call `Stop`. + * Call timing: This method must be called after `Play`. + * Related callbacks: After this method is successfully called, the `onStateChanged` callback is + * triggered to report the player state as `kRtePlayerStatePaused`. + * + * @param err Status or error information. See `Error`. + * + * @return + * Whether the playback is successfully paused: + * - `true`: Playback paused successfully. + * - `false`: Failed to pause playback. + */ + bool Pause(Error *err = nullptr) { + return RtePlayerPause(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Seeks to a new playback position. + * + * @since v4.5.1 + * + * @param new_time The new playback position to seek to. + * @param err Output parameter indicating the error code: + * - kRteOk: Success. + * - kRteErrorInvalidOperation: + * - The corresponding internal `Player` object has been destroyed or is invalid. + * - The opened URL is an RTE URL, which does not support seeking. See `Error`. + * + * @return + * Whether the SDK successfully sought to the specified playback position: + * - `true`: Successfully sought to the playback position. + * - `false`: Failed to seek to the playback position. + * + */ + bool Seek(uint64_t new_time, Error *err = nullptr) { + return RtePlayerSeek(&c_player, new_time, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Receives or stops receiving the audio stream. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called after `OpenWithUrl`. 
+ * Related callbacks: After this method is successfully called, the `onPlayerInfoUpdated(const + * PlayerInfo *info)` callback is triggered to report the latest player and media stream + * information. + * + * @param mute Whether to receive the audio stream: + * - `true`: Do not receive the audio stream. + * - `false`: Receive the audio stream. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the subscribe or unsubscribe operation succeeds: + * - `true`: Operation succeeded. + * - `false`: Operation failed. + */ + bool MuteAudio(bool mute, Error *err = nullptr) { + return RtePlayerMuteAudio(&c_player, mute, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Receives or stops receiving the video stream. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called after `OpenWithUrl`. + * Related callbacks: After this method is successfully called, the `onPlayerInfoUpdated(const + * PlayerInfo *info)` callback is triggered to report the latest player and media stream + * information. + * + * @param mute Whether to receive the video stream: + * - `true`: Do not receive the video stream. + * - `false`: Receive the video stream. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the subscribe or unsubscribe operation succeeds: + * - `true`: Operation succeeded. + * - `false`: Operation failed. + */ + bool MuteVideo(bool mute, Error *err = nullptr) { + return RtePlayerMuteVideo(&c_player, mute, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + + /** + * @brief Gets the current playback position. + * + * @since v4.5.1 + * + * @details + * This method is used to get the current playback time position, in milliseconds. + * + * @param err Status or error information. See `Error`. + * Possible error codes include: + * - kRteOk: Call succeeded. 
+ * - kRteErrorInvalidOperation: + * - The corresponding internal `Player` object has been destroyed or is invalid. + * - The opened URL is an RTE URL, which does not support calling `GetPosition`. + * + * @return + * If the method call succeeds, returns the current playback position in milliseconds. + */ + uint64_t GetPosition(Error *err = nullptr){ + return RtePlayerGetPosition(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets information about the player and media stream. + * + * @since v4.4.0 + * + * @details + * You can use this method to get information about the player and media stream, such as audio + * sample rate, video frame size, and more. + * Call timing: Call this method after `Player`. + * + * @param info Information about the player and media stream. See `PlayerInfo`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the information is retrieved successfully: + * - `true`: Retrieved successfully. + * - `false`: Failed to retrieve. + */ + bool GetInfo(PlayerInfo *info, Error *err = nullptr){ + return RtePlayerGetInfo(&c_player, info != nullptr ? info->get_underlying_impl() : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the current player settings. + * + * @since v4.4.0 + * + * @details + * Call timing: Call this method after `Player`. + * + * @param config The player settings object. See `PlayerConfig`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the configuration is retrieved successfully: + * - `true`: Retrieved successfully. + * - `false`: Failed to retrieve. + */ + bool GetConfigs(PlayerConfig* config, Error *err = nullptr) { + return RtePlayerGetConfigs(&c_player, config->get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Configures the player. 
+ * + * @since v4.4.0 + * + * @details + * You can call this method to configure the player, such as enabling autoplay, subscribing to video + * streams of different resolutions and bitrates, etc. + * Call timing: This method must be called after `Player`. + * + * @param config Player configuration object. See `PlayerConfig`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the player configuration is successful: + * - `true`: Configuration succeeded. + * - `false`: Configuration failed. + */ + bool SetConfigs(PlayerConfig* config, Error *err = nullptr) { + return RtePlayerSetConfigs(&c_player, config->get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Registers a player observer object. + * + * @since v4.4.0 + * + * @details + * Before calling this method, you need to implement an interface class that inherits from + * `PlayerObserver`. + * Call timing: This method must be called after `Player`. + * + * @param observer An instance of the interface object. See `PlayerObserver`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the registration is successful: + * - `true`: Registration succeeded. + * - `false`: Registration failed. + */ + bool RegisterObserver(PlayerObserver *observer, Error *err = nullptr) { + return RtePlayerRegisterObserver( + &c_player, observer != nullptr ? observer->c_player_observer : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Unregisters the player event observer object. + * + * @since v4.4.0 + * + * @details + * After calling `RegisterObserver` to register a player event observer, call this method if you + * need to unregister it. + * Call timing: This method must be called after `RegisterObserver`. + * + * @param observer Instance of the interface object. See `PlayerObserver`. + * @param err Status or error information. See `Error`. 
+ * + * @return + * Whether the unregistration is successful: + * - `true`: Unregistration succeeded. + * - `false`: Unregistration failed. + */ + bool UnregisterObserver(PlayerObserver *observer, Error *err = nullptr){ + return RtePlayerUnregisterObserver(&c_player, observer != nullptr ? observer->c_player_observer : nullptr, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + + private: + ::RtePlayer c_player; +}; + +} // namespace rte diff --git a/include/rte_base/rte_cpp_rte.h b/include/rte_base/rte_cpp_rte.h new file mode 100644 index 0000000..68b4423 --- /dev/null +++ b/include/rte_base/rte_cpp_rte.h @@ -0,0 +1,459 @@ +/** + * + * Agora Real Time Engagement + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +#pragma once +#include +#include "rte_base/c/c_rte.h" +#include "rte_base/c/bridge.h" + +#include "rte_cpp_error.h" +#include "rte_cpp_callback_utils.h" +#include "rte_cpp_string.h" + +struct RteObserver; +struct RteInitialConfig; +struct RteConfig; + +namespace rte { + +class Player; + +/** + * The InitialConfig class is used to initialize the Rte object. + * @since v4.4.0 + * @technical preview + */ +class InitialConfig { + public: + InitialConfig() { RteInitialConfigInit(&c_rte_init_cfg, nullptr); } + ~InitialConfig() { RteInitialConfigDeinit(&c_rte_init_cfg, nullptr);} + + private: + friend class Rte; + ::RteInitialConfig c_rte_init_cfg; +}; + +/** + * The Observer class is used to observe the event of Rte object. 
+ * @since v4.4.0 + * @technical preview + */ +class Observer { + public: + Observer(): c_rte_observer(::RteObserverCreate(nullptr)) { + c_rte_observer->base_observer.me_in_target_lang = this;} + ~Observer() { RteObserverDestroy(c_rte_observer, nullptr); } + + // @{ + Observer(Observer &other) = delete; + Observer(Observer &&other) = delete; + Observer &operator=(const Observer &cmd) = delete; + Observer &operator=(Observer &&cmd) = delete; + // @} + + private: + friend class Rte; + + ::RteObserver *c_rte_observer; +}; + +/** + * @brief RTE configuration class used to configure the RTE object. + * + * @since v4.4.0 + */ +class Config { + public: + Config() {RteConfigInit(&c_rte_config, nullptr);} + ~Config() {RteConfigDeinit(&c_rte_config, nullptr);} + + // @{ + Config(Config &other) = delete; + Config(Config &&other) = delete; + Config &operator=(const Config &cmd) = delete; + Config &operator=(Config &&cmd) = delete; + // @} + + /** + * @brief Sets the App ID. + * + * @since v4.4.0 + * + * @details + * Call timing: This method must be called before `InitMediaEngine`. + * + * @param app_id Your project's App ID, which you can obtain from the Agora Console. + * @param err Status or error information. See `Error`. + * + */ + void SetAppId(const char *app_id, Error *err = nullptr){ + String str(app_id); + RteConfigSetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the configured App ID. + * + * @since v4.4.0 + * + * @param err Status or error information. See `Error`. + * + * @return + * - If the method call succeeds, returns the configured App ID. + * - If the method call fails, returns an empty string. + */ + std::string GetAppId(Error *err = nullptr){ + String str; + RteConfigGetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? 
err->get_underlying_impl() : nullptr); + return std::string(str.CStr()); + } + + + /** + * Set the Log Folder Parameter + * @since v4.4.0 + * @param log_folder + * @param err + * @technical preview + */ + void SetLogFolder(const char *log_folder, Error *err = nullptr){ + String str(log_folder); + RteConfigSetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + + /** + * Get the Log Folder Parameter + * @since v4.4.0 + * @param err + * @return const char* + * @technical preview + */ + std::string GetLogFolder(Error *err = nullptr){ + String str; + RteConfigGetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.CStr()); + } + + /** + * Set the Log File Size Parameter + * @since v4.4.0 + * @param log_file_size + * @param err + * @technical preview + */ + void SetLogFileSize(size_t log_file_size, Error *err = nullptr){ + RteConfigSetLogFileSize(&c_rte_config, log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Log File Size Parameter + * @since v4.4.0 + * @param err + * @return size_t + * @technical preview + */ + size_t GetLogFileSize(Error *err = nullptr){ + size_t log_file_size; + RteConfigGetLogFileSize(&c_rte_config, &log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); + return log_file_size; + } + + /** + * Set the Area Code Parameter + * @since v4.4.0 + * @param area_code + * @param err + * @technical preview + */ + void SetAreaCode(int32_t area_code, Error *err = nullptr){ + RteConfigSetAreaCode(&c_rte_config, area_code, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Area Code Parameter + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAreaCode(Error *err = nullptr){ + int32_t area_code; + RteConfigGetAreaCode(&c_rte_config, &area_code, err != nullptr ? 
err->get_underlying_impl() : nullptr); + return area_code; + } + + /** + * Set the Cloud Proxy Parameter + * @since v4.4.0 + * @param cloud_proxy + * @param err + * @technical preview + */ + void SetCloudProxy(const char *cloud_proxy, Error *err = nullptr){ + String str(cloud_proxy); + RteConfigSetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Cloud Proxy Parameter + * @since v4.4.0 + * @param err + * @return const char* + * @technical preview + */ + std::string GetCloudProxy(Error *err = nullptr){ + String str; + RteConfigGetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.CStr()); + } + + /** + * @brief Configures technical preview or custom features provided by the SDK using JSON. + * + * @since v4.4.0 + * + * @details + * Applicable scenarios: You can call this method when you need to set private parameters or use + * custom features. + * Call timing: This method must be called before `SetConfigs(Config *config, Error *err)`. + * + * @param json_parameter Parameters in JSON string format. + * @param err Status or error information. See `Error`. + * + */ + void SetJsonParameter(const char *json_parameter, Error *err = nullptr){ + String str(json_parameter); + RteConfigSetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the configured SDK JSON configuration information. + * + * @since v4.4.0 + * + * @details + * After you call `SetJsonParameter` to set the JSON configuration, you can call this method to + * retrieve the configured information. + * Call timing: This method must be called after `GetConfigs(Config *config, Error *err)`. + * + * @param err Status or error information. See `Error`. + * + * @return + * - If the method call succeeds, returns the configured JSON information. 
+ * - If the method call fails, returns an empty string. + */ + std::string GetJsonParameter(Error *err = nullptr){ + String str; + RteConfigGetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.CStr()); + } + + private: + ::RteConfig* get_underlying_impl() { return &c_rte_config; } + + private: + friend class Rte; + ::RteConfig c_rte_config; +}; + +/** + * The Rte class, which is the base interface of the Agora Real Time Engagement SDK. + * @since v4.4.0 + */ +class Rte { + public: + + /** + * @brief Bridges an RTE object from `IRtcEngine`. + * + * @since v4.4.0 + * + * @details + * The RTE object created by calling this method does not need to be initialized again using + * `InitMediaEngine`. If you have not previously created and initialized the RTC engine, you can + * first call `Rte` to create an RTE object, and then call `InitMediaEngine` to initialize it. + * Call timing: Before calling this method, make sure you have called `initialize` to initialize the + * RTC engine. + * + * @param err A pointer to an `Error` object used to receive status and error codes. + * + * @return + * An RTE object. + */ + static Rte GetFromBridge(Error* err = nullptr){ + Rte rte( RteGetFromBridge(err != nullptr ? err->get_underlying_impl() : nullptr)); + return rte; + } + + /** + * @brief Constructs an RTE object. + * + * @since v4.4.0 + * + * @details + * The RTE object is used to organize and manage internal resources within Rte. + * Call timing: This method must be called before `InitMediaEngine`. + * + * @param config Initialization settings. Currently, you can pass null. + * + */ + explicit Rte(InitialConfig *config = nullptr): c_rte(::RteCreate(config != nullptr ? &config->c_rte_init_cfg : nullptr, nullptr)) {} + ~Rte(){Destroy();}; + + /** + * Construct a new Rte object. 
+ * + * @param other + */ + Rte(Rte &&other) : c_rte(other.c_rte) { + other.c_rte = {}; + } + + // @{ + Rte(Rte &other) = delete; + Rte &operator=(const Rte &other) = delete; + Rte &operator=(Rte &&other) = delete; + // @} + + /** + * Register an RTE observer. + * @since v4.4.0 + * @param observer The object that observes RTE callback events. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal RTE object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The registered observer object is null. + * @return bool + * - true: Registration is successful. + * - false: Registration failed. + * @technical preview + */ + bool RegisterObserver(Observer *observer, Error *err = nullptr){ + return RteRegisterObserver(&c_rte, observer != nullptr ? observer->c_rte_observer : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Unregister the RTE observer object. + * @since v4.4.0 + * @param observer The object that observes RTE callback events. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal RTE object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The unregistered observer object is null. + * @return bool + * - true: Unregistration is successful. + * - false: Unregistration failed. + * @technical preview + */ + bool UnregisterObserver(Observer *observer, Error *err = nullptr){ + return RteUnregisterObserver(&c_rte, observer != nullptr ? observer->c_rte_observer : nullptr, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Initializes the RTE engine. + * + * @details + * This method is asynchronous and returns the initialization result via a callback function. + * Call timing: This method must be called after creating the RTE object and setting the App ID. 
+ * + * @note + * The return value only indicates whether the initialization operation has been successfully + * scheduled in the asynchronous task queue, not whether the initialization itself succeeded. + * + * @param cb An asynchronous callback function used to return the result of engine initialization. + * You can get the result or error code of the initialization through the `err` parameter in the + * callback. See `Error`. + * @param err Error information during the initialization process. See `Error`. When calling the + * `InitMediaEngine` method, if immediately detectable errors occur (such as invalid parameters or + * insufficient resources), the SDK + * will return the error information synchronously through this parameter. + * + * @return + * Whether the asynchronous operation was successfully added to the queue: + * - `true`: Successfully added to the queue. + * - `false`: Failed to be added to the queue. + * Note: This return value only indicates whether the initialization operation has been successfully + * scheduled in the asynchronous task queue, not whether the initialization itself succeeded. + */ + bool InitMediaEngine(std::function cb, Error *err = nullptr){ + auto* ctx = new CallbackContext(this, cb); + return RteInitMediaEngine(&c_rte, &CallbackFunc<::Rte, Rte>, ctx, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Gets the configuration information of the RTE object. + * + * @since v4.4.0 + * + * @details + * Call timing: Call this method after `Rte`. + * + * @param config The settings of the RTE object. See `Config`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the configuration is retrieved successfully: + * - `true`: Retrieved successfully. + * - `false`: Failed to retrieve. + */ + bool GetConfigs(Config *config, Error *err = nullptr){ + return RteGetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, err != nullptr ? 
err->get_underlying_impl() : nullptr); + } + + /** + * @brief Configures information for the RTE object. + * + * @since v4.4.0 + * + * @details + * You can call this method to set information such as the App ID. + * Call timing: This method must be called after `Rte`. + * + * @param config Settings for the RTE object. See `Config`. + * @param err Status or error information. See `Error`. + * + * @return + * Whether the configuration is successful: + * - `true`: Configuration succeeded. + * - `false`: Configuration failed. + */ + bool SetConfigs(Config *config, Error *err = nullptr){ + return RteSetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * @brief Destroys the RTE object. + * + * @since v4.4.0 + * + * @details + * This method releases all resources used by the RTE object. + * + * @param err Status or error information. See `Error`. + * + * @return + * Whether the RTE object is destroyed successfully: + * - `true`: Destroyed successfully. + * - `false`: Failed to destroy. + */ + bool Destroy(Error *err = nullptr){ + return RteDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + private: + + explicit Rte(::Rte other) { c_rte = other; } + + private: + friend class Player; + friend class Canvas; + + ::Rte c_rte; +}; + +} // namespace rte diff --git a/include/rte_cpp_stream.h b/include/rte_base/rte_cpp_stream.h similarity index 64% rename from include/rte_cpp_stream.h rename to include/rte_base/rte_cpp_stream.h index bc3df3f..6cb0984 100644 --- a/include/rte_cpp_stream.h +++ b/include/rte_base/rte_cpp_stream.h @@ -5,10 +5,15 @@ * */ #pragma once -#include "internal/c/stream/stream.h" +#include "rte_base/c/stream/stream.h" namespace rte { +/** + * The Stream class is used to manage the stream. 
+ * @since v4.4.0 + * @technical preview + */ class Stream { public: diff --git a/include/rte_cpp_string.h b/include/rte_base/rte_cpp_string.h similarity index 61% rename from include/rte_cpp_string.h rename to include/rte_base/rte_cpp_string.h index be4642f..eeb0cd9 100644 --- a/include/rte_cpp_string.h +++ b/include/rte_base/rte_cpp_string.h @@ -5,13 +5,18 @@ * */ #pragma once -#include "internal/c/utils/string.h" +#include "rte_base/c/utils/string.h" namespace rte { class Config; class PlayerConfig; +/** + * The String class is used to manage the string. + * @since v4.4.0 + * @technical preview + */ class String { public: @@ -28,6 +33,23 @@ class String { } } + String(const RteString *other) { + c_rte_string = RteStringCreate(nullptr); + RteStringInit(c_rte_string, nullptr); + RteStringCopy(c_rte_string, other, nullptr); + } + + String(const String &other) { + c_rte_string = RteStringCreate(nullptr); + RteStringInit(c_rte_string, nullptr); + RteStringCopy(c_rte_string, other.c_rte_string, nullptr); + } + + String(String &&other) { + c_rte_string = other.c_rte_string; + other.c_rte_string = nullptr; + } + ~String() { RteStringDeinit(c_rte_string, nullptr); RteStringDestroy(c_rte_string, nullptr); @@ -44,10 +66,11 @@ class String { RteStringCopy(c_rte_string, other.c_rte_string, nullptr); } - const char* Cstr() const { + const char* CStr() const { return RteStringCStr(c_rte_string, nullptr); } + friend class Config; friend class PlayerConfig; diff --git a/include/rte_cpp.h b/include/rte_cpp.h index 4f4c42c..f6d0ce6 100644 --- a/include/rte_cpp.h +++ b/include/rte_cpp.h @@ -6,9 +6,9 @@ */ #pragma once -#include "rte_cpp_error.h" // IWYU pragma: export -#include "rte_cpp_player.h" // IWYU pragma: export -#include "rte_cpp_rte.h" // IWYU pragma: export -#include "rte_cpp_canvas.h" // IWYU pragma: export -#include "rte_cpp_string.h" // IWYU pragma: export -#include "rte_cpp_callback_utils.h" // IWYU pragma: export +#include "rte_base/rte_cpp_error.h" // IWYU pragma: 
export +#include "rte_base/rte_cpp_player.h" // IWYU pragma: export +#include "rte_base/rte_cpp_rte.h" // IWYU pragma: export +#include "rte_base/rte_cpp_canvas.h" // IWYU pragma: export +#include "rte_base/rte_cpp_string.h" // IWYU pragma: export +#include "rte_base/rte_cpp_callback_utils.h" // IWYU pragma: export diff --git a/include/rte_cpp_canvas.h b/include/rte_cpp_canvas.h deleted file mode 100644 index b5635c4..0000000 --- a/include/rte_cpp_canvas.h +++ /dev/null @@ -1,109 +0,0 @@ -#pragma once - -#include "internal/c/c_player.h" -#include "internal/c/handle.h" -#include "internal/c/track/canvas.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_rte.h" -#include "rte_cpp_callback_utils.h" - - -namespace rte { - -using VideoRenderMode = ::RteVideoRenderMode; -using VideoMirrorMode = ::RteVideoMirrorMode; -using ViewConfig = ::RteViewConfig; -using View = ::RteView; -using Rect = ::RteRect; - -class CanvasInitialConfig { - public: - CanvasInitialConfig() {RteCanvasInitialConfigInit(&c_canvas_initial_config, nullptr);} - ~CanvasInitialConfig() {RteCanvasInitialConfigDeinit(&c_canvas_initial_config, nullptr);} - - private: - friend class Canvas; - ::RteCanvasInitialConfig c_canvas_initial_config; -}; - - -class CanvasConfig { - public: - CanvasConfig() {RteCanvasConfigInit(&c_canvas_config, nullptr);} - ~CanvasConfig() {RteCanvasConfigDeinit(&c_canvas_config, nullptr);} - - void SetRenderMode(VideoRenderMode mode, Error *err) { - RteCanvasConfigSetVideoRenderMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - VideoRenderMode GetRenderMode(Error *err) { - VideoRenderMode mode; - RteCanvasConfigGetVideoRenderMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetMirrorMode(VideoMirrorMode mode, Error *err) { - RteCanvasConfigSetVideoMirrorMode(&c_canvas_config, mode, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - VideoMirrorMode GetMirrorMode(Error *err) { - VideoMirrorMode mode; - RteCanvasConfigGetVideoMirrorMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetCropArea(RteRect &crop_area, Error *err) { - RteCanvasConfigSetCropArea(&c_canvas_config, crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - RteRect GetCropArea(Error *err) { - RteRect crop_area; - RteCanvasConfigGetCropArea(&c_canvas_config, &crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); - return crop_area; - } - - private: - friend class Canvas; - ::RteCanvasConfig c_canvas_config; -}; - -class Canvas { - public: - Canvas(Rte *rte, CanvasInitialConfig *initial_config) { - c_canvas = ::RteCanvasCreate(&rte->c_rte, &initial_config->c_canvas_initial_config, nullptr); - }; - ~Canvas() { RteCanvasDestroy(&c_canvas, nullptr); }; - - void Destroy(Error *err = nullptr) { - RteCanvasDestroy(&c_canvas, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - Canvas(const Canvas& other) = delete; - Canvas(Canvas&& other) = delete; - Canvas& operator=(const Canvas& other) = delete; - Canvas& operator=(Canvas&& other) = delete; - - void GetConfigs(CanvasConfig *config, Error *err) { - RteCanvasGetConfigs(&c_canvas, &config->c_canvas_config, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - void SetConfigs(CanvasConfig *config, std::function cb, void *cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RteCanvasSetConfigs(&c_canvas, &config->c_canvas_config, &CallbackFunc<::RteCanvas, Canvas>, callbackCtx); - } - - void AddView(View *view, ViewConfig *config, std::function cb, void *cb_data) { - CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb, cb_data); - RteCanvasAddView(&c_canvas, view, config, &CallbackFuncWithArgs<::RteCanvas, Canvas, View*>, ctx); - } - - private: - - friend class Player; - - ::RteCanvas c_canvas; -}; - -} // namespace rte \ No newline at end of file diff --git a/include/rte_cpp_player.h b/include/rte_cpp_player.h deleted file mode 100644 index 1c3c713..0000000 --- a/include/rte_cpp_player.h +++ /dev/null @@ -1,443 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. - * - */ -#pragma once -#include - -#include "internal/c/c_rte.h" -#include "internal/c/c_player.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_callback_utils.h" -#include "rte_cpp_canvas.h" -#include "rte_cpp_string.h" -#include "rte_cpp_stream.h" - -struct RtePlayerObserver; - -namespace rte { - - -using PlayerState = ::RtePlayerState; -using PlayerEvent = ::RtePlayerEvent; -using PlayerMetadataType = ::RtePlayerMetadataType; -using PlayerInfo = ::RtePlayerInfo; -using PlayerStats = ::RtePlayerStats; -using PlayerCustomSourceProvider = ::RtePlayerCustomSourceProvider; - -class PlayerInitialConfig {}; - -static void onStateChanged(::RtePlayerObserver *observer, - RtePlayerState old_state, RtePlayerState new_state, - RteError *err); - -static void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, - uint64_t utc_time); - -static void onResolutionChanged(::RtePlayerObserver *observer, int width, int height); - -static void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event); - -static void 
onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, - const uint8_t *data, size_t length); - -static void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info); - -static void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume); - - -class PlayerObserver { - public: - PlayerObserver() : c_rte_observer(::RtePlayerObserverCreate(nullptr)) { - - c_rte_observer->base_observer.me_in_target_lang = this; - - c_rte_observer->on_state_changed = rte::onStateChanged; - c_rte_observer->on_position_changed = rte::onPositionChanged; - c_rte_observer->on_resolution_changed = rte::onResolutionChanged; - c_rte_observer->on_event = rte::onEvent; - c_rte_observer->on_metadata = rte::onMetadata; - c_rte_observer->on_player_info_updated = rte::onPlayerInfoUpdated; - c_rte_observer->on_audio_volume_indication = rte::onAudioVolumeIndication; - } - virtual ~PlayerObserver(){ RtePlayerObserverDestroy(c_rte_observer, nullptr); } - - // @{ - PlayerObserver(PlayerObserver &other) = delete; - PlayerObserver(PlayerObserver &&other) = delete; - PlayerObserver &operator=(const PlayerObserver &cmd) = delete; - PlayerObserver &operator=(PlayerObserver &&cmd) = delete; - // @} - - virtual void onStateChanged(PlayerState old_state, PlayerState new_state, - rte::Error *err) = 0; - virtual void onPositionChanged(uint64_t curr_time, - uint64_t utc_time) = 0; - virtual void onResolutionChanged(int width, int height) = 0; - virtual void onEvent(PlayerEvent event) = 0; - virtual void onMetadata(PlayerMetadataType type, - const uint8_t *data, size_t length) = 0; - - virtual void onPlayerInfoUpdated(const PlayerInfo *info) = 0; - - virtual void onAudioVolumeIndication(int32_t volume) = 0; - - private: - friend class Player; - - ::RtePlayerObserver *c_rte_observer; -}; - -void onStateChanged(::RtePlayerObserver *observer, - RtePlayerState old_state, RtePlayerState new_state, - RteError *err){ - auto *player_observer = 
static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - Error cpp_err(err); - player_observer->onStateChanged(old_state, new_state, &cpp_err); - } -} -void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, - uint64_t utc_time){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onPositionChanged(curr_time, utc_time); - } -} - -void onResolutionChanged(::RtePlayerObserver *observer, int width, int height){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onResolutionChanged(width, height); - } -} - -void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onEvent(event); - } - -} - -void onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, - const uint8_t *data, size_t length){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onMetadata(type, data, length); - } -} - -void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onPlayerInfoUpdated(info); - } -} - -void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onAudioVolumeIndication(volume); - } -} - -class PlayerConfig { - public: - PlayerConfig() { RtePlayerConfigInit(&c_rte_player_config, nullptr); } - ~PlayerConfig() { RtePlayerConfigDeinit(&c_rte_player_config, nullptr); } - - // @{ - PlayerConfig(PlayerConfig &other) = 
delete; - PlayerConfig(PlayerConfig &&other) = delete; - PlayerConfig &operator=(const PlayerConfig &cmd) = delete; - PlayerConfig &operator=(PlayerConfig &&cmd) = delete; - // @} - - void SetAutoPlay(bool auto_play, Error *err) { - RtePlayerConfigSetAutoPlay(&c_rte_player_config, auto_play, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool GetAutoPlay(Error *err) { - bool auto_play; - RtePlayerConfigGetAutoPlay(&c_rte_player_config, &auto_play, - err != nullptr ? err->get_underlying_impl() : nullptr); - return auto_play; - } - - void SetPlaybackSpeed(int32_t speed, Error *err) { - RtePlayerConfigSetPlaybackSpeed(&c_rte_player_config, speed, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlaybackSpeed(Error *err) { - int32_t speed; - RtePlayerConfigGetPlaybackSpeed(&c_rte_player_config, &speed, - err != nullptr ? err->get_underlying_impl() : nullptr); - return speed; - } - - void SetPlayoutAudioTrackIdx(int idx, Error *err) { - RtePlayerConfigSetPlayoutAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlayoutAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetPlayoutAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetPublishAudioTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetPublishAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPublishAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetPublishAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetAudioTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetSubtitleTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetSubtitleTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetSubtitleTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetSubtitleTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetExternalSubtitleTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetExternalSubtitleTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetExternalSubtitleTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetExternalSubtitleTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetAudioPitch(int32_t audio_pitch, Error *err) { - RtePlayerConfigSetAudioPitch(&c_rte_player_config, audio_pitch, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioPitch(Error *err) { - int32_t audio_pitch; - RtePlayerConfigGetAudioPitch(&c_rte_player_config, &audio_pitch, - err != nullptr ? err->get_underlying_impl() : nullptr); - return audio_pitch; - } - - void SetPlayoutVolume(int32_t volume, Error *err) { - RtePlayerConfigSetPlayoutVolume(&c_rte_player_config, volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlayoutVolume(Error *err) { - int32_t volume; - RtePlayerConfigGetPlayoutVolume(&c_rte_player_config, &volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - return volume; - } - - void SetAudioPlaybackDelay(int32_t delay, Error *err) { - RtePlayerConfigSetAudioPlaybackDelay(&c_rte_player_config, delay, - err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioPlaybackDelay(Error *err) { - int32_t delay; - RtePlayerConfigGetAudioPlaybackDelay(&c_rte_player_config, &delay, - err != nullptr ? err->get_underlying_impl() : nullptr); - return delay; - } - - void SetAudioDualMonoMode(RteAudioDualMonoMode mode, Error *err) { - RtePlayerConfigSetAudioDualMonoMode(&c_rte_player_config, mode, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - RteAudioDualMonoMode GetAudioDualMonoMode(Error *err) { - RteAudioDualMonoMode mode; - RtePlayerConfigGetAudioDualMonoMode(&c_rte_player_config, &mode, - err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetPublishVolume(int32_t volume, Error *err) { - RtePlayerConfigSetPublishVolume(&c_rte_player_config, volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPublishVolume(Error *err) { - int32_t volume; - RtePlayerConfigGetPublishVolume(&c_rte_player_config, &volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - return volume; - } - - void SetLoopCount(int32_t count, Error *err) { - RtePlayerConfigSetLoopCount(&c_rte_player_config, count, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetLoopCount(Error *err) { - int32_t count; - RtePlayerConfigGetLoopCount(&c_rte_player_config, &count, - err != nullptr ? err->get_underlying_impl() : nullptr); - return count; - } - - void SetJsonParameter(const char *json_parameter, Error *err) { - String str(json_parameter); - RtePlayerConfigSetJsonParameter(&c_rte_player_config, str.get_underlying_impl(), - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char *GetJsonParameter(Error *err) { - String str; - RtePlayerConfigGetJsonParameter(&c_rte_player_config, str.get_underlying_impl(), - err != nullptr ? 
err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - private: - ::RtePlayerConfig* get_underlying_impl() { return &c_rte_player_config; } - - private: - friend class Player; - - ::RtePlayerConfig c_rte_player_config; -}; - - -class Player { - public: - explicit Player(Rte *self, PlayerInitialConfig *config = nullptr) - : c_rte(::RtePlayerCreate(&self->c_rte, nullptr, nullptr)) {}; - ~Player() { RtePlayerDestroy(&c_rte, nullptr); }; - - void Destroy(Error *err = nullptr){ - RtePlayerDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); -}; - - Player(Player &other) = default; - Player(Player &&other) = default; - - // @{ - Player &operator=(const Player &cmd) = delete; - Player &operator=(Player &&cmd) = delete; - // @} - - void PreloadWithUrl(const char* url, Error* err) { - RtePlayerPreloadWithUrl(&c_rte, url, err != nullptr ? err->get_underlying_impl() : nullptr); - }; - - void OpenWithUrl(const char* url, uint64_t start_time, std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithUrl(&c_rte, url, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - void OpenWithCustomSourceProvider(PlayerCustomSourceProvider* provider, uint64_t start_time, - std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithCustomSourceProvider(&c_rte, provider, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - - void OpenWithStream(Stream* stream, std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithStream(&c_rte, stream != nullptr ? 
&stream->c_rte_stream : nullptr, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - void GetStats(std::function cb, void *cb_data){ - CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb, cb_data); - RtePlayerGetStats(&c_rte, &CallbackFuncWithArgs<::RtePlayer, Player, rte::PlayerStats*>, ctx); - } - - void SetCanvas(Canvas *canvas, Error *err) { - RtePlayerSetCanvas(&c_rte, canvas != nullptr ? &canvas->c_canvas : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); - }; - - void Play(Error* err) { - RtePlayerPlay(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Stop(Error* err) { - RtePlayerStop(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Pause(Error* err) { - RtePlayerPause(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Seek(uint64_t new_time, Error* err) { - RtePlayerSeek(&c_rte, new_time, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void MuteAudio(bool mute, Error* err) { - RtePlayerMuteAudio(&c_rte, mute, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void MuteVideo(bool mute, Error* err) { - RtePlayerMuteVideo(&c_rte, mute, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - uint64_t GetPosition(Error *err){ - return RtePlayerGetPosition(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void GetInfo(PlayerInfo *info, Error *err){ - RtePlayerGetInfo(&c_rte, info, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void GetConfigs(PlayerConfig* config, Error* err) { - RtePlayerGetConfigs(&c_rte, config->get_underlying_impl(), err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - void SetConfigs(PlayerConfig* config, std::function cb, - void* cb_data) { - - rte::CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerSetConfigs(&c_rte, config->get_underlying_impl(), &CallbackFunc<::RtePlayer, Player>, callbackCtx); - } - - bool RegisterObserver(PlayerObserver *observer, Error *err) { - return RtePlayerRegisterObserver( - &c_rte, observer->c_rte_observer, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void UnregisterObserver(PlayerObserver *observer, Error *err){ - RtePlayerUnregisterObserver(&c_rte, observer->c_rte_observer, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - private: - ::RtePlayer c_rte; -}; - -} // namespace rte diff --git a/include/rte_cpp_rte.h b/include/rte_cpp_rte.h deleted file mode 100644 index 95b3260..0000000 --- a/include/rte_cpp_rte.h +++ /dev/null @@ -1,218 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. 
- * - */ -#pragma once - -#include "internal/c/c_rte.h" -#include "internal/c/bridge.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_callback_utils.h" -#include "rte_cpp_string.h" - - -struct RteObserver; -struct RteInitialConfig; -struct RteConfig; - -namespace rte { - -class Player; - -class RteInitialConfig { - ::RteInitialConfig *c_rte_init_cfg; -}; - -class RteObserver { - public: - RteObserver(): c_rte_observer(::RteObserverCreate(nullptr)) { - c_rte_observer->base_observer.me_in_target_lang = this;} - ~RteObserver() { RteObserverDestroy(c_rte_observer, nullptr); } - - // @{ - RteObserver(RteObserver &other) = delete; - RteObserver(RteObserver &&other) = delete; - RteObserver &operator=(const RteObserver &cmd) = delete; - RteObserver &operator=(RteObserver &&cmd) = delete; - // @} - - private: - friend class Rte; - - ::RteObserver *c_rte_observer; -}; - -class Config { - public: - Config() {RteConfigInit(&c_rte_config, nullptr);} - ~Config() {RteConfigDeinit(&c_rte_config, nullptr);} - - // @{ - Config(Config &other) = delete; - Config(Config &&other) = delete; - Config &operator=(const Config &cmd) = delete; - Config &operator=(Config &&cmd) = delete; - // @} - - void SetAppId(const char *app_id, Error *err){ - String str(app_id); - RteConfigSetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetAppId(Error *err){ - String str; - RteConfigGetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetLogFolder(const char *log_folder, Error *err){ - String str(log_folder); - RteConfigSetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetLogFolder(Error *err){ - String str; - RteConfigGetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? 
err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetLogFileSize(size_t log_file_size, Error *err){ - RteConfigSetLogFileSize(&c_rte_config, log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - size_t GetLogFileSize(Error *err){ - size_t log_file_size; - RteConfigGetLogFileSize(&c_rte_config, &log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); - return log_file_size; - } - - void SetAreaCode(int32_t area_code, Error *err){ - RteConfigSetAreaCode(&c_rte_config, area_code, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAreaCode(Error *err){ - int32_t area_code; - RteConfigGetAreaCode(&c_rte_config, &area_code, err != nullptr ? err->get_underlying_impl() : nullptr); - return area_code; - } - - void SetCloudProxy(const char *cloud_proxy, Error *err){ - String str(cloud_proxy); - RteConfigSetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetCloudProxy(Error *err){ - String str; - RteConfigGetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetJsonParameter(const char *json_parameter, Error *err){ - String str(json_parameter); - RteConfigSetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetJsonParameter(Error *err){ - String str; - RteConfigGetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - private: - ::RteConfig* get_underlying_impl() { return &c_rte_config; } - - private: - friend class Rte; - ::RteConfig c_rte_config; -}; - -class Rte { - public: - - static Rte GetFromBridge(Error* err = nullptr){ - Rte rte( RteGetFromBridge(err != nullptr ? 
err->get_underlying_impl() : nullptr)); - return rte; - } - - explicit Rte(::RteInitialConfig *config = nullptr): c_rte(::RteCreate(config, nullptr)) {} - ~Rte()=default; - - void Destroy(Error *err = nullptr) { - RteDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool RegisterObserver(RteObserver *observer, Error *err){ - return RteRegisterObserver(&c_rte, observer->c_rte_observer, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool UnregisterObserver(RteObserver *observer, Error *err){ - return RteUnregisterObserver(&c_rte, observer->c_rte_observer, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool InitMediaEngine(std::function cb, void *cb_data, Error *err = nullptr){ - auto* ctx = new CallbackContext(this, cb, cb_data); - return RteInitMediaEngine(&c_rte, &CallbackFunc<::Rte, Rte>, ctx, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - Rte(Rte &other) = default; - Rte(Rte &&other) = default; - - // @{ - Rte &operator=(const Rte &cmd) = delete; - Rte &operator=(Rte &&cmd) = delete; - // @} - - void GetConfigs(Config *config, Error *err){ - RteGetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); - } - bool SetConfigs(Config *config, std::function cb, void *cb_data, Error *err = nullptr){ - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - return RteSetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, &CallbackFunc<::Rte, Rte>, callbackCtx, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - private: - - explicit Rte(::Rte other) { c_rte = other; } - - private: - friend class Player; - friend class Canvas; - - ::Rte c_rte; - -// struct RteInitMediaEngineCtx { -// RteInitMediaEngineCtx(InitMediaEngineCb cb, void *cb_data) -// : cb(cb), cb_data(cb_data) {} - -// ~RteInitMediaEngineCtx() = default; - -// // @{ -// RteInitMediaEngineCtx(RteInitMediaEngineCtx &other) = delete; -// RteInitMediaEngineCtx(RteInitMediaEngineCtx &&other) = delete; -// RteInitMediaEngineCtx &operator=(const RteInitMediaEngineCtx &cmd) = delete; -// RteInitMediaEngineCtx &operator=(RteInitMediaEngineCtx &&cmd) = delete; -// // @} - -// InitMediaEngineCb cb; -// void *cb_data; -// }; - -// static void RteInitMediaEngineCtxProxy(::Rte *self, void *cb_data, -// ::RteError *err){ -// auto *ctx = static_cast(cb_data); - -// Rte rte; -// rte.c_rte = *self; - -// Error cpp_err(err); -// ctx->cb(&rte, ctx->cb_data, &cpp_err); - -// delete ctx; -// } -}; - -} // namespace rte