Skip to content

Commit

Permalink
[doc] daily update 2024-9-24 (#2034)
Browse files Browse the repository at this point in the history
[doc] daily update 2024-9-24

doc source:
https://github.com/AgoraIO/agora_doc_source/releases/download/master-build/flutter_ng_json_template_en.json

> This pull request is triggered by a bot; do not edit it directly

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
  • Loading branch information
littleGnAl and github-actions[bot] authored Sep 25, 2024
1 parent 47ac794 commit c1fbd01
Show file tree
Hide file tree
Showing 9 changed files with 392 additions and 512 deletions.
29 changes: 14 additions & 15 deletions lib/src/agora_base.dart
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ enum ErrorCodeType {
errNetDown,

/// 17: The request to join the channel is rejected. Possible reasons include the following:
/// The user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to determine whether the user exists in the channel. Do not call this method to join the channel unless you receive the connectionStateDisconnected (1) state.
/// The user is already in the channel. Agora recommends that you use the onConnectionStateChanged callback to see whether the user is in the channel. Do not call this method to join the channel unless you receive the connectionStateDisconnected (1) state.
/// After calling startEchoTest for the call test, the user tries to join the channel without calling stopEchoTest to end the current test. To join a channel, the call test must be ended by calling stopEchoTest.
@JsonValue(17)
errJoinChannelRejected,
Expand Down Expand Up @@ -688,7 +688,7 @@ enum QualityType {
@JsonValue(6)
qualityDown,

/// 7: Users cannot detect the network quality (not in use).
/// @nodoc
@JsonValue(7)
qualityUnsupported,

Expand Down Expand Up @@ -1871,7 +1871,7 @@ class VideoEncoderConfiguration {
@JsonKey(name: 'orientationMode')
final OrientationMode? orientationMode;

/// Video degradation preference under limited bandwidth. See DegradationPreference.
/// Video degradation preference under limited bandwidth. See DegradationPreference. When this parameter is set to maintainFramerate (1) or maintainBalanced (2), orientationMode needs to be set to orientationModeAdaptive (0) at the same time, otherwise the setting will not take effect.
@JsonKey(name: 'degradationPreference')
final DegradationPreference? degradationPreference;

Expand Down Expand Up @@ -2502,7 +2502,7 @@ enum AudioScenarioType {
@JsonValue(3)
audioScenarioGameStreaming,

/// 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the microphone. For example, education scenarios. In this scenario, audience members receive a pop-up window to request permission of using microphones.
/// 5: Chatroom scenario, where users need to frequently switch the user role or mute and unmute the microphone. For example, education scenarios.
@JsonValue(5)
audioScenarioChatroom,

Expand Down Expand Up @@ -2627,7 +2627,7 @@ enum VideoApplicationScenarioType {
@JsonValue(0)
applicationScenarioGeneral,

/// If set to applicationScenarioMeeting (1), the SDK automatically enables the following strategies:
/// applicationScenarioMeeting (1) is suitable for meeting scenarios. The SDK automatically enables the following strategies:
/// In meeting scenarios where low-quality video streams are required to have a high bitrate, the SDK automatically enables multiple technologies used to deal with network congestions, to enhance the performance of the low-quality streams and to ensure the smooth reception by subscribers.
/// The SDK monitors the number of subscribers to the high-quality video stream in real time and dynamically adjusts its configuration based on the number of subscribers.
/// If nobody subscribes to the high-quality stream, the SDK automatically reduces its bitrate and frame rate to save upstream bandwidth.
Expand Down Expand Up @@ -2974,7 +2974,7 @@ enum LocalVideoStreamReason {
@JsonValue(20)
localVideoStreamReasonScreenCaptureWindowNotSupported,

/// @nodoc
/// 21: (Windows only) The screen has not captured any data available for window sharing.
@JsonValue(21)
localVideoStreamReasonScreenCaptureFailure,

Expand Down Expand Up @@ -4379,7 +4379,6 @@ enum ConnectionChangedReasonType {
/// All lowercase English letters: a to z.
/// All uppercase English letters: A to Z.
/// All numeric characters: 0 to 9.
/// Space
/// "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", "{", "}", "|", "~", ","
@JsonValue(7)
connectionChangedInvalidChannelName,
Expand Down Expand Up @@ -4477,19 +4476,19 @@ extension ConnectionChangedReasonTypeExt on ConnectionChangedReasonType {
/// The reason for a user role switch failure.
@JsonEnum(alwaysCreate: true)
enum ClientRoleChangeFailedReason {
/// 1: The number of hosts in the channel is already at the upper limit. This enumerator is reported only when the support for 128 users is enabled. The maximum number of hosts is based on the actual number of hosts configured when you enable the 128-user feature.
/// 1: The number of hosts in the channel exceeds the limit. This enumerator is reported only when the support for 128 users is enabled. The maximum number of hosts is based on the actual number of hosts configured when you enable the 128-user feature.
@JsonValue(1)
clientRoleChangeFailedTooManyBroadcasters,

/// 2: The request is rejected by the Agora server. Agora recommends you prompt the user to try to switch their user role again.
@JsonValue(2)
clientRoleChangeFailedNotAuthorized,

/// 3: The request is timed out. Agora recommends you prompt the user to check the network connection and try to switch their user role again.
/// 3: The request is timed out. Agora recommends you prompt the user to check the network connection and try to switch their user role again. Deprecated: This enumerator is deprecated since v4.4.0 and is not recommended for use.
@JsonValue(3)
clientRoleChangeFailedRequestTimeOut,

/// 4: The SDK connection fails. You can use reason reported in the onConnectionStateChanged callback to troubleshoot the failure.
/// 4: The SDK is disconnected from the Agora edge server. You can troubleshoot the failure through the reason reported by onConnectionStateChanged. Deprecated: This enumerator is deprecated since v4.4.0 and is not recommended for use.
@JsonValue(4)
clientRoleChangeFailedConnectionFailed,
}
Expand Down Expand Up @@ -4644,7 +4643,7 @@ extension NetworkTypeExt on NetworkType {
/// Setting mode of the view.
@JsonEnum(alwaysCreate: true)
enum VideoViewSetupMode {
/// 0: (Default) Replaces a view.
/// 0: (Default) Clear all added views and replace with a new view.
@JsonValue(0)
videoViewSetupReplace,

Expand Down Expand Up @@ -4688,7 +4687,7 @@ class VideoCanvas {
this.enableAlphaMask,
this.position});

/// The user ID.
/// User ID that publishes the video source.
@JsonKey(name: 'uid')
final int? uid;

Expand Down Expand Up @@ -4730,7 +4729,7 @@ class VideoCanvas {
@JsonKey(name: 'cropArea')
final Rectangle? cropArea;

/// (Optional) Whether the receiver enables alpha mask rendering: true : The receiver enables alpha mask rendering. false : (Default) The receiver disables alpha mask rendering. Alpha mask rendering can create images with transparent effects and extract portraits from videos. When used in combination with other methods, you can implement effects such as portrait-in-picture and watermarking.
/// (Optional) Whether to enable alpha mask rendering: true : Enable alpha mask rendering. false : (Default) Disable alpha mask rendering. Alpha mask rendering can create images with transparent effects and extract portraits from videos. When used in combination with other methods, you can implement effects such as portrait-in-picture and watermarking.
/// The receiver can render alpha channel information only when the sender enables alpha transmission.
/// To enable alpha transmission, contact technical support.
@JsonKey(name: 'enableAlphaMask')
Expand Down Expand Up @@ -5022,7 +5021,7 @@ class VirtualBackgroundSource {
/// The custom background.
@JsonEnum(alwaysCreate: true)
enum BackgroundSourceType {
/// 0: Process the background as alpha information without replacement, only separating the portrait and the background. After setting this value, you can call startLocalVideoTranscoder to implement the picture-in-picture effect.
/// 0: Process the background as alpha data without replacement, only separating the portrait and the background. After setting this value, you can call startLocalVideoTranscoder to implement the picture-in-picture effect.
@JsonValue(0)
backgroundNone,

Expand Down Expand Up @@ -5664,7 +5663,7 @@ class AudioRecordingConfiguration {
@JsonKey(name: 'fileRecordingType')
final AudioFileRecordingType? fileRecordingType;

/// Recording quality. See audiorecordingqualitytype. Note: This parameter applies to AAC files only.
/// Recording quality. See AudioRecordingQualityType. This parameter applies to AAC files only.
@JsonKey(name: 'quality')
final AudioRecordingQualityType? quality;

Expand Down
42 changes: 14 additions & 28 deletions lib/src/agora_media_base.dart
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ extension ContentInspectTypeExt on ContentInspectType {
}
}

/// A ContentInspectModule structure used to configure the frequency of video screenshot and upload.
/// A ContentInspectModule structure, used to configure the frequency of video screenshots and uploads.
@JsonSerializable(explicitToJson: true, includeIfNull: false)
class ContentInspectModule {
/// @nodoc
Expand All @@ -401,7 +401,7 @@ class ContentInspectModule {
Map<String, dynamic> toJson() => _$ContentInspectModuleToJson(this);
}

/// Configuration of video screenshot and upload.
/// Screenshot and upload configuration.
@JsonSerializable(explicitToJson: true, includeIfNull: false)
class ContentInspectConfig {
/// @nodoc
Expand Down Expand Up @@ -640,7 +640,7 @@ enum RenderModeType {
@JsonValue(2)
renderModeFit,

/// Deprecated: 3: This mode is deprecated.
/// 3: Adaptive mode. Deprecated: This enumerator is deprecated and not recommended for use.
@JsonValue(3)
renderModeAdaptive,
}
Expand Down Expand Up @@ -803,11 +803,13 @@ class ExternalVideoFrame {
@JsonKey(name: 'metadata_size')
final int? metadataSize;

/// @nodoc
/// The alpha channel data output by using portrait segmentation algorithm. This data matches the size of the video frame, with each pixel value ranging from [0,255], where 0 represents the background and 255 represents the foreground (portrait). By setting this parameter, you can render the video background into various effects, such as transparent, solid color, image, video, etc. In custom video rendering scenarios, ensure that both the video frame and alphaBuffer are of the Full Range type; other types may cause abnormal alpha data rendering.
@JsonKey(name: 'alphaBuffer', ignore: true)
final Uint8List? alphaBuffer;

/// @nodoc
/// This parameter only applies to video data in BGRA or RGBA format. Whether to extract the alpha channel data from the video frame and automatically fill it into alphaBuffer : true : Extract and fill the alpha channel data. false : (Default) Do not extract and fill the alpha channel data. For video data in BGRA or RGBA format, you can set the alpha channel data in either of the following ways:
/// Automatically by setting this parameter to true.
/// Manually through the alphaBuffer parameter.
@JsonKey(name: 'fillAlphaBuffer')
final bool? fillAlphaBuffer;

Expand Down Expand Up @@ -968,15 +970,15 @@ class VideoFrame {
@JsonKey(name: 'matrix')
final List<double>? matrix;

/// @nodoc
/// The alpha channel data output by using portrait segmentation algorithm. This data matches the size of the video frame, with each pixel value ranging from [0,255], where 0 represents the background and 255 represents the foreground (portrait). By setting this parameter, you can render the video background into various effects, such as transparent, solid color, image, video, etc. In custom video rendering scenarios, ensure that both the video frame and alphaBuffer are of the Full Range type; other types may cause abnormal alpha data rendering.
@JsonKey(name: 'alphaBuffer', ignore: true)
final Uint8List? alphaBuffer;

/// @nodoc
@JsonKey(name: 'pixelBuffer', ignore: true)
final Uint8List? pixelBuffer;

/// The meta information in the video frame. To use this parameter, please.
/// The meta information in the video frame. To use this parameter, please contact technical support.
@VideoFrameMetaInfoConverter()
@JsonKey(name: 'metaInfo')
final VideoFrameMetaInfo? metaInfo;
Expand Down Expand Up @@ -1382,7 +1384,7 @@ class AudioSpectrumObserver {

/// Gets the statistics of a local audio spectrum.
///
/// After successfully calling registerAudioSpectrumObserver to implement the onLocalAudioSpectrum callback in AudioSpectrumObserver and calling enableAudioSpectrumMonitor to enable audio spectrum monitoring, the SDK will trigger the callback as the time interval you set to report the received remote audio data spectrum.
/// After successfully calling registerAudioSpectrumObserver to implement the onLocalAudioSpectrum callback in AudioSpectrumObserver and calling enableAudioSpectrumMonitor to enable audio spectrum monitoring, the SDK triggers this callback at the time interval you set to report the local audio data spectrum before encoding.
///
/// * [data] The audio spectrum data of the local user. See AudioSpectrumData.
final void Function(AudioSpectrumData data)? onLocalAudioSpectrum;
Expand Down Expand Up @@ -1444,6 +1446,7 @@ class VideoFrameObserver {
/// Occurs each time the SDK receives a video frame before encoding.
///
/// After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data before encoding and then process the data according to your particular scenarios.
/// It is recommended that you ensure the modified parameters in videoFrame are consistent with the actual situation of the video frames in the video frame buffer. Otherwise, it may cause unexpected rotation, distortion, and other issues in the local preview and remote video display.
/// Due to framework limitations, this callback does not support sending processed video data back to the SDK.
/// The video data that this callback gets has been preprocessed, with its content cropped and rotated, and the image enhanced.
///
Expand All @@ -1463,6 +1466,7 @@ class VideoFrameObserver {
/// Occurs each time the SDK receives a video frame sent by the remote user.
///
/// After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data sent from the remote end before rendering, and then process it according to the particular scenarios.
/// It is recommended that you ensure the modified parameters in videoFrame are consistent with the actual situation of the video frames in the video frame buffer. Otherwise, it may cause unexpected rotation, distortion, and other issues in the local preview and remote video display.
/// If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
/// Due to framework limitations, this callback does not support sending processed video data back to the SDK.
///
Expand Down Expand Up @@ -1704,26 +1708,8 @@ class FaceInfoObserver {
/// yaw: Head yaw angle. A positive value means turning left, while a negative value means turning right.
/// roll: Head roll angle. A positive value means tilting to the right, while a negative value means tilting to the left.
/// timestamp: String. The timestamp of the output result, in milliseconds. Here is an example of JSON:
/// {
/// "faces":[{
/// "blendshapes":{
/// "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0,
/// "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0,
/// "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0,
/// "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
/// "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0,
/// "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0,
/// "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0,
/// "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0,
/// "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0,
/// "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0,
/// "tongueOut":0.0
/// },
/// "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
///
/// }],
/// "timestamp":"654879876546"
/// }
/// { "faces":[{ "blendshapes":{ "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0, "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0, "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0, "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0, "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0, "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0, "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0, "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0, "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0, "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0, "tongueOut":0.0 }, "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
/// }], "timestamp":"654879876546" }
///
/// Returns
/// true : Facial information JSON parsing successful. false : Facial information JSON parsing failed.
Expand Down
Loading

0 comments on commit c1fbd01

Please sign in to comment.