diff --git a/apps/common-app/src/examples/AudioStream/AudioContent.tsx b/apps/common-app/src/examples/AudioStream/AudioContent.tsx
new file mode 100644
index 000000000..3c136ab7e
--- /dev/null
+++ b/apps/common-app/src/examples/AudioStream/AudioContent.tsx
@@ -0,0 +1,38 @@
+import { useAudioTagContext } from 'react-native-audio-api/development/react';
+import { ActivityIndicator, Button, Text, View } from 'react-native';
+import VolumeSlider from './VolumeSlider';
+import { Spacer } from '../../components';
+
+const AudioContent: React.FC = () => {
+ const { isReady, play, pause, playbackState, setMuted, muted } = useAudioTagContext();
+
+ return (
+
+ {!isReady ? (
+
+ ) : (
+
+ {playbackState}
+
+
+ )}
+
+ );
+};
+
+export default AudioContent;
diff --git a/apps/common-app/src/examples/AudioStream/AudioTag.tsx b/apps/common-app/src/examples/AudioStream/AudioTag.tsx
new file mode 100644
index 000000000..fbac30a43
--- /dev/null
+++ b/apps/common-app/src/examples/AudioStream/AudioTag.tsx
@@ -0,0 +1,26 @@
+import React from 'react';
+import { View } from 'react-native';
+import { Audio } from 'react-native-audio-api/development/react';
+
+import { Container } from '../../components';
+import AudioContent from './AudioContent';
+
+const DEMO_AUDIO_URL =
+ 'https://filesampleshub.com/download/audio/aac/sample1.AAC';
+ // '/data/data/com.fabricexample/cache/audio.wav';
+
+const AudioTag: React.FC = () => {
+ return (
+
+
+
+
+ {/* */}
+ {/* */}
+
+
+
+ );
+};
+
+export default AudioTag;
diff --git a/apps/common-app/src/examples/AudioStream/PlayButton.tsx b/apps/common-app/src/examples/AudioStream/PlayButton.tsx
new file mode 100644
index 000000000..a2d96bde3
--- /dev/null
+++ b/apps/common-app/src/examples/AudioStream/PlayButton.tsx
@@ -0,0 +1,12 @@
+import { useAudioTagContext } from 'react-native-audio-api/development/react';
+import { Button } from 'react-native';
+
+const AudioPlayerButton: React.FC = () => {
+ const { play } = useAudioTagContext();
+
+ return (
+
+ );
+};
+
+export default AudioPlayerButton;
diff --git a/apps/common-app/src/examples/AudioStream/VolumeSlider.tsx b/apps/common-app/src/examples/AudioStream/VolumeSlider.tsx
new file mode 100644
index 000000000..fa20eae75
--- /dev/null
+++ b/apps/common-app/src/examples/AudioStream/VolumeSlider.tsx
@@ -0,0 +1,21 @@
+import { useAudioTagContext } from 'react-native-audio-api/development/react';
+import { Slider } from '../../components';
+import { View } from 'react-native';
+
+const VolumeSlider: React.FC = () => {
+ const { volume, setVolume } = useAudioTagContext();
+
+ return (
+
+
+
+ );
+};
+
+export default VolumeSlider;
diff --git a/apps/common-app/src/examples/AudioStream/index.ts b/apps/common-app/src/examples/AudioStream/index.ts
new file mode 100644
index 000000000..ad47e827f
--- /dev/null
+++ b/apps/common-app/src/examples/AudioStream/index.ts
@@ -0,0 +1 @@
+export { default } from './AudioTag';
diff --git a/apps/common-app/src/examples/index.ts b/apps/common-app/src/examples/index.ts
index 554dcaf49..e30db29d6 100644
--- a/apps/common-app/src/examples/index.ts
+++ b/apps/common-app/src/examples/index.ts
@@ -12,6 +12,7 @@ import PlaybackSpeed from './PlaybackSpeed/PlaybackSpeed';
import Record from './Record/Record';
import Streaming from './Streaming/Streaming';
import Worklets from './Worklets/Worklets';
+import AudioStream from './AudioStream/AudioTag';
type NavigationParamList = {
Oscillator: undefined;
@@ -26,6 +27,7 @@ type NavigationParamList = {
Record: undefined;
Worklets: undefined;
Streamer: undefined;
+ AudioTag: undefined;
};
export type ExampleKey = keyof NavigationParamList;
@@ -110,4 +112,10 @@ export const Examples: Example[] = [
Icon: icons.Radio,
screen: Streaming,
},
+ {
+ key: 'AudioTag',
+ title: 'Audio Tag',
+ Icon: icons.Tag,
+ screen: AudioStream,
+ }
] as const;
diff --git a/apps/fabric-example/ios/FabricExample/AppDelegate.swift b/apps/fabric-example/ios/FabricExample/AppDelegate.swift
index 3e7e49895..58a3fcf48 100644
--- a/apps/fabric-example/ios/FabricExample/AppDelegate.swift
+++ b/apps/fabric-example/ios/FabricExample/AppDelegate.swift
@@ -28,6 +28,11 @@ class AppDelegate: UIResponder, UIApplicationDelegate {
in: window,
launchOptions: launchOptions
)
+ let fileManager = FileManager.default
+ let docsURL = fileManager.urls(for: .documentDirectory, in: .userDomainMask).first!
+ let filePath = docsURL.appendingPathComponent("audio.wav").path
+
+ print(filePath);
return true
}
diff --git a/apps/fabric-example/ios/Podfile.lock b/apps/fabric-example/ios/Podfile.lock
index a344b24dc..cdd4bf9ed 100644
--- a/apps/fabric-example/ios/Podfile.lock
+++ b/apps/fabric-example/ios/Podfile.lock
@@ -2514,7 +2514,7 @@ EXTERNAL SOURCES:
SPEC CHECKSUMS:
FBLazyVector: e97c19a5a442429d1988f182a1940fb08df514da
- hermes-engine: ca0c1d4fe0200e05fedd8d7c0c283b54cd461436
+ hermes-engine: 471e81260adadffc041e40c5eea01333addabb53
RCTDeprecation: af44b104091a34482596cd9bd7e8d90c4e9b4bd7
RCTRequired: bb77b070f75f53398ce43c0aaaa58337cebe2bf6
RCTSwiftUI: afc0a0a635860da1040a0b894bfd529da06d7810
@@ -2523,7 +2523,7 @@ SPEC CHECKSUMS:
React: 1ba7d364ade7d883a1ec055bfc3606f35fdee17b
React-callinvoker: bc2a26f8d84fb01f003fc6de6c9337b64715f95b
React-Core: 7840d3a80b43a95c5e80ef75146bd70925ebab0f
- React-Core-prebuilt: e44365cf4785c3aa56ababc9ab204fe8bc6b17d0
+ React-Core-prebuilt: 6586031f606ff8ab466cac9e8284053a91342881
React-CoreModules: 2eb010400b63b89e53a324ffb3c112e4c7c3ce42
React-cxxreact: a558e92199d26f145afa9e62c4233cf8e7950efe
React-debug: 755200a6e7f5e6e0a40ff8d215493d43cce285fc
@@ -2553,7 +2553,7 @@ SPEC CHECKSUMS:
React-microtasksnativemodule: d1956f0eec54c619b63a379520fb4c618a55ccb9
react-native-background-timer: 4638ae3bee00320753647900b21260b10587b6f7
react-native-safe-area-context: ae7587b95fb580d1800c5b0b2a7bd48c2868e67a
- react-native-skia: 5f68d3c3749bfb4f726e408410b8be5999392cd9
+ react-native-skia: 9e5b3a8a4ced921df89cb625dd9eb4fb10be1acf
React-NativeModulesApple: 5ba0903927f6b8d335a091700e9fda143980f819
React-networking: 3a4b7f9ed2b2d1c0441beacb79674323a24bcca6
React-oscompat: ff26abf0ae3e3fdbe47b44224571e3fc7226a573
@@ -2587,7 +2587,7 @@ SPEC CHECKSUMS:
ReactAppDependencyProvider: e96e93b493d8d86eeaee3e590ba0be53f6abe46f
ReactCodegen: f66521b131699d6af0790f10653933b3f1f79a6f
ReactCommon: 07572bf9e687c8a52fbe4a3641e9e3a1a477c78e
- ReactNativeDependencies: 3467a1fea6f7a524df13b30430bebcc254d9aee2
+ ReactNativeDependencies: a5d71d95f2654107eb45e6ece04caba36beac2bd
RNAudioAPI: fa5c075d2fcdb1ad9a695754b38f07c8c3074396
RNGestureHandler: 07de6f059e0ee5744ae9a56feb07ee345338cc31
RNReanimated: d75c81956bf7531fe08ba4390149002ab8bdd127
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
index f4c22c8b2..4aec47875 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp
@@ -14,6 +14,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -58,6 +59,7 @@ BaseAudioContextHostObject::BaseAudioContextHostObject(
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBiquadFilter),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createIIRFilter),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBufferSource),
+ JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createFileSource),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createBufferQueueSource),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createPeriodicWave),
JSI_EXPORT_FUNCTION(BaseAudioContextHostObject, createConvolver),
@@ -250,6 +252,26 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBufferSource) {
return jsi::Object::createFromHostObject(runtime, bufferSourceHostObject);
}
+JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createFileSource) {
+ AudioFileSourceOptions options;
+ if (count > 0 && !args[0].isUndefined() && !args[0].isNull()) {
+ if (args[0].isString()) {
+ options.filePath = args[0].getString(runtime).utf8(runtime);
+ } else {
+ auto obj = args[0].asObject(runtime);
+ if (obj.isArrayBuffer(runtime)) {
+ auto arrayBuffer = obj.getArrayBuffer(runtime);
+ auto *data = arrayBuffer.data(runtime);
+ auto size = arrayBuffer.size(runtime);
+ options.data = std::vector(data, data + size);
+ }
+ }
+ }
+ const auto fileSourceHostObject =
+ std::make_shared(context_, options);
+ return jsi::Object::createFromHostObject(runtime, fileSourceHostObject);
+}
+
JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createBufferQueueSource) {
const auto options = args[0].asObject(runtime);
const auto baseAudioBufferSourceOptions =
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h
index 052538058..4041f8f51 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.h
@@ -39,6 +39,7 @@ class BaseAudioContextHostObject : public JsiHostObject {
JSI_HOST_FUNCTION_DECL(createBiquadFilter);
JSI_HOST_FUNCTION_DECL(createIIRFilter);
JSI_HOST_FUNCTION_DECL(createBufferSource);
+ JSI_HOST_FUNCTION_DECL(createFileSource);
JSI_HOST_FUNCTION_DECL(createBufferQueueSource);
JSI_HOST_FUNCTION_DECL(createPeriodicWave);
JSI_HOST_FUNCTION_DECL(createAnalyser);
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.cpp
new file mode 100644
index 000000000..20b5c591e
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.cpp
@@ -0,0 +1,66 @@
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+namespace audioapi {
+
+AudioFileSourceNodeHostObject::AudioFileSourceNodeHostObject(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options)
+ : AudioScheduledSourceNodeHostObject(context->createFileSource(options), options) {
+ addGetters(JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, volume));
+ addSetters(JSI_EXPORT_PROPERTY_SETTER(AudioFileSourceNodeHostObject, volume));
+ addGetters(JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, loop));
+ addSetters(JSI_EXPORT_PROPERTY_SETTER(AudioFileSourceNodeHostObject, loop));
+ addGetters(JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, currentTime));
+ addGetters(JSI_EXPORT_PROPERTY_GETTER(AudioFileSourceNodeHostObject, duration));
+
+ addFunctions(JSI_EXPORT_FUNCTION(AudioFileSourceNodeHostObject, pause));
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, volume) {
+ auto node = std::static_pointer_cast(node_);
+ return jsi::Value(node->getVolume());
+}
+
+JSI_PROPERTY_SETTER_IMPL(AudioFileSourceNodeHostObject, volume) {
+ auto node = std::static_pointer_cast(node_);
+ node->setVolume(static_cast(value.getNumber()));
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, loop) {
+ auto node = std::static_pointer_cast(node_);
+ return jsi::Value(node->getLoop());
+}
+
+JSI_PROPERTY_SETTER_IMPL(AudioFileSourceNodeHostObject, loop) {
+ auto node = std::static_pointer_cast(node_);
+ node->setLoop(value.getBool());
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, currentTime) {
+ auto node = std::static_pointer_cast(node_);
+ return jsi::Value(node->getCurrentTime());
+}
+
+JSI_PROPERTY_GETTER_IMPL(AudioFileSourceNodeHostObject, duration) {
+ auto node = std::static_pointer_cast(node_);
+ return jsi::Value(node->getDuration());
+}
+
+JSI_HOST_FUNCTION_IMPL(AudioFileSourceNodeHostObject, pause) {
+ auto audioFileSourceNode = std::static_pointer_cast(node_);
+
+ auto event = [audioFileSourceNode](BaseAudioContext &) {
+ audioFileSourceNode->pause();
+ };
+ audioFileSourceNode->scheduleAudioEvent(std::move(event));
+
+ return jsi::Value::undefined();
+}
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.h
new file mode 100644
index 000000000..222978337
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioFileSourceNodeHostObject.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include
+
+#include
+
+namespace audioapi {
+using namespace facebook;
+
+struct AudioFileSourceOptions;
+class BaseAudioContext;
+
+class AudioFileSourceNodeHostObject : public AudioScheduledSourceNodeHostObject {
+ public:
+ explicit AudioFileSourceNodeHostObject(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options);
+
+ ~AudioFileSourceNodeHostObject() override = default;
+
+ JSI_PROPERTY_GETTER_DECL(volume);
+ JSI_PROPERTY_SETTER_DECL(volume);
+ JSI_PROPERTY_GETTER_DECL(loop);
+ JSI_PROPERTY_SETTER_DECL(loop);
+ JSI_PROPERTY_GETTER_DECL(currentTime);
+ JSI_PROPERTY_GETTER_DECL(duration);
+
+ JSI_HOST_FUNCTION_DECL(pause);
+};
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
index 67e4013e0..41e95a136 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp
@@ -12,6 +12,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -177,6 +178,15 @@ std::shared_ptr BaseAudioContext::createBufferSource(
return bufferSource;
}
+#if !RN_AUDIO_API_TEST
+std::shared_ptr BaseAudioContext::createFileSource(
+ const AudioFileSourceOptions &options) {
+ auto fileSource = std::make_shared(shared_from_this(), options);
+ graphManager_->addSourceNode(fileSource);
+ return fileSource;
+}
+#endif // RN_AUDIO_API_TEST
+
std::shared_ptr BaseAudioContext::createIIRFilter(const IIRFilterOptions &options) {
auto iirFilter = std::make_shared(shared_from_this(), options);
graphManager_->addProcessingNode(iirFilter);
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
index e3f52fd28..4e2d97e05 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h
@@ -28,6 +28,7 @@ class IIRFilterNode;
class AudioDestinationNode;
class AudioBufferSourceNode;
class AudioBufferQueueSourceNode;
+class AudioFileSourceNode;
class AnalyserNode;
class AudioEventHandlerRegistry;
class ConvolverNode;
@@ -47,6 +48,7 @@ struct BiquadFilterOptions;
struct OscillatorOptions;
struct BaseAudioBufferSourceOptions;
struct AudioBufferSourceOptions;
+struct AudioFileSourceOptions;
struct StreamerOptions;
struct DelayOptions;
struct IIRFilterOptions;
@@ -93,6 +95,9 @@ class BaseAudioContext : public std::enable_shared_from_this {
std::shared_ptr createBiquadFilter(const BiquadFilterOptions &options);
std::shared_ptr createBufferSource(
const AudioBufferSourceOptions &options);
+#if !RN_AUDIO_API_TEST
+ std::shared_ptr createFileSource(const AudioFileSourceOptions &options);
+#endif // RN_AUDIO_API_TEST
std::shared_ptr createBufferQueueSource(
const BaseAudioBufferSourceOptions &options);
std::shared_ptr createPeriodicWave(
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp
new file mode 100644
index 000000000..d37ae7488
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp
@@ -0,0 +1,284 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace audioapi {
+
+AudioFileSourceNode::AudioFileSourceNode(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options)
+ : AudioScheduledSourceNode(context, options) {
+ const bool useFilePath = !options.filePath.empty();
+ const bool useData = !options.data.empty();
+
+ if (useFilePath || useData) {
+ auto state = std::make_shared();
+ if (useData) {
+ state->memoryData = options.data;
+ }
+ if (useFilePath) {
+ state->filePath = options.filePath;
+ FFmpegNeeded_ = AudioDecoder::pathHasExtension(options.filePath, {".mp4", ".m4a", ".aac"});
+ } else {
+ auto format = AudioDecoder::detectAudioFormat(options.data.data(), options.data.size());
+ FFmpegNeeded_ =
+ format == AudioFormat::MP4 || format == AudioFormat::M4A || format == AudioFormat::AAC;
+ }
+ initDecoders(useFilePath, context, state);
+ }
+
+ isInitialized_.store(true, std::memory_order_release);
+}
+
+void AudioFileSourceNode::initDecoders(
+ bool useFilePath,
+ const std::shared_ptr &context,
+ const std::shared_ptr &state) {
+ if (FFmpegNeeded_) {
+#if RN_AUDIO_API_FFMPEG_DISABLED
+ assert(false && "File codec is not supported when FFmpeg is disabled");
+#else
+ ffmpegdecoder::ffmpegDecoderConfigInit(&cfg, static_cast(context->getSampleRate()));
+ bool result;
+ if (useFilePath) {
+ result = decoder.openFile(cfg, state->filePath);
+ } else {
+ result = decoder.openMemory(cfg, state->memoryData.data(), state->memoryData.size());
+ }
+ if (result) {
+ state->channels = decoder.outputChannels();
+ state->sampleRate = static_cast(decoder.outputSampleRate());
+ duration_.store(decoder.getDurationInSeconds(), std::memory_order_release);
+ } else {
+ decoder.close();
+ }
+#endif // RN_AUDIO_API_FFMPEG_DISABLED
+ } else {
+ ma_decoder_config config =
+ ma_decoder_config_init(ma_format_f32, 0, static_cast(context->getSampleRate()));
+ ma_decoding_backend_vtable *customBackends[] = {
+ ma_decoding_backend_libvorbis, ma_decoding_backend_libopus};
+ config.ppCustomBackendVTables = customBackends;
+ config.customBackendCount = sizeof(customBackends) / sizeof(customBackends[0]);
+
+ maDecoder_ = std::make_unique();
+ ma_result result;
+ if (useFilePath) {
+ result = ma_decoder_init_file(state->filePath.c_str(), &config, maDecoder_.get());
+ } else {
+ result = ma_decoder_init_memory(
+ state->memoryData.data(), state->memoryData.size(), &config, maDecoder_.get());
+ }
+
+ if (result == MA_SUCCESS) {
+ state->channels = static_cast(maDecoder_->outputChannels);
+ state->sampleRate = static_cast(maDecoder_->outputSampleRate);
+ ma_uint64 length = 0;
+ if (ma_decoder_get_length_in_pcm_frames(maDecoder_.get(), &length) == MA_SUCCESS) {
+ duration_.store(static_cast(length) / state->sampleRate, std::memory_order_release);
+ }
+ } else {
+ ma_decoder_uninit(maDecoder_.get());
+ maDecoder_.reset();
+ }
+ }
+ state->interleavedBuffer.resize(RENDER_QUANTUM_SIZE * state->channels);
+ decoderState_ = state;
+ channelCount_ = decoderState_->channels;
+ sampleRate_ = decoderState_->sampleRate;
+}
+
+void AudioFileSourceNode::setDecoderState(const std::shared_ptr &state) {
+ decoderState_ = state;
+ channelCount_ = state != nullptr ? state->channels : 1;
+}
+
+void AudioFileSourceNode::start(double when) {
+ if (filePaused_.load(std::memory_order_acquire)) {
+ filePaused_.store(false, std::memory_order_release);
+ if (fileStarted_) {
+ return;
+ }
+ }
+
+ AudioScheduledSourceNode::start(when);
+}
+
+void AudioFileSourceNode::pause() {
+ filePaused_.store(true, std::memory_order_release);
+}
+
+void AudioFileSourceNode::disable() {
+ filePaused_.store(false, std::memory_order_release);
+ fileStarted_ = false;
+ totalFramesRead_ = 0;
+ if (FFmpegNeeded_) {
+ decoder.close();
+ } else if (maDecoder_ != nullptr) {
+ ma_decoder_uninit(maDecoder_.get());
+ maDecoder_.reset();
+ }
+ AudioScheduledSourceNode::disable();
+}
+
+size_t AudioFileSourceNode::readFrames(float *buf, size_t frameCount) {
+ if (FFmpegNeeded_) {
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+ return decoder.readPcmFrames(buf, frameCount);
+#else
+ return 0;
+#endif
+ }
+ if (maDecoder_ == nullptr) {
+ return 0;
+ }
+ ma_uint64 framesRead = 0;
+ ma_decoder_read_pcm_frames(maDecoder_.get(), buf, frameCount, &framesRead);
+ return static_cast(framesRead);
+}
+
+bool AudioFileSourceNode::seekToStart() {
+ bool seeked = false;
+ if (FFmpegNeeded_) {
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+ seeked = decoder.seekToStart();
+#endif
+ } else if (maDecoder_ != nullptr) {
+ seeked = ma_decoder_seek_to_pcm_frame(maDecoder_.get(), 0) == MA_SUCCESS;
+ }
+ if (seeked) {
+ totalFramesRead_ = 0;
+ currentTime_.store(0.0, std::memory_order_release);
+ }
+ return seeked;
+}
+
+void AudioFileSourceNode::writeInterleavedToBuffer(
+ const std::shared_ptr &processingBuffer,
+ const AudioFileDecoderState &state,
+ size_t destSampleOffset,
+ size_t frameCount,
+ float vol) {
+ if (vol == 0) {
+ processingBuffer->zero();
+ return;
+ }
+ auto numOutputChannels = static_cast(processingBuffer->getNumberOfChannels());
+ for (size_t i = 0; i < frameCount; i++) {
+ for (int ch = 0; ch < numOutputChannels; ch++) {
+ int srcCh = ch < state.channels ? ch : state.channels - 1;
+ processingBuffer->getChannel(ch)->span()[destSampleOffset + i] =
+ vol * state.interleavedBuffer[i * state.channels + srcCh];
+ }
+ }
+}
+
+size_t AudioFileSourceNode::handleEof(
+ const std::shared_ptr &processingBuffer,
+ size_t nonSilentFrames,
+ size_t framesRead,
+ float vol,
+ size_t startOffset) {
+ if (!loop_.load(std::memory_order_acquire)) {
+ currentTime_.store(decoder.getDurationInSeconds(), std::memory_order_release);
+ playbackState_ = PlaybackState::STOP_SCHEDULED;
+ return framesRead;
+ }
+
+ if (!seekToStart()) {
+ currentTime_.store(decoder.getDurationInSeconds(), std::memory_order_release);
+ playbackState_ = PlaybackState::STOP_SCHEDULED;
+ return framesRead;
+ }
+
+ playbackState_ = PlaybackState::PLAYING;
+
+ size_t toFill = nonSilentFrames - framesRead;
+ if (toFill == 0) {
+ return framesRead;
+ }
+
+ auto &state = *decoderState_;
+ size_t extra = readFrames(state.interleavedBuffer.data(), toFill);
+ totalFramesRead_ += extra;
+ if (sampleRate_ > 0) {
+ currentTime_.store(
+ static_cast(totalFramesRead_) / sampleRate_, std::memory_order_release);
+ }
+
+ if (vol != 0) {
+ writeInterleavedToBuffer(processingBuffer, state, startOffset + framesRead, extra, vol);
+ }
+
+ return framesRead + extra;
+}
+
+std::shared_ptr AudioFileSourceNode::processNode(
+ const std::shared_ptr &processingBuffer,
+ int framesToProcess) {
+ if (decoderState_ == nullptr) {
+ processingBuffer->zero();
+ return processingBuffer;
+ }
+
+ std::shared_ptr context = context_.lock();
+ if (context == nullptr) {
+ processingBuffer->zero();
+ return processingBuffer;
+ }
+
+ size_t startOffset = 0;
+ size_t nonSilentFrames = 0;
+ updatePlaybackInfo(
+ processingBuffer,
+ framesToProcess,
+ startOffset,
+ nonSilentFrames,
+ context->getSampleRate(),
+ context->getCurrentSampleFrame());
+
+ if (!isPlaying() && !isStopScheduled()) {
+ processingBuffer->zero();
+ return processingBuffer;
+ }
+
+ if (startOffset > 0) {
+ processingBuffer->zero(0, startOffset);
+ }
+
+ auto &state = *decoderState_;
+
+ if (filePaused_.load(std::memory_order_acquire)) {
+ processingBuffer->zero(startOffset, nonSilentFrames);
+ return processingBuffer;
+ }
+
+ size_t framesRead = readFrames(state.interleavedBuffer.data(), nonSilentFrames);
+ totalFramesRead_ += framesRead;
+ if (sampleRate_ > 0) {
+ currentTime_.store(
+ static_cast(totalFramesRead_) / sampleRate_, std::memory_order_release);
+ }
+
+ const float vol = volume_.load(std::memory_order_acquire);
+ writeInterleavedToBuffer(processingBuffer, state, startOffset, framesRead, vol);
+
+ if (framesRead < nonSilentFrames) {
+ size_t totalFilled = handleEof(processingBuffer, nonSilentFrames, framesRead, vol, startOffset);
+ processingBuffer->zero(startOffset + totalFilled, nonSilentFrames - totalFilled);
+ }
+
+ handleStopScheduled();
+ return processingBuffer;
+}
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.h
new file mode 100644
index 000000000..ed1c35e2a
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioFileSourceNode.h
@@ -0,0 +1,109 @@
+#pragma once
+
+#include
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+#include
+#endif // RN_AUDIO_API_FFMPEG_DISABLED
+#include
+
+#include
+#include
+#include
+#include
+
+namespace audioapi {
+
+struct AudioFileSourceOptions;
+
+struct AudioFileDecoderState {
+ std::vector memoryData;
+ std::vector interleavedBuffer;
+ int channels = 0;
+ float sampleRate = 0;
+ std::string filePath;
+};
+
+class AudioFileSourceNode : public AudioScheduledSourceNode {
+ public:
+ explicit AudioFileSourceNode(
+ const std::shared_ptr &context,
+ const AudioFileSourceOptions &options);
+ ~AudioFileSourceNode() override = default;
+
+ /// @note Audio Thread only
+ void setDecoderState(const std::shared_ptr &state);
+
+ void disable() override;
+
+ void start(double when) override;
+
+ float getVolume() const {
+ return volume_.load(std::memory_order_acquire);
+ }
+
+ void setVolume(float v) {
+ volume_.store(v, std::memory_order_release);
+ }
+
+ void pause();
+
+ bool getLoop() const {
+ return loop_.load(std::memory_order_acquire);
+ }
+
+ void setLoop(bool v) {
+ loop_.store(v, std::memory_order_release);
+ }
+
+ double getDuration() const {
+ return duration_.load(std::memory_order_acquire);
+ }
+
+ double getCurrentTime() const {
+ return currentTime_.load(std::memory_order_acquire);
+ }
+
+ protected:
+ std::shared_ptr processNode(
+ const std::shared_ptr &processingBuffer,
+ int framesToProcess) override;
+
+ private:
+ void initDecoders(
+ bool useFilePath,
+ const std::shared_ptr &context,
+ const std::shared_ptr &state);
+
+ std::shared_ptr decoderState_;
+ std::unique_ptr maDecoder_;
+ std::atomic volume_;
+ bool FFmpegNeeded_;
+#if !RN_AUDIO_API_FFMPEG_DISABLED
+ ffmpegdecoder::FFmpegDecoder decoder;
+ ffmpegdecoder::FFmpegDecoderConfig cfg;
+#endif // RN_AUDIO_API_FFMPEG_DISABLED
+ std::atomic filePaused_{false};
+ bool fileStarted_{false};
+ std::atomic loop_{false};
+ std::atomic duration_{0};
+ std::atomic currentTime_{0};
+ size_t totalFramesRead_{0};
+ double sampleRate_{0};
+
+ size_t readFrames(float *buf, size_t frameCount);
+ bool seekToStart();
+ static void writeInterleavedToBuffer(
+ const std::shared_ptr &processingBuffer,
+ const AudioFileDecoderState &state,
+ size_t destSampleOffset,
+ size_t frameCount,
+ float vol);
+ size_t handleEof(
+ const std::shared_ptr &processingBuffer,
+ size_t nonSilentFrames,
+ size_t framesRead,
+ float vol,
+ size_t startOffset);
+};
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h
index 0c44d7419..1e5451996 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioDecoder.h
@@ -39,16 +39,7 @@ class AudioDecoder {
int inputChannelCount,
bool interleaved);
- private:
- static AudioBufferResult decodeWithMiniaudio(float sampleRate, DecoderSource source);
- static Result, std::string> readAllPcmFrames(
- ma_decoder &decoder,
- int outputChannels);
- static AudioBufferResult makeAudioBufferFromFloatBuffer(
- const std::vector &buffer,
- float outputSampleRate,
- int outputChannels);
- static AudioFormat detectAudioFormat(const void *data, size_t size) {
+ [[nodiscard]] static AudioFormat detectAudioFormat(const void *data, size_t size) {
if (size < 12)
return AudioFormat::UNKNOWN;
const auto *bytes = static_cast(data);
@@ -84,7 +75,8 @@ class AudioDecoder {
}
return AudioFormat::UNKNOWN;
}
- static inline bool pathHasExtension(
+
+ [[nodiscard]] static inline bool pathHasExtension(
const std::string &path,
const std::vector &extensions) {
std::string pathLower = path;
@@ -95,6 +87,16 @@ class AudioDecoder {
}
return false;
}
+
+ private:
+ static AudioBufferResult decodeWithMiniaudio(float sampleRate, DecoderSource source);
+ static Result, std::string> readAllPcmFrames(
+ ma_decoder &decoder,
+ int outputChannels);
+ static AudioBufferResult makeAudioBufferFromFloatBuffer(
+ const std::vector &buffer,
+ float outputSampleRate,
+ int outputChannels);
[[nodiscard]] static inline int16_t floatToInt16(float sample) {
return static_cast(sample * INT16_MAX);
}
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp
index 041992795..a379eee17 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp
+++ b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp
@@ -10,323 +10,485 @@
#if !RN_AUDIO_API_FFMPEG_DISABLED
#include
-#endif // RN_AUDIO_API_FFMPEG_DISABLED
-#include
+
+#include
+#include
+#include
+
+extern "C" {
+#include
+#include
+#include
+}
namespace audioapi::ffmpegdecoder {
int read_packet(void *opaque, uint8_t *buf, int buf_size) {
- MemoryIOContext *ctx = static_cast<MemoryIOContext *>(opaque);
-
+ auto *ctx = static_cast<MemoryIOContext *>(opaque);
if (ctx->pos >= ctx->size) {
return AVERROR_EOF;
}
-
- int bytes_to_read = std::min(buf_size, static_cast<int>(ctx->size - ctx->pos));
- memcpy(buf, ctx->data + ctx->pos, bytes_to_read);
- ctx->pos += bytes_to_read;
-
- return bytes_to_read;
+ int n = std::min(buf_size, static_cast<int>(ctx->size - ctx->pos));
+ memcpy(buf, ctx->data + ctx->pos, n);
+ ctx->pos += static_cast<size_t>(n);
+ return n;
}
int64_t seek_packet(void *opaque, int64_t offset, int whence) {
- MemoryIOContext *ctx = static_cast<MemoryIOContext *>(opaque);
-
+ auto *ctx = static_cast<MemoryIOContext *>(opaque);
switch (whence) {
case SEEK_SET:
- ctx->pos = offset;
+ ctx->pos = static_cast<size_t>(offset);
break;
case SEEK_CUR:
- ctx->pos += offset;
+ ctx->pos += static_cast<size_t>(offset);
break;
case SEEK_END:
- ctx->pos = ctx->size + offset;
+ ctx->pos = ctx->size + static_cast<size_t>(offset);
break;
case AVSEEK_SIZE:
- return ctx->size;
+ return static_cast<int64_t>(ctx->size);
+ default:
+ return AVERROR(EINVAL);
}
+ ctx->pos = std::min(ctx->pos, ctx->size);
+ return static_cast<int64_t>(ctx->pos);
+}
- if (ctx->pos > ctx->size) {
- ctx->pos = ctx->size;
+int findAudioStreamIndex(AVFormatContext *fmt_ctx) {
+ for (unsigned i = 0; i < fmt_ctx->nb_streams; i++) {
+ if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
+ return static_cast<int>(i);
+ }
}
-
- return ctx->pos;
+ return -1;
}
-void convertFrameToBuffer(
- SwrContext *swr,
- AVFrame *frame,
- int output_channel_count,
- std::vector &buffer,
- size_t &framesRead,
- uint8_t **&resampled_data,
- int &max_resampled_samples) {
- const int out_samples = swr_get_out_samples(swr, frame->nb_samples);
- if (out_samples > max_resampled_samples) {
- av_freep(&resampled_data[0]);
- av_freep(&resampled_data);
- max_resampled_samples = out_samples;
-
- if (av_samples_alloc_array_and_samples(
- &resampled_data,
- nullptr,
- output_channel_count,
- max_resampled_samples,
- AV_SAMPLE_FMT_FLT,
- 0) < 0) {
- return;
- }
+bool openCodec(AVFormatContext *fmt_ctx, int &audio_stream_index, AVCodecContext **out_codec) {
+ audio_stream_index = findAudioStreamIndex(fmt_ctx);
+ if (audio_stream_index < 0) {
+ return false;
+ }
+ AVCodecParameters *codecpar = fmt_ctx->streams[audio_stream_index]->codecpar;
+ const AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
+ if (codec == nullptr) {
+ return false;
+ }
+ AVCodecContext *ctx = avcodec_alloc_context3(codec);
+ if (ctx == nullptr) {
+ return false;
+ }
+ if (avcodec_parameters_to_context(ctx, codecpar) < 0) {
+ avcodec_free_context(&ctx);
+ return false;
}
+ if (avcodec_open2(ctx, codec, nullptr) < 0) {
+ avcodec_free_context(&ctx);
+ return false;
+ }
+ *out_codec = ctx;
+ return true;
+}
- int converted_samples = swr_convert(
- swr,
- resampled_data,
- max_resampled_samples,
- const_cast(frame->data),
- frame->nb_samples);
+FFmpegDecoder::~FFmpegDecoder() {
+ close();
+}
- if (converted_samples > 0) {
- const size_t current_size = buffer.size();
- const size_t new_samples = static_cast(converted_samples) * output_channel_count;
- buffer.resize(current_size + new_samples);
- memcpy(buffer.data() + current_size, resampled_data[0], new_samples * sizeof(float));
- framesRead += converted_samples;
+void FFmpegDecoder::close() {
+ if (resampled_data_ != nullptr) {
+ av_freep(&resampled_data_[0]);
+ av_freep(&resampled_data_);
+ }
+ max_resampled_samples_ = 0;
+ if (swr_ != nullptr) {
+ swr_free(&swr_);
+ }
+ if (packet_ != nullptr) {
+ av_packet_free(&packet_);
+ }
+ if (frame_ != nullptr) {
+ av_frame_free(&frame_);
}
+ if (codec_ctx_ != nullptr) {
+ avcodec_free_context(&codec_ctx_);
+ }
+ if (fmt_ctx_ != nullptr) {
+ avformat_close_input(&fmt_ctx_);
+ }
+ if (avio_ctx_ != nullptr) {
+ avio_context_free(&avio_ctx_);
+ }
+ mem_io_.reset();
+ leftover_.clear();
+ leftover_offset_ = 0;
+ audio_stream_index_ = -1;
+ output_channels_ = 0;
+ output_sample_rate_ = 0;
+ total_output_frames_ = 0;
}
-std::vector readAllPcmFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int out_sample_rate,
- int output_channel_count,
- int audio_stream_index,
- size_t &framesRead) {
- framesRead = 0;
- std::vector buffer;
- auto swr = std::unique_ptr>(
- swr_alloc(), [](SwrContext *ctx) { swr_free(&ctx); });
-
- if (swr == nullptr)
- return buffer;
-
- av_opt_set_chlayout(swr.get(), "in_chlayout", &codec_ctx->ch_layout, 0);
- av_opt_set_int(swr.get(), "in_sample_rate", codec_ctx->sample_rate, 0);
- av_opt_set_sample_fmt(swr.get(), "in_sample_fmt", codec_ctx->sample_fmt, 0);
-
- AVChannelLayout out_ch_layout;
- av_channel_layout_default(&out_ch_layout, output_channel_count);
- av_opt_set_chlayout(swr.get(), "out_chlayout", &out_ch_layout, 0);
- av_opt_set_int(swr.get(), "out_sample_rate", out_sample_rate, 0);
- av_opt_set_sample_fmt(swr.get(), "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
-
- if (swr_init(swr.get()) < 0) {
- av_channel_layout_uninit(&out_ch_layout);
- return buffer;
- }
-
- auto packet = std::unique_ptr>(
- av_packet_alloc(), [](AVPacket *p) { av_packet_free(&p); });
- auto frame = std::unique_ptr>(
- av_frame_alloc(), [](AVFrame *p) { av_frame_free(&p); });
-
- if (packet == nullptr || frame == nullptr) {
- av_channel_layout_uninit(&out_ch_layout);
- return buffer;
- }
-
- // Allocate buffer for resampled data
- uint8_t **resampled_data = nullptr;
- int max_resampled_samples = 4096; // Initial size
+bool FFmpegDecoder::setupSwr() {
+ swr_ = swr_alloc();
+ if (swr_ == nullptr) {
+ return false;
+ }
+ av_opt_set_chlayout(swr_, "in_chlayout", &codec_ctx_->ch_layout, 0);
+ av_opt_set_int(swr_, "in_sample_rate", codec_ctx_->sample_rate, 0);
+ av_opt_set_sample_fmt(swr_, "in_sample_fmt", codec_ctx_->sample_fmt, 0);
+
+ AVChannelLayout out_layout;
+ av_channel_layout_default(&out_layout, output_channels_);
+ av_opt_set_chlayout(swr_, "out_chlayout", &out_layout, 0);
+ av_opt_set_int(swr_, "out_sample_rate", output_sample_rate_, 0);
+ av_opt_set_sample_fmt(swr_, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
+ if (swr_init(swr_) < 0) {
+ av_channel_layout_uninit(&out_layout);
+ return false;
+ }
+ av_channel_layout_uninit(&out_layout);
+
if (av_samples_alloc_array_and_samples(
- &resampled_data,
+ &resampled_data_,
nullptr,
- output_channel_count,
- max_resampled_samples,
+ output_channels_,
+ FFmpegDecoder::CHUNK_SIZE,
AV_SAMPLE_FMT_FLT,
0) < 0) {
- av_channel_layout_uninit(&out_ch_layout);
- return buffer;
- }
-
- while (av_read_frame(fmt_ctx, packet.get()) >= 0) {
- if (packet->stream_index == audio_stream_index) {
- if (avcodec_send_packet(codec_ctx, packet.get()) == 0) {
- while (avcodec_receive_frame(codec_ctx, frame.get()) == 0) {
- convertFrameToBuffer(
- swr.get(),
- frame.get(),
- output_channel_count,
- buffer,
- framesRead,
- resampled_data,
- max_resampled_samples);
- }
- }
- }
- av_packet_unref(packet.get());
- }
-
- // Flush decoder
- avcodec_send_packet(codec_ctx, nullptr);
- while (avcodec_receive_frame(codec_ctx, frame.get()) == 0) {
- convertFrameToBuffer(
- swr.get(),
- frame.get(),
- output_channel_count,
- buffer,
- framesRead,
- resampled_data,
- max_resampled_samples);
+ return false;
}
-
- av_freep(&resampled_data[0]);
- av_freep(&resampled_data);
- av_channel_layout_uninit(&out_ch_layout);
-
- return buffer;
+ max_resampled_samples_ = FFmpegDecoder::CHUNK_SIZE;
+ return true;
}
-inline int findAudioStreamIndex(AVFormatContext *fmt_ctx) {
- for (int i = 0; i < fmt_ctx->nb_streams; i++) {
- if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
- return i;
- }
+bool FFmpegDecoder::openFile(const FFmpegDecoderConfig &cfg, const std::string &path) {
+ close();
+ if (path.empty()) {
+ return false;
}
- return -1;
+ if (avformat_open_input(&fmt_ctx_, path.c_str(), nullptr, nullptr) < 0) {
+ fmt_ctx_ = nullptr;
+ return false;
+ }
+ if (avformat_find_stream_info(fmt_ctx_, nullptr) < 0) {
+ avformat_close_input(&fmt_ctx_);
+ fmt_ctx_ = nullptr;
+ return false;
+ }
+ if (!openCodec(fmt_ctx_, audio_stream_index_, &codec_ctx_)) {
+ avformat_close_input(&fmt_ctx_);
+ fmt_ctx_ = nullptr;
+ return false;
+ }
+ output_channels_ = codec_ctx_->ch_layout.nb_channels;
+ output_sample_rate_ =
+ (cfg.outputSampleRate > 0) ? cfg.outputSampleRate : codec_ctx_->sample_rate;
+
+ packet_ = av_packet_alloc();
+ frame_ = av_frame_alloc();
+ if (packet_ == nullptr || frame_ == nullptr || !setupSwr()) {
+ close();
+ return false;
+ }
+ total_output_frames_ = 0;
+ return true;
}
-bool setupDecoderContext(
- AVFormatContext *fmt_ctx,
- int &audio_stream_index,
- std::unique_ptr> &codec_ctx) {
- audio_stream_index = findAudioStreamIndex(fmt_ctx);
- if (audio_stream_index == -1) {
+bool FFmpegDecoder::openMemory(const FFmpegDecoderConfig &cfg, const void *data, size_t size) {
+ close();
+ if (data == nullptr || size == 0) {
return false;
}
-
- AVCodecParameters *codecpar = fmt_ctx->streams[audio_stream_index]->codecpar;
- const AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);
- if (codec == nullptr) {
+ mem_io_ = std::make_unique();
+ mem_io_->data = static_cast(data);
+ mem_io_->size = size;
+ mem_io_->pos = 0;
+
+ auto* io_buf = static_cast<uint8_t *>(av_malloc(FFmpegDecoder::CHUNK_SIZE));
+ if (io_buf == nullptr) {
+ close();
+ return false;
+ }
+ avio_ctx_ = avio_alloc_context(
+ io_buf,
+ static_cast<int>(FFmpegDecoder::CHUNK_SIZE),
+ 0,
+ mem_io_.get(),
+ read_packet,
+ nullptr,
+ seek_packet);
+ if (avio_ctx_ == nullptr) {
+ av_free(io_buf);
+ mem_io_.reset();
return false;
}
- AVCodecContext *raw_codec_ctx = avcodec_alloc_context3(codec);
- if (raw_codec_ctx == nullptr) {
+ fmt_ctx_ = avformat_alloc_context();
+ if (fmt_ctx_ == nullptr) {
+ close();
return false;
}
+ fmt_ctx_->pb = avio_ctx_;
- codec_ctx.reset(raw_codec_ctx);
- if (avcodec_parameters_to_context(codec_ctx.get(), codecpar) < 0) {
+ if (avformat_open_input(&fmt_ctx_, nullptr, nullptr, nullptr) < 0) {
+ close();
return false;
}
- if (avcodec_open2(codec_ctx.get(), codec, nullptr) < 0) {
+ if (avformat_find_stream_info(fmt_ctx_, nullptr) < 0) {
+ close();
return false;
}
-
+ if (!openCodec(fmt_ctx_, audio_stream_index_, &codec_ctx_)) {
+ close();
+ return false;
+ }
+ output_channels_ = codec_ctx_->ch_layout.nb_channels;
+ output_sample_rate_ =
+ (cfg.outputSampleRate > 0) ? cfg.outputSampleRate : codec_ctx_->sample_rate;
+
+ packet_ = av_packet_alloc();
+ frame_ = av_frame_alloc();
+ if (packet_ == nullptr || frame_ == nullptr || !setupSwr()) {
+ close();
+ return false;
+ }
+ total_output_frames_ = 0;
return true;
}
-std::shared_ptr decodeAudioFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int audio_stream_index,
- int sample_rate) {
- size_t framesRead = 0;
- int output_sample_rate = (sample_rate > 0) ? sample_rate : codec_ctx->sample_rate;
- int output_channel_count = codec_ctx->ch_layout.nb_channels;
-
- std::vector decoded_buffer = readAllPcmFrames(
- fmt_ctx, codec_ctx, output_sample_rate, output_channel_count, audio_stream_index, framesRead);
-
- if (framesRead == 0 || decoded_buffer.empty()) {
- return nullptr;
+void FFmpegDecoder::appendFrameResampled(AVFrame *frame) {
+ int out_samples = swr_get_out_samples(swr_, frame->nb_samples);
+ if (out_samples > max_resampled_samples_) {
+ av_freep(&resampled_data_[0]);
+ av_freep(&resampled_data_);
+ max_resampled_samples_ = out_samples;
+ if (av_samples_alloc_array_and_samples(
+ &resampled_data_,
+ nullptr,
+ output_channels_,
+ max_resampled_samples_,
+ AV_SAMPLE_FMT_FLT,
+ 0) < 0) {
+ return;
+ }
+ }
+ int converted = swr_convert(
+ swr_,
+ resampled_data_,
+ max_resampled_samples_,
+ const_cast<const uint8_t **>(frame->data),
+ frame->nb_samples);
+ if (converted > 0) {
+ size_t n = static_cast<size_t>(converted) * static_cast<size_t>(output_channels_);
+ const float *src = reinterpret_cast<const float *>(resampled_data_[0]);
+ leftover_.insert(leftover_.end(), src, src + n);
}
+}
- auto outputFrames = decoded_buffer.size() / output_channel_count;
- auto audioBuffer =
- std::make_shared(outputFrames, output_channel_count, output_sample_rate);
+bool FFmpegDecoder::feedPipeline() {
+ for (;;) {
+ int r = avcodec_receive_frame(codec_ctx_, frame_);
+ if (r == 0) {
+ appendFrameResampled(frame_);
+ return true;
+ }
+ if (r == AVERROR_EOF) {
+ return !leftover_.empty();
+ }
+ if (r != AVERROR(EAGAIN)) {
+ return false;
+ }
- for (size_t ch = 0; ch < output_channel_count; ++ch) {
- auto channelData = audioBuffer->getChannel(ch)->span();
- for (int i = 0; i < outputFrames; ++i) {
- channelData[i] = decoded_buffer[i * output_channel_count + ch];
+ r = av_read_frame(fmt_ctx_, packet_);
+ if (r == AVERROR_EOF) {
+ if (avcodec_send_packet(codec_ctx_, nullptr) < 0) {
+ return false;
+ }
+ continue;
+ }
+ if (r < 0) {
+ return false;
+ }
+ if (packet_->stream_index != audio_stream_index_) {
+ av_packet_unref(packet_);
+ continue;
+ }
+ r = avcodec_send_packet(codec_ctx_, packet_);
+ av_packet_unref(packet_);
+ if (r < 0) {
+ return false;
}
}
- return audioBuffer;
}
-std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) {
- if (data == nullptr || size == 0) {
- return nullptr;
+float FFmpegDecoder::getDurationInSeconds() const {
+ if (!isOpen() || fmt_ctx_ == nullptr || audio_stream_index_ < 0) {
+ return 0;
}
-
- MemoryIOContext io_ctx{static_cast(data), size, 0};
-
- constexpr size_t buffer_size = 4096;
- uint8_t *io_buffer = static_cast(av_malloc(buffer_size));
- if (io_buffer == nullptr) {
- return nullptr;
+ if (fmt_ctx_->duration != AV_NOPTS_VALUE && fmt_ctx_->duration >= 0) {
+ double t =
+ static_cast<double>(fmt_ctx_->duration) / static_cast<double>(AV_TIME_BASE);
+ if (t > 0 && std::isfinite(t)) {
+ return static_cast<float>(t);
+ }
}
+ return 0;
+}
- auto avio_ctx = std::unique_ptr>(
- avio_alloc_context(io_buffer, buffer_size, 0, &io_ctx, read_packet, nullptr, seek_packet),
- [](AVIOContext *ctx) { avio_context_free(&ctx); });
- if (avio_ctx == nullptr) {
- return nullptr;
+float FFmpegDecoder::getCurrentPositionInSeconds() const {
+ if (!isOpen() || output_sample_rate_ <= 0) {
+ return 0;
}
+ return static_cast<float>(total_output_frames_) / static_cast<float>(output_sample_rate_);
+}
- AVFormatContext *raw_fmt_ctx = avformat_alloc_context();
- if (raw_fmt_ctx == nullptr) {
- return nullptr;
+bool FFmpegDecoder::seekToStart() {
+ if (!isOpen() || audio_stream_index_ < 0) {
+ return false;
}
- raw_fmt_ctx->pb = avio_ctx.get();
-
- if (avformat_open_input(&raw_fmt_ctx, nullptr, nullptr, nullptr) < 0) {
- avformat_free_context(raw_fmt_ctx);
- return nullptr;
+ if (avformat_seek_file(
+ fmt_ctx_, -1, INT64_MIN, 0, INT64_MAX, 0) < 0) {
+ return false;
}
+ avcodec_flush_buffers(codec_ctx_);
+ leftover_.clear();
+ leftover_offset_ = 0;
+ total_output_frames_ = 0;
+ return true;
+}
- auto fmt_ctx = std::unique_ptr(
- raw_fmt_ctx, &avformat_free_context);
-
- if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) {
- return nullptr;
+size_t FFmpegDecoder::readPcmFrames(float *outInterleaved, size_t frameCount) {
+ if (!isOpen() || outInterleaved == nullptr || frameCount == 0 || output_channels_ <= 0) {
+ return 0;
+ }
+ size_t delivered = 0;
+ const auto ch = static_cast<size_t>(output_channels_);
+
+ while (delivered < frameCount) {
+ size_t need = frameCount - delivered;
+ size_t available_samples = leftover_.size() > leftover_offset_
+ ? leftover_.size() - leftover_offset_
+ : 0;
+ size_t leftover_frames = available_samples / ch;
+ if (leftover_frames > 0) {
+ size_t take = std::min(need, leftover_frames);
+ size_t samples = take * ch;
+ memcpy(
+ outInterleaved + delivered * ch,
+ leftover_.data() + leftover_offset_,
+ samples * sizeof(float));
+ leftover_offset_ += samples;
+ if (leftover_offset_ >= leftover_.size()) {
+ leftover_.clear();
+ leftover_offset_ = 0;
+ }
+ delivered += take;
+ } else if (!feedPipeline()) {
+ break;
+ }
}
+ total_output_frames_ += delivered;
+ return delivered;
+}
- auto codec_ctx = std::unique_ptr>(
- nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); });
- int audio_stream_index = -1;
- if (!setupDecoderContext(fmt_ctx.get(), audio_stream_index, codec_ctx)) {
+static std::shared_ptr<AudioBuffer> buildAudioBufferFromInterleaved(
+ std::vector<float> &interleaved,
+ int channels,
+ int sample_rate) {
+ if (interleaved.empty() || channels <= 0) {
return nullptr;
}
-
- return decodeAudioFrames(fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate);
+ size_t frames = interleaved.size() / static_cast<size_t>(channels);
+ auto buf = std::make_shared<AudioBuffer>(frames, channels, static_cast<float>(sample_rate));
+ for (int c = 0; c < channels; ++c) {
+ auto span = buf->getChannel(c)->span();
+ for (size_t i = 0; i < frames; ++i) {
+ span[i] = interleaved[i * static_cast<size_t>(channels) + static_cast<size_t>(c)];
+ }
+ }
+ return buf;
}
std::shared_ptr<AudioBuffer> decodeWithFilePath(const std::string &path, int sample_rate) {
- if (path.empty()) {
+ FFmpegDecoderConfig cfg;
+ ffmpegDecoderConfigInit(&cfg, sample_rate);
+ FFmpegDecoder dec;
+ if (!dec.openFile(cfg, path)) {
return nullptr;
}
+ std::vector<float> acc;
+ std::vector<float> tmp(FFmpegDecoder::CHUNK_SIZE * static_cast<size_t>(std::max(1, dec.outputChannels())));
+ while (true) {
+ size_t n = dec.readPcmFrames(tmp.data(), FFmpegDecoder::CHUNK_SIZE);
+ if (n == 0) {
+ break;
+ }
+ acc.insert(
+ acc.end(),
+ tmp.begin(),
+ tmp.begin() + static_cast<ptrdiff_t>(n * static_cast<size_t>(dec.outputChannels())));
+ }
+ return buildAudioBufferFromInterleaved(acc, dec.outputChannels(), dec.outputSampleRate());
+}
- AVFormatContext *raw_fmt_ctx = nullptr;
- if (avformat_open_input(&raw_fmt_ctx, path.c_str(), nullptr, nullptr) < 0)
+std::shared_ptr<AudioBuffer> decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) {
+ FFmpegDecoderConfig cfg;
+ ffmpegDecoderConfigInit(&cfg, sample_rate);
+ FFmpegDecoder dec;
+ if (!dec.openMemory(cfg, data, size)) {
return nullptr;
+ }
+ std::vector<float> acc;
+ std::vector<float> tmp(FFmpegDecoder::CHUNK_SIZE * static_cast<size_t>(std::max(1, dec.outputChannels())));
+ while (true) {
+ size_t n = dec.readPcmFrames(tmp.data(), FFmpegDecoder::CHUNK_SIZE);
+ if (n == 0) {
+ break;
+ }
+ acc.insert(
+ acc.end(),
+ tmp.begin(),
+ tmp.begin() + static_cast<ptrdiff_t>(n * static_cast<size_t>(dec.outputChannels())));
+ }
+ return buildAudioBufferFromInterleaved(acc, dec.outputChannels(), dec.outputSampleRate());
+}
+}
- auto fmt_ctx = std::unique_ptr>(
- raw_fmt_ctx, [](AVFormatContext *ctx) { avformat_close_input(&ctx); });
+} // namespace audioapi::ffmpegdecoder
- if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) {
- return nullptr;
- }
+#else
- auto codec_ctx = std::unique_ptr>(
- nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); });
- int audio_stream_index = -1;
- if (!setupDecoderContext(fmt_ctx.get(), audio_stream_index, codec_ctx)) {
- return nullptr;
- }
+#include
- return decodeAudioFrames(fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate);
+namespace audioapi::ffmpegdecoder {
+
+// NOTE(review): move ctor/assignment are declared deleted in the header;
+// defining them here as defaulted is ill-formed and was removed.
+FFmpegDecoder::~FFmpegDecoder() = default;
+void FFmpegDecoder::close() {}
+bool FFmpegDecoder::openFile(const FFmpegDecoderConfig &, const std::string &) {
+ return false;
+}
+bool FFmpegDecoder::openMemory(const FFmpegDecoderConfig &, const void *, size_t) {
+ return false;
+}
+float FFmpegDecoder::getDurationInSeconds() const {
+ return 0;
+}
+float FFmpegDecoder::getCurrentPositionInSeconds() const {
+ return 0;
+}
+bool FFmpegDecoder::seekToStart() {
+ return false;
+}
+size_t FFmpegDecoder::readPcmFrames(float *, size_t) {
+ return 0;
+}
+std::shared_ptr<AudioBuffer> decodeWithFilePath(const std::string &, int) {
+ return nullptr;
+}
+std::shared_ptr<AudioBuffer> decodeWithMemoryBlock(const void *, size_t, int) {
+ return nullptr;
}
} // namespace audioapi::ffmpegdecoder
+
+#endif // !RN_AUDIO_API_FFMPEG_DISABLED
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h
index 46eea8f1d..35e366455 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h
@@ -8,8 +8,12 @@
* FFmpeg, you must comply with the terms of the LGPL for FFmpeg itself.
*/
+#pragma once
+
#include
+#include
#include
+#include
#include
extern "C" {
@@ -18,53 +22,116 @@ extern "C" {
#include
#include
}
+
class AudioBuffer;
namespace audioapi::ffmpegdecoder {
-// Custom IO context for reading from memory
+
+/// Opaque IO state for openMemory (must outlive decode until close).
struct MemoryIOContext {
- const uint8_t *data;
- size_t size;
- size_t pos;
+ const uint8_t *data = nullptr;
+ size_t size = 0;
+ size_t pos = 0;
};
-struct AudioStreamContext {
- AVFormatContext *fmt_ctx = nullptr;
- AVCodecContext *codec_ctx = nullptr;
- int audio_stream_index = -1;
+/// Step 1 — like ma_decoder_config_init: desired output sample rate (0 = use stream rate).
+struct FFmpegDecoderConfig {
+ int outputSampleRate = 0;
};
-int read_packet(void *opaque, uint8_t *buf, int buf_size);
-int64_t seek_packet(void *opaque, int64_t offset, int whence);
-inline int findAudioStreamIndex(AVFormatContext *fmt_ctx);
-std::vector readAllPcmFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int out_sample_rate,
- int output_channel_count,
- int audio_stream_index,
- size_t &framesRead);
-
-void convertFrameToBuffer(
- SwrContext *swr,
- AVFrame *frame,
- int output_channel_count,
- std::vector &buffer,
- size_t &framesRead,
- uint8_t **&resampled_data,
- int &max_resampled_samples);
-bool setupDecoderContext(
- AVFormatContext *fmt_ctx,
- int &audio_stream_index,
- std::unique_ptr &codec_ctx);
-std::shared_ptr decodeAudioFrames(
- AVFormatContext *fmt_ctx,
- AVCodecContext *codec_ctx,
- int audio_stream_index,
- int sample_rate);
+/// Initialize decoder config (mirrors miniaudio-style config step).
+inline void ffmpegDecoderConfigInit(FFmpegDecoderConfig *cfg, int outputSampleRate) {
+ if (cfg != nullptr) {
+ cfg->outputSampleRate = outputSampleRate;
+ }
+}
-std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate);
+/**
+ * FFmpeg decoder with incremental read, analogous to ma_decoder:
+ * 1) ffmpegDecoderConfigInit
+ * 2) openFile or openMemory
+ * 3) readPcmFrames repeatedly; 0 returned = end of stream
+ * 4) close when done
+ *
+ * For openMemory, \p data must remain valid until close().
+ */
+class FFmpegDecoder {
+ public:
+ FFmpegDecoder() = default;
+ FFmpegDecoder(const FFmpegDecoder &) = delete;
+ FFmpegDecoder &operator=(const FFmpegDecoder &) = delete;
+ FFmpegDecoder(FFmpegDecoder &&other) = delete;
+ FFmpegDecoder &operator=(FFmpegDecoder &&other) = delete;
+ ~FFmpegDecoder();
+
+ /// @brief Opens a file for decoding.
+ /// @param cfg The configuration for the decoder.
+ /// @param path The path to the file.
+ /// @return True if the file was opened successfully, false otherwise.
+ [[nodiscard]] bool openFile(const FFmpegDecoderConfig &cfg, const std::string &path);
+
+ /// @brief Opens a memory block for decoding.
+ /// @param cfg The configuration for the decoder.
+ /// @param data The data to decode.
+ /// @param size The size of the data.
+ /// @return True if the memory block was opened successfully, false otherwise.
+ [[nodiscard]] bool openMemory(const FFmpegDecoderConfig &cfg, const void *data, size_t size);
+
+ /// @brief Reads frames from the decoder.
+ /// @param outInterleaved The output buffer for the frames.
+ /// @param frameCount The maximum number of frames to read.
+ /// @return The number of frames actually read (0 = EOF).
+ [[nodiscard]] size_t readPcmFrames(float *outInterleaved, size_t frameCount);
+
+ /// @brief Closes the decoder.
+ void close();
+
+ /// @brief Checks if the decoder is open.
+ /// @return True if the decoder is open, false otherwise.
+ [[nodiscard]] bool isOpen() const { return fmt_ctx_ != nullptr && codec_ctx_ != nullptr; }
+ [[nodiscard]] int outputChannels() const { return output_channels_; }
+ [[nodiscard]] int outputSampleRate() const { return output_sample_rate_; }
+ /// @brief Duration in seconds. Returns 0 if unknown.
+ [[nodiscard]] float getDurationInSeconds() const;
+
+ /// @brief Current playback position in seconds (frames read / sample rate).
+ [[nodiscard]] float getCurrentPositionInSeconds() const;
+
+ /// @brief Seeks to the start of the stream. Call after EOF to loop.
+ /// @return True if seek succeeded.
+ [[nodiscard]] bool seekToStart();
+
+ static constexpr size_t CHUNK_SIZE = 4096;
+
+ private:
+ bool setupSwr();
+ bool feedPipeline();
+ void appendFrameResampled(AVFrame *frame);
+
+ AVFormatContext *fmt_ctx_ = nullptr;
+ AVCodecContext *codec_ctx_ = nullptr;
+ SwrContext *swr_ = nullptr;
+ AVPacket *packet_ = nullptr;
+ AVFrame *frame_ = nullptr;
+
+ uint8_t **resampled_data_ = nullptr;
+ int max_resampled_samples_ = 0;
+
+ std::unique_ptr<MemoryIOContext> mem_io_;
+ AVIOContext *avio_ctx_ = nullptr;
+
+ std::vector<float> leftover_;
+ size_t leftover_offset_ = 0;
+ int audio_stream_index_ = -1;
+ int output_channels_ = 0;
+ int output_sample_rate_ = 0;
+ size_t total_output_frames_ = 0;
+};
+
+// --- One-shot decode (existing API) ----------------------------------------
+
+std::shared_ptr<AudioBuffer> decodeWithMemoryBlock(const void *data, size_t size, int sample_rate);
std::shared_ptr<AudioBuffer> decodeWithFilePath(const std::string &path, int sample_rate);
} // namespace audioapi::ffmpegdecoder
diff --git a/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h b/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h
index b2df4a479..0174c1434 100644
--- a/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h
+++ b/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h
@@ -111,6 +111,11 @@ struct AudioBufferSourceOptions : BaseAudioBufferSourceOptions {
}
};
+struct AudioFileSourceOptions : AudioScheduledSourceNodeOptions {
+ std::vector data;
+ std::string filePath;
+};
+
struct StreamerOptions : AudioScheduledSourceNodeOptions {
std::string streamPath;
};
diff --git a/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt b/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt
index bd9378edc..f8a3da551 100644
--- a/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt
+++ b/packages/react-native-audio-api/common/cpp/cursor/CMakeLists.txt
@@ -16,7 +16,7 @@ list(REMOVE_ITEM COMMON_CPP_SOURCES
)
set(INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/include)
-set(FFMPEG_INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/ffmpeg_include)
+set(FFMPEG_INCLUDE_DIR ${COMMON_CPP_DIR}/audioapi/external/include_ffmpeg)
set(EXTERNAL_DIR ${COMMON_CPP_DIR}/audioapi/external)
set(JNI_LIBS_DIR ${COMMON_CPP_DIR}/../../android/src/main/jniLibs)
diff --git a/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt b/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt
index b363b9631..a010be6a5 100644
--- a/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt
+++ b/packages/react-native-audio-api/common/cpp/test/CMakeLists.txt
@@ -38,6 +38,7 @@ list(FILTER RNAUDIOAPI_SRC EXCLUDE REGEX ".*/Worklet.*Node\\.cpp$")
list(REMOVE_ITEM RNAUDIOAPI_SRC
"${REACT_NATIVE_AUDIO_API_DIR}/common/cpp/audioapi/core/AudioContext.cpp"
"${REACT_NATIVE_AUDIO_API_DIR}/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp"
+ "${REACT_NATIVE_AUDIO_API_DIR}/common/cpp/audioapi/core/sources/AudioFileSourceNode.cpp"
)
file(GLOB_RECURSE RNAUDIOAPI_LIBS
diff --git a/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx b/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx
index 0a95bf409..f2e7ed06f 100644
--- a/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx
+++ b/packages/react-native-audio-api/src/development/react/Audio/Audio.tsx
@@ -1,9 +1,25 @@
-import React from 'react';
+import React, {
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from 'react';
+import { View } from 'react-native';
-import type { AudioProps } from './types';
+import { IAudioFileSourceNode } from '../../../interfaces';
+import type {
+ AudioProps,
+ AudioTagPlaybackState,
+ AudioURISource,
+} from './types';
import { useStableAudioProps } from './utils';
+import { AudioComponentContext } from './AudioTagContext';
+import AudioControls from './AudioControls';
const Audio: React.FC<AudioProps> = (inProps) => {
+ const { children } = inProps;
+
/* eslint-disable @typescript-eslint/no-unused-vars */
const {
autoPlay,
@@ -15,10 +31,187 @@ const Audio: React.FC = (inProps) => {
playbackRate,
preservesPitch,
volume,
+ context,
} = useStableAudioProps(inProps);
- /* eslint-enable @typescript-eslint/no-unused-vars */
- return null;
+ const path = useMemo(() => {
+ if (typeof source === 'string') {
+ return source;
+ }
+ return (source as AudioURISource).uri ?? '';
+ }, [source]);
+
+ const nodeRef = useRef<IAudioFileSourceNode | null>(null);
+ const [volumeState, setVolumeState] = useState(volume);
+ const [mutedState, setMutedState] = useState(muted);
+ const [isReady, setIsReady] = useState(false);
+ const [playbackState, setPlaybackState] =
+ useState<AudioTagPlaybackState>('idle');
+ const [currentTime, setCurrentTime] = useState(0);
+ const [duration, setDuration] = useState(0);
+
+ useEffect(() => {
+ setVolumeState(volume);
+ }, [volume]);
+
+ useEffect(() => {
+ setMutedState(muted);
+ }, [muted]);
+
+ const play = useCallback(() => {
+ const n = nodeRef.current;
+ if (!n || !context) {
+ return;
+ }
+ // @ts-ignore - internal
+ n.connect(context.destination.node);
+ n.start(0);
+ setPlaybackState('playing');
+ }, [context]);
+
+ const pause = useCallback(() => {
+ if (!nodeRef.current) {
+ return;
+ }
+ nodeRef.current.pause();
+ setPlaybackState((s) => (s === 'idle' ? 'idle' : 'paused'));
+ }, []);
+
+ const attachNode = useCallback(
+ (n: IAudioFileSourceNode) => {
+ nodeRef.current = n;
+ n.loop = loop;
+ setCurrentTime(n.currentTime);
+ setDuration(n.duration);
+ setIsReady(true);
+ if (autoPlay) {
+ play();
+ }
+ },
+ [autoPlay, play, loop]
+ );
+
+ useEffect(() => {
+ if (!context || !path) {
+ return;
+ }
+
+ const run = async () => {
+ if (path.startsWith('http')) {
+ const response = await fetch(path);
+ const arrayBuffer = await response.arrayBuffer();
+ attachNode(context.context.createFileSource(arrayBuffer));
+ } else {
+ attachNode(context.context.createFileSource(path));
+ }
+ };
+
+ setIsReady(false);
+ setPlaybackState('idle');
+ void run(); // NOTE(review): fire-and-forget; fetch/decode rejections should be surfaced
+
+ return () => {
+ nodeRef.current?.disconnect(undefined);
+ nodeRef.current = null;
+ setIsReady(false);
+ setPlaybackState('idle');
+ };
+ }, [path, context, attachNode]);
+
+ useEffect(() => {
+ const n = nodeRef.current;
+ if (n) {
+ n.volume = mutedState ? 0 : volumeState;
+ }
+ }, [volumeState, mutedState, isReady]);
+
+ useEffect(() => {
+ const n = nodeRef.current;
+ if (n) {
+ n.loop = loop;
+ }
+ }, [loop, isReady]);
+
+ useEffect(() => {
+ const n = nodeRef.current;
+ if (!n || playbackState !== 'playing') return;
+ const id = setInterval(() => {
+ const node = nodeRef.current;
+ if (node) {
+ setCurrentTime(node.currentTime);
+ }
+ // NOTE(review): removed leftover debug console.log of currentTime
+ }, 250);
+ return () => {
+ clearInterval(id);
+ };
+ }, [playbackState]);
+
+ const setVolume = useCallback(
+ (next: number) => {
+ setVolumeState(next);
+ const n = nodeRef.current;
+ if (n) {
+ n.volume = mutedState ? 0 : next;
+ }
+ },
+ [mutedState]
+ );
+
+ const setMuted = useCallback(
+ (next: boolean) => {
+ setMutedState(next);
+ const n = nodeRef.current;
+ if (n) {
+ if (next) {
+ n.volume = 0;
+ } else {
+ n.volume = volumeState;
+ }
+ }
+ },
+ [volumeState]
+ );
+
+ const ctxValue = useMemo(
+ () => ({
+ play,
+ pause,
+ volume: volumeState,
+ setVolume,
+ muted: mutedState,
+ setMuted,
+ isReady,
+ playbackState,
+ currentTime,
+ duration,
+ }),
+ [
+ play,
+ pause,
+ setVolume,
+ volumeState,
+ mutedState,
+ setMuted,
+ isReady,
+ playbackState,
+ currentTime,
+ duration,
+ ]
+ );
+
+ if (context == null) {
+ return null;
+ }
+
+ return (
+
+
+ {controls && }
+ {children}
+
+
+ );
};
export default Audio;
diff --git a/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx b/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx
index be6453af0..cec85f69c 100644
--- a/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx
+++ b/packages/react-native-audio-api/src/development/react/Audio/Audio.web.tsx
@@ -1,9 +1,149 @@
-import React from 'react';
-import type { AudioProps } from './types';
+import React, {
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from 'react';
+import type { AudioProps, AudioTagPlaybackState } from './types';
+import { AudioComponentContext } from './AudioTagContext';
+import { useStableAudioProps } from './utils';
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const Audio: React.FC = (props) => {
  // Web implementation of the Audio tag component: wraps an
  // HTMLAudioElement and exposes play/pause/volume/mute/position through
  // AudioComponentContext, mirroring the native variant's surface.
  const { children } = props;
  /* eslint-disable @typescript-eslint/no-unused-vars */
  const {
    autoPlay,
    controls,
    loop,
    muted,
    preload,
    source,
    playbackRate,
    preservesPitch,
    volume,
  } = useStableAudioProps(props);

  // Ref to the underlying media element. NOTE(review): the JSX that
  // renders it appears stripped from this patch (see the return below).
  const audioRef = useRef(null);
  // Local mirrors of volume/muted so the imperative context setters can
  // update them independently of the incoming props.
  const [volumeState, setVolumeState] = useState(volume);
  const [mutedState, setMutedState] = useState(muted);
  const [isReady, setIsReady] = useState(false);
  const [playbackState, setPlaybackState] =
    useState('idle');
  const [currentTime, setCurrentTime] = useState(0);
  const [duration, setDuration] = useState(0);

  // Re-sync local state when the props change from above.
  useEffect(() => {
    setVolumeState(volume);
  }, [volume]);

  useEffect(() => {
    setMutedState(muted);
  }, [muted]);

  // Push volume changes onto the media element.
  useEffect(() => {
    const el = audioRef.current;
    if (el) {
      el.volume = volumeState;
    }
  }, [volumeState]);

  // Track duration/position via media events; keyed on isReady so the
  // listeners (re)attach once the element is live.
  useEffect(() => {
    const el = audioRef.current;
    if (!el) return;
    const onLoadedMetadata = () => {
      setDuration(el.duration);
      setCurrentTime(el.currentTime);
    };
    const onTimeUpdate = () => setCurrentTime(el.currentTime);
    el.addEventListener('loadedmetadata', onLoadedMetadata);
    el.addEventListener('timeupdate', onTimeUpdate);
    // Seed state immediately in case 'loadedmetadata' already fired
    // before this effect ran (duration is NaN until metadata loads).
    if (!isNaN(el.duration)) setDuration(el.duration);
    setCurrentTime(el.currentTime);
    return () => {
      el.removeEventListener('loadedmetadata', onLoadedMetadata);
      el.removeEventListener('timeupdate', onTimeUpdate);
    };
  }, [isReady]);

  // Push mute changes onto the media element.
  useEffect(() => {
    const el = audioRef.current;
    if (el) {
      el.muted = mutedState;
    }
  }, [mutedState]);

  // play() returns a promise on the web; rejections (e.g. autoplay
  // policy) are deliberately swallowed here.
  const play = useCallback(() => {
    audioRef.current?.play()?.catch(() => {});
  }, []);

  const pause = useCallback(() => {
    audioRef.current?.pause();
  }, []);

  // Imperative setters exposed via context: update local state and write
  // straight to the element when one is mounted.
  const setVolume = useCallback((next: number) => {
    setVolumeState(next);
    const el = audioRef.current;
    if (el) {
      el.volume = next;
    }
  }, []);

  const setMuted = useCallback((next: boolean) => {
    setMutedState(next);
    const el = audioRef.current;
    if (el) {
      el.muted = next;
    }
  }, []);

  // Memoized payload for AudioComponentContext consumers.
  const ctxValue = useMemo(
    () => ({
      play,
      pause,
      volume: volumeState,
      setVolume,
      muted: mutedState,
      setMuted,
      isReady,
      playbackState,
      currentTime,
      duration,
    }),
    [
      play,
      pause,
      setVolume,
      volumeState,
      mutedState,
      setMuted,
      isReady,
      playbackState,
      currentTime,
      duration,
    ]
  );

  // NOTE(review): the JSX below appears stripped from this patch —
  // presumably an AudioComponentContext.Provider wrapping an <audio>
  // element wired to audioRef. setIsReady/setPlaybackState are never
  // called in the visible body, so presumably the element's event props
  // (onCanPlay/onPlay/onPause/...) update them — confirm against the
  // original file.
  return (

  );
};
export default Audio;
diff --git a/packages/react-native-audio-api/src/development/react/Audio/AudioControls.tsx b/packages/react-native-audio-api/src/development/react/Audio/AudioControls.tsx
new file mode 100644
index 000000000..2be6d9973
--- /dev/null
+++ b/packages/react-native-audio-api/src/development/react/Audio/AudioControls.tsx
@@ -0,0 +1,247 @@
+import { Pause, Play, Volume, VolumeX } from 'lucide-react-native';
+import React, { useCallback, useEffect, useMemo, useState } from 'react';
+import {
+ ActivityIndicator,
+ LayoutChangeEvent,
+ PanResponder,
+ Platform,
+ Pressable,
+ StyleSheet,
+ Text,
+ View,
+} from 'react-native';
+import { useAudioTagContext } from './AudioTagContext';
+
+function formatTime(seconds: number): string {
+ if (!Number.isFinite(seconds) || seconds < 0) return '0:00';
+ const h = Math.floor(seconds / 3600);
+ const m = Math.floor((seconds % 3600) / 60);
+ const s = Math.floor(seconds % 60);
+ if (h > 0) {
+ return `${h}:${m.toString().padStart(2, '0')}:${s.toString().padStart(2, '0')}`;
+ } else {
+ return `${m}:${s.toString().padStart(2, '0')}`;
+ }
+}
+
// Static styles for the built-in audio controls UI.
const styles = StyleSheet.create({
  // Card-like wrapper around the whole control strip.
  container: {
    flexDirection: 'column',
    alignSelf: 'stretch',
    minWidth: 200,
    paddingVertical: 10,
    paddingHorizontal: 12,
    backgroundColor: '#f5f5f5',
    borderRadius: 8,
    borderWidth: 1,
    borderColor: '#333',
    // Platform-appropriate drop shadow.
    ...Platform.select({
      ios: {
        shadowColor: '#000',
        shadowOffset: { width: 0, height: 2 },
        shadowOpacity: 0.15,
        shadowRadius: 4,
      },
      android: {
        elevation: 4,
      },
    }),
  },
  // Row holding play/pause, the time label and the progress bar.
  topRow: {
    flexDirection: 'row',
    alignItems: 'center',
  },
  playPause: {
    padding: 4,
    marginRight: 12,
    flexShrink: 0,
  },
  timeText: {
    color: '#000',
    fontSize: 12,
    marginRight: 10,
    minWidth: 48,
  },
  // Progress bar: outer touch area, inner track, fill proportion.
  progressTrack: {
    flex: 1,
    minWidth: 40,
    height: 6,
    justifyContent: 'center',
    marginRight: 10,
  },
  progressTrackInner: {
    flex: 1,
    height: 6,
    backgroundColor: '#ccc',
    borderRadius: 3,
    overflow: 'hidden',
  },
  progressFill: {
    height: '100%',
    backgroundColor: '#000',
    borderRadius: 3,
  },
  volumeIcon: {
    padding: 4,
    flexShrink: 0,
  },
  // Second row centering the volume slider.
  bottomRow: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'center',
    marginTop: 8,
  },
  // Volume slider: layout container, pan-responder surface, track, fill.
  volumeTrack: {
    width: '50%',
    height: 10,
    flexDirection: 'column',
    justifyContent: 'center',
  },
  volumeTrackPan: {
    flex: 1,
    width: '100%',
    justifyContent: 'center',
  },
  volumeTrackInner: {
    flex: 1,
    height: 6,
    backgroundColor: '#ccc',
    borderRadius: 3,
    overflow: 'hidden',
  },
  volumeFill: {
    height: '100%',
    backgroundColor: '#000',
    borderRadius: 3,
  },
  // Shown while the audio source is still loading.
  loadingRow: {
    flexDirection: 'row',
    alignItems: 'center',
  },
  loadingText: {
    color: '#333',
    fontSize: 14,
  },
});
+
const AudioControls: React.FC = () => {
  // Built-in playback UI rendered when the Audio tag has `controls`:
  // play/pause, elapsed/total time, a progress bar, a mute toggle and a
  // drag-to-set volume slider, all driven by the audio tag context.
  const {
    isReady,
    play,
    pause,
    playbackState,
    volume,
    setVolume,
    muted,
    setMuted,
    currentTime,
    duration,
  } = useAudioTagContext();

  // Measured width of the volume track; needed to convert a touch
  // x-position into a 0..1 volume fraction.
  const [trackWidth, setTrackWidth] = useState(0);

  const updateVolumeFromPosition = useCallback(
    (locationX: number) => {
      if (trackWidth <= 0) return;
      // Clamp to [0, 1] so drags past either end of the track saturate.
      const pct = Math.max(0, Math.min(1, locationX / trackWidth));
      setVolume(pct);
    },
    [trackWidth, setVolume]
  );

  // Pan responder lets the user both tap and drag along the volume track.
  const panResponder = useMemo(
    () =>
      PanResponder.create({
        onStartShouldSetPanResponder: () => true,
        onMoveShouldSetPanResponder: () => true,
        onPanResponderGrant: (e) =>
          updateVolumeFromPosition(e.nativeEvent.locationX),
        onPanResponderMove: (e) =>
          updateVolumeFromPosition(e.nativeEvent.locationX),
      }),
    [updateVolumeFromPosition]
  );

  // Single button toggles between play and pause based on current state.
  const onPlayPausePress = useCallback(() => {
    if (playbackState === 'playing') {
      pause();
    } else {
      play();
    }
  }, [playbackState, pause, play]);

  const onVolumeTrackLayout = useCallback((e: LayoutChangeEvent) => {
    setTrackWidth(e.nativeEvent.layout.width);
  }, []);

  // NOTE(review): leftover debug logging — remove before release.
  useEffect(() => {
    console.log('playbackState', playbackState);
  }, [playbackState]);

  // Until the source has loaded, show a spinner instead of the controls.
  // NOTE(review): the JSX element tags below appear stripped from this
  // patch — confirm against the original file.
  if (!isReady) {
    return (


      Loading…


    );
  }

  // Fraction of the track to fill; guards duration === 0 (avoids NaN).
  const progress = duration > 0 ? currentTime / duration : 0;

  // NOTE(review): the JSX element tags below appear stripped from this
  // patch — confirm against the original file.
  return (



      {playbackState === 'playing' ? (

      ) : (

      )}



      {formatTime(currentTime)} / {formatTime(duration)}








      setMuted(!muted)}>
      {muted ? (

      ) : (

      )}













  );
};
+
+export default AudioControls;
diff --git a/packages/react-native-audio-api/src/development/react/Audio/AudioTagContext.ts b/packages/react-native-audio-api/src/development/react/Audio/AudioTagContext.ts
new file mode 100644
index 000000000..9d8699fe3
--- /dev/null
+++ b/packages/react-native-audio-api/src/development/react/Audio/AudioTagContext.ts
@@ -0,0 +1,29 @@
+import { createContext, useContext } from 'react';
+import type { AudioTagPlaybackState } from './types';
+
// The imperative + observed surface an Audio tag exposes to its
// descendants (custom controls, volume sliders, play buttons, ...).
export type AudioComponentContextType = {
  play: () => void;
  pause: () => void;
  // Current volume as a 0..1 fraction (the built-in controls clamp to
  // this range before calling setVolume).
  volume: number;
  setVolume: (volume: number) => void;
  muted: boolean;
  setMuted: (muted: boolean) => void;
  // True once the audio source has loaded and playback can start.
  isReady: boolean;
  playbackState: AudioTagPlaybackState;
  // Playback position and total length, in seconds.
  currentTime: number;
  duration: number;
};

// Defaults to undefined so the consumer hook can detect (and reject)
// use outside of an Audio tag.
export const AudioComponentContext = createContext<
  AudioComponentContextType | undefined
>(undefined);
+
+export function useAudioTagContext(): AudioComponentContextType {
+ const context = useContext(AudioComponentContext);
+
+ if (context === undefined) {
+ throw new Error('useAudioTagContext must be used within an