forked from Green-Sky/tomato
1234 lines
42 KiB
C
1234 lines
42 KiB
C
/*
|
|
Simple DirectMedia Layer
|
|
Copyright (C) 1997-2023 Sam Lantinga <slouken@libsdl.org>
|
|
|
|
This software is provided 'as-is', without any express or implied
|
|
warranty. In no event will the authors be held liable for any damages
|
|
arising from the use of this software.
|
|
|
|
Permission is granted to anyone to use this software for any purpose,
|
|
including commercial applications, and to alter it and redistribute it
|
|
freely, subject to the following restrictions:
|
|
|
|
1. The origin of this software must not be misrepresented; you must not
|
|
claim that you wrote the original software. If you use this software
|
|
in a product, an acknowledgment in the product documentation would be
|
|
appreciated but is not required.
|
|
2. Altered source versions must be plainly marked as such, and must not be
|
|
misrepresented as being the original software.
|
|
3. This notice may not be removed or altered from any source distribution.
|
|
*/
|
|
#include "SDL_internal.h"
|
|
|
|
#include "SDL_audio_c.h"
|
|
|
|
#include "SDL_audioqueue.h"
|
|
#include "SDL_audioresample.h"
|
|
|
|
#ifndef SDL_INT_MAX
|
|
#define SDL_INT_MAX ((int)(~0u>>1))
|
|
#endif
|
|
|
|
/*
|
|
* CHANNEL LAYOUTS AS SDL EXPECTS THEM:
|
|
*
|
|
* (Even if the platform expects something else later, that
|
|
* SDL will swizzle between the app and the platform).
|
|
*
|
|
* Abbreviations:
|
|
* - FRONT=single mono speaker
|
|
* - FL=front left speaker
|
|
* - FR=front right speaker
|
|
* - FC=front center speaker
|
|
* - BL=back left speaker
|
|
* - BR=back right speaker
|
|
* - SR=surround right speaker
|
|
* - SL=surround left speaker
|
|
* - BC=back center speaker
|
|
* - LFE=low-frequency speaker
|
|
*
|
|
* These are listed in the order they are laid out in
|
|
* memory, so "FL+FR" means "the front left speaker is
|
|
* layed out in memory first, then the front right, then
|
|
* it repeats for the next audio frame".
|
|
*
|
|
* 1 channel (mono) layout: FRONT
|
|
* 2 channels (stereo) layout: FL+FR
|
|
* 3 channels (2.1) layout: FL+FR+LFE
|
|
* 4 channels (quad) layout: FL+FR+BL+BR
|
|
* 5 channels (4.1) layout: FL+FR+LFE+BL+BR
|
|
* 6 channels (5.1) layout: FL+FR+FC+LFE+BL+BR
|
|
* 7 channels (6.1) layout: FL+FR+FC+LFE+BC+SL+SR
|
|
* 8 channels (7.1) layout: FL+FR+FC+LFE+BL+BR+SL+SR
|
|
*/
|
|
|
|
#ifdef SDL_SSE3_INTRINSICS
|
|
// Convert from stereo to mono. Average left and right.
|
|
static void SDL_TARGETING("sse3") SDL_ConvertStereoToMono_SSE3(float *dst, const float *src, int num_frames)
|
|
{
|
|
LOG_DEBUG_AUDIO_CONVERT("stereo", "mono (using SSE3)");
|
|
|
|
const __m128 divby2 = _mm_set1_ps(0.5f);
|
|
int i = num_frames;
|
|
|
|
/* Do SSE blocks as long as we have 16 bytes available.
|
|
Just use unaligned load/stores, if the memory at runtime is
|
|
aligned it'll be just as fast on modern processors */
|
|
while (i >= 4) { // 4 * float32
|
|
_mm_storeu_ps(dst, _mm_mul_ps(_mm_hadd_ps(_mm_loadu_ps(src), _mm_loadu_ps(src + 4)), divby2));
|
|
i -= 4;
|
|
src += 8;
|
|
dst += 4;
|
|
}
|
|
|
|
// Finish off any leftovers with scalar operations.
|
|
while (i) {
|
|
*dst = (src[0] + src[1]) * 0.5f;
|
|
dst++;
|
|
i--;
|
|
src += 2;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
#ifdef SDL_SSE_INTRINSICS
|
|
// Convert from mono to stereo. Duplicate to stereo left and right.
|
|
static void SDL_TARGETING("sse") SDL_ConvertMonoToStereo_SSE(float *dst, const float *src, int num_frames)
|
|
{
|
|
LOG_DEBUG_AUDIO_CONVERT("mono", "stereo (using SSE)");
|
|
|
|
// convert backwards, since output is growing in-place.
|
|
src += (num_frames-4) * 1;
|
|
dst += (num_frames-4) * 2;
|
|
|
|
/* Do SSE blocks as long as we have 16 bytes available.
|
|
Just use unaligned load/stores, if the memory at runtime is
|
|
aligned it'll be just as fast on modern processors */
|
|
// convert backwards, since output is growing in-place.
|
|
int i = num_frames;
|
|
while (i >= 4) { // 4 * float32
|
|
const __m128 input = _mm_loadu_ps(src); // A B C D
|
|
_mm_storeu_ps(dst, _mm_unpacklo_ps(input, input)); // A A B B
|
|
_mm_storeu_ps(dst + 4, _mm_unpackhi_ps(input, input)); // C C D D
|
|
i -= 4;
|
|
src -= 4;
|
|
dst -= 8;
|
|
}
|
|
|
|
// Finish off any leftovers with scalar operations.
|
|
src += 3;
|
|
dst += 6; // adjust for smaller buffers.
|
|
while (i) { // convert backwards, since output is growing in-place.
|
|
const float srcFC = src[0];
|
|
dst[1] /* FR */ = srcFC;
|
|
dst[0] /* FL */ = srcFC;
|
|
i--;
|
|
src--;
|
|
dst -= 2;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
// Include the autogenerated channel converters...
|
|
#include "SDL_audio_channel_converters.h"
|
|
|
|
|
|
// Byteswap num_samples samples of `bitsize` bits (16 or 32) from src to dst.
// dst may alias src (in-place swap is fine: each element is read once, then written).
static void AudioConvertByteswap(void *dst, const void *src, int num_samples, int bitsize)
{
#if DEBUG_AUDIO_CONVERT
    SDL_Log("SDL_AUDIO_CONVERT: Converting %d-bit byte order", bitsize);
#endif

    switch (bitsize) {
// Stamp out one case per sample width; ## pasting picks the matching
// UintN type and SDL_SwapN function.
#define CASESWAP(b) \
    case b: { \
        const Uint##b *tsrc = (const Uint##b *)src; \
        Uint##b *tdst = (Uint##b *)dst; \
        for (int i = 0; i < num_samples; i++) { \
            tdst[i] = SDL_Swap##b(tsrc[i]); \
        } \
        break; \
    }

    CASESWAP(16);
    CASESWAP(32);

#undef CASESWAP

    default:
        // 8-bit formats never need a swap; anything else is a caller bug.
        SDL_assert(!"unhandled byteswap datatype!");
        break;
    }
}
|
|
|
|
// Widen num_samples integer samples to native float32.
// Byte order is normalized elsewhere, so the endian bit is masked off and
// both LE and BE variants of a type land on the same case.
static void AudioConvertToFloat(float *dst, const void *src, int num_samples, SDL_AudioFormat src_fmt)
{
    switch (src_fmt & ~SDL_AUDIO_MASK_BIG_ENDIAN) {
    case SDL_AUDIO_S8:
        SDL_Convert_S8_to_F32(dst, (const Sint8 *)src, num_samples);
        break;
    case SDL_AUDIO_U8:
        SDL_Convert_U8_to_F32(dst, (const Uint8 *)src, num_samples);
        break;
    case SDL_AUDIO_S16LE:
        SDL_Convert_S16_to_F32(dst, (const Sint16 *)src, num_samples);
        break;
    case SDL_AUDIO_S32LE:
        SDL_Convert_S32_to_F32(dst, (const Sint32 *)src, num_samples);
        break;
    default:
        SDL_assert(!"Unexpected audio format!");
        break;
    }
}
|
|
|
|
// Narrow num_samples native float32 samples to the integer format `dst_fmt`.
// Byte order is normalized elsewhere, so the endian bit is masked off and
// both LE and BE variants of a type land on the same case.
static void AudioConvertFromFloat(void *dst, const float *src, int num_samples, SDL_AudioFormat dst_fmt)
{
    switch (dst_fmt & ~SDL_AUDIO_MASK_BIG_ENDIAN) {
    case SDL_AUDIO_S8:
        SDL_Convert_F32_to_S8((Sint8 *)dst, src, num_samples);
        break;
    case SDL_AUDIO_U8:
        SDL_Convert_F32_to_U8((Uint8 *)dst, src, num_samples);
        break;
    case SDL_AUDIO_S16LE:
        SDL_Convert_F32_to_S16((Sint16 *)dst, src, num_samples);
        break;
    case SDL_AUDIO_S32LE:
        SDL_Convert_F32_to_S32((Sint32 *)dst, src, num_samples);
        break;
    default:
        SDL_assert(!"Unexpected audio format!");
        break;
    }
}
|
|
|
|
static SDL_bool SDL_IsSupportedAudioFormat(const SDL_AudioFormat fmt)
|
|
{
|
|
switch (fmt) {
|
|
case SDL_AUDIO_U8:
|
|
case SDL_AUDIO_S8:
|
|
case SDL_AUDIO_S16LE:
|
|
case SDL_AUDIO_S16BE:
|
|
case SDL_AUDIO_S32LE:
|
|
case SDL_AUDIO_S32BE:
|
|
case SDL_AUDIO_F32LE:
|
|
case SDL_AUDIO_F32BE:
|
|
return SDL_TRUE; // supported.
|
|
|
|
default:
|
|
break;
|
|
}
|
|
|
|
return SDL_FALSE; // unsupported.
|
|
}
|
|
|
|
static SDL_bool SDL_IsSupportedChannelCount(const int channels)
|
|
{
|
|
return ((channels >= 1) && (channels <= 8)) ? SDL_TRUE : SDL_FALSE;
|
|
}
|
|
|
|
|
|
// This does type and channel conversions _but not resampling_ (resampling happens in SDL_AudioStream).
// This does not check parameter validity, (beyond asserts), it expects you did that already!
// All of this has to function as if src==dst==scratch (conversion in-place), but as a convenience
// if you're just going to copy the final output elsewhere, you can specify a different output pointer.
//
// The scratch buffer must be able to store `num_frames * CalculateMaxSampleFrameSize(src_format, src_channels, dst_format, dst_channels)` bytes.
// If the scratch buffer is NULL, this restriction applies to the output buffer instead.
void ConvertAudio(int num_frames, const void *src, SDL_AudioFormat src_format, int src_channels,
                  void *dst, SDL_AudioFormat dst_format, int dst_channels, void* scratch)
{
    SDL_assert(src != NULL);
    SDL_assert(dst != NULL);
    SDL_assert(SDL_IsSupportedAudioFormat(src_format));
    SDL_assert(SDL_IsSupportedAudioFormat(dst_format));
    SDL_assert(SDL_IsSupportedChannelCount(src_channels));
    SDL_assert(SDL_IsSupportedChannelCount(dst_channels));

    if (!num_frames) {
        return; // no data to convert, quit.
    }

#if DEBUG_AUDIO_CONVERT
    SDL_Log("SDL_AUDIO_CONVERT: Convert format %04x->%04x, channels %u->%u", src_format, dst_format, src_channels, dst_channels);
#endif

    const int src_bitsize = (int) SDL_AUDIO_BITSIZE(src_format);
    const int dst_bitsize = (int) SDL_AUDIO_BITSIZE(dst_format);

    const int dst_sample_frame_size = (dst_bitsize / 8) * dst_channels;

    /* Type conversion goes like this now:
        - byteswap to CPU native format first if necessary.
        - convert to native Float32 if necessary.
        - change channel count if necessary.
        - convert to final data format.
        - byteswap back to foreign format if necessary.

       The expectation is we can process data faster in float32
       (possibly with SIMD), and making several passes over the same
       buffer is likely to be CPU cache-friendly, avoiding the
       biggest performance hit in modern times. Previously we had
       (script-generated) custom converters for every data type and
       it was a bloat on SDL compile times and final library size. */

    // see if we can skip float conversion entirely.
    if (src_channels == dst_channels) {
        if (src_format == dst_format) {
            // nothing to do, we're already in the right format, just copy it over if necessary.
            if (src != dst) {
                SDL_memcpy(dst, src, num_frames * dst_sample_frame_size);
            }
            return;
        }

        // just a byteswap needed?
        if ((src_format & ~SDL_AUDIO_MASK_BIG_ENDIAN) == (dst_format & ~SDL_AUDIO_MASK_BIG_ENDIAN)) {
            if (src_bitsize == 8) {
                if (src != dst) {
                    SDL_memcpy(dst, src, num_frames * dst_sample_frame_size);
                }
                return; // nothing to do, it's a 1-byte format.
            }
            AudioConvertByteswap(dst, src, num_frames * src_channels, src_bitsize);
            return; // all done.
        }
    }

    // From here on, the full pipeline runs; intermediate passes land in
    // scratch (or dst if no scratch was provided).
    if (scratch == NULL) {
        scratch = dst;
    }

    // "byteswap needed" == sample is foreign-endian AND wider than one byte.
    const SDL_bool srcbyteswap = (SDL_AUDIO_ISBIGENDIAN(src_format) != 0) == (SDL_BYTEORDER == SDL_LIL_ENDIAN) && (src_bitsize > 8);
    const SDL_bool srcconvert = !SDL_AUDIO_ISFLOAT(src_format);
    const SDL_bool channelconvert = src_channels != dst_channels;
    const SDL_bool dstconvert = !SDL_AUDIO_ISFLOAT(dst_format);
    const SDL_bool dstbyteswap = (SDL_AUDIO_ISBIGENDIAN(dst_format) != 0) == (SDL_BYTEORDER == SDL_LIL_ENDIAN) && (dst_bitsize > 8);

    // make sure we're in native byte order.
    if (srcbyteswap) {
        // No point writing straight to dst. If we only need a byteswap, we wouldn't be here.
        AudioConvertByteswap(scratch, src, num_frames * src_channels, src_bitsize);
        src = scratch;
    }

    // get us to float format.
    if (srcconvert) {
        // Write directly to dst only if this is the final pass.
        void* buf = (channelconvert || dstconvert || dstbyteswap) ? scratch : dst;
        AudioConvertToFloat((float *) buf, src, num_frames * src_channels, src_format);
        src = buf;
    }

    // Channel conversion

    if (channelconvert) {
        SDL_AudioChannelConverter channel_converter;
        SDL_AudioChannelConverter override = NULL;

        // SDL_IsSupportedChannelCount should have caught these asserts, or we added a new format and forgot to update the table.
        SDL_assert(src_channels <= SDL_arraysize(channel_converters));
        SDL_assert(dst_channels <= SDL_arraysize(channel_converters[0]));

        channel_converter = channel_converters[src_channels - 1][dst_channels - 1];
        SDL_assert(channel_converter != NULL);

        // swap in some SIMD versions for a few of these.
        if (channel_converter == SDL_ConvertStereoToMono) {
            #ifdef SDL_SSE3_INTRINSICS
            if (!override && SDL_HasSSE3()) { override = SDL_ConvertStereoToMono_SSE3; }
            #endif
        } else if (channel_converter == SDL_ConvertMonoToStereo) {
            #ifdef SDL_SSE_INTRINSICS
            if (!override && SDL_HasSSE()) { override = SDL_ConvertMonoToStereo_SSE; }
            #endif
        }

        if (override) {
            channel_converter = override;
        }

        // Write directly to dst only if this is the final pass.
        void* buf = (dstconvert || dstbyteswap) ? scratch : dst;
        channel_converter((float *) buf, (const float *) src, num_frames);
        src = buf;
    }

    // Resampling is not done in here. SDL_AudioStream handles that.

    // Move to final data type.
    if (dstconvert) {
        AudioConvertFromFloat(dst, (const float *) src, num_frames * dst_channels, dst_format);
        src = dst;
    }

    // make sure we're in final byte order.
    if (dstbyteswap) {
        AudioConvertByteswap(dst, src, num_frames * dst_channels, dst_bitsize);
        src = dst; // we've written to dst, future work will convert in-place.
    }

    SDL_assert(src == dst); // if we got here, we _had_ to have done _something_. Otherwise, we should have memcpy'd!
}
|
|
|
|
// Calculate the largest frame size needed to convert between the two formats.
|
|
static int CalculateMaxFrameSize(SDL_AudioFormat src_format, int src_channels, SDL_AudioFormat dst_format, int dst_channels)
|
|
{
|
|
const int src_format_size = SDL_AUDIO_BYTESIZE(src_format);
|
|
const int dst_format_size = SDL_AUDIO_BYTESIZE(dst_format);
|
|
const int max_app_format_size = SDL_max(src_format_size, dst_format_size);
|
|
const int max_format_size = SDL_max(max_app_format_size, sizeof (float)); // ConvertAudio and ResampleAudio use floats.
|
|
const int max_channels = SDL_max(src_channels, dst_channels);
|
|
return max_format_size * max_channels;
|
|
}
|
|
|
|
// Compute the resampler's rate for this stream, after applying the stream's
// frequency ratio to the nominal source rate. Returns 0 to mean "no
// resampling needed at all".
static Sint64 GetAudioStreamResampleRate(SDL_AudioStream* stream, int src_freq, Sint64 resample_offset)
{
    // Scale the source rate by the requested playback-speed ratio.
    const int scaled_freq = (int)((float)src_freq * stream->freq_ratio);
    const Sint64 resample_rate = SDL_GetResampleRate(scaled_freq, stream->dst_spec.freq);

    // If src_freq == dst_freq, and we aren't between frames, don't resample.
    // (0x100000000 is presumably a 1:1 ratio in the resampler's fixed-point
    // representation — see SDL_audioresample.)
    if ((resample_rate == 0x100000000) && (resample_offset == 0)) {
        return 0;
    }

    return resample_rate;
}
|
|
|
|
// Switch the stream's current input spec to `spec`, (re)sizing the resampler
// history buffer to match and resetting it to silence.
// Returns 0 on success, -1 (via SDL_OutOfMemory) on allocation failure —
// in which case the old spec and buffer are left untouched.
static int UpdateAudioStreamInputSpec(SDL_AudioStream *stream, const SDL_AudioSpec *spec)
{
    if (AUDIO_SPECS_EQUAL(stream->input_spec, *spec)) {
        return 0; // no change; keep current history contents.
    }

    const size_t history_buffer_allocation = SDL_GetResamplerHistoryFrames() * SDL_AUDIO_FRAMESIZE(*spec);
    Uint8 *history_buffer = stream->history_buffer;

    // Grow the history buffer if the new frame size needs more room.
    // Allocate the replacement BEFORE freeing the old one so failure
    // leaves the stream in its previous, valid state.
    if (stream->history_buffer_allocation < history_buffer_allocation) {
        history_buffer = (Uint8 *) SDL_aligned_alloc(SDL_SIMDGetAlignment(), history_buffer_allocation);
        if (!history_buffer) {
            return SDL_OutOfMemory();
        }
        SDL_aligned_free(stream->history_buffer);
        stream->history_buffer = history_buffer;
        stream->history_buffer_allocation = history_buffer_allocation;
    }

    // Past data is meaningless under a new spec: prime with silence.
    SDL_memset(history_buffer, SDL_GetSilenceValueForFormat(spec->format), history_buffer_allocation);
    SDL_copyp(&stream->input_spec, spec);

    return 0;
}
|
|
|
|
SDL_AudioStream *SDL_CreateAudioStream(const SDL_AudioSpec *src_spec, const SDL_AudioSpec *dst_spec)
|
|
{
|
|
if (!SDL_WasInit(SDL_INIT_AUDIO)) {
|
|
SDL_SetError("Audio subsystem is not initialized");
|
|
return NULL;
|
|
}
|
|
|
|
SDL_AudioStream *retval = (SDL_AudioStream *)SDL_calloc(1, sizeof(SDL_AudioStream));
|
|
if (retval == NULL) {
|
|
SDL_OutOfMemory();
|
|
return NULL;
|
|
}
|
|
|
|
retval->freq_ratio = 1.0f;
|
|
retval->queue = SDL_CreateAudioQueue(4096);
|
|
|
|
if (retval->queue == NULL) {
|
|
SDL_free(retval);
|
|
return NULL;
|
|
}
|
|
|
|
retval->lock = SDL_CreateMutex();
|
|
if (retval->lock == NULL) {
|
|
SDL_free(retval->queue);
|
|
SDL_free(retval);
|
|
return NULL;
|
|
}
|
|
|
|
OnAudioStreamCreated(retval);
|
|
|
|
if (SDL_SetAudioStreamFormat(retval, src_spec, dst_spec) == -1) {
|
|
SDL_DestroyAudioStream(retval);
|
|
return NULL;
|
|
}
|
|
|
|
return retval;
|
|
}
|
|
|
|
// Install (or clear, with callback==NULL) the stream's "get" callback.
int SDL_SetAudioStreamGetCallback(SDL_AudioStream *stream, SDL_AudioStreamCallback callback, void *userdata)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    // Swap both fields under the lock so no reader ever observes a
    // half-updated callback/userdata pair.
    SDL_LockMutex(stream->lock);
    stream->get_callback = callback;
    stream->get_callback_userdata = userdata;
    SDL_UnlockMutex(stream->lock);

    return 0;
}
|
|
|
|
// Install (or clear, with callback==NULL) the stream's "put" callback.
int SDL_SetAudioStreamPutCallback(SDL_AudioStream *stream, SDL_AudioStreamCallback callback, void *userdata)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    // Swap both fields under the lock so no reader ever observes a
    // half-updated callback/userdata pair.
    SDL_LockMutex(stream->lock);
    stream->put_callback = callback;
    stream->put_callback_userdata = userdata;
    SDL_UnlockMutex(stream->lock);

    return 0;
}
|
|
|
|
// Acquire the stream's mutex on behalf of the app.
int SDL_LockAudioStream(SDL_AudioStream *stream)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }
    return SDL_LockMutex(stream->lock);
}
|
|
|
|
// Release the stream's mutex on behalf of the app.
int SDL_UnlockAudioStream(SDL_AudioStream *stream)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }
    return SDL_UnlockMutex(stream->lock);
}
|
|
|
|
// Copy out the stream's source and/or destination spec (either pointer may
// be NULL to skip that end). Returns -1 if a requested end was never set.
int SDL_GetAudioStreamFormat(SDL_AudioStream *stream, SDL_AudioSpec *src_spec, SDL_AudioSpec *dst_spec)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    // Snapshot both specs under the lock; validate the copies afterwards.
    SDL_LockMutex(stream->lock);
    if (src_spec != NULL) {
        SDL_copyp(src_spec, &stream->src_spec);
    }
    if (dst_spec != NULL) {
        SDL_copyp(dst_spec, &stream->dst_spec);
    }
    SDL_UnlockMutex(stream->lock);

    // A zero format means that end was never configured.
    if (src_spec && (src_spec->format == 0)) {
        return SDL_SetError("Stream has no source format");
    }
    if (dst_spec && (dst_spec->format == 0)) {
        return SDL_SetError("Stream has no destination format");
    }

    return 0;
}
|
|
|
|
// Change the stream's source and/or destination spec (either pointer may be
// NULL to leave that end alone). All validation happens before anything is
// modified, so a failure leaves the stream untouched.
int SDL_SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *src_spec, const SDL_AudioSpec *dst_spec)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    // Sample-rate bounds. Picked mostly arbitrarily.
    static const int min_freq = 4000;
    static const int max_freq = 384000;

    if (src_spec != NULL) {
        if (!SDL_IsSupportedAudioFormat(src_spec->format)) {
            return SDL_InvalidParamError("src_spec->format");
        }
        if (!SDL_IsSupportedChannelCount(src_spec->channels)) {
            return SDL_InvalidParamError("src_spec->channels");
        }
        if (src_spec->freq <= 0) {
            return SDL_InvalidParamError("src_spec->freq");
        }
        if (src_spec->freq < min_freq) {
            return SDL_SetError("Source rate is too low");
        }
        if (src_spec->freq > max_freq) {
            return SDL_SetError("Source rate is too high");
        }
    }

    if (dst_spec != NULL) {
        if (!SDL_IsSupportedAudioFormat(dst_spec->format)) {
            return SDL_InvalidParamError("dst_spec->format");
        }
        if (!SDL_IsSupportedChannelCount(dst_spec->channels)) {
            return SDL_InvalidParamError("dst_spec->channels");
        }
        if (dst_spec->freq <= 0) {
            return SDL_InvalidParamError("dst_spec->freq");
        }
        if (dst_spec->freq < min_freq) {
            return SDL_SetError("Destination rate is too low");
        }
        if (dst_spec->freq > max_freq) {
            return SDL_SetError("Destination rate is too high");
        }
    }

    SDL_LockMutex(stream->lock);

    // quietly refuse to change the format of the end currently bound to a device.
    if (stream->bound_device != NULL) {
        if (stream->bound_device->physical_device->iscapture) {
            dst_spec = NULL; // capture device owns the output end.
        } else {
            src_spec = NULL; // playback device owns the input end.
        }
    }

    if (src_spec != NULL) {
        SDL_copyp(&stream->src_spec, src_spec);
    }
    if (dst_spec != NULL) {
        SDL_copyp(&stream->dst_spec, dst_spec);
    }

    SDL_UnlockMutex(stream->lock);

    return 0;
}
|
|
|
|
// Report the stream's current playback-speed ratio (0.0f on invalid stream).
float SDL_GetAudioStreamFrequencyRatio(SDL_AudioStream *stream)
{
    if (stream == NULL) {
        SDL_InvalidParamError("stream");
        return 0.0f;
    }

    // Read under the lock to get a coherent value.
    SDL_LockMutex(stream->lock);
    const float ratio = stream->freq_ratio;
    SDL_UnlockMutex(stream->lock);

    return ratio;
}
|
|
|
|
// Set the stream's playback-speed ratio (1.0f == normal speed).
int SDL_SetAudioStreamFrequencyRatio(SDL_AudioStream *stream, float freq_ratio)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    // Accepted range. Picked mostly arbitrarily.
    static const float min_freq_ratio = 0.01f;
    static const float max_freq_ratio = 100.0f;

    if (freq_ratio < min_freq_ratio) {
        return SDL_SetError("Frequency ratio is too low");
    }
    if (freq_ratio > max_freq_ratio) {
        return SDL_SetError("Frequency ratio is too high");
    }

    SDL_LockMutex(stream->lock);
    stream->freq_ratio = freq_ratio;
    SDL_UnlockMutex(stream->lock);

    return 0;
}
|
|
|
|
// Verify both ends of the stream have been given a format.
// (A zero format means SDL_SetAudioStreamFormat never configured that end.)
static int CheckAudioStreamIsFullySetup(SDL_AudioStream *stream)
{
    if (stream->src_spec.format == 0) {
        return SDL_SetError("Stream has no source format");
    }
    if (stream->dst_spec.format == 0) {
        return SDL_SetError("Stream has no destination format");
    }
    return 0;
}
|
|
|
|
// Queue `len` bytes of audio (in the stream's source format) for conversion.
// `len` must be a multiple of the source frame size. Returns 0 on success,
// -1 on failure with the SDL error set.
int SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
{
#if DEBUG_AUDIOSTREAM
    SDL_Log("AUDIOSTREAM: wants to put %d bytes", len);
#endif

    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    } else if (buf == NULL) {
        return SDL_InvalidParamError("buf");
    } else if (len < 0) {
        return SDL_InvalidParamError("len");
    } else if (len == 0) {
        return 0; // nothing to do.
    }

    SDL_LockMutex(stream->lock);

    if (CheckAudioStreamIsFullySetup(stream) != 0) {
        SDL_UnlockMutex(stream->lock);
        return -1; // error already set by the check.
    }

    if ((len % SDL_AUDIO_FRAMESIZE(stream->src_spec)) != 0) {
        SDL_UnlockMutex(stream->lock);
        return SDL_SetError("Can't add partial sample frames");
    }

    SDL_AudioTrack* track = NULL;

    // When copying in large amounts of data, try and do as much work as possible
    // outside of the stream lock, otherwise the output device is likely to be starved.
    const int large_input_thresh = 1024 * 1024;

    if (len >= large_input_thresh) {
        // Snapshot the spec so the big copy can run unlocked; the track is
        // built from the snapshot, then attached after re-locking.
        SDL_AudioSpec src_spec;
        SDL_copyp(&src_spec, &stream->src_spec);

        SDL_UnlockMutex(stream->lock);

        size_t chunk_size = SDL_GetAudioQueueChunkSize(stream->queue);
        track = SDL_CreateChunkedAudioTrack(&src_spec, buf, len, chunk_size);

        if (track == NULL) {
            return -1; // lock already released; error set by track creation.
        }

        SDL_LockMutex(stream->lock);
    }

    // Remember how much was available so the put_callback can be told how
    // much this call added.
    const int prev_available = stream->put_callback ? SDL_GetAudioStreamAvailable(stream) : 0;

    int retval = 0;

    if (track != NULL) {
        SDL_AddTrackToAudioQueue(stream->queue, track);
    } else {
        retval = SDL_WriteToAudioQueue(stream->queue, &stream->src_spec, buf, len);
    }

    if (retval == 0) {
        stream->total_bytes_queued += len;
        if (stream->put_callback) {
            const int newavail = SDL_GetAudioStreamAvailable(stream) - prev_available;
            stream->put_callback(stream->put_callback_userdata, stream, newavail, newavail);
        }
    }

    SDL_UnlockMutex(stream->lock);

    return retval;
}
|
|
|
|
// Mark all currently-queued input as complete so the tail of the data can
// be drained without waiting for more input.
int SDL_FlushAudioStream(SDL_AudioStream *stream)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    SDL_LockMutex(stream->lock);
    SDL_FlushAudioQueue(stream->queue);
    SDL_UnlockMutex(stream->lock);

    return 0;
}
|
|
|
|
/* this does not save the previous contents of stream->work_buffer. It's a work buffer!!
|
|
The returned buffer is aligned/padded for use with SIMD instructions. */
|
|
static Uint8 *EnsureAudioStreamWorkBufferSize(SDL_AudioStream *stream, size_t newlen)
|
|
{
|
|
if (stream->work_buffer_allocation >= newlen) {
|
|
return stream->work_buffer;
|
|
}
|
|
|
|
Uint8 *ptr = (Uint8 *) SDL_aligned_alloc(SDL_SIMDGetAlignment(), newlen);
|
|
if (ptr == NULL) {
|
|
SDL_OutOfMemory();
|
|
return NULL; // previous work buffer is still valid!
|
|
}
|
|
|
|
SDL_aligned_free(stream->work_buffer);
|
|
stream->work_buffer = ptr;
|
|
stream->work_buffer_allocation = newlen;
|
|
return ptr;
|
|
}
|
|
|
|
// Roll the newest input into the resampler history buffer, and (optionally)
// emit the resampler's left padding from the history that existed BEFORE
// this update. `left_padding` may be NULL (with padding_bytes == 0) when no
// resampling is happening but the history still must track the input.
static void UpdateAudioStreamHistoryBuffer(SDL_AudioStream* stream,
    Uint8* input_buffer, int input_bytes, Uint8* left_padding, int padding_bytes)
{
    const int history_buffer_frames = SDL_GetResamplerHistoryFrames();

    // Even if we aren't currently resampling, we always need to update the history buffer
    Uint8 *history_buffer = stream->history_buffer;
    int history_bytes = history_buffer_frames * SDL_AUDIO_FRAMESIZE(stream->input_spec);

    if (left_padding != NULL) {
        // Fill in the left padding using the history buffer
        // (the tail of the history is the audio immediately before the new input).
        SDL_assert(padding_bytes <= history_bytes);
        SDL_memcpy(left_padding, history_buffer + history_bytes - padding_bytes, padding_bytes);
    }

    // Update the history buffer using the new input data
    if (input_bytes >= history_bytes) {
        // New input alone fills the history: keep only its tail.
        SDL_memcpy(history_buffer, input_buffer + (input_bytes - history_bytes), history_bytes);
    } else {
        // Shift the surviving tail of the old history down (overlapping
        // regions, hence memmove), then append all of the new input.
        int preserve_bytes = history_bytes - input_bytes;
        SDL_memmove(history_buffer, history_buffer + input_bytes, preserve_bytes);
        SDL_memcpy(history_buffer + preserve_bytes, input_buffer, input_bytes);
    }
}
|
|
|
|
// Advance the queue iterator one track and return how many OUTPUT frames
// that track can produce, carrying the resampler's fractional position in
// *inout_resample_offset across tracks. Optionally reports the track's spec
// and whether the track was flushed (finished).
static Sint64 NextAudioStreamIter(SDL_AudioStream* stream, void** inout_iter,
    Sint64* inout_resample_offset, SDL_AudioSpec* out_spec, SDL_bool* out_flushed)
{
    SDL_AudioSpec spec;
    SDL_bool flushed;
    size_t queued_bytes = SDL_NextAudioQueueIter(stream->queue, inout_iter, &spec, &flushed);

    if (out_spec) {
        SDL_copyp(out_spec, &spec);
    }

    // There is infinite audio available, whether or not we are resampling
    if (queued_bytes == SDL_SIZE_MAX) {
        *inout_resample_offset = 0;

        if (out_flushed) {
            *out_flushed = SDL_FALSE;
        }

        // Clamp "infinite" to the largest count callers can handle.
        return SDL_MAX_SINT32;
    }

    Sint64 resample_offset = *inout_resample_offset;
    Sint64 resample_rate = GetAudioStreamResampleRate(stream, spec.freq, resample_offset);
    Sint64 output_frames = (Sint64)(queued_bytes / SDL_AUDIO_FRAMESIZE(spec));

    if (resample_rate) {
        // Resampling requires padding frames to the left and right of the current position.
        // Past the end of the track, the right padding is filled with silence.
        // But we only want to do that if the track is actually finished (flushed).
        if (!flushed) {
            output_frames -= SDL_GetResamplerPaddingFrames(resample_rate);
        }

        // Translate input frames to output frames, updating the fractional
        // position as a side effect.
        output_frames = SDL_GetResamplerOutputFrames(output_frames, resample_rate, &resample_offset);
    }

    if (flushed) {
        // A finished track drains completely; the next track starts fresh.
        resample_offset = 0;
    }

    *inout_resample_offset = resample_offset;

    if (out_flushed) {
        *out_flushed = flushed;
    }

    return output_frames;
}
|
|
|
|
// Total output frames currently obtainable from the stream, summed across
// every queued track (clamped to SDL_MAX_SINT32). Optionally reports the
// resample offset the final track would leave behind.
static Sint64 GetAudioStreamAvailableFrames(SDL_AudioStream* stream, Sint64* out_resample_offset)
{
    Sint64 total_frames = 0;
    Sint64 resample_offset = stream->resample_offset;
    void* iter = SDL_BeginAudioQueueIter(stream->queue);

    while (iter != NULL) {
        total_frames += NextAudioStreamIter(stream, &iter, &resample_offset, NULL, NULL);

        // Already got loads of frames. Just clamp it to something reasonable
        if (total_frames >= SDL_MAX_SINT32) {
            total_frames = SDL_MAX_SINT32;
            break;
        }
    }

    if (out_resample_offset != NULL) {
        *out_resample_offset = resample_offset;
    }

    return total_frames;
}
|
|
|
|
// Inspect only the FIRST track in the queue: its spec, flushed state, and
// how many output frames it can produce. An empty queue reports a zeroed
// spec, not-flushed, and zero frames.
static Sint64 GetAudioStreamHead(SDL_AudioStream* stream, SDL_AudioSpec* out_spec, SDL_bool* out_flushed)
{
    void* iter = SDL_BeginAudioQueueIter(stream->queue);
    if (iter == NULL) {
        SDL_zerop(out_spec);
        *out_flushed = SDL_FALSE;
        return 0;
    }

    // Work on a copy of the offset; peeking must not disturb stream state.
    Sint64 offset = stream->resample_offset;
    return NextAudioStreamIter(stream, &iter, &offset, out_spec, out_flushed);
}
|
|
|
|
// You must hold stream->lock and validate your parameters before calling this!
|
|
// Enough input data MUST be available!
|
|
static int GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int output_frames)
|
|
{
|
|
const SDL_AudioSpec* src_spec = &stream->input_spec;
|
|
const SDL_AudioSpec* dst_spec = &stream->dst_spec;
|
|
|
|
const SDL_AudioFormat src_format = src_spec->format;
|
|
const int src_channels = src_spec->channels;
|
|
const int src_frame_size = SDL_AUDIO_FRAMESIZE(*src_spec);
|
|
|
|
const SDL_AudioFormat dst_format = dst_spec->format;
|
|
const int dst_channels = dst_spec->channels;
|
|
|
|
const int max_frame_size = CalculateMaxFrameSize(src_format, src_channels, dst_format, dst_channels);
|
|
const Sint64 resample_rate = GetAudioStreamResampleRate(stream, src_spec->freq, stream->resample_offset);
|
|
|
|
#if DEBUG_AUDIOSTREAM
|
|
SDL_Log("AUDIOSTREAM: asking for %d frames.", output_frames);
|
|
#endif
|
|
|
|
SDL_assert(output_frames > 0);
|
|
|
|
// Not resampling? It's an easy conversion (and maybe not even that!)
|
|
if (resample_rate == 0) {
|
|
Uint8* input_buffer = NULL;
|
|
|
|
// If no conversion is happening, read straight into the output buffer.
|
|
// Note, this is just to avoid extra copies.
|
|
// Some other formats may fit directly into the output buffer, but i'd rather process data in a SIMD-aligned buffer.
|
|
if ((src_format == dst_format) && (src_channels == dst_channels)) {
|
|
input_buffer = buf;
|
|
} else {
|
|
input_buffer = EnsureAudioStreamWorkBufferSize(stream, output_frames * max_frame_size);
|
|
|
|
if (!input_buffer) {
|
|
return -1;
|
|
}
|
|
}
|
|
|
|
const int input_bytes = output_frames * src_frame_size;
|
|
if (SDL_ReadFromAudioQueue(stream->queue, input_buffer, input_bytes) != 0) {
|
|
SDL_assert(!"Not enough data in queue (read)");
|
|
}
|
|
|
|
stream->total_bytes_queued -= input_bytes;
|
|
|
|
// Even if we aren't currently resampling, we always need to update the history buffer
|
|
UpdateAudioStreamHistoryBuffer(stream, input_buffer, input_bytes, NULL, 0);
|
|
|
|
// Convert the data, if necessary
|
|
if (buf != input_buffer) {
|
|
ConvertAudio(output_frames, input_buffer, src_format, src_channels, buf, dst_format, dst_channels, input_buffer);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
// Time to do some resampling!
|
|
// Calculate the number of input frames necessary for this request.
|
|
// Because resampling happens "between" frames, The same number of output_frames
|
|
// can require a different number of input_frames, depending on the resample_offset.
|
|
// Infact, input_frames can sometimes even be zero when upsampling.
|
|
const int input_frames = (int) SDL_GetResamplerInputFrames(output_frames, resample_rate, stream->resample_offset);
|
|
const int input_bytes = input_frames * src_frame_size;
|
|
|
|
const int resampler_padding_frames = SDL_GetResamplerPaddingFrames(resample_rate);
|
|
|
|
// If increasing channels, do it after resampling, since we'd just
|
|
// do more work to resample duplicate channels. If we're decreasing, do
|
|
// it first so we resample the interpolated data instead of interpolating
|
|
// the resampled data.
|
|
const int resample_channels = SDL_min(src_channels, dst_channels);
|
|
|
|
// The size of the frame used when resampling
|
|
const int resample_frame_size = resample_channels * sizeof(float);
|
|
|
|
// The main portion of the work_buffer can be used to store 3 things:
|
|
// src_sample_frame_size * (left_padding+input_buffer+right_padding)
|
|
// resample_frame_size * (left_padding+input_buffer+right_padding)
|
|
// dst_sample_frame_size * output_frames
|
|
//
|
|
// ResampleAudio also requires an additional buffer if it can't write straight to the output:
|
|
// resample_frame_size * output_frames
|
|
//
|
|
// Note, ConvertAudio requires (num_frames * max_sample_frame_size) of scratch space
|
|
const int work_buffer_frames = input_frames + (resampler_padding_frames * 2);
|
|
int work_buffer_capacity = work_buffer_frames * max_frame_size;
|
|
int resample_buffer_offset = -1;
|
|
|
|
// Check if we can resample directly into the output buffer.
|
|
// Note, this is just to avoid extra copies.
|
|
// Some other formats may fit directly into the output buffer, but i'd rather process data in a SIMD-aligned buffer.
|
|
if ((dst_format != SDL_AUDIO_F32) || (dst_channels != resample_channels)) {
|
|
// Allocate space for converting the resampled output to the destination format
|
|
int resample_convert_bytes = output_frames * max_frame_size;
|
|
work_buffer_capacity = SDL_max(work_buffer_capacity, resample_convert_bytes);
|
|
|
|
// SIMD-align the buffer
|
|
int simd_alignment = (int) SDL_SIMDGetAlignment();
|
|
work_buffer_capacity += simd_alignment - 1;
|
|
work_buffer_capacity -= work_buffer_capacity % simd_alignment;
|
|
|
|
// Allocate space for the resampled output
|
|
int resample_bytes = output_frames * resample_frame_size;
|
|
resample_buffer_offset = work_buffer_capacity;
|
|
work_buffer_capacity += resample_bytes;
|
|
}
|
|
|
|
Uint8* work_buffer = EnsureAudioStreamWorkBufferSize(stream, work_buffer_capacity);
|
|
|
|
if (!work_buffer) {
|
|
return -1;
|
|
}
|
|
|
|
const int padding_bytes = resampler_padding_frames * src_frame_size;
|
|
|
|
Uint8* work_buffer_tail = work_buffer;
|
|
|
|
// Split the work_buffer into [left_padding][input_buffer][right_padding]
|
|
Uint8* left_padding = work_buffer_tail;
|
|
work_buffer_tail += padding_bytes;
|
|
|
|
Uint8* input_buffer = work_buffer_tail;
|
|
work_buffer_tail += input_bytes;
|
|
|
|
Uint8* right_padding = work_buffer_tail;
|
|
work_buffer_tail += padding_bytes;
|
|
|
|
SDL_assert((work_buffer_tail - work_buffer) <= work_buffer_capacity);
|
|
|
|
// Now read unconverted data from the queue into the work buffer to fulfill the request.
|
|
if (SDL_ReadFromAudioQueue(stream->queue, input_buffer, input_bytes) != 0) {
|
|
SDL_assert(!"Not enough data in queue (resample read)");
|
|
}
|
|
stream->total_bytes_queued -= input_bytes;
|
|
|
|
// Update the history buffer and fill in the left padding
|
|
UpdateAudioStreamHistoryBuffer(stream, input_buffer, input_bytes, left_padding, padding_bytes);
|
|
|
|
// Fill in the right padding by peeking into the input queue (missing data is filled with silence)
|
|
if (SDL_PeekIntoAudioQueue(stream->queue, right_padding, padding_bytes) != 0) {
|
|
SDL_assert(!"Not enough data in queue (resample peek)");
|
|
}
|
|
|
|
SDL_assert(work_buffer_frames == input_frames + (resampler_padding_frames * 2));
|
|
|
|
// Resampling! get the work buffer to float32 format, etc, in-place.
|
|
ConvertAudio(work_buffer_frames, work_buffer, src_format, src_channels, work_buffer, SDL_AUDIO_F32, resample_channels, NULL);
|
|
|
|
// Update the work_buffer pointers based on the new frame size
|
|
input_buffer = work_buffer + ((input_buffer - work_buffer) / src_frame_size * resample_frame_size);
|
|
work_buffer_tail = work_buffer + ((work_buffer_tail - work_buffer) / src_frame_size * resample_frame_size);
|
|
SDL_assert((work_buffer_tail - work_buffer) <= work_buffer_capacity);
|
|
|
|
// Decide where the resampled output goes
|
|
void* resample_buffer = (resample_buffer_offset != -1) ? (work_buffer + resample_buffer_offset) : buf;
|
|
|
|
SDL_ResampleAudio(resample_channels,
|
|
(const float *) input_buffer, input_frames,
|
|
(float*) resample_buffer, output_frames,
|
|
resample_rate, &stream->resample_offset);
|
|
|
|
// Convert to the final format, if necessary
|
|
if (buf != resample_buffer) {
|
|
ConvertAudio(output_frames, resample_buffer, SDL_AUDIO_F32, resample_channels, buf, dst_format, dst_channels, work_buffer);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
// get converted/resampled data from the stream
|
|
// get converted/resampled data from the stream
//
// Reads up to `len` bytes of output-format audio into `voidbuf`.
// Returns the number of bytes actually written (which may be less than `len`
// if the stream runs dry), or -1 on error *only* if no bytes were produced
// at all; once any data has been written, errors end the loop but the
// partial byte count is returned instead.
int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
{
    Uint8 *buf = (Uint8 *) voidbuf;

#if DEBUG_AUDIOSTREAM
    SDL_Log("AUDIOSTREAM: want to get %d converted bytes", len);
#endif

    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    } else if (buf == NULL) {
        return SDL_InvalidParamError("buf");
    } else if (len < 0) {
        return SDL_InvalidParamError("len");
    } else if (len == 0) {
        return 0; // nothing to do.
    }

    SDL_LockMutex(stream->lock);

    // Bail out early if src/dst formats haven't both been set yet.
    if (CheckAudioStreamIsFullySetup(stream) != 0) {
        SDL_UnlockMutex(stream->lock);
        return -1;
    }

    const int dst_frame_size = SDL_AUDIO_FRAMESIZE(stream->dst_spec);

    len -= len % dst_frame_size; // chop off any fractional sample frame.

    // give the callback a chance to fill in more stream data if it wants.
    if (stream->get_callback) {
        Sint64 total_request = len / dst_frame_size; // start with sample frames desired
        Sint64 additional_request = total_request;

        Sint64 resample_offset = 0;
        Sint64 available_frames = GetAudioStreamAvailableFrames(stream, &resample_offset);

        // `additional_request` is only the shortfall beyond what's already queued.
        additional_request -= SDL_min(additional_request, available_frames);

        Sint64 resample_rate = GetAudioStreamResampleRate(stream, stream->src_spec.freq, resample_offset);

        if (resample_rate) {
            // convert the output-frame counts into the input-frame counts needed to produce them.
            total_request = SDL_GetResamplerInputFrames(total_request, resample_rate, resample_offset);
            additional_request = SDL_GetResamplerInputFrames(additional_request, resample_rate, resample_offset);
        }

        total_request *= SDL_AUDIO_FRAMESIZE(stream->src_spec); // convert sample frames to bytes.
        additional_request *= SDL_AUDIO_FRAMESIZE(stream->src_spec); // convert sample frames to bytes.
        stream->get_callback(stream->get_callback_userdata, stream, (int) SDL_min(additional_request, SDL_INT_MAX), (int) SDL_min(total_request, SDL_INT_MAX));
    }

    // Process the data in chunks to avoid allocating too much memory (and potential integer overflows)
    const int chunk_size = 4096;

    int total = 0;

    while (total < len) {
        // Audio is processed a track at a time.
        SDL_AudioSpec input_spec;
        SDL_bool flushed;
        const Sint64 available_frames = GetAudioStreamHead(stream, &input_spec, &flushed);

        if (available_frames == 0) {
            if (flushed) {
                // This track is done; discard it and reset per-track conversion
                // state so the next track starts fresh.
                SDL_PopAudioQueueHead(stream->queue);
                SDL_zero(stream->input_spec);
                stream->resample_offset = 0;
                continue;
            }
            // There are no frames available, but the track hasn't been flushed, so more might be added later.
            break;
        }

        if (UpdateAudioStreamInputSpec(stream, &input_spec) != 0) {
            // report partial data if we already produced some; otherwise signal error.
            total = total ? total : -1;
            break;
        }

        // Clamp the output length to the maximum currently available.
        // GetAudioStreamDataInternal requires enough input data is available.
        int output_frames = (len - total) / dst_frame_size;
        output_frames = SDL_min(output_frames, chunk_size);
        output_frames = (int) SDL_min(output_frames, available_frames);

        if (GetAudioStreamDataInternal(stream, &buf[total], output_frames) != 0) {
            // same partial-result convention as above.
            total = total ? total : -1;
            break;
        }

        total += output_frames * dst_frame_size;
    }

    SDL_UnlockMutex(stream->lock);

#if DEBUG_AUDIOSTREAM
    SDL_Log("AUDIOSTREAM: Final result was %d", total);
#endif

    return total;
}
|
|
|
|
// number of converted/resampled bytes available for output
|
|
// number of converted/resampled bytes available for output
int SDL_GetAudioStreamAvailable(SDL_AudioStream *stream)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    Sint64 available_bytes = 0;

    SDL_LockMutex(stream->lock);

    // A stream without both specs set reports zero available.
    if (CheckAudioStreamIsFullySetup(stream) == 0) {
        // sample frames available, converted to bytes in the destination format.
        available_bytes = GetAudioStreamAvailableFrames(stream, NULL) * SDL_AUDIO_FRAMESIZE(stream->dst_spec);
    }

    SDL_UnlockMutex(stream->lock);

    // if this overflows an int, just clamp it to a maximum.
    return (int) SDL_min(available_bytes, SDL_INT_MAX);
}
|
|
|
|
// number of sample frames that are currently queued as input.
|
|
// number of sample frames that are currently queued as input.
int SDL_GetAudioStreamQueued(SDL_AudioStream *stream)
{
    if (stream == NULL) {
        return SDL_InvalidParamError("stream");
    }

    // snapshot the counter under the lock.
    SDL_LockMutex(stream->lock);
    const Uint64 queued_bytes = stream->total_bytes_queued;
    SDL_UnlockMutex(stream->lock);

    // if this overflows an int, just clamp it to a maximum.
    return (queued_bytes > (Uint64) SDL_INT_MAX) ? SDL_INT_MAX : (int) queued_bytes;
}
|
|
|
|
// Drop all queued input and reset conversion state, returning the stream
// to a pristine (but still configured) condition.
int SDL_ClearAudioStream(SDL_AudioStream *stream)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }

    SDL_LockMutex(stream->lock);

    SDL_ClearAudioQueue(stream->queue);

    // forget the current track's format and resampler position.
    SDL_zero(stream->input_spec);
    stream->resample_offset = 0;
    stream->total_bytes_queued = 0;

    SDL_UnlockMutex(stream->lock);

    return 0;
}
|
|
|
|
// Tear down a stream: detach it from any device, then free all of its
// resources. Safe to call with NULL (no-op).
void SDL_DestroyAudioStream(SDL_AudioStream *stream)
{
    if (stream == NULL) {
        return;
    }

    // NOTE(review): presumably lets the audio core react before the stream
    // goes away (e.g. fire destruction callbacks) — defined elsewhere.
    OnAudioStreamDestroy(stream);

    const SDL_bool simplified = stream->simplified;
    if (simplified) {
        // "simplified" streams own their device (created via the one-call
        // convenience API), so closing the device is the correct teardown.
        SDL_assert(stream->bound_device->simplified);
        SDL_CloseAudioDevice(stream->bound_device->instance_id); // this will unbind the stream.
    } else {
        SDL_UnbindAudioStream(stream);
    }

    // history/work buffers were SIMD-aligned allocations, hence aligned_free.
    SDL_aligned_free(stream->history_buffer);
    SDL_aligned_free(stream->work_buffer);
    SDL_DestroyAudioQueue(stream->queue);
    SDL_DestroyMutex(stream->lock);

    SDL_free(stream);
}
|
|
|
|
// One-shot conversion: push `src_data` through a temporary audio stream and
// hand back a freshly-allocated buffer in the destination format.
// On success returns 0 and transfers ownership of *dst_data to the caller
// (free with SDL_free); on failure returns -1 with *dst_data=NULL, *dst_len=0.
int SDL_ConvertAudioSamples(const SDL_AudioSpec *src_spec, const Uint8 *src_data, int src_len,
                            const SDL_AudioSpec *dst_spec, Uint8 **dst_data, int *dst_len)
{
    // Clear the out-params first so callers see sane values on any failure.
    if (dst_data) {
        *dst_data = NULL;
    }
    if (dst_len) {
        *dst_len = 0;
    }

    if (src_data == NULL) {
        return SDL_InvalidParamError("src_data");
    }
    if (src_len < 0) {
        return SDL_InvalidParamError("src_len");
    }
    if (dst_data == NULL) {
        return SDL_InvalidParamError("dst_data");
    }
    if (dst_len == NULL) {
        return SDL_InvalidParamError("dst_len");
    }

    int retval = -1;
    Uint8 *converted = NULL;
    int converted_len = 0;

    SDL_AudioStream *stream = SDL_CreateAudioStream(src_spec, dst_spec);
    if (stream != NULL) {
        // Feed everything in and flush, so the full converted length is known.
        const SDL_bool queued_ok = (SDL_PutAudioStreamData(stream, src_data, src_len) == 0) &&
                                   (SDL_FlushAudioStream(stream) == 0);
        if (queued_ok) {
            converted_len = SDL_GetAudioStreamAvailable(stream);
            if (converted_len >= 0) {
                converted = (Uint8 *)SDL_malloc(converted_len);
                if (!converted) {
                    SDL_OutOfMemory();
                } else {
                    retval = (SDL_GetAudioStreamData(stream, converted, converted_len) >= 0) ? 0 : -1;
                }
            }
        }
    }

    if (retval == -1) {
        SDL_free(converted); // no-op when NULL
    } else {
        *dst_data = converted;
        *dst_len = converted_len;
    }

    SDL_DestroyAudioStream(stream);
    return retval;
}
|