9#include <condition_variable>
30#include "LabSound/backends/AudioDevice_RtAudio.h"
34inline std::pair<AudioStreamConfig, AudioStreamConfig> GetDefaultAudioDeviceConfiguration(
const bool with_input =
false)
36 AudioStreamConfig inputConfig;
37 AudioStreamConfig outputConfig;
39 const std::vector<AudioDeviceInfo> audioDevices = lab::AudioDevice_RtAudio::MakeAudioDeviceList();
43 AudioDeviceInfo defaultOutputInfo, defaultInputInfo;
44 for (
auto& info : audioDevices)
46 if (info.is_default_output) defaultOutputInfo = info;
47 else if (info.is_default_input) defaultInputInfo = info;
48 printf(
"Device %d: %s\n", info.index, info.identifier.c_str());
49 printf(
" input channels: %d\n", info.num_input_channels);
50 printf(
" output channels: %d\n", info.num_output_channels);
51 printf(
" default sample rate: %f\n", info.nominal_samplerate);
52 printf(
" is default input: %s\n", info.is_default_input ?
"true" :
"false");
53 printf(
" is default output: %s\n", info.is_default_output ?
"true" :
"false");
56 if (defaultOutputInfo.index != -1)
58 outputConfig.device_index = defaultOutputInfo.index;
59 outputConfig.desired_channels = std::min(uint32_t(2), defaultOutputInfo.num_output_channels);
60 outputConfig.desired_samplerate = defaultOutputInfo.nominal_samplerate;
65 if (defaultInputInfo.index != -1)
67 inputConfig.device_index = defaultInputInfo.index;
68 inputConfig.desired_channels = std::min(uint32_t(1), defaultInputInfo.num_input_channels);
69 inputConfig.desired_samplerate = defaultInputInfo.nominal_samplerate;
73 throw std::invalid_argument(
"the default audio input device was requested but none were found");
77 return { inputConfig, outputConfig };
// Re-join the command line (argv[1..]) into one string, then split it back
// into tokens while respecting (and preserving) double-quoted substrings and
// backslash-escaped quotes. argv[0] is prepended unchanged.
// Returns the token list, with argv[0] as element 0.
inline std::vector<std::string> SplitCommandLine(
    int argc,
    char** argv)
{
    // Separate one joined string into tokens. Quoted regions keep their
    // surrounding quotes; \" is treated as an escaped quote character.
    auto Separate = [](const std::string& input) -> std::vector<std::string>
    {
        std::vector<std::string> output;

        size_t curr = 0;
        size_t start = 0;
        size_t end = input.length();
        bool inQuotes = false;

        while (curr < end)
        {
            if (input[curr] == '\\')
            {
                // skip the backslash; if it escapes a quote, skip that too
                ++curr;
                if (curr != end && input[curr] == '\"')
                    ++curr;
            }
            else
            {
                if (input[curr] == '\"')
                {
                    // closing (or opening) a quoted region — emit any pending token
                    if (inQuotes || (start != curr))
                    {
                        // include the surrounding quotes in the emitted token
                        output.push_back(input.substr(start - (inQuotes ? 1 : 0), curr - start + (inQuotes ? 2 : 0)));
                    }
                    inQuotes = !inQuotes;
                    start = curr + 1;
                }
                else if (input[curr] == ' ' && !inQuotes)
                {
                    // unquoted whitespace terminates a token
                    if (start != curr)
                        output.push_back(input.substr(start, curr - start));
                    start = curr + 1;
                }
                ++curr;
            }
        }

        // trailing token that was not followed by a space or quote
        if (curr - start > 0) output.push_back(input.substr(start, curr - start));

        return output;
    };

    // join the arguments so quoted strings spanning argv entries can be found
    std::string cmd;
    for (int i = 1; i < argc; ++i)
    {
        if (i > 1) cmd += " ";
        cmd += std::string(argv[i]);
    }

    std::vector<std::string> result = Separate(cmd);
    result.insert(result.begin(), std::string{ argv[0] });
    return result;
}
142inline std::shared_ptr<AudioBus> MakeBusFromSampleFile(
char const*
const name,
int argc,
char** argv)
144 std::string path_prefix;
145 auto cmds = SplitCommandLine(argc, argv);
147 if (cmds.size() > 1) path_prefix = cmds[1] +
"/";
149 const std::string path = path_prefix + name;
150 std::shared_ptr<AudioBus> bus = MakeBusFromFile(path,
false);
151 if (!bus)
throw std::runtime_error(
"couldn't open " + path);
// Block the calling thread for at least the given std::chrono duration.
template <typename Duration>
void Wait(Duration duration)
{
    std::this_thread::sleep_for(duration);
}
165 DIST_LINEAR = lab::PannerNode::LINEAR_DISTANCE,
166 DIST_INVERSE = lab::PannerNode::INVERSE_DISTANCE,
167 DIST_EXPONENTIAL = lab::PannerNode::EXPONENTIAL_DISTANCE,
174static struct key_name distance_models[] = {
175{DIST_LINEAR,
"LINEAR"},
176{DIST_INVERSE,
"INVERSE"},
177{DIST_EXPONENTIAL,
"EXPONENTIAL"},
182static struct key_name periodicWave_types[] = {
183{OscillatorType::SINE,
"SINE"},
184{OscillatorType::SQUARE,
"SQUARE"},
185{OscillatorType::SAWTOOTH,
"SAWTOOTH"},
186{OscillatorType::TRIANGLE,
"TRIANGLE"},
187{OscillatorType::CUSTOM,
"CUSTOM"},
188{OscillatorType::OSCILLATOR_NONE, NULL},
191static struct key_name waveshaper_oversampling_types[] = {
192 {OverSampleType::NONE,
"NONE"},
193 {OverSampleType::_2X,
"2X"},
194 {OverSampleType::_4X,
"4x"},
198#define strcasecmp _stricmp
201unsigned int name_lookup(
char* cname,
struct key_name* keynames) {
202 unsigned int i, iname;
208 if (cn->cname != NULL) {
210 if (!strcasecmp(cn->cname, cname)) {
216 }
while (cn->cname != NULL);
221#ifndef DEGREES_PER_RADIAN
222#define DEGREES_PER_RADIAN 57.2957795130823208768
224#define RAD2DEGF(x) ((float)((x)*DEGREES_PER_RADIAN))
233#define GLDOUBLE double
240#include "../lib/vrml_parser/Structs.h"
245 static std::map<int, std::shared_ptr<lab::AudioBus>> busses;
248 void libsound_testNoise()
250 AudioStreamConfig _inputConfig;
251 AudioStreamConfig _outputConfig;
252 auto config = GetDefaultAudioDeviceConfiguration(
true);
253 _inputConfig = config.first;
254 _outputConfig = config.second;
255 std::shared_ptr<lab::AudioDevice_RtAudio> device(
new lab::AudioDevice_RtAudio(_inputConfig, _outputConfig));
257 std::shared_ptr<lab::AudioContext> context;
258 const auto defaultAudioDeviceConfigurations = GetDefaultAudioDeviceConfiguration();
260 context = std::make_shared<lab::AudioContext>(
false,
true);
262 lab::AudioContext& ac = *context.get();
264 const std::string path =
"C:/Users/Public/dev/source5/audio/LabSound-master/assets/samples/stereo-music-clip.wav";
265 std::shared_ptr<AudioBus> bus = MakeBusFromFile(path,
false);
266 auto musicClip = bus;
270 std::shared_ptr<OscillatorNode> oscillator;
271 std::shared_ptr<SampledAudioNode> musicClipNode;
272 std::shared_ptr<GainNode> gain;
274 oscillator = std::make_shared<OscillatorNode>(ac);
275 gain = std::make_shared<GainNode>(ac);
276 gain->gain()->setValue(0.0625f);
278 musicClipNode = std::make_shared<SampledAudioNode>(ac);
280 ContextRenderLock r(context.get(),
"ex_simple");
281 musicClipNode->setBus(r, musicClip);
283 context->connect(gain, musicClipNode, 0, 0);
284 musicClipNode->start(0.0f);
287 context->connect(gain, oscillator, 0, 0);
288 context->connect(context->destinationNode(), gain, 0, 0);
290 oscillator->frequency()->setValue(440.f);
291 oscillator->setType(OscillatorType::SINE);
292 oscillator->start(0.0f);
294 Wait(std::chrono::seconds(6));
298 struct anstruct { std::shared_ptr<lab::AudioNode> anode; };
300 std::shared_ptr<lab::AudioContext> context;
304 std::map<int, std::shared_ptr<lab::AudioNode>> nodes;
305 std::map<int, int> nodetype;
307 std::shared_ptr<std::vector<std::uint8_t>> bytearray;
308 std::shared_ptr<std::vector<std::float_t>> floatarray;
312 static int have_listenerpoint = 0;
313 static float listenerpoint_dir[3];
314 static float listenerpoint_up[3];
315 static float listenerpoint_pos[3];
316 void libsound_setListenerPose(
float* pos,
float* dir,
float *up,
int trackview) {
317 have_listenerpoint = trackview ? 0 : 1;
318 memcpy(listenerpoint_pos, pos, 3 *
sizeof(
float));
319 memcpy(listenerpoint_dir, dir, 3 *
sizeof(
float));
320 memcpy(listenerpoint_dir, up, 3 *
sizeof(
float));
// Registry of live audio contexts, keyed by the 1-based integer handle that
// is handed back to the X3D side.
323 static int next_audio_context;
324 static std::map<int, struct acstruct*> audio_contexts;
// Create a LabSound context bound to the default output device, register it
// in audio_contexts, and return its handle.
// NOTE(review): lines are missing from this listing (e.g. the allocation of
// 'ac' and initialization of ac->next_node) — verify against the original.
325 int libsound_createContext0() {
328 std::shared_ptr<lab::AudioContext> context;
335 AudioStreamConfig _inputConfig;
336 AudioStreamConfig _outputConfig;
// 'true' requests the default input device as well as the default output
337 auto config = GetDefaultAudioDeviceConfiguration(
true);
338 _inputConfig = config.first;
339 _outputConfig = config.second;
340 std::shared_ptr<lab::AudioDevice_RtAudio> device(
new lab::AudioDevice_RtAudio(_inputConfig, _outputConfig));
// AudioContext(false, true) — presumably offline=false; confirm the second
// argument's meaning against the LabSound version in use
341 context = std::make_shared<lab::AudioContext>(
false,
true);
342 auto destinationNode = std::make_shared<lab::AudioDestinationNode>(*context.get(), device);
343 device->setDestinationNode(destinationNode);
344 context->setDestinationNode(destinationNode);
// default listener pose: at the origin, facing -Z, +Y up
348 auto listener = context->listener();
356 listener->setForward({ 0.0,0.0,-1.0 });
357 listener->setUpVector({ 0.0,1.0,0.0 });
358 listener->setPosition({ 0.0,0.0,0.0 });
366 ac->context = context;
// handles are 1-based: increment first, then register
368 next_audio_context++;
369 audio_contexts[next_audio_context] = ac;
// the context's destination node occupies the first node slot
372 ac->nodes[ac->next_node] = ac->context->destinationNode();
373 ac->nodetype[ac->next_node] = NODE_AudioDestination;
374 return next_audio_context;
376 static struct type_name {
380 {NODE_AudioDestination,
"AD"},
381 {NODE_Analyser,
"Anly"},
383 {NODE_SpatialSound,
"SS"},
384 {NODE_AudioClip,
"AC"},
386 {NODE_Convolver,
"Conv"},
387 {NODE_WaveShaper,
"WShp"},
388 {NODE_BiquadFilter,
"BiQ"},
389 {NODE_DynamicsCompressor,
"DCmp"},
390 {NODE_ChannelSplitter,
"Splt"},
391 {NODE_ChannelMerger,
"Merg"},
392 {NODE_Delay,
"Dlay"},
393 {NODE_BufferAudioSource,
"BAS"},
394 {NODE_AudioBuffer,
"ABuf"},
395 {NODE_OscillatorSource,
"Osc"},
396 {NODE_ListenerPoint,
"LP"},
397 {NODE_ListenerPointSource,
"LPS"},
398 {NODE_StreamAudioDestination,
"SAD"},
399 {NODE_StreamAudioSource,
"SAS"},
400 {NODE_MicrophoneSource,
"MicS"},
// Reverse lookup: map a NODE_* enum value to its short display name in the
// type_name table (used by libsound_print_connections).
// NOTE(review): the loop header, table-pointer initialization, and return
// statement are missing from this listing — the visible fragments suggest a
// do/while over a 0-terminated table; confirm against the original file.
403 static const char* nodetype_lookup(
int itype) {
406 struct type_name* tn;
// match on the numeric node-type id
411 if (tn->iname == itype) {
416 }
while (tn->iname != 0);
431 static std::list<connection> connections;
432 void libsound_connect0(
int icontext,
int idestination,
int isource) {
433 struct acstruct* ac = audio_contexts[icontext];
434 std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
435 std::shared_ptr<AudioNode> source = ac->nodes[isource];
436 ac->context->connect(destination, source);
437 int iparent_type = ac->nodetype[idestination];
438 int ichild_type = ac->nodetype[isource];
439 struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
440 cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = 0; cc.dstindex = 0;
441 connections.push_back(cc);
443 void libsound_connect1(
int icontext,
int idestination,
int isource,
int indexSrc) {
444 struct acstruct* ac = audio_contexts[icontext];
445 std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
446 std::shared_ptr<AudioNode> source = ac->nodes[isource];
447 ac->context->connect(destination, source,0,indexSrc);
448 int iparent_type = ac->nodetype[idestination];
449 int ichild_type = ac->nodetype[isource];
450 struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
451 cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = indexSrc; cc.dstindex = 0;
452 connections.push_back(cc);
// Connect isource.output[indexSrc] -> idestination.input[indexDst] inside
// context icontext, with range reporting, and record the edge in the global
// 'connections' list.
455 void libsound_connect2(
int icontext,
int idestination,
int isource,
int indexDst,
int indexSrc) {
456 struct acstruct* ac = audio_contexts[icontext];
457 std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
458 std::shared_ptr<AudioNode> source = ac->nodes[isource];
459 int dstInputs = destination->numberOfInputs();
460 int srcOutputs = source->numberOfOutputs();
// NOTE(review): the bodies of these range checks are missing from this
// listing — presumably they clamp the index or bail out; confirm in the
// original file.
461 if (indexDst > dstInputs) {
462 printf(
"destination number of inputs %d destination idx %d\n", destination->numberOfInputs(), indexDst);
469 if (indexSrc > srcOutputs) {
470 printf(
"source number of outputs %d source idx %d\n", srcOutputs, indexSrc);
// LabSound signature: connect(destination, source, destIdx, srcIdx)
478 ac->context->connect(destination, source, indexDst, indexSrc);
479 int iparent_type = ac->nodetype[idestination];
480 int ichild_type = ac->nodetype[isource];
// bookkeeping entry for this edge
481 struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
482 cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = indexSrc; cc.dstindex = indexDst;
483 connections.push_back(cc);
486 void libsound_disconnect2(
int icontext,
int idestination,
int isource,
int indexDst,
int indexSrc) {
487 struct acstruct* ac = audio_contexts[icontext];
488 std::shared_ptr<AudioNode> destination = ac->nodes[idestination];
489 std::shared_ptr<AudioNode> source = ac->nodes[isource];
490 int dstInputs = destination->numberOfInputs();
491 int srcOutputs = source->numberOfOutputs();
492 if (indexDst > dstInputs) {
493 printf(
"destination number of inputs %d destination idx %d\n", destination->numberOfInputs(), indexDst);
500 if (indexSrc > srcOutputs) {
501 printf(
"source number of outputs %d source idx %d\n", srcOutputs, indexSrc);
509 ac->context->disconnect(destination, source, indexDst, indexSrc);
512 int iparent_type = ac->nodetype[idestination];
513 int ichild_type = ac->nodetype[isource];
514 struct connection cc; cc.icontext = icontext; cc.iparent = idestination; cc.iparent_type = iparent_type;
515 cc.ichild = isource; cc.ichild_type = ichild_type; cc.srcindex = indexSrc; cc.dstindex = indexDst;
517 std::list<connection>::iterator it;
518 for (it = connections.begin(); it != connections.end(); ++it){
520 if (cn.icontext = cc.icontext && cn.iparent == cc.iparent && cn.ichild == cc.ichild
521 && cn.srcindex == cc.srcindex && cn.dstindex == cc.dstindex) {
522 connections.erase(it);
529 void libsound_connect(
int icontext,
icset iparent) {
531 libsound_connect2(icontext, iparent.p, iparent.n, iparent.d, iparent.s);
533 void libsound_disconnect(
int icontext,
icset iparent) {
534 libsound_disconnect2(icontext, iparent.p, iparent.n, iparent.ld, iparent.ls);
537 void libsound_print_connections() {
539 printf(
"%2s %7s %4s %7s %7s %6s %4s\n",
"ic",
"iparent",
"type",
"dstIndx",
"srcIndex",
"ichild",
"type");
542 std::list<connection>::iterator it;
543 for (it = connections.begin(); it != connections.end(); ++it) {
546 const char* ptype = nodetype_lookup(cc.iparent_type);
547 const char* ctype = nodetype_lookup(cc.ichild_type);
549 printf(
"%2d %7d %4s %7d %7d %6d %4s\n", cc.icontext, cc.iparent, ptype, cc.dstindex, cc.srcindex, cc.ichild, ctype);
551 printf(
"count %d\n", (
int)connections.size());
553 void libsound_pauseContext0(
int icontext) {
554 struct acstruct* ac = audio_contexts[icontext];
556 ac->context->suspend();
560 void libsound_resumeContext0(
int icontext) {
561 struct acstruct* ac = audio_contexts[icontext];
563 ac->context->resume();
// Silence the output of the LabSound node backing an X3D sound node.
// NOTE(review): the switch case labels and closing braces are missing from
// this listing — only the PannerNode path is visible; confirm against the
// original file.
567 void libsound_pauseNode0(
struct X3D_Node* node) {
569 struct X3D_SoundRep* srepn = getSoundRep(X3D_NODE(node));
570 int icontext = srepn->icontext;
572 struct acstruct* ac = audio_contexts[icontext];
574 switch (node->_nodeType) {
578 std::shared_ptr<PannerNode> pannerNode;
579 PannerNode* pannerNode_ptr;
581 pannerNode_ptr =
static_cast<PannerNode*
>(ac->nodes[srepn->inode].get());
// silencing must run under the render lock
583 ContextRenderLock r(ac->context.get(),
"ex_simple");
584 pannerNode_ptr->silenceOutputs(r);
// Un-silence the output of the LabSound node backing an X3D sound node —
// the inverse of libsound_pauseNode0.
// NOTE(review): the switch case labels and closing braces are missing from
// this listing — only the PannerNode path is visible; confirm against the
// original file.
594 void libsound_resumeNode0(
struct X3D_Node* node) {
596 struct X3D_SoundRep* srepn = getSoundRep(X3D_NODE(node));
597 int icontext = srepn->icontext;
599 struct acstruct* ac = audio_contexts[icontext];
601 switch (node->_nodeType) {
605 std::shared_ptr<PannerNode> pannerNode;
606 PannerNode* pannerNode_ptr;
608 pannerNode_ptr =
static_cast<PannerNode*
>(ac->nodes[srepn->inode].get());
// un-silencing must run under the render lock
610 ContextRenderLock r(ac->context.get(),
"ex_simple");
611 pannerNode_ptr->unsilenceOutputs(r);
// Build an AudioBus from an in-memory audio-file image by spooling it to a
// temporary file and decoding that file; registers the bus in 'busses'.
622 int libsound_createBusFromBuffer0(
char* bbuffer,
int len) {
624 std::shared_ptr<AudioBus> Bus;
// NOTE(review): opened in text mode "w" — on Windows this corrupts binary
// audio data; should presumably be "wb". No fclose() is visible before the
// file is re-read below. Confirm against the original file.
626 FILE * fp = fopen(
"tmp_buf_wav",
"w");
627 int nchunks = len / 1024;
628 int leftover = len % 1024;
// write the payload in 1 KiB chunks, then the remainder
629 for(
int i=0;i<nchunks;i++)
630 fwrite(&bbuffer[i*1024], 1024, 1, fp);
631 fwrite(&bbuffer[nchunks * 1024], leftover, 1, fp);
634 Bus = MakeBusFromFile(
"tmp_buf_wav",
false);
// NOTE(review): remove() targets "tmp_buf.wav" but the file written above
// is named "tmp_buf_wav" — the temp file is never actually deleted.
635 remove(
"tmp_buf.wav");
// alternate path: decode directly from memory (apparently a second,
// possibly conditionally-compiled implementation)
638 std::vector<uint8_t> buffer(bbuffer, bbuffer + len);
639 Bus = MakeBusFromMemory(buffer,
false);
643 busses[next_bus] = Bus;
646 int libsound_createBusFromPCM32(
float* buffer,
int nchannel,
int lentotal) {
648 int length = lentotal / nchannel;
649 std::shared_ptr<lab::AudioBus> audioBus(
new lab::AudioBus(nchannel, length));
650 audioBus->setSampleRate(44100.0);
653 for (
int i = 0; i < nchannel; ++i)
655 std::memcpy(audioBus->channel(i)->mutableData(), buffer + (i * length), length *
sizeof(
float));
659 busses[next_bus] = audioBus;
667#define NQR_INT16_MAX 32767.f
668#define NQR_INT24_MAX 8388608.f
669#define NQR_INT32_MAX 2147483648.f
671 static const float NQR_BYTE_2_FLT = 1.0f / 127.0f;
673#define int8_to_float32(s) ((float) (s) * NQR_BYTE_2_FLT)
674#define uint8_to_float32(s)(((float) (s) - 128) * NQR_BYTE_2_FLT)
675#define int16_to_float32(s) ((float) (s) / NQR_INT16_MAX)
676#define int24_to_float32(s) ((float) (s) / NQR_INT24_MAX)
677#define int32_to_float32(s) ((float) (s) / NQR_INT32_MAX)
// Assemble three little-endian bytes into a sign-extended 24-bit sample
// held in an int32_t (a = least-significant byte, c = most-significant).
inline int32_t Pack(uint8_t a, uint8_t b, uint8_t c)
{
    int32_t value = (int32_t(c) << 16) | (int32_t(b) << 8) | int32_t(a);
    if (value & 0x800000)                            // bit 23 set: negative 24-bit value
        value |= static_cast<int32_t>(0xff000000u);  // extend the sign into the top byte
    return value;
}
704 void ConvertToFloat32(
float* dst,
const uint8_t* src,
const size_t N, PCMFormat f)
706 assert(f != PCM_END);
710 const uint8_t* dataPtr =
reinterpret_cast<const uint8_t*
>(src);
711 for (
size_t i = 0; i < N; ++i)
712 dst[i] = uint8_to_float32(dataPtr[i]);
714 else if (f == PCM_S8)
716 const int8_t* dataPtr =
reinterpret_cast<const int8_t*
>(src);
717 for (
size_t i = 0; i < N; ++i)
718 dst[i] = int8_to_float32(dataPtr[i]);
720 else if (f == PCM_16)
722 const int16_t* dataPtr =
reinterpret_cast<const int16_t*
>(src);
723 for (
size_t i = 0; i < N; ++i)
724 dst[i] = int16_to_float32(Read16(dataPtr[i]));
726 else if (f == PCM_24)
728 const uint8_t* dataPtr =
reinterpret_cast<const uint8_t*
>(src);
730 for (
size_t i = 0; i < N; ++i)
732 int32_t sample = Pack(dataPtr[c], dataPtr[c + 1], dataPtr[c + 2]);
733 dst[i] = int24_to_float32(sample);
737 else if (f == PCM_32)
739 const int32_t* dataPtr =
reinterpret_cast<const int32_t*
>(src);
740 for (
size_t i = 0; i < N; ++i)
741 dst[i] = int32_to_float32(Read32(dataPtr[i]));
746 else if (f == PCM_FLT)
748 std::memcpy(dst, src, N *
sizeof(
float));
753 else if (f == PCM_DBL)
755 const double* dataPtr =
reinterpret_cast<const double*
>(src);
756 for (
size_t i = 0; i < N; ++i)
757 dst[i] = (
float)Read64(dataPtr[i]);
// Convert interleaved PCM (frame-major: ch0,ch1,...,ch0,ch1,...) into
// planar layout (channel-major) in dst. bits is the sample width in bits;
// lenbytes is the total byte count of src.
void deinterleave(
    char* dst,
    char* src,
    int nchannel,
    int bits,
    int lenbytes) {
    int kbyte = bits / 8;                        // bytes per sample
    int chunks = lenbytes / nchannel / kbyte;    // frames (samples per channel)
    for (int i = 0; i < chunks; i++) {           // frame
        for (int j = 0; j < nchannel; j++) {     // channel
            for (int k = 0; k < kbyte; k++) {    // byte within sample
                dst[(j * chunks) * kbyte + i * kbyte + k] = src[(i * nchannel + j) * kbyte + k];
            }
        }
    }
}
771 int libsound_createBusFromPCM(
char* buffer,
int bits,
int nchannel,
int lentotal,
int freq) {
773 int length = lentotal / nchannel;
776 int bytes32 = length * 32 / bits;
777 int chunks = length * 8 / bits;
778 std::shared_ptr<lab::AudioBus> audioBus(
new lab::AudioBus(nchannel, bytes32));
779 audioBus->setSampleRate((
float)freq);
781 float *chan32 = (
float*)std::malloc(bytes32);
782 PCMFormat f = bits == 8 ? PCMFormat::PCM_S8 : bits == 16 ? PCMFormat::PCM_16 : bits == 24 ? PCMFormat::PCM_24 : bits == 32 ? PCMFormat::PCM_32 : PCMFormat::PCM_64;
783 char *buffer2 = (
char*)std::malloc(lentotal);
784 deinterleave(buffer2,buffer, nchannel, bits, lentotal);
785 for (
int i = 0; i < nchannel; ++i)
787 char* channel = &buffer2[i * length];
788 ConvertToFloat32(chan32, (uint8_t*)channel, chunks, f);
789 std::memcpy(audioBus->channel(i)->mutableData(), chan32, bytes32);
794 busses[next_bus] = audioBus;
800 int libsound_createBusFromFile0(
char* url) {
802 std::shared_ptr<AudioBus> Bus;
803 Bus = MakeBusFromFile(url,
false);
805 busses[next_bus] = Bus;
808 double libsound_computeDuration0(
int ibuffer) {
809 AudioBus *bus =
static_cast<AudioBus*
>(busses[ibuffer].get());
810 double duration = bus->length()* bus->sampleRate();
813 void getChannelInterpretation(
char *interpretation,
char *mode, ChannelInterpretation *interp, ChannelCountMode *cmode) {
814 *interp = lab::ChannelInterpretation::Speakers;
815 if (!_stricmp(interpretation,
"DISCRETE"))
816 *interp = lab::ChannelInterpretation::Discrete;
818 *cmode = lab::ChannelCountMode::Max;
819 if (!_stricmp(mode,
"CLAMPED-MAX")) *cmode = lab::ChannelCountMode::ClampedMax;
820 else if (!_stricmp(mode,
"EXPLICIT")) *cmode = lab::ChannelCountMode::Explicit;
840 static int nondefault_channelinterp = 0;
841 void libsound_updateNode3(
int icontext,
icset iparent,
struct X3D_Node* node) {
842 struct acstruct* ac = audio_contexts[icontext];
843 AudioContext& context = *ac->context.get();
847 switch (node->_nodeType) {
851 std::shared_ptr<PannerNode> pannerNode;
852 PannerNode* pannerNode_ptr;
856 std::shared_ptr<GainNode> gain;
858 gain = std::make_shared<GainNode>(context);
859 gain_ptr = gain.get();
861 ac->nodes[ac->next_node] = gain;
862 ac->nodetype[ac->next_node] = NODE_Gain;
863 srepn->igain = ac->next_node;
866 libsound_connect2(icontext, iparent.p, srepn->igain, iparent.d, iparent.s);
869 if (pnode->spatialize != TRUE) {
875 pannerNode = std::make_shared<PannerNode>(context);
876 pannerNode->setPanningModel(PanningModel::EQUALPOWER);
879 ac->nodes[ac->next_node] = pannerNode;
880 ac->nodetype[ac->next_node] = NODE_Sound;
881 srepn->inode = ac->next_node;
882 srepn->icontext = icontext;
884 libsound_connect0(icontext, srepn->igain, srepn->inode);
886 pannerNode_ptr =
static_cast<PannerNode*
>(ac->nodes[srepn->inode].get());
888 pannerNode_ptr->setConeInnerAngle( 90.0f);
889 pannerNode_ptr->setConeOuterAngle(135.0f);
890 pannerNode_ptr->setConeOuterGain(.07f);
894 pannerNode_ptr->setDistanceModel(lab::PannerNode::EXPONENTIAL_DISTANCE);
895 pannerNode_ptr->setDistanceModel(lab::PannerNode::LINEAR_DISTANCE);
898 pannerNode_ptr->setRolloffFactor(1.0f);
899 pannerNode_ptr->setRefDistance(pnode->minFront);
900 pannerNode_ptr->setMaxDistance(pnode->maxFront);
903 pannerNode_ptr =
static_cast<PannerNode*
>(ac->nodes[srepn->inode].get());
904 if (have_listenerpoint) {
905 auto listener = context.listener();
913 float* pos, * dir, * up;
914 pos = listenerpoint_pos;
915 dir = listenerpoint_dir;
916 up = listenerpoint_up;
917 listener->setForward({ dir[0],dir[1],dir[2]});
918 listener->setUpVector({ up[0], up[1], up[2]});
919 listener->setPosition({ pos[0],pos[1], pos[2]});
923 auto listener = context.listener();
925 listener->setForward({ 0.0,0.0,-1.0 });
926 listener->setUpVector({ 0.0,1.0,0.0 });
927 listener->setPosition({ 0.0,0.0,0.0 });
930 gain_ptr =
static_cast<GainNode*
>(ac->nodes[srepn->igain].get());
932 gain_ptr->gain()->setValue(pnode->intensity);
933 float *xyz = pnode->__lastlocation.c;
934 pannerNode_ptr->setPosition(xyz[0], xyz[1], xyz[2]);
938 float* rxyz = pnode->__lastdirection.c;
940 pannerNode_ptr->setOrientation({ rxyz[0], rxyz[1], rxyz[2] });
947 case NODE_SpatialSound:
950 std::shared_ptr<PannerNode> pannerNode;
951 PannerNode* pannerNode_ptr;
955 std::shared_ptr<GainNode> gain;
957 gain = std::make_shared<GainNode>(context);
958 gain_ptr = gain.get();
960 ac->nodes[ac->next_node] = gain;
961 ac->nodetype[ac->next_node] = NODE_Gain;
962 srepn->igain = ac->next_node;
965 libsound_connect2(icontext, iparent.p, srepn->igain, iparent.d, iparent.s);
967 if (pnode->spatialize != TRUE) {
973 if (!context.loadHrtfDatabase(
"hrtf")) {
974 std::string path = std::string(
"../../../../lib_windows_vc12/LabSound/share") +
"/hrtf";
975 if (!context.loadHrtfDatabase(path)) {
976 printf(
"Could not load spatialization database");
981 pannerNode = std::make_shared<PannerNode>(context);
983 if (pnode->enableHRTF == TRUE && loaded) {
984 pannerNode->setPanningModel(lab::PanningModel::HRTF);
985 printf(
"SpatialSound HRTF enabled\n");
988 pannerNode->setPanningModel(lab::PanningModel::EQUALPOWER);
992 pannerNode->setDistanceModel(lab::PannerNode::EXPONENTIAL_DISTANCE);
993 unsigned int distance_enum = name_lookup(pnode->distanceModel->strptr, distance_models);
994 switch (distance_enum) {
995 case lab::PannerNode::LINEAR_DISTANCE:
996 pannerNode->setDistanceModel(lab::PannerNode::LINEAR_DISTANCE);
break;
997 case lab::PannerNode::INVERSE_DISTANCE:
998 pannerNode->setDistanceModel(lab::PannerNode::INVERSE_DISTANCE);
break;
999 case lab::PannerNode::EXPONENTIAL_DISTANCE:
1000 pannerNode->setDistanceModel(lab::PannerNode::EXPONENTIAL_DISTANCE);
break;
1006 ac->nodes[ac->next_node] = pannerNode;
1007 ac->nodetype[ac->next_node] = NODE_SpatialSound;
1008 srepn->inode = ac->next_node;
1009 srepn->icontext = icontext;
1011 libsound_connect0(icontext, srepn->igain, srepn->inode);
1013 pannerNode_ptr =
static_cast<PannerNode*
>(ac->nodes[srepn->inode].get());
1014 if (have_listenerpoint) {
1015 auto listener = context.listener();
1023 float* pos, * dir, * up;
1024 pos = listenerpoint_pos;
1025 dir = listenerpoint_dir;
1026 up = listenerpoint_up;
1027 listener->setForward({ dir[0],dir[1],dir[2] });
1028 listener->setUpVector({ up[0], up[1], up[2] });
1029 listener->setPosition({ pos[0],pos[1], pos[2] });
1033 auto listener = context.listener();
1035 listener->setForward({ 0.0,0.0,-1.0 });
1036 listener->setUpVector({ 0.0,1.0,0.0 });
1037 listener->setPosition({ 0.0,0.0,0.0 });
1041 gain_ptr =
static_cast<GainNode*
>(ac->nodes[srepn->igain].get());
1042 gain_ptr->gain()->setValue(pnode->intensity* pnode->gain);
1043 pannerNode_ptr->setConeInnerAngle(RAD2DEGF(pnode->coneInnerAngle));
1044 pannerNode_ptr->setConeOuterAngle(RAD2DEGF(pnode->coneOuterAngle));
1045 pannerNode_ptr->setConeOuterGain(pnode->coneOuterGain);
1047 pannerNode_ptr->setRolloffFactor(pnode->rolloffFactor);
1048 pannerNode_ptr->setRefDistance(pnode->referenceDistance);
1049 pannerNode_ptr->setMaxDistance(pnode->maxDistance);
1051 if (pnode->dopplerEnabled == TRUE) {
1052 float* v = pnode->__velocity.c;
1053 pannerNode_ptr->setVelocity(v[0], v[1], v[2]);
1055 auto listener = context.listener();
1056 listener->setDopplerFactor(1.0f);
1057 listener->setSpeedOfSound(343.0f);
1059 ContextRenderLock r(&context,
"ex_simple");
1060 float dr = pannerNode_ptr->dopplerRate(r);
1062 pnode->__dopplerFactor = dr;
1069 float* dir0 = pnode->__lastdirection.c;
1071 memcpy(dir, dir0,3*
sizeof(
float));
1077 dir[1] = fabs(dir[1]) < .001f ? copysign(.001f,dir[1]) : dir[1];
1080 pannerNode_ptr->setOrientation({ dir[0], dir[1] , dir[2] });
1086 float* xyz0 = pnode->__lastlocation.c;
1088 memcpy(xyz, xyz0, 3 *
sizeof(
float));
1094 pannerNode_ptr->setPosition(xyz[0], xyz[1], xyz[2]);
1101 case NODE_MovieTexture:
1102 case NODE_AudioClip:
1105 std::shared_ptr<SampledAudioNode> audioClipNode;
1106 SampledAudioNode* audioClipNode_ptr;
1107 if (!srepn->inode) {
1109 audioClipNode = std::make_shared<SampledAudioNode>(context);
1111 ContextRenderLock r(ac->context.get(),
"ex_simple");
1112 audioClipNode->setBus(r, busses[srepn->ibuffer]);
1115 ac->nodes[ac->next_node] = audioClipNode;
1116 ac->nodetype[ac->next_node] = NODE_AudioClip;
1117 srepn->inode = ac->next_node;
1118 srepn->icontext = icontext;
1124 audioClipNode->schedule(0.0, -1);
1130 audioClipNode_ptr =
static_cast<SampledAudioNode*
>(ac->nodes[srepn->inode].get());
1134 if (pnode->isPaused == FALSE && pnode->__context_paused) {
1135 ac->context->resume();
1136 pnode->__context_paused = FALSE;
1138 SchedulingState status = audioClipNode_ptr->playbackState();
1139 if (status == SchedulingState::PLAYING){
1141 if (pnode->isPaused == TRUE)
1142 audioClipNode_ptr->stop(0.0);
1143 if (pnode->isPaused == TRUE) {
1144 ac->context->suspend();
1145 pnode->__context_paused = TRUE;
1148 else if (status != SchedulingState::PLAYING && (pnode->isActive == TRUE))
1149 audioClipNode_ptr->start(0.0, pnode->loop ? -1 : 0);
1151 bool isactive = audioClipNode_ptr->isPlayingOrScheduled();
1153 if (!isactive && pnode->loop)
1154 audioClipNode_ptr->start(0.0f, -1);
1155 audioClipNode_ptr->playbackRate()->setValue(pnode->pitch * srepn->dopplerFactor);
1162 case NODE_BufferAudioSource:
1165 std::shared_ptr<SampledAudioNode> audioSource;
1166 SampledAudioNode* audioSource_ptr;
1167 if (!srepn->ibuffer)
break;
1168 if (!srepn->inode) {
1170 audioSource = std::make_shared<SampledAudioNode>(context);
1172 ContextRenderLock r(ac->context.get(),
"ex_simple");
1173 audioSource->setBus(r, busses[srepn->ibuffer]);
1176 ac->nodes[ac->next_node] = audioSource;
1177 ac->nodetype[ac->next_node] = NODE_BufferAudioSource;
1178 srepn->inode = ac->next_node;
1179 srepn->icontext = icontext;
1191 audioSource_ptr =
static_cast<SampledAudioNode*
>(ac->nodes[srepn->inode].get());
1195 if (pnode->isPaused == FALSE && pnode->__context_paused) {
1196 ac->context->resume();
1197 pnode->__context_paused = FALSE;
1199 SchedulingState status = audioSource_ptr->playbackState();
1200 if (status == SchedulingState::PLAYING) {
1201 if (pnode->isActive == FALSE)
1202 audioSource_ptr->stop(0.0);
1203 if (pnode->isPaused == TRUE) {
1204 ac->context->suspend();
1205 pnode->__context_paused = TRUE;
1208 else if (status != SchedulingState::PLAYING && (pnode->isActive == TRUE))
1209 audioSource_ptr->start(0.0, pnode->loop ? -1 : 0);
1211 audioSource_ptr->playbackRate()->setValue(pnode->playbackRate);
1212 audioSource_ptr->detune()->setValue(pnode->detune);
1222 case NODE_OscillatorSource:
1226 std::shared_ptr<OscillatorNode> oscillator;
1227 OscillatorNode* oscillator_ptr;
1229 if (!srepn->inode) {
1244 oscillator = std::make_shared<OscillatorNode>(context);
1246 ac->nodes[ac->next_node] = oscillator;
1247 ac->nodetype[ac->next_node] = NODE_OscillatorSource;
1248 srepn->inode = ac->next_node;
1249 srepn->icontext = icontext;
1262 oscillator_ptr =
static_cast<OscillatorNode*
>(ac->nodes[srepn->inode].get());
1264 oscillator_ptr->frequency()->setValue(pnode->frequency);
1266 oscillator_ptr->detune()->setValue(pnode->detune);
1268 OscillatorType wave_type = (OscillatorType) name_lookup(pnode->type->strptr, periodicWave_types);
1284 oscillator_ptr->setType(wave_type);
1285 if(wave_type == OscillatorType::CUSTOM && pnode->periodicWave){
1288 if (pnode->isPaused == FALSE && pnode->__context_paused) {
1289 ac->context->resume();
1290 pnode->__context_paused = FALSE;
1292 SchedulingState status = oscillator_ptr->playbackState();
1293 if (status == SchedulingState::PLAYING) {
1294 if (pnode->isActive == FALSE)
1295 oscillator_ptr->stop(0.0);
1296 if (pnode->isPaused == TRUE) {
1297 ac->context->suspend();
1298 pnode->__context_paused = TRUE;
1301 else if (status != SchedulingState::PLAYING && (pnode->isActive == TRUE))
1302 oscillator_ptr->start(0.0);
1307 case NODE_PeriodicWave:
1310 std::shared_ptr<PeriodicWave> pwave;
1313 std::shared_ptr<AudioNode> oscillator = ac->nodes[iparent.p];
1314 OscillatorNode* oscillator_ptr =
1315 static_cast<OscillatorNode*
>(oscillator.get());
1317 unsigned int wave_type = name_lookup(pnode->type->strptr, periodicWave_types);
1318 switch (wave_type) {
1319 case OscillatorType::SINE:
1320 oscillator_ptr->setType(OscillatorType::SINE);
break;
1321 case OscillatorType::SQUARE:
1322 oscillator_ptr->setType(OscillatorType::SQUARE);
break;
1323 case OscillatorType::SAWTOOTH:
1324 oscillator_ptr->setType(OscillatorType::SAWTOOTH);
break;
1325 case OscillatorType::TRIANGLE:
1326 oscillator_ptr->setType(OscillatorType::TRIANGLE);
break;
1327 case OscillatorType::CUSTOM:
1328 oscillator_ptr->setType(OscillatorType::CUSTOM);
break;
1330 oscillator_ptr->setType(OscillatorType::OSCILLATOR_NONE);
1333 if (pnode->optionsReal.n != 0) {
1348 std::shared_ptr<GainNode> gain;
1350 if (!srepn->inode) {
1352 gain = std::make_shared<GainNode>(context);
1353 gain_ptr = gain.get();
1355 ac->nodes[ac->next_node] = gain;
1356 ac->nodetype[ac->next_node] = NODE_Gain;
1357 srepn->inode = ac->next_node;
1358 srepn->icontext = icontext;
1359 if (nondefault_channelinterp) {
1360 ChannelInterpretation interp;
1361 ChannelCountMode cmode;
1362 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1363 gain_ptr->setChannelInterpretation(interp);
1365 ContextGraphLock g(ac->context.get(),
"ex_simple");
1366 gain_ptr->setChannelCountMode(g, cmode);
1374 gain_ptr =
dynamic_cast<GainNode*
>(ac->nodes[srepn->inode].get());
1376 gain_ptr->gain()->setValue(pnode->gain);
1384 case NODE_ChannelSplitter:
1387 std::shared_ptr<ChannelSplitterNode> splitter;
1388 ChannelSplitterNode* splitter_ptr;
1390 if (!srepn->inode) {
1404 splitter = std::make_shared<ChannelSplitterNode>(context,pnode->channelCount);
1405 splitter_ptr = splitter.get();
1408 ac->nodes[ac->next_node] = splitter;
1409 ac->nodetype[ac->next_node] = NODE_ChannelSplitter;
1410 srepn->inode = ac->next_node;
1411 srepn->icontext = icontext;
1412 if (nondefault_channelinterp) {
1414 ChannelInterpretation interp;
1415 ChannelCountMode cmode;
1416 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1417 splitter_ptr->setChannelInterpretation(interp);
1419 ContextGraphLock g(ac->context.get(),
"ex_simple");
1420 splitter_ptr->setChannelCountMode(g, cmode);
1450 splitter_ptr =
dynamic_cast<ChannelSplitterNode*
>(ac->nodes[srepn->inode].get());
1455 case NODE_ChannelSelector:
1460 case NODE_ChannelMerger:
1463 std::shared_ptr<ChannelMergerNode> merger;
1464 ChannelMergerNode* merger_ptr;
1466 if (!srepn->inode) {
1480 merger = std::make_shared<ChannelMergerNode>(context, pnode->channelCount);
1481 merger_ptr = merger.get();
1482 if (nondefault_channelinterp) {
1484 ChannelInterpretation interp;
1485 ChannelCountMode cmode;
1486 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1487 merger_ptr->setChannelInterpretation(interp);
1489 ContextGraphLock g(ac->context.get(),
"ex_simple");
1490 merger_ptr->setChannelCountMode(g, cmode);
1494 ac->nodes[ac->next_node] = merger;
1495 ac->nodetype[ac->next_node] = NODE_ChannelMerger;
1496 srepn->inode = ac->next_node;
1497 srepn->icontext = icontext;
1507 merger_ptr =
dynamic_cast<ChannelMergerNode*
>(ac->nodes[srepn->inode].get());
1515 std::shared_ptr<DelayNode> delay;
1516 DelayNode* delay_ptr;
1517 std::shared_ptr<GainNode> gain;
1519 if (!srepn->inode) {
1532 delay = std::make_shared<DelayNode>(context,pnode->maxDelayTime);
1533 delay_ptr = delay.get();
1535 ac->nodes[ac->next_node] = delay;
1536 ac->nodetype[ac->next_node] = NODE_Delay;
1537 srepn->inode = ac->next_node;
1538 srepn->icontext = icontext;
1539 if (nondefault_channelinterp) {
1541 ChannelInterpretation interp;
1542 ChannelCountMode cmode;
1543 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1544 delay_ptr->setChannelInterpretation(interp);
1546 ContextGraphLock g(ac->context.get(),
"ex_simple");
1547 delay_ptr->setChannelCountMode(g, cmode);
1559 delay_ptr =
dynamic_cast<DelayNode*
>(ac->nodes[srepn->inode].get());
1560 delay_ptr->delayTime()->setFloat((
float)pnode->delayTime,
false);
// NODE_Analyser: create the AnalyserNode on first visit (srepn->inode == 0),
// then on every visit copy frequency- and time-domain data out of the node
// into the scene-graph node's (pnode) C arrays.
1568 std::shared_ptr<AnalyserNode> analyser;
1569 AnalyserNode* analyser_ptr;
// First visit: node not yet registered in ac->nodes.
1572 if (!srepn->inode) {
1585 analyser = std::make_shared<AnalyserNode>(context,pnode->fftSize);
1586 analyser_ptr = analyser.get();
// Register the node in the context's table and remember its slot/context
// on the scene-graph side so later visits can find it again.
1588 ac->nodes[ac->next_node] = analyser;
1589 ac->nodetype[ac->next_node] = NODE_Analyser;
1590 srepn->inode = ac->next_node;
1591 srepn->icontext = icontext;
// Optional channel interpretation / count-mode override, parsed from the
// scene-graph strings (same pattern as the other node cases).
1592 if (nondefault_channelinterp) {
1594 ChannelInterpretation interp;
1595 ChannelCountMode cmode;
1596 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1597 analyser_ptr->setChannelInterpretation(interp);
// setChannelCountMode requires the graph lock.
1599 ContextGraphLock g(ac->context.get(),
"ex_simple");
1600 analyser_ptr->setChannelCountMode(g, cmode);
// Every visit: re-fetch the registered node and refresh its settings.
1613 analyser_ptr =
dynamic_cast<AnalyserNode*
>(ac->nodes[srepn->inode].get());
1614 pnode->frequencyBinCount = (int)analyser_ptr->frequencyBinCount();
1617 analyser_ptr->setMaxDecibels(pnode->maxDecibels);
1618 analyser_ptr->setMinDecibels(pnode->minDecibels);
1619 analyser_ptr->setSmoothingTimeConstant(pnode->smoothingTimeConstant);
// Lazily allocate shared scratch buffers (4096 entries) used for readback.
// NOTE(review): if frequencyBinCount could exceed 4096, the copy loops below
// would read past the scratch buffer — verify against how fftSize is bounded.
1621 if (ac->bytearray ==
nullptr)
1622 ac->bytearray = std::make_shared<std::vector<std::uint8_t>>(4096);
1623 if (ac->floatarray ==
nullptr)
1624 ac->floatarray = std::make_shared<std::vector<std::float_t>>(4096);
1626 std::vector<std::float_t>* floatarray = ac->floatarray.get();
// NOTE(review): bytearray/p8 are fetched below but no visible line writes
// byte-frequency data; either the writes are in lines omitted from this
// listing or this is dead code — confirm against the full source.
1627 std::vector<std::uint8_t>* bytearray = ac->bytearray.get();
// Frequency-domain readback into pnode->floatFrequencyData.
1629 analyser_ptr->getFloatFrequencyData(*floatarray);
// Lazily allocate the destination array on the scene-graph side (freed
// elsewhere; allocation uses malloc to match the C-side struct).
1634 if (pnode->floatFrequencyData.n == 0) {
1635 pnode->floatFrequencyData.p = (
float*)malloc(pnode->frequencyBinCount *
sizeof(std::float_t));
1636 pnode->floatFrequencyData.n = pnode->frequencyBinCount;
1639 std::uint8_t* p8 = (std::uint8_t*)pnode->byteFrequencyData.p;
1640 std::float_t* ff = (std::float_t*)pnode->floatFrequencyData.p;
1641 for (
int i = 0; i < pnode->frequencyBinCount; i++) {
1644 ff[i] = (*floatarray)[i];
// Time-domain readback into pnode->floatTimeDomainData, reusing the same
// scratch buffer.
1647 analyser_ptr->getFloatTimeDomainData(*floatarray);
1652 if (pnode->floatTimeDomainData.n == 0) {
// NOTE(review): the time-domain buffer is sized with frequencyBinCount
// (fftSize/2); Web-Audio-style time-domain data is usually fftSize samples
// long — confirm this sizing is intentional.
1653 pnode->floatTimeDomainData.p = (
float*)malloc(pnode->frequencyBinCount *
sizeof(std::float_t));
1654 pnode->floatTimeDomainData.n = pnode->frequencyBinCount;
1658 std::float_t* fft = (std::float_t*)pnode->floatTimeDomainData.p;
1659 for (
int i = 0; i < pnode->frequencyBinCount; i++) {
1662 fft[i] = (*floatarray)[i];
1669 case NODE_BiquadFilter:
// String -> LabSound FilterType lookup table for the scene-graph "type"
// attribute; used by name_lookup() further down in this case.
1671 static struct key_name biquad_types[] = {
1672 {FilterType::ALLPASS,
"ALLPASS"},
1673 {FilterType::BANDPASS,
"BANDPASS"},
1674 {FilterType::FILTER_NONE,
"NONE"},
1675 {FilterType::HIGHPASS,
"HIGHPASS"},
1676 {FilterType::HIGHSHELF,
"HIGHSHELF"},
1677 {FilterType::LOWPASS,
"LOWPASS"},
1678 {FilterType::LOWSHELF,
"LOWSHELF"},
1679 {FilterType::NOTCH,
"NOTCH"},
1680 {FilterType::NOTCH,
"PEAKING"},
// NODE_BiquadFilter body: create-and-register on first visit, then refresh
// type/detune/frequency/Q from the scene-graph node on every visit.
1684 std::shared_ptr<BiquadFilterNode> biquad;
1685 BiquadFilterNode* biquad_ptr;
// First visit: node not yet registered in ac->nodes.
1688 if (!srepn->inode) {
1701 biquad = std::make_shared<BiquadFilterNode>(context);
1702 biquad_ptr = biquad.get();
// Optional channel interpretation / count-mode override (same pattern as
// the other node cases; graph lock required for setChannelCountMode).
1703 if (nondefault_channelinterp) {
1705 ChannelInterpretation interp;
1706 ChannelCountMode cmode;
1707 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1708 biquad_ptr->setChannelInterpretation(interp);
1710 ContextGraphLock g(ac->context.get(),
"ex_simple");
1711 biquad_ptr->setChannelCountMode(g, cmode);
// Register the node and record its slot/context on the scene-graph side.
1715 ac->nodes[ac->next_node] = biquad;
1716 ac->nodetype[ac->next_node] = NODE_BiquadFilter;
1717 srepn->inode = ac->next_node;
1718 srepn->icontext = icontext;
// Every visit: re-fetch the registered node and push current parameters.
1729 biquad_ptr =
dynamic_cast<BiquadFilterNode*
>(ac->nodes[srepn->inode].get());
1730 FilterType biquad_type = (FilterType)name_lookup(pnode->type->strptr, biquad_types);
1731 biquad_ptr->setType(biquad_type);
1732 biquad_ptr->detune()->setValue(pnode->detune);
1733 biquad_ptr->frequency()->setValue(pnode->frequency);
1734 biquad_ptr->q()->setValue(pnode->qualityFactor);
1737 case NODE_DynamicsCompressor:
// NODE_DynamicsCompressor: create-and-register on first visit, then push
// attack/knee/ratio/release/threshold each visit and read back `reduction`.
1740 std::shared_ptr<DynamicsCompressorNode> dynamics;
1741 DynamicsCompressorNode* dynamics_ptr;
// First visit: node not yet registered in ac->nodes.
1744 if (!srepn->inode) {
1757 dynamics = std::make_shared<DynamicsCompressorNode>(context);
1758 dynamics_ptr = dynamics.get();
// Optional channel interpretation / count-mode override (same pattern as
// the other node cases; graph lock required for setChannelCountMode).
1759 if (nondefault_channelinterp) {
1761 ChannelInterpretation interp;
1762 ChannelCountMode cmode;
1763 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1764 dynamics_ptr->setChannelInterpretation(interp);
1766 ContextGraphLock g(ac->context.get(),
"ex_simple");
1767 dynamics_ptr->setChannelCountMode(g, cmode);
// Register the node and record its slot/context on the scene-graph side.
1771 ac->nodes[ac->next_node] = dynamics;
1772 ac->nodetype[ac->next_node] = NODE_DynamicsCompressor;
1773 srepn->inode = ac->next_node;
1774 srepn->icontext = icontext;
// Every visit: re-fetch the registered node and push current parameters.
// attack/release are stored wider on the scene-graph side, hence the casts.
1785 dynamics_ptr =
dynamic_cast<DynamicsCompressorNode*
>(ac->nodes[srepn->inode].get());
1786 dynamics_ptr->attack()->setValue((
float)pnode->attack);
1787 dynamics_ptr->knee()->setValue(pnode->knee);
1788 dynamics_ptr->ratio()->setValue(pnode->ratio);
1789 dynamics_ptr->release()->setValue((
float)pnode->release);
1790 dynamics_ptr->threshold()->setValue(pnode->threshold);
// Output parameter: report the compressor's current gain reduction back to
// the scene graph.
1791 pnode->reduction = dynamics_ptr->reduction()->value();
1795 case NODE_WaveShaper:
// NODE_WaveShaper: create-and-register on first visit, then (below) copy the
// shaping curve and oversample mode from the scene-graph node each visit.
1798 std::shared_ptr<WaveShaperNode> wave;
1799 WaveShaperNode* wave_ptr;
// First visit: node not yet registered in ac->nodes.
1802 if (!srepn->inode) {
1815 wave = std::make_shared<WaveShaperNode>(context);
1816 wave_ptr = wave.get();
// Optional channel interpretation / count-mode override (same pattern as
// the other node cases; graph lock required for setChannelCountMode).
1817 if (nondefault_channelinterp) {
1819 ChannelInterpretation interp;
1820 ChannelCountMode cmode;
1821 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1822 wave_ptr->setChannelInterpretation(interp);
1824 ContextGraphLock g(ac->context.get(),
"ex_simple");
1825 wave_ptr->setChannelCountMode(g, cmode);
// Register the node and record its slot/context on the scene-graph side.
1829 ac->nodes[ac->next_node] = wave;
1830 ac->nodetype[ac->next_node] = NODE_WaveShaper;
1831 srepn->inode = ac->next_node;
1832 srepn->icontext = icontext;
// Every visit: re-fetch the registered node.
1842 wave_ptr =
dynamic_cast<WaveShaperNode*
>(ac->nodes[srepn->inode].get());
1843 std::vector<float> curve(pnode->curve.n);
1844 printf(
"pnode->curve.n %d p[0] %f p[44100-1] %f", pnode->curve.n, pnode->curve.p[0], pnode->curve.p[pnode->curve.n-1]);
1845 for (
int i = 0; i < pnode->curve.n; i++)
1846 curve[i] = pnode->curve.p[i];
1847 printf(
"curve[0] %f curve[-1] %f oversampe %s", curve[0], curve[curve.size() - 1], pnode->oversample->strptr);
1848 wave_ptr->setCurve(curve);
1849 OverSampleType oversample_type = (OverSampleType)name_lookup(pnode->oversample->strptr, waveshaper_oversampling_types);
1850 wave_ptr->setOversample(oversample_type);
1854 case NODE_Convolver:
// NODE_Convolver: create-and-register on first visit, then refresh the
// normalize flag and (when a buffer is attached) the impulse response.
1857 std::shared_ptr<ConvolverNode> convolver;
1858 ConvolverNode* convolver_ptr;
// First visit: node not yet registered in ac->nodes.
1861 if (!srepn->inode) {
1874 convolver = std::make_shared<ConvolverNode>(context);
1875 convolver_ptr = convolver.get();
// Optional channel interpretation / count-mode override (same pattern as
// the other node cases; graph lock required for setChannelCountMode).
1876 if (nondefault_channelinterp) {
1878 ChannelInterpretation interp;
1879 ChannelCountMode cmode;
1880 getChannelInterpretation(pnode->channelInterpretation->strptr, pnode->channelCountMode->strptr, &interp, &cmode);
1881 convolver_ptr->setChannelInterpretation(interp);
1883 ContextGraphLock g(ac->context.get(),
"ex_simple");
1884 convolver_ptr->setChannelCountMode(g, cmode);
// Register the node and record its slot/context on the scene-graph side.
1888 ac->nodes[ac->next_node] = convolver;
1889 ac->nodetype[ac->next_node] = NODE_Convolver;
1890 srepn->inode = ac->next_node;
1891 srepn->icontext = icontext;
// Every visit: re-fetch the registered node and push current settings.
1901 convolver_ptr =
dynamic_cast<ConvolverNode*
>(ac->nodes[srepn->inode].get());
1902 convolver_ptr->setNormalize(pnode->normalize);
// `abuf` is declared in lines omitted from this listing (original line
// ~1904); presumably the buffer attached to pnode. A positive __sourceNumber
// indexes a previously loaded bus used as the impulse response.
1903 if (pnode->buffer) {
1905 if (abuf->__sourceNumber > 0)
1906 convolver_ptr->setImpulse(busses[abuf->__sourceNumber]);
1910 case NODE_MicrophoneSource:
// NODE_MicrophoneSource: wrap the context's hardware input provider in an
// AudioHardwareInputNode on first visit; the node is constructed under the
// render lock. (This case continues past the end of this chunk.)
1913 std::shared_ptr<AudioHardwareInputNode> input;
1914 AudioHardwareInputNode* input_ptr;
1915 if (!srepn->inode) {
1918 ContextRenderLock r(ac->context.get(),
"microphone");
1920 std::shared_ptr<AudioHardwareInputNode> inputNode(
1921 new AudioHardwareInputNode(*ac->context.get(), ac->context.get()->destinationNode()->device()->sourceProvider()));
// NOTE(review): as visible here, `ac->nodes[ac->next_node] = input;` stores
// the default-constructed (null) shared_ptr, not `inputNode` — unless one of
// the original lines omitted from this listing (1922-1926) assigns
// `input = inputNode;`. Verify against the full source; if not, this
// registers a null node and later lookups will dereference null.
1927 ac->nodes[ac->next_node] = input;
1928 ac->nodetype[ac->next_node] = NODE_MicrophoneSource;
1929 srepn->inode = ac->next_node;
1930 srepn->icontext = icontext;
// Every visit: re-fetch the registered node (static_cast here, unlike the
// dynamic_cast used by the other cases).
1935 input_ptr =
static_cast<AudioHardwareInputNode*
>(ac->nodes[srepn->inode].get());