I have a project with four inputs that I want to attenuate with analog sliders and then mix down to one output channel. Just sending the inputs to the output with a linear fade takes 11 to 13% of the CPU at a buffer size of 32. However, if I map the sliders to a decibel range and then convert back to a linear gain between 0 and 1, the CPU usage jumps to 33 to 35%.
I must be doing something wrong, but what?
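To be clear about what the two versions do: the linear fade maps the raw analog reading (0 to 3.3/4.096) straight onto a gain of 0 to 1, while the decibel fade maps the same reading onto -60 dB to 0 dB and then converts back to a linear gain with gain = powf(10.0, dB / 20.0), so the bottom of the slider gives 10^(-3) = 0.001 and the top gives 1.0.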
Below is the code with both fades; the decibel version is commented out. I am using the CTAG Face cape.
#include <Bela.h>
#include <algorithm>
#include <cmath>
#include <cstdio>
int gAudioChannelNum; // number of audio channels to iterate over
int gAnalogChannelNum; // number of analog channels to iterate over
float in[4] = {}; // per-channel input samples
float out[4] = {}; // per-channel attenuated samples
float mix = 0; // summed mono output
float slider[4] = {}; // per-channel slider gains
bool setup(BelaContext *context, void *userData)
{
	if(context->audioInChannels != context->audioOutChannels ||
			context->analogInChannels != context->analogOutChannels) {
		printf("Different number of outputs and inputs available. Working with what we have.\n");
	}
	gAudioChannelNum = std::min(context->audioInChannels, context->audioOutChannels);
	gAnalogChannelNum = std::min(context->analogInChannels, context->analogOutChannels);
	return true;
}
void render(BelaContext *context, void *userData)
{
	for(unsigned int n = 0; n < context->audioFrames; n++) {
		for(unsigned int ch = 0; ch < gAudioChannelNum; ch++) {
			slider[ch] = analogRead(context, n/2, ch); // slider (analog runs at half the audio rate)
			// Linear fade
			slider[ch] = map(slider[ch], 0.0, 3.3/4.096, 0.0, 1.0);
			// Decibel fade
			// slider[ch] = map(slider[ch], 0.0, 3.3/4.096, -60.0, 0.0);
			// slider[ch] = powf(10.0, slider[ch] / 20.0);
			in[ch] = audioRead(context, n, ch);
			out[ch] = in[ch] * slider[ch];
		}
		mix = out[0] + out[1] + out[2] + out[3];
		audioWrite(context, n, 0, mix);
	}
}
void cleanup(BelaContext *context, void *userData)
{
}