Perfect! That worked.
Now...
I have the following in my render():
/**
 * Bela audio callback for a Heavy (hvcc-generated) patch.
 *
 * Per block it:
 *  1. drains all parsed MIDI channel messages from every port and forwards
 *     them to the patch's MIDI receivers via hv_sendMessageToReceiverV(),
 *  2. de-interleaves Bela audio/analog inputs into Heavy's input buffers,
 *  3. copies the multiplexer capelet and digital inputs (message- and
 *     signal-rate) into the patch,
 *  4. runs the Heavy DSP graph with hv_processInline(),
 *  5. writes digital, scope, analog and audio outputs back to Bela.
 *
 * @param context  current Bela audio context (frame and channel counts, I/O)
 * @param userData unused
 */
void render(BelaContext *context, void *userData)
{
	// ------------------------------------------------------------------
	// MIDI input: forward every pending channel message to the patch.
	// ------------------------------------------------------------------
	for(unsigned int port = 0; port < midi.size(); ++port){
		while(midi[port]->getParser()->numAvailableMessages() > 0){
			// Fix: this was `static` in the original template. The value is
			// overwritten on every iteration, so the static lifetime only made
			// render() needlessly non-reentrant; a plain local is equivalent.
			MidiChannelMessage message;
			message = midi[port]->getParser()->getNextChannelMessage();
			switch(message.getType()){
				case kmmNoteOn: {
					//message.prettyPrint();
					int noteNumber = message.getDataByte(0);
					int velocity = message.getDataByte(1);
					int channel = message.getChannel();
					// Remember the last note for use outside this callback
					// (read by the lv2host long thread — see surrounding post).
					if (velocity > 0) gIsNoteOn = 1;
					gNote = noteNumber;
					// rt_printf("message: noteNumber: %f, velocity: %f, channel: %f\n", noteNumber, velocity, channel);
					// Pd-style [notein] order: note, velocity, channel (1-based).
					hv_sendMessageToReceiverV(gHeavyContext, hvMidiHashes[kmmNoteOn], 0, "fff",
							(float)noteNumber, (float)velocity, (float)channel+1);
					break;
				}
				case kmmNoteOff: {
					/* PureData does not seem to handle noteoff messages as per the MIDI specs,
					 * so that the noteoff velocity is ignored. Here we convert them to noteon
					 * with a velocity of 0.
					 */
					int noteNumber = message.getDataByte(0);
					// int velocity = message.getDataByte(1); // would be ignored by Pd
					int channel = message.getChannel();
					// note we are sending the below to hvHashes[kmmNoteOn] !!
					hv_sendMessageToReceiverV(gHeavyContext, hvMidiHashes[kmmNoteOn], 0, "fff",
							(float)noteNumber, (float)0, (float)channel+1);
					break;
				}
				case kmmControlChange: {
					int channel = message.getChannel();
					int controller = message.getDataByte(0);
					int value = message.getDataByte(1);
					// Exported for the lv2host long thread (see surrounding post).
					gControl = controller;
					gCCVal = value;
					// Pd-style [ctlin] order: value, controller, channel (1-based).
					hv_sendMessageToReceiverV(gHeavyContext, hvMidiHashes[kmmControlChange], 0, "fff",
							(float)value, (float)controller, (float)channel+1);
					break;
				}
				case kmmProgramChange: {
					int channel = message.getChannel();
					int program = message.getDataByte(0);
					hv_sendMessageToReceiverV(gHeavyContext, hvMidiHashes[kmmProgramChange], 0, "ff",
							(float)program, (float)channel+1);
					break;
				}
				case kmmPolyphonicKeyPressure: {
					//TODO: untested, I do not have anything with polyTouch... who does, anyhow?
					int channel = message.getChannel();
					int pitch = message.getDataByte(0);
					int value = message.getDataByte(1);
					// NOTE(review): unlike the other cases, channel is sent FIRST
					// here — presumably matching the patch's receiver; confirm.
					hv_sendMessageToReceiverV(gHeavyContext, hvMidiHashes[kmmPolyphonicKeyPressure], 0, "fff",
							(float)channel+1, (float)pitch, (float)value);
					break;
				}
				case kmmChannelPressure:
				{
					int channel = message.getChannel();
					int value = message.getDataByte(0);
					hv_sendMessageToReceiverV(gHeavyContext, hvMidiHashes[kmmChannelPressure], 0, "ff",
							(float)value, (float)channel+1);
					break;
				}
				case kmmPitchBend:
				{
					int channel = message.getChannel();
					// Reassemble the 14-bit bend value from the two 7-bit data bytes.
					int value = ((message.getDataByte(1) << 7) | message.getDataByte(0));
					hv_sendMessageToReceiverV(gHeavyContext, hvMidiHashes[kmmPitchBend], 0, "ff",
							(float)value, (float)channel+1);
					break;
				}
				case kmmSystem:
				case kmmNone:
				case kmmAny:
					// System/realtime messages are not forwarded to the patch.
					break;
			}
		}
	}
	// ------------------------------------------------------------------
	// De-interleave the data: Heavy's input buffer is laid out as
	// [channel][frame]; the first gAudioChannelsInUse channels are audio,
	// the next gAnalogChannelsInUse are analog, anything beyond that is a
	// routing/parameter channel.
	// ------------------------------------------------------------------
	if(gHvInputBuffers != NULL) {
		for(unsigned int n = 0; n < context->audioFrames; n++) {
			for(unsigned int ch = 0; ch < gHvInputChannels; ch++) {
				if(ch >= gAudioChannelsInUse + gAnalogChannelsInUse) {
					// THESE ARE PARAMETER INPUT 'CHANNELS' USED FOR ROUTING
					// 'sensor' outputs from routing channels of dac~ are passed through here
					// these could be also digital channels (handled by the dcm)
					// or parameter channels used for routing (currently unhandled)
					break;
				} else {
					// If more than 2 ADC inputs are used in the pd patch, route the analog inputs
					// i.e. ADC3->analogIn0 etc. (first two are always audio inputs)
					if(ch >= gAudioChannelsInUse)
					{
						unsigned int analogCh = ch - gAudioChannelsInUse;
						if(analogCh < context->analogInChannels)
							// Fix: dropped the redundant `int m = n;` indirection.
							gHvInputBuffers[ch * context->audioFrames + n] = analogReadNI(context, n, analogCh);
					} else {
						if(ch < context->audioInChannels)
							gHvInputBuffers[ch * context->audioFrames + n] = audioReadNI(context, n, ch);
					}
				}
			}
		}
	}
	// Multiplexer capelet: refresh the patch-side table once per
	// multiplexerArraySize render calls.
	if(pdMultiplexerActive){
		static int lastMuxerUpdate = 0;
		if(++lastMuxerUpdate == multiplexerArraySize){
			lastMuxerUpdate = 0;
			memcpy(hv_table_getBuffer(gHeavyContext, multiplexerTableHash), (float *const)context->multiplexerAnalogIn, multiplexerArraySize * sizeof(float));
		}
	}
	// Bela digital in
	if(gDigitalEnabled)
	{
		// note: in multiple places below we assume that the number of digital frames is same as number of audio
		// Bela digital in at message-rate
		dcm.processInput(context->digital, context->digitalFrames);
		// Bela digital in at signal-rate
		if(gDigitalSigInChannelsInUse > 0)
		{
			unsigned int j, k;
			float *p0, *p1;
			const unsigned int gLibpdBlockSize = context->audioFrames;
			const unsigned int audioFrameBase = 0;
			float* gInBuf = gHvInputBuffers;
			// block below copy/pasted from libpd, except
			// 16 has been replaced with gDigitalSigInChannelsInUse
			for (j = 0, p0 = gInBuf; j < gLibpdBlockSize; j++, p0++) {
				unsigned int digitalFrame = audioFrameBase + j;
				for (k = 0, p1 = p0 + gLibpdBlockSize * gFirstDigitalChannel;
						k < gDigitalSigInChannelsInUse; ++k, p1 += gLibpdBlockSize) {
					if(dcm.isSignalRate(k) && dcm.isInput(k)){ // only process input channels that are handled at signal rate
						*p1 = digitalRead(context, digitalFrame, k);
					}
				}
			}
		}
	}
	// replacement for bang~ object
	//hv_sendMessageToReceiverV(gHeavyContext, "bela_bang", 0.0f, "b");
	// heavy audio callback: run the patch's DSP graph for this block
	hv_processInline(gHeavyContext, gHvInputBuffers, gHvOutputBuffers, context->audioFrames);
	/*
	for(int n = 0; n < context->audioFrames*gHvOutputChannels; ++n)
	{
		printf("%.3f, ", gHvOutputBuffers[n]);
		if(n % context->audioFrames == context->audioFrames - 1)
			printf("\n");
	}
	*/
	// Bela digital out
	if(gDigitalEnabled)
	{
		// Bela digital out at signal-rate
		if(gDigitalSigOutChannelsInUse > 0)
		{
			unsigned int j, k;
			float *p0, *p1;
			const unsigned int gLibpdBlockSize = context->audioFrames;
			const unsigned int audioFrameBase = 0;
			float* gOutBuf = gHvOutputBuffers;
			// block below copy/pasted from libpd, except
			// context->digitalChannels has been replaced with gDigitalSigOutChannelsInUse
			for (j = 0, p0 = gOutBuf; j < gLibpdBlockSize; ++j, ++p0) {
				unsigned int digitalFrame = (audioFrameBase + j);
				for (k = 0, p1 = p0 + gLibpdBlockSize * gFirstDigitalChannel;
						k < gDigitalSigOutChannelsInUse; k++, p1 += gLibpdBlockSize) {
					if(dcm.isSignalRate(k) && dcm.isOutput(k)){ // only process output channels that are handled at signal rate
						digitalWriteOnce(context, digitalFrame, k, *p1 > 0.5);
					}
				}
			}
		}
		// Bela digital out at message-rate
		dcm.processOutput(context->digital, context->digitalFrames);
	}
	// Bela scope: log one multi-channel sample per frame from the patch's
	// dedicated scope output channels.
	if(gScopeChannelsInUse > 0)
	{
		unsigned int j, k;
		float *p0, *p1;
		const unsigned int gLibpdBlockSize = context->audioFrames;
		float* gOutBuf = gHvOutputBuffers;
		// block below copy/pasted from libpd
		for (j = 0, p0 = gOutBuf; j < gLibpdBlockSize; ++j, ++p0) {
			for (k = 0, p1 = p0 + gLibpdBlockSize * gFirstScopeChannel; k < gScopeChannelsInUse; k++, p1 += gLibpdBlockSize) {
				gScopeOut[k] = *p1;
			}
			scope->log(gScopeOut);
		}
	}
	// ------------------------------------------------------------------
	// Interleave the output data back into the Bela context (mirror of
	// the input de-interleave above).
	// ------------------------------------------------------------------
	if(gHvOutputBuffers != NULL) {
		for(unsigned int n = 0; n < context->audioFrames; n++) {
			for(unsigned int ch = 0; ch < gHvOutputChannels; ch++) {
				if(ch >= gAudioChannelsInUse + gAnalogChannelsInUse) {
					// THESE ARE SENSOR OUTPUT 'CHANNELS' USED FOR ROUTING
					// they are the content of the 'sensor output' dac~ channels
				} else {
					if(ch >= gAudioChannelsInUse) {
						unsigned int analogCh = ch - gAudioChannelsInUse;
						if(analogCh < context->analogOutChannels)
							// Fix: dropped the redundant `int m = n;` indirection.
							analogWriteOnceNI(context, n, analogCh, gHvOutputBuffers[ch*context->audioFrames + n]);
					} else {
						if(ch < context->audioOutChannels)
							audioWriteNI(context, n, ch, gHvOutputBuffers[ch * context->audioFrames + n]);
					}
				}
			}
		}
	}
}
That is more or less the generic Heavy render() code; I only changed the audio parts to the non-interleaved (NI) calls you suggested, and I write to some variables in the MIDI section for use in the lv2host long thread. While trying to just put an "if" around the rest and running these lines:
// Hand the current context over to the "long" (low-priority) render thread.
gBcf.push(BelaContextFifo::kToLong, context);
/// receive from the "long" render
const InternalBelaContext* rctx = (InternalBelaContext*)gBcf.pop(BelaContextFifo::kToShort);
// pop() may return nothing while the long thread hasn't produced a block yet;
// only copy its data back into the real-time context when one is available.
if(rctx) {
BelaContextSplitter::contextCopyData(rctx, (InternalBelaContext*)context);
}
in the corresponding else branch, I realised two things:
1. The MIDI part is of course still communicating with Heavy, so parameters in my Heavy patch still get adjusted. I will therefore move the MIDI handling into the else section as well, so that MIDI to Heavy is also muted when switching.
2. My approach of just putting an "if" around everything else does not work :-) (I didn't think it would, but one can try) — the audio is simply muted in my case. So the audio input and output handling needs to stay in the chain, I guess. Which parts do I need to leave in the above render() if I just want the audio input from the regular (short) render to be passed to the long render, and the audio output from the long render passed back to the short one?
Sorry if this is all obvious, and thanks as always — I feel I am getting closer to my desired result.