Hi there! First post 🙂 My friend and I are very new to C++, but we're trying to code a psychological experiment using a MIDI keyboard and the Bela Mini. We're currently too tired to function, but here's the state of things:

Our experiment works like this: participants are presented with 4 metronome tones (here called "cue tones"), and afterwards they have to keep tapping at that tempo for 24 tones. In the condition shown here, two of the participants have to press together in order for the Bela to generate a sound, while the third participant can play a tone alone. The 'loner' and the 'subgroup' take turns playing a tone. The timing of each keypress is recorded together with information about the participant, the group, the condition and the trial number.

Our code is currently a wonky combination of code from other scripts, the Bela YouTube channel and AI, and we've desperately moved things around – so please don't pay too much attention to the comments :')

We've been told that we can fix an underrun warning, and be more certain about our timing, by moving the MIDI event processing into the "audioFrames loop" that runs every frame, which currently only generates the sounds. We tried moving lines 114 to 249 into the loop starting at line 250, but this completely ruined the sound generation and introduced ungodly amounts of distortion.

On top of this, the Bela seems to stay silent for too long after the fourth metronome beat, so pressing in time with the metronome does not generate a tone for the first tap.

We hope a kind soul will show mercy and give us a helping hand - have a nice weekend !

Here's our poor code

#include <Bela.h>
#include <libraries/Midi/Midi.h>
#include <stdlib.h>
#include <cmath>
#include <fstream>
// #include <libraries/WriteFile/WriteFile.h>
#include <chrono>
#include <string>
#include <iomanip>  // for std::fixed and std::setprecision
#include <set> // Include for std::set
#include <algorithm> // For std::includes
#include <vector> // For std::vector

std::set<int> activeParticipants; // Set to track currently active participants
const std::set<int> requiredParticipants = {1, 2, 3}; // Define required participants
const std::set<int> lonerParticipants = {3}; // Define the loner participant
const std::set<int> subgroupParticipants = {1, 2}; // Define the subgroup participants

// Global variables
// Replace WriteFile global variables with std::ofstream
std::ofstream gDataFile;
int gCurrentGroup = 0;
const char* gCurrentCondition = "Subgroup";
int gCurrentTrial = 1;

int participant = 0;

// MIDI setup
Midi gMidi;
const char* gMidiPort0 = "hw:1,0,0";

std::string gCurrentPhase;

unsigned long gStartTime;
int gTapCount = 0;
std::vector<float> logs(6); // Create a vector to store the log data

// Oscillator state - generate the tone 
float gPhase = 0; 
float gFrequency = 0;
float gAmplitude = 0;
double freq[6] = {440.0, 493.88, 554.37, 659.26, 739.99, 880.0}; // array of 6 tones from the A-major pentatonic scale
int tone_nr = 0;

// List of active notes
const int kMaxNotes = 16;
int gActiveNotes[kMaxNotes];
int gActiveNoteCount = 0;

// Cue tone variable
int CueTone = 0; 
int cueTonePlayCount = 0;
int cueToneCounter = 0;
bool playingCueTone = true;

int gTotalTaps = 0;
bool gTrialComplete = false;

float audioSampleRate = 0;

bool setup(BelaContext *context, void *userData)
{
	// print statement to check samplerate 
	rt_printf("Audio sample rate: %f\n", context->audioSampleRate);
	audioSampleRate = context->audioSampleRate;
	
    // Initialise the MIDI device
    if(gMidi.readFrom(gMidiPort0) < 0) {
        rt_printf("Unable to read from MIDI port %s\n", gMidiPort0);
        return false;
    }
    gMidi.writeTo(gMidiPort0);
    gMidi.enableParser(true);
    
    // Initialize start time
    gStartTime = std::chrono::duration_cast<std::chrono::milliseconds>
        (std::chrono::system_clock::now().time_since_epoch()).count();
    
    // Read parameters
    std::ifstream params("/root/Bela/projects/Subgroup/current_params");
    if(!params) {
        rt_printf("Warning: Could not read parameters file\n");
        gCurrentGroup = 0;
        gCurrentCondition = "Subgroup";
        gCurrentTrial = 1;
    } else {
        params >> gCurrentGroup >> gCurrentTrial;
        rt_printf("Parameters loaded: group=%d, condition=%s, trial=%d\n", 
                 gCurrentGroup, gCurrentCondition, gCurrentTrial);
    }
    
    // Setup data logging with CSV file
    char filename[100];
    sprintf(filename, "/root/Bela/projects/Subgroup/data/trial_%d_%s_%d.csv", 
            gCurrentGroup, gCurrentCondition, gCurrentTrial);
    
    gDataFile.open(filename);
    if(!gDataFile.is_open()) {
        rt_printf("Error: Could not open data file %s\n", filename);
        return false;
    }
    
    // Write CSV header
    gDataFile << "timestamp,participant,group,condition,trial" << std::endl;
    rt_printf("Data logging setup for file: %s\n", filename);
    
    gTotalTaps = 0;
    gTrialComplete = false;
    
    return true;
}

void render(BelaContext *context, void *userData)
{		
	//loop through audioframes 
	
	// At the beginning of each callback, look for available MIDI
	// messages that have come in since the last block
	while(gMidi.getParser()->numAvailableMessages() > 0) {
		MidiChannelMessage message;
		message = gMidi.getParser()->getNextChannelMessage();
		message.prettyPrint();		// Print the message data
		
		// A MIDI "note on" message type might actually hold a real
		// note onset (e.g. key press), or it might hold a note off (key release).
		// The latter is signified by a velocity of 0.
		if(message.getType() == kmmNoteOn) {
			int noteNumber = message.getDataByte(0);
			int velocity = message.getDataByte(1); 
			
			// Velocity of 0 is really a note off
			if(velocity > 0) {
				if (!gTrialComplete && gActiveNoteCount < kMaxNotes) {
        // Determine participant based on note number
        if (noteNumber == 48) participant = 1;
        else if (noteNumber == 59) participant = 2;
        else if (noteNumber == 71) participant = 3;
        else participant = 0; // Invalid participant key

        if (participant > 0) {
            // Log the keypress immediately
            unsigned long currentTime = std::chrono::duration_cast<std::chrono::milliseconds>
                (std::chrono::system_clock::now().time_since_epoch()).count();
            float timestamp = (currentTime - gStartTime) / 1000.0;

            if (gDataFile.is_open()) {
                gDataFile << std::fixed << std::setprecision(3)
                          << timestamp << "," << participant << ","
                          << gCurrentGroup << "," << gCurrentCondition << ","
                          << gCurrentTrial << std::endl;

                rt_printf("Logged: time=%.3f, participant=%d, group=%d, condition=%s, trial=%d\n",
                          timestamp, participant, gCurrentGroup, gCurrentCondition, gCurrentTrial);
            }

            // Add participant to activeParticipants for synchronization tracking
            activeParticipants.insert(participant);

            // Check if all required participants have pressed their keys
            if (std::includes(activeParticipants.begin(), activeParticipants.end(),
                  subgroupParticipants.begin(), subgroupParticipants.end()) ||
    std::includes(activeParticipants.begin(), activeParticipants.end(),
                  lonerParticipants.begin(), lonerParticipants.end())) {
                
                // All required participants pressed keys simultaneously
                gActiveNotes[gActiveNoteCount] = noteNumber;
                gActiveNoteCount++;
                gFrequency = freq[tone_nr];
                gAmplitude = 0.5;

                // Reset activeParticipants for the next synchronization event
                activeParticipants.clear();

                // Increment total taps only for synchronized events
                gTotalTaps++;
                if (tone_nr < 5) { // freq[] has 6 entries, so wrap after index 5
                    tone_nr++;
                }
                else {
                	tone_nr = 0;
                }
                
                rt_printf("Tap %d/24: participant=%d\n", gTotalTaps, participant);

                if (gTotalTaps >= 24) {
                    gTrialComplete = true;
                    rt_printf("\nTrial complete! 24 taps recorded.\n");
        			
                }
               
            }
        }
    }
			}
		}
		
		else if(message.getType() == kmmNoteOff) {
			// We can also encounter the "note off" message type which is the same
			// as "note on" with a velocity of 0.
			int noteNumber = message.getDataByte(0);
			
			// When we receive a note off, it might be the most recent note
	// that we played, or it might be an earlier note. We need to figure
	// out which indexes correspond to this note number.
	
		bool activeNoteChanged = false;
	
	// Go through all the active notes and remove any with this number
		for(int i = gActiveNoteCount - 1; i >= 0; i--) {
			if(gActiveNotes[i] == noteNumber) {
			// Found a match: is it the most recent note?
			
			// TODO 1: if the note is the most recent, set the flag
			// that says we will change the active note (activeNoteChanged)
			// But how do we know if this note in the array is the most
			// recent one? (hint: it depends on the value of i)
				if (i == gActiveNoteCount-1) {
					activeNoteChanged = true;
				}
			
			// TODO 2: move all the later notes to be one slot earlier in the
			// array. Hint: you will need another for() loop with a new
			// index, starting from "i" and counting upward
				for (int j = i; j < gActiveNoteCount-1; j++){
					gActiveNotes[j] = gActiveNotes[j + 1];
			} 
	
			// TODO 3: decrease the number of active notes
			gActiveNoteCount--;
		}
	}

	rt_printf("Note off: %d notes remaining\n", gActiveNoteCount);
	
	if(gActiveNoteCount == 0) {
		// No notes left
		gAmplitude = 0;
	}
	//else if(activeNoteChanged) {
		// Update the frequency but don't retrigger
	//	int mostRecentNote = gActiveNotes[gActiveNoteCount - 1];
		
	//	gFrequency = powf(2.0, (mostRecentNote - 69)/12.0) * 440.0; //convert to frequency
		
	//	rt_printf("Note changed: new frequency %f\n", gFrequency);
	//}
		}

	}
	//Cue Tones 
  for(unsigned int n = 0; n < context->audioFrames; n++) {
  	
        float value = 0;
        // Cue Tone Handling
        if (playingCueTone) {
            if (cueTonePlayCount < 4) {
                // Play cue tone for 0.5 seconds, then silence for 0.5 seconds
                if (cueToneCounter < audioSampleRate / 2 ){ // Play tone
                    gFrequency = 440.0;
                    gAmplitude = 0.5; 
                } else if (cueToneCounter < audioSampleRate) { // Silence
                    gAmplitude = 0.0;
                } else { // Reset cycle
                    cueToneCounter = 0;
                    cueTonePlayCount++;
                }
                cueToneCounter++; // Increment the sample counter
                value = sin(gPhase) * gAmplitude; // Generate cue tone wave

                gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
                if (gPhase > 2.0 * M_PI) { // Ensure phase wraps correctly
                    gPhase -= 2.0 * M_PI;
                }
            } else {
                // Cue tone finished
                playingCueTone = false; // Disable cue tone mode
                cueToneCounter = 0;     // Reset counter
                gAmplitude = 0.0;       // Ensure silence is cleared
                gPhase = 0.0f;          // Reset phase
            }
        }

        // Normal Note Generation (Outside Cue Tone Block)
        else if (gActiveNoteCount > 0) {
            gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
            if (gPhase > 2.0 * M_PI) {
                gPhase -= 2.0 * M_PI;
            }
            value = sin(gPhase) * gAmplitude;
        }

        // Write audio output for each channel
        for (unsigned int ch = 0; ch < context->audioOutChannels; ++ch) {
            audioWrite(context, n, ch, value);
        
        }
        
        // Log the keypress immediately
            unsigned long currentTime = std::chrono::duration_cast<std::chrono::milliseconds>
                (std::chrono::system_clock::now().time_since_epoch()).count();
            float timestamp = (currentTime - gStartTime) / 1000.0;

            if (gDataFile.is_open()) {
                gDataFile << std::fixed << std::setprecision(3)
                          << timestamp << "," << participant << ","
                          << gCurrentGroup << "," << gCurrentCondition << ","
                          << gCurrentTrial << std::endl;

                rt_printf("Logged: time=%.3f, participant=%d, group=%d, condition=%s, trial=%d\n",
                          timestamp, participant, gCurrentGroup, gCurrentCondition, gCurrentTrial);
            }
}
}

void cleanup(BelaContext *context, void *userData)
{
    if(gDataFile.is_open()) {
        gDataFile.close();
        rt_printf("Data file closed\n");
    }
}

so please don’t pay too much attention to the comments :’)

... or to the indentation

There's no point in moving the MIDI event processing loop inside the for(...audioFrames...) loop: no new events will be coming in between audio frames.

You don't say how often you get your underruns, but it looks like you should be getting a whole lot. You should not touch files or read the system clock from within the audio thread (i.e. render()). Yet you are trying to do both of those things; not only are you doing it unconditionally, but also once every audio frame!

                // Log the keypress immediately
                unsigned long currentTime = std::chrono::duration_cast<std::chrono::milliseconds>
                        (std::chrono::system_clock::now().time_since_epoch()).count();
                float timestamp = (currentTime - gStartTime) / 1000.0;

                if (gDataFile.is_open()) {
                        gDataFile << std::fixed << std::setprecision(3)
                                << timestamp << "," << participant << ","
                                << gCurrentGroup << "," << gCurrentCondition << ","
                                << gCurrentTrial << std::endl;
           }

Writing anything once per audio frame is useless; additionally, most of the variables you are writing are constants. These should be written only once, either in a header or in the file name itself. Writing the timestamp at every frame is also pointless if you write at regular intervals: you will know the time by counting the entries.

Maybe what you need is only to log individual timestamped events when they happen, without needing to write every frame? Even in that case, make sure you write everything that's constant only once, and each log event should only contain information that is not constant.

Calling std::chrono::system_clock::now() is not real-time safe. You can get a sample-accurate timer (in case you need it) by counting audio frames. If block-level precision is enough, you can just use context->audioFramesElapsed.
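Something along these lines, for instance (untested sketch; gSampleClock is just an illustrative name, not something in your code):

#include <Bela.h>

// Untested sketch: a sample-accurate clock kept by the audio thread.
unsigned long long gSampleClock = 0;

void render(BelaContext *context, void *userData)
{
    // When a note on arrives, timestamp it from the clock instead of chrono:
    // float timestamp = gSampleClock / context->audioSampleRate; // seconds since start

    for(unsigned int n = 0; n < context->audioFrames; n++) {
        // ... generate this frame's output ...
        gSampleClock++; // one increment per frame, not per channel
    }
    // Block-level alternative: context->audioFramesElapsed is maintained for you
    // and counts the frames processed before the current block.
}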

Writing to an open file with << is not real-time safe. You need a buffer in memory that you write to from the audio thread, and a different thread that then writes it to disk. That's what the WriteFile class does. Badly, but better than this.
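For your use case it could look roughly like this (sketch only; double-check the calls against the WriteFile examples — setHeader()/setFooter() in particular I am writing from memory, and the file path and names are just illustrative):

#include <Bela.h>
#include <libraries/WriteFile/WriteFile.h>

WriteFile gTapLog;

bool setup(BelaContext *context, void *userData)
{
    // Constants (group, condition, trial) can live in the file name or the header.
    gTapLog.setup("/root/Bela/projects/Subgroup/data/taps.csv");
    gTapLog.setFileType(kText);
    gTapLog.setFormat("%.4f,%.0f\n");             // timestamp, participant
    gTapLog.setHeader("timestamp,participant\n"); // replaces the default header
    gTapLog.setFooter("");
    return true;
}

void render(BelaContext *context, void *userData)
{
    // ... when a valid key press is detected:
    // float entry[2] = {timestamp, (float)participant};
    // gTapLog.log(entry, 2); // copies into WriteFile's buffer; a non-RT thread writes the file
}

void cleanup(BelaContext *context, void *userData) {}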

Hey Giulio, thanks very much for the quick and informative reply.
We were in a bit of a crisis when we sent in the code yesterday, and it turns out we accidentally added an extra mistake by logging every frame instead of only when MIDI events happened. This is the code we should have sent:

#include <Bela.h>
#include <libraries/Midi/Midi.h>
#include <stdlib.h>
#include <cmath>
#include <fstream>
// #include <libraries/WriteFile/WriteFile.h>
#include <chrono>
#include <string>
#include <iomanip> // for std::fixed and std::setprecision
#include <set> // Include for std::set
#include <algorithm> // For std::includes
#include <vector> // For std::vector

std::set<int> activeParticipants; // Set to track currently active participants
const std::set<int> requiredParticipants = {1, 2, 3}; // Define required participants
const std::set<int> lonerParticipants = {3}; // Define the loner participant
const std::set<int> subgroupParticipants = {1, 2}; // Define the subgroup participants

// Global variables
// Replace WriteFile global variables with std::ofstream
std::ofstream gDataFile;
int gCurrentGroup = 0;
const char* gCurrentCondition = "Subgroup";
int gCurrentTrial = 1;

int participant = 0;

// MIDI setup
Midi gMidi;
const char* gMidiPort0 = "hw:1,0,0";

std::string gCurrentPhase;

unsigned long gStartTime;
int gTapCount = 0;
std::vector<float> logs(6); // Create a vector to store the log data

// Oscillator state - generate the tone
float gPhase = 0;
float gFrequency = 0;
float gAmplitude = 0;
double freq[6] = {440.0, 493.88, 554.37, 659.26, 739.99, 880.0}; // array of 6 tones from the A-major pentatonic scale
int tone_nr = 0;

// List of active notes
const int kMaxNotes = 16;
int gActiveNotes[kMaxNotes];
int gActiveNoteCount = 0;

// Cue tone variable
int CueTone = 0;
int cueTonePlayCount = 0;
int cueToneCounter = 0;
bool playingCueTone = true;

int gTotalTaps = 0;
bool gTrialComplete = false;

float audioSampleRate = 0;

bool setup(BelaContext *context, void *userData)
{
// print statement to check samplerate
rt_printf("Audio sample rate: %f\n", context->audioSampleRate);
audioSampleRate = context->audioSampleRate;

// Initialise the MIDI device
if(gMidi.readFrom(gMidiPort0) < 0) {
    rt_printf("Unable to read from MIDI port %s\n", gMidiPort0);
    return false;
}
gMidi.writeTo(gMidiPort0);
gMidi.enableParser(true);

// Initialize start time
gStartTime = std::chrono::duration_cast<std::chrono::milliseconds>
    (std::chrono::system_clock::now().time_since_epoch()).count();

// Read parameters
std::ifstream params("/root/Bela/projects/Subgroup/current_params");
if(!params) {
    rt_printf("Warning: Could not read parameters file\n");
    gCurrentGroup = 0;
    gCurrentCondition = "Subgroup";
    gCurrentTrial = 1;
} else {
    params >> gCurrentGroup >> gCurrentTrial;
    rt_printf("Parameters loaded: group=%d, condition=%s, trial=%d\n", 
             gCurrentGroup, gCurrentCondition, gCurrentTrial);
}

// Setup data logging with CSV file
char filename[100];
sprintf(filename, "/root/Bela/projects/Subgroup/data/trial_%d_%s_%d.csv", 
        gCurrentGroup, gCurrentCondition, gCurrentTrial);

gDataFile.open(filename);
if(!gDataFile.is_open()) {
    rt_printf("Error: Could not open data file %s\n", filename);
    return false;
}

// Write CSV header
gDataFile << "timestamp,participant,group,condition,trial" << std::endl;
rt_printf("Data logging setup for file: %s\n", filename);

gTotalTaps = 0;
gTrialComplete = false;

return true;

}

void render(BelaContext *context, void *userData)
{
//loop through audioframes

// At the beginning of each callback, look for available MIDI
// messages that have come in since the last block
while(gMidi.getParser()->numAvailableMessages() > 0) {
	MidiChannelMessage message;
	message = gMidi.getParser()->getNextChannelMessage();
	message.prettyPrint();		// Print the message data
	
	// A MIDI "note on" message type might actually hold a real
	// note onset (e.g. key press), or it might hold a note off (key release).
	// The latter is signified by a velocity of 0.
	if(message.getType() == kmmNoteOn) {
		int noteNumber = message.getDataByte(0);
		int velocity = message.getDataByte(1); 
		
		// Velocity of 0 is really a note off
		if(velocity > 0) {
			if (!gTrialComplete && gActiveNoteCount < kMaxNotes) {
    // Determine participant based on note number
    if (noteNumber == 48) participant = 1;
    else if (noteNumber == 59) participant = 2;
    else if (noteNumber == 71) participant = 3;
    else participant = 0; // Invalid participant key

    if (participant > 0) {
        // Log the keypress immediately
        unsigned long currentTime = std::chrono::duration_cast<std::chrono::milliseconds>
            (std::chrono::system_clock::now().time_since_epoch()).count();
        float timestamp = (currentTime - gStartTime) / 1000.0;

        if (gDataFile.is_open()) {
            gDataFile << std::fixed << std::setprecision(3)
                      << timestamp << "," << participant << ","
                      << gCurrentGroup << "," << gCurrentCondition << ","
                      << gCurrentTrial << std::endl;

            rt_printf("Logged: time=%.3f, participant=%d, group=%d, condition=%s, trial=%d\n",
                      timestamp, participant, gCurrentGroup, gCurrentCondition, gCurrentTrial);
        }

        // Add participant to activeParticipants for synchronization tracking
        activeParticipants.insert(participant);

        // Check if all required participants have pressed their keys
        if (std::includes(activeParticipants.begin(), activeParticipants.end(),
              subgroupParticipants.begin(), subgroupParticipants.end()) ||
std::includes(activeParticipants.begin(), activeParticipants.end(),
              lonerParticipants.begin(), lonerParticipants.end())) {
            
            // All required participants pressed keys simultaneously
            gActiveNotes[gActiveNoteCount] = noteNumber;
            gActiveNoteCount++;
            gFrequency = freq[tone_nr];
            gAmplitude = 0.5;

            // Reset activeParticipants for the next synchronization event
            activeParticipants.clear();

            // Increment total taps only for synchronized events
            gTotalTaps++;
            if (tone_nr < 5) { // freq[] has 6 entries, so wrap after index 5
                tone_nr++;
            }
            else {
            	tone_nr = 0;
            }
            
            rt_printf("Tap %d/24: participant=%d\n", gTotalTaps, participant);

            if (gTotalTaps >= 24) {
                gTrialComplete = true;
                rt_printf("\nTrial complete! 24 taps recorded.\n");
    			
            }
           
        }
    }
}
		}
	}
	
	else if(message.getType() == kmmNoteOff) {
		// We can also encounter the "note off" message type which is the same
		// as "note on" with a velocity of 0.
		int noteNumber = message.getDataByte(0);
		
		// When we receive a note off, it might be the most recent note
// that we played, or it might be an earlier note. We need to figure
// out which indexes correspond to this note number.

	bool activeNoteChanged = false;

// Go through all the active notes and remove any with this number
	for(int i = gActiveNoteCount - 1; i >= 0; i--) {
		if(gActiveNotes[i] == noteNumber) {
		// Found a match: is it the most recent note?
		
		// TODO 1: if the note is the most recent, set the flag
		// that says we will change the active note (activeNoteChanged)
		// But how do we know if this note in the array is the most
		// recent one? (hint: it depends on the value of i)
			if (i == gActiveNoteCount-1) {
				activeNoteChanged = true;
			}
		
		// TODO 2: move all the later notes to be one slot earlier in the
		// array. Hint: you will need another for() loop with a new
		// index, starting from "i" and counting upward
			for (int j = i; j < gActiveNoteCount-1; j++){
				gActiveNotes[j] = gActiveNotes[j + 1];
		} 

		// TODO 3: decrease the number of active notes
		gActiveNoteCount--;
	}
}

rt_printf("Note off: %d notes remaining\n", gActiveNoteCount);

if(gActiveNoteCount == 0) {
	// No notes left
	gAmplitude = 0;
}
//else if(activeNoteChanged) {
	// Update the frequency but don't retrigger
//	int mostRecentNote = gActiveNotes[gActiveNoteCount - 1];
	
//	gFrequency = powf(2.0, (mostRecentNote - 69)/12.0) * 440.0; //convert to frequency
	
//	rt_printf("Note changed: new frequency %f\n", gFrequency);
//}
	}

}
//Cue Tones 

for(unsigned int n = 0; n < context->audioFrames; n++) {

    float value = 0;
    // Cue Tone Handling
    if (playingCueTone) {
        if (cueTonePlayCount < 4) {
            // Play cue tone for 0.5 seconds, then silence for 0.5 seconds
            if (cueToneCounter < audioSampleRate / 2 ){ // Play tone
                gFrequency = 440.0;
                gAmplitude = 0.5; 
            } else if (cueToneCounter < audioSampleRate) { // Silence
                gAmplitude = 0.0;
            } else { // Reset cycle
                cueToneCounter = 0;
                cueTonePlayCount++;
            }
            cueToneCounter++; // Increment the sample counter
            value = sin(gPhase) * gAmplitude; // Generate cue tone wave

            gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
            if (gPhase > 2.0 * M_PI) { // Ensure phase wraps correctly
                gPhase -= 2.0 * M_PI;
            }
        } else {
            // Cue tone finished
            playingCueTone = false; // Disable cue tone mode
            cueToneCounter = 0;     // Reset counter
            gAmplitude = 0.0;       // Ensure silence is cleared
            gPhase = 0.0f;          // Reset phase
        }
    }

    // Normal Note Generation (Outside Cue Tone Block)
    else if (gActiveNoteCount > 0) {
        gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
        if (gPhase > 2.0 * M_PI) {
            gPhase -= 2.0 * M_PI;
        }
        value = sin(gPhase) * gAmplitude;
    }

    // Write audio output for each channel
    for (unsigned int ch = 0; ch < context->audioOutChannels; ++ch) {
        audioWrite(context, n, ch, value);
    
    }

}
}

void cleanup(BelaContext *context, void *userData)
{
if(gDataFile.is_open()) {
gDataFile.close();
rt_printf("Data file closed\n");
}
}

And here is the output

Running project ...
Audio sample rate: 44100.000000
Parameters loaded: group=2, condition=Subgroup, trial=1
Data logging setup for file: /root/Bela/projects/Subgroup/data/trial_2_Subgroup_1.csv
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 107,
Logged: time=5.730, participant=3, group=2, condition=Subgroup, trial=1
Tap 1/24: participant=3
Underrun detected: 4 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
1 mode switch detected on the audio thread.
type: note on, channel: 0, data1: 71, data2: 64,
Logged: time=6.628, participant=3, group=2, condition=Subgroup, trial=1
Tap 2/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 39,
Logged: time=7.302, participant=3, group=2, condition=Subgroup, trial=1
Tap 3/24: participant=3
Underrun detected: 1 blocks dropped
3 mode switches detected on the audio thread.
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 35,
Logged: time=7.871, participant=3, group=2, condition=Subgroup, trial=1
Tap 4/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 60,
Logged: time=8.419, participant=3, group=2, condition=Subgroup, trial=1
Tap 5/24: participant=3
Underrun detected: 1 blocks dropped
5 mode switches detected on the audio thread.
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 53,
Logged: time=8.868, participant=3, group=2, condition=Subgroup, trial=1
Tap 6/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 55,
Logged: time=9.211, participant=3, group=2, condition=Subgroup, trial=1
Tap 7/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 64,
Logged: time=9.507, participant=3, group=2, condition=Subgroup, trial=1
Tap 8/24: participant=3
Underrun detected: 1 blocks dropped
8 mode switches detected on the audio thread.
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 67,
Logged: time=10.100, participant=3, group=2, condition=Subgroup, trial=1
Tap 9/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 72,
Logged: time=10.551, participant=3, group=2, condition=Subgroup, trial=1
Tap 10/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
10 mode switches detected on the audio thread.
type: note on, channel: 0, data1: 71, data2: 58,
Logged: time=11.022, participant=3, group=2, condition=Subgroup, trial=1
Tap 11/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 59,
Logged: time=11.434, participant=3, group=2, condition=Subgroup, trial=1
Tap 12/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 60,
Logged: time=11.841, participant=3, group=2, condition=Subgroup, trial=1
Tap 13/24: participant=3
Underrun detected: 1 blocks dropped
13 mode switches detected on the audio thread.
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 71,
Logged: time=12.243, participant=3, group=2, condition=Subgroup, trial=1
Tap 14/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
14 mode switches detected on the audio thread.
type: note on, channel: 0, data1: 71, data2: 69,
Logged: time=12.991, participant=3, group=2, condition=Subgroup, trial=1
Tap 15/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 64,
Logged: time=13.547, participant=3, group=2, condition=Subgroup, trial=1
Tap 16/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
16 mode switches detected on the audio thread.
type: note on, channel: 0, data1: 71, data2: 117,
Logged: time=14.268, participant=3, group=2, condition=Subgroup, trial=1
Tap 17/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 83,
Logged: time=14.628, participant=3, group=2, condition=Subgroup, trial=1
Tap 18/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 114,
Logged: time=14.983, participant=3, group=2, condition=Subgroup, trial=1
Tap 19/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
19 mode switches detected on the audio thread.
type: note on, channel: 0, data1: 71, data2: 100,
Logged: time=15.317, participant=3, group=2, condition=Subgroup, trial=1
Tap 20/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 62,
Logged: time=15.650, participant=3, group=2, condition=Subgroup, trial=1
Tap 21/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 110,
Logged: time=15.980, participant=3, group=2, condition=Subgroup, trial=1
Tap 22/24: participant=3
Underrun detected: 1 blocks dropped
22 mode switches detected on the audio thread.
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 65,
Logged: time=17.085, participant=3, group=2, condition=Subgroup, trial=1
Tap 23/24: participant=3
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
23 mode switches detected on the audio thread.
type: note on, channel: 0, data1: 71, data2: 69,
Logged: time=17.522, participant=3, group=2, condition=Subgroup, trial=1
Tap 24/24: participant=3
Trial complete! 24 taps recorded.
Underrun detected: 1 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 77,
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 94,
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
24 mode switches detected on the audio thread.
type: note on, channel: 0, data1: 71, data2: 76,
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 76,
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
Data file closed
Bela stopped

And now we've implemented some of the things you suggested: we swapped chrono out for a timestamp based on counting audio frames, and implemented WriteFile with buffers.

This seems to have solved the issue of underruns!

However, the files that are being written are kind of wrapped in square brackets and start with the word "variable".

The code is here:
#include <Bela.h>
#include <libraries/Midi/Midi.h>
#include <stdlib.h>
#include <cmath>
#include <fstream>
#include <libraries/WriteFile/WriteFile.h>
#include <string>
#include <iomanip>
#include <set>
#include <algorithm>
#include <vector>

std::set<int> activeParticipants;
const std::set<int> requiredParticipants = {1, 2, 3};
const std::set<int> lonerParticipants = {3};
const std::set<int> subgroupParticipants = {1, 2};

// Global variables
WriteFile gDataFile; // Use WriteFile instead of std::ofstream
int gCurrentGroup = 0;
const char* gCurrentCondition = "Subgroup";
int gCurrentTrial = 1;

float participant = 0.0;

// MIDI setup
Midi gMidi;
const char* gMidiPort0 = "hw:1,0,0";

std::string gCurrentPhase;

unsigned long gStartTime;
int gTapCount = 0;

// Oscillator state
float gPhase = 0;
float gFrequency = 0;
float gAmplitude = 0;
double freq[6] = {440.0, 493.88, 554.37, 659.26, 739.99, 880.0};
int tone_nr = 0;

// List of active notes
const int kMaxNotes = 16;
int gActiveNotes[kMaxNotes];
int gActiveNoteCount = 0;

// Cue tone variable
int CueTone = 0;
int cueTonePlayCount = 0;
int cueToneCounter = 0;
bool playingCueTone = true;

int gTotalTaps = 0;
bool gTrialComplete = false;

float audioSampleRate = 0;
int frameCounter;

// Vector to hold log data
std::vector<float> logs(2); // Adjust size as needed

bool setup(BelaContext *context, void *userData)
{
rt_printf("Audio sample rate: %f\n", context->audioSampleRate);
audioSampleRate = context->audioSampleRate;

// Initialise the MIDI device
if(gMidi.readFrom(gMidiPort0) < 0) {
    rt_printf("Unable to read from MIDI port %s\n", gMidiPort0);
    return false;
}
gMidi.writeTo(gMidiPort0);
gMidi.enableParser(true);

// Read parameters
std::ifstream params("/root/Bela/projects/Subgroup/current_params");
if(!params) {
    rt_printf("Warning: Could not read parameters file\n");
    gCurrentGroup = 0;
    gCurrentCondition = "Subgroup";
    gCurrentTrial = 1;
} else {
    params >> gCurrentGroup >> gCurrentTrial;
    rt_printf("Parameters loaded: group=%d, condition=%s, trial=%d\n", 
             gCurrentGroup, gCurrentCondition, gCurrentTrial);
}

char filename[100];
sprintf(filename, "/root/Bela/projects/Subgroup/data/trial_%d_%s_%d.csv", 
        gCurrentGroup, gCurrentCondition, gCurrentTrial);
        
// Setup data logging with WriteFile
gDataFile.setup(filename);
gDataFile.setFormat("%f,%f\n"); // Set the format for logging
gDataFile.setFileType(kText); // Set file type to text

// Write CSV header
rt_printf("Data logging setup for file: %s\n", filename);

gTotalTaps = 0;
gTrialComplete = false;

return true;

}

unsigned int logIdx = 0;

void render(BelaContext *context, void *userData)
{
// Loop through audio frames
unsigned int writeFileSize = logs.size() + 5000;
gDataFile.setBufferSize(writeFileSize);
logIdx = 0;

// At the beginning of each callback, look for available MIDI messages
while(gMidi.getParser()->numAvailableMessages() > 0) {
    MidiChannelMessage message;
    message = gMidi.getParser()->getNextChannelMessage();
    message.prettyPrint(); // Print the message data
    
    if(message.getType() == kmmNoteOn) {
        int noteNumber = message.getDataByte(0);
        int velocity = message.getDataByte(1); 
        
        if(velocity > 0) {
            if (!gTrialComplete && gActiveNoteCount < kMaxNotes) {
                // Determine participant based on note number
                if (noteNumber == 48) participant = 1;
                else if (noteNumber == 59) participant = 2;
                else if (noteNumber == 71) participant = 3;
                else participant = 0; // Invalid participant key

                if (participant > 0) {
                    // Log the keypress immediately
                    float timestamp = frameCounter / audioSampleRate;
                    
                    logs[0] = timestamp; // Timestamp
                    logs[1] = participant; // Participant number as float

                    // Log to file if buffer is full
                    if (logIdx < writeFileSize) {
                        logIdx += 2; // Increment log index by 2 for two values
                    }

                    // Check if we need to log to the file
                    if (logIdx >= writeFileSize) {
                        gDataFile.log(logs.data(), logIdx);
                        logIdx = 0; // Reset log index
                    }

                    rt_printf("Logged: time=%.3f, participant=%d\n",
                              timestamp, participant);

                    // Add participant to activeParticipants for synchronization tracking
                    activeParticipants.insert(participant);

                    // Check if all required participants have pressed their keys
                    if (std::includes(activeParticipants.begin(), activeParticipants.end(),
                                      subgroupParticipants.begin(), subgroupParticipants.end()) ||
                        std::includes(activeParticipants.begin(), activeParticipants.end(),
                                      lonerParticipants.begin(), lonerParticipants.end())) {
                        
                        // All required participants pressed keys simultaneously
                        gActiveNotes[gActiveNoteCount] = noteNumber;
                        gActiveNoteCount++;
                        gFrequency = freq[tone_nr];
                        gAmplitude = 0.5;

                        // Reset activeParticipants for the next synchronization event
                        activeParticipants.clear();

                        // Increment total taps only for synchronized events
                        gTotalTaps++;
                        if (tone_nr < 5) {
                            tone_nr++;
                        } else {
                            tone_nr = 0;
                        }
                        
                        rt_printf("Tap %d/24: participant=%d\n", gTotalTaps, participant);

                        if (gTotalTaps >= 24) {
                            gTrialComplete = true;
                            rt_printf("\nTrial complete! 24 taps recorded.\n");
                        }
                    }
                }
            }
        }
    } else if(message.getType() == kmmNoteOff) {
        int noteNumber = message.getDataByte(0);
        bool activeNoteChanged = false;

        // Go through all the active notes and remove any with this number
        for(int i = gActiveNoteCount - 1; i >= 0; i--) {
            if(gActiveNotes[i] == noteNumber) {
                if (i == gActiveNoteCount-1) {
                    activeNoteChanged = true;
                }
                for (int j = i; j < gActiveNoteCount-1; j++) {
                    gActiveNotes[j] = gActiveNotes[j + 1];
                }
                gActiveNoteCount--;
            }
        }

        rt_printf("Note off: %d notes remaining\n", gActiveNoteCount);
        
        if(gActiveNoteCount == 0) {
            gAmplitude = 0;
        }
    }
}

// Cue Tones 
for(unsigned int n = 0; n < context->audioFrames; n++) {
    float value = 0;
    // Cue Tone Handling
    if (playingCueTone) {
        if (cueTonePlayCount < 4) {
            if (cueToneCounter < audioSampleRate / 2) { // Play tone
                gFrequency = 440.0;
                gAmplitude = 0.5; 
            } else if (cueToneCounter < audioSampleRate) { // Silence
                gAmplitude = 0.0;
            } else { // Reset cycle
                cueToneCounter = 0;
                cueTonePlayCount++;
            }
            cueToneCounter++; // Increment the sample counter
            value = sin(gPhase) * gAmplitude; // Generate cue tone wave

            gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
            if (gPhase > 2.0 * M_PI) {
                gPhase -= 2.0 * M_PI;
            }
        } else {
            playingCueTone = false; // Disable cue tone mode
            cueToneCounter = 0;     // Reset counter
            gAmplitude = 0.0;       // Ensure silence is cleared
            gPhase = 0.0f;          // Reset phase
        }
    } else if (gActiveNoteCount > 0) {
        gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
        if (gPhase > 2.0 * M_PI) {
            gPhase -= 2.0 * M_PI;
        }
        value = sin(gPhase) * gAmplitude;
    }

    // Write audio output for each channel
    for (unsigned int ch = 0; ch < context->audioOutChannels; ++ch) {
        audioWrite(context, n, ch, value);
        frameCounter++;
    }
}

// Write any leftover logs to the file
if (logIdx > 0) {
    gDataFile.log(logs.data(), logIdx);
}

}

void cleanup(BelaContext *context, void *userData)
{
gDataFile.log(logs.data(), logIdx); // Ensure any remaining logs are written
rt_printf("Data file closed\n");
}

And the file contents are as follows
variable=[
8.374422,3.000000
10.279183,2.000000
10.284263,1.000000
11.350929,3.000000
12.008345,1.000000
12.022132,2.000000
12.587392,3.000000
13.091701,1.000000
13.114921,2.000000
13.619229,3.000000
14.091610,1.000000
14.096689,2.000000
14.624944,3.000000
14.959455,1.000000
15.044353,2.000000
15.587120,3.000000
16.040634,1.000000
16.046440,2.000000
16.559456,3.000000
16.975239,1.000000
16.981043,2.000000
17.489706,3.000000
17.864853,2.000000
17.881542,1.000000
18.411972,3.000000
18.885805,2.000000
18.887257,1.000000
20.363174,2.000000
20.388571,1.000000
21.016235,1.000000
];

while earlier, they were

timestamp,participant
9.232,3
10.924,1
11.012,2
12.499,3
13.814,1
13.825,2
15.070,3
16.134,2
16.167,1
17.285,3
18.574,3
19.507,2
19.528,1
20.434,3
21.241,2
21.284,1
22.017,3
22.730,2
22.778,1
23.636,3
24.584,3
25.318,2
25.358,1
26.037,3
26.702,2
26.738,1
27.340,3
27.946,1
27.960,2
29.421,3
30.197,2
30.233,1
30.811,3
31.372,2
31.377,1

Thank you very much! We'll leave the file structure for now 🙂 Another issue we're stuck with is that sometimes the first key pressed after the metronome/cue tones doesn't elicit a sound, even though it is detected (alongside an underrun). As we're running a task where it is important for participants to keep up the pacing, this is quite the issue. We've tried removing some delay after the metronome/cue tones, without luck, and now can't figure out what else to try. Hope the problem is clear 🙂

Here's the full updated Bela code:

#include <Bela.h>
#include <libraries/Midi/Midi.h>
#include <stdlib.h>
#include <cmath>
#include <fstream>
#include <libraries/WriteFile/WriteFile.h>
#include <string>
#include <iomanip>
#include <set>
#include <algorithm>
#include <vector>

std::set<int> activeParticipants;
const std::set<int> requiredParticipants = {1, 2, 3};
const std::set<int> lonerParticipants = {3};
const std::set<int> subgroupParticipants = {1, 2};

// Global variables
WriteFile gDataFile; // Use WriteFile instead of std::ofstream
int gCurrentGroup = 0;
const char* gCurrentCondition = "Subgroup";
int gCurrentTrial = 1;

float participant = 0.0;

// MIDI setup
Midi gMidi;
const char* gMidiPort0 = "hw:1,0,0";

std::string gCurrentPhase;

unsigned long gStartTime;
int gTapCount = 0;

// Oscillator state
float gPhase = 0; 
float gFrequency = 0;
float gAmplitude = 0;
double freq[6] = {440.0, 493.88, 554.37, 659.26, 739.99, 880.0};
int tone_nr = 0;

// List of active notes
const int kMaxNotes = 16;
int gActiveNotes[kMaxNotes];
int gActiveNoteCount = 0;

// Cue tone variable
int CueTone = 0; 
int cueTonePlayCount = 0;
int cueToneCounter = 0;
bool playingCueTone = true;

int gTotalTaps = 0;
bool gTrialComplete = false;

float audioSampleRate = 0;
int frameCounter;

// Vector to hold log data
std::vector<float> logs(2); // Adjust size as needed

bool setup(BelaContext *context, void *userData)
{
    rt_printf("Audio sample rate: %f\n", context->audioSampleRate);
    audioSampleRate = context->audioSampleRate;

    // Initialise the MIDI device
    if(gMidi.readFrom(gMidiPort0) < 0) {
        rt_printf("Unable to read from MIDI port %s\n", gMidiPort0);
        return false;
    }
    gMidi.writeTo(gMidiPort0);
    gMidi.enableParser(true);
    
    // Read parameters
    std::ifstream params("/root/Bela/projects/Subgroup/current_params");
    if(!params) {
        rt_printf("Warning: Could not read parameters file\n");
        gCurrentGroup = 0;
        gCurrentCondition = "Subgroup";
        gCurrentTrial = 1;
    } else {
        params >> gCurrentGroup >> gCurrentTrial;
        rt_printf("Parameters loaded: group=%d, condition=%s, trial=%d\n", 
                 gCurrentGroup, gCurrentCondition, gCurrentTrial);
    }

    char filename[100];
    sprintf(filename, "/root/Bela/projects/Subgroup/data/trial_%d_%s_%d.csv", 
            gCurrentGroup, gCurrentCondition, gCurrentTrial);

    // Setup data logging with WriteFile
    gDataFile.setup(filename);
    gDataFile.setFormat("%f,%f\n"); // Set the format for logging
    gDataFile.setFileType(kText); // Set file type to text

    // Write CSV header
    rt_printf("Data logging setup for file: %s\n", filename);

    gTotalTaps = 0;
    gTrialComplete = false;

    return true;
}

unsigned int logIdx = 0;

void render(BelaContext *context, void *userData)
{
    // Loop through audio frames
    unsigned int writeFileSize = logs.size() + 5000;
    gDataFile.setBufferSize(writeFileSize);
    logIdx = 0;

    // At the beginning of each callback, look for available MIDI messages
    while(gMidi.getParser()->numAvailableMessages() > 0) {
        MidiChannelMessage message;
        message = gMidi.getParser()->getNextChannelMessage();
        message.prettyPrint(); // Print the message data

        if(message.getType() == kmmNoteOn) {
            int noteNumber = message.getDataByte(0);
            int velocity = message.getDataByte(1);

            if(velocity > 0) {
                if (!gTrialComplete && gActiveNoteCount < kMaxNotes) {
                    // Determine participant based on note number
                    if (noteNumber == 48) participant = 1;
                    else if (noteNumber == 59) participant = 2;
                    else if (noteNumber == 71) participant = 3;
                    else participant = 0; // Invalid participant key

                    if (participant > 0) {
                        // Log the keypress immediately
                        float timestamp = frameCounter / audioSampleRate;

                        logs[0] = timestamp; // Timestamp
                        logs[1] = participant; // Participant number as float

                        // Log to file if buffer is full
                        if (logIdx < writeFileSize) {
                            logIdx += 2; // Increment log index by 2 for two values
                        }

                        // Check if we need to log to the file
                        if (logIdx >= writeFileSize) {
                            gDataFile.log(logs.data(), logIdx);
                            logIdx = 0; // Reset log index
                        }

                        rt_printf("Logged: time=%.3f, participant=%d\n",
                                  timestamp, participant);

                        // Add participant to activeParticipants for synchronization tracking
                        activeParticipants.insert(participant);

                        // Check if all required participants have pressed their keys
                        if (std::includes(activeParticipants.begin(), activeParticipants.end(),
                                          subgroupParticipants.begin(), subgroupParticipants.end()) ||
                            std::includes(activeParticipants.begin(), activeParticipants.end(),
                                          lonerParticipants.begin(), lonerParticipants.end())) {

                            // All required participants pressed keys simultaneously
                            gActiveNotes[gActiveNoteCount] = noteNumber;
                            gActiveNoteCount++;
                            gFrequency = freq[tone_nr];
                            gAmplitude = 0.5;

                            // Reset activeParticipants for the next synchronization event
                            activeParticipants.clear();

                            // Increment total taps only for synchronized events
                            gTotalTaps++;
                            if (tone_nr < 5) {
                                tone_nr++;
                            } else {
                                tone_nr = 0;
                            }

                            rt_printf("Tap %d/24: participant=%d\n", gTotalTaps, participant);

                            if (gTotalTaps >= 24) {
                                gTrialComplete = true;
                                rt_printf("\nTrial complete! 24 taps recorded.\n");
                            }
                        }
                    }
                }
            }
        } else if(message.getType() == kmmNoteOff) {
            int noteNumber = message.getDataByte(0);
            bool activeNoteChanged = false;

            // Go through all the active notes and remove any with this number
            for(int i = gActiveNoteCount - 1; i >= 0; i--) {
                if(gActiveNotes[i] == noteNumber) {
                    if (i == gActiveNoteCount-1) {
                        activeNoteChanged = true;
                    }
                    for (int j = i; j < gActiveNoteCount-1; j++) {
                        gActiveNotes[j] = gActiveNotes[j + 1];
                    }
                    gActiveNoteCount--;
                }
            }

            rt_printf("Note off: %d notes remaining\n", gActiveNoteCount);

            if(gActiveNoteCount == 0) {
                gAmplitude = 0;
            }
        }
    }

    // Cue Tones
    for(unsigned int n = 0; n < context->audioFrames; n++) {
        float value = 0;
        // Cue Tone Handling
        if (playingCueTone) {
            if (cueTonePlayCount < 4) {
                if (cueToneCounter < audioSampleRate / 2) { // Play tone
                    gFrequency = 440.0;
                    gAmplitude = 0.5;
                } else if (cueToneCounter < audioSampleRate) { // Silence
                    gAmplitude = 0.0;
                } else { // Reset cycle
                    cueToneCounter = 0;
                    cueTonePlayCount++;
                }
                cueToneCounter++; // Increment the sample counter
                value = sin(gPhase) * gAmplitude; // Generate cue tone wave

                gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
                if (gPhase > 2.0 * M_PI) {
                    gPhase -= 2.0 * M_PI;
                }
            } else {
                playingCueTone = false; // Disable cue tone mode
                cueToneCounter = 0;     // Reset counter
                gAmplitude = 0.0;       // Ensure silence is cleared
                gPhase = 0.0f;          // Reset phase
            }
        } else if (gActiveNoteCount > 0) {
            gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
            if (gPhase > 2.0 * M_PI) {
                gPhase -= 2.0 * M_PI;
            }
            value = sin(gPhase) * gAmplitude;
        }

        // Write audio output for each channel
        for (unsigned int ch = 0; ch < context->audioOutChannels; ++ch) {
            audioWrite(context, n, ch, value);
            frameCounter++;
        }
    }

    // Write any leftover logs to the file
    if (logIdx > 0) {
        gDataFile.log(logs.data(), logIdx);
    }
}

void cleanup(BelaContext *context, void *userData)
{
    gDataFile.log(logs.data(), logIdx); // Ensure any remaining logs are written
    rt_printf("Data file closed\n");
}

Here is the Bela output as an example of the first few taps:

Running project ...
File /root/Bela/projects/Subgroup/data/trial_8_Subgroup_0.csv exists, writing to /root/Bela/projects/Subgroup/data/trial_8_Subgroup_011.csv instead
Audio sample rate: 44100.000000
Parameters loaded: group=8, condition=Subgroup, trial=0
Data logging setup for file: /root/Bela/projects/Subgroup/data/trial_8_Subgroup_0.csv
Running
type: note on, channel: 0, data1: 71, data2: 64,
Logged: time=7.977, participant=0
Tap 1/24: participant=0
Underrun detected: 3 blocks dropped
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining
1 mode switch detected on the audio thread.
type: note on, channel: 0, data1: 59, data2: 42,
Logged: time=9.845, participant=0
type: note on, channel: 0, data1: 48, data2: 36,
Logged: time=9.897, participant=0
Tap 2/24: participant=0
type: note off, channel: 0, data1: 48, data2: 0,
Note off: 0 notes remaining
type: note off, channel: 0, data1: 59, data2: 0,
Note off: 0 notes remaining
type: note on, channel: 0, data1: 71, data2: 61,
Logged: time=11.592, participant=0
Tap 3/24: participant=0
type: note off, channel: 0, data1: 71, data2: 0,
Note off: 0 notes remaining

gDataFile.setBufferSize(writeFileSize);

should be in setup(), not in render(). That, I guess, is what's responsible for the "mode switch detected on the audio thread", even though it gets printed a few lines after the beginning.

The frameCounter should increment once per frame. Currently it's incrementing once per channel within each frame. Take it out of the channels loop.
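Roughly this shape (just a sketch, reusing the variable names from your code):

bool setup(BelaContext *context, void *userData)
{
    // ... existing setup ...
    gDataFile.setBufferSize(logs.size() + 5000); // once, at startup
    return true;
}

void render(BelaContext *context, void *userData)
{
    // ... MIDI handling ...
    for(unsigned int n = 0; n < context->audioFrames; n++) {
        float value = 0;
        // ... cue tone / note generation as before ...
        for(unsigned int ch = 0; ch < context->audioOutChannels; ++ch)
            audioWrite(context, n, ch, value); // same sample to every channel
        frameCounter++; // once per frame, outside the channel loop
    }
}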

activeParticipants.insert() is not real-time safe. Consider replacing it, although it may be good enough in practice.
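For example, a few fixed flags do the same job without allocating on the audio thread (names are illustrative; participant numbering as in your code, subgroup = 1 and 2, loner = 3):

// Fixed-size flags instead of std::set: no allocation on the audio thread.
static bool gParticipantPressed[4] = {false, false, false, false}; // indices 1..3 used

// Call from the note-on handler; returns true when a tone should be triggered.
static bool registerPress(int participant)
{
    if(participant < 1 || participant > 3)
        return false;
    gParticipantPressed[participant] = true;
    bool subgroupReady = gParticipantPressed[1] && gParticipantPressed[2];
    bool lonerReady = gParticipantPressed[3];
    if(subgroupReady || lonerReady) {
        for(int i = 0; i < 4; ++i)
            gParticipantPressed[i] = false; // reset for the next event
        return true;
    }
    return false;
}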

Not sure about the specific problem you are mentioning. I don't understand the timing of your code in detail, but could it be that you are unexpectedly resetting gAmplitude to 0 at line 239 or 209 after you have set it to 0.5 at line 166?

Hello again! We've implemented some of your suggested changes, thanks!

We're still stuck with the "cue tone logic" part. The problem is that when the last cue tone has played, a keypress doesn't elicit any sound unless one waits a little while, which is an issue because we want people to continue the pace of the tones. And even if one does wait a while before pressing, the first tone is still accompanied by an underrun.

We suspect the problem is that the cue tone loop is "blocking" the first tap, since the loop includes silence after each cue tone. If the tap happens slightly before this loop has finished, no sound is played; we think the silence meant to follow the fourth tone is still "active". Still, this does not explain the underrun warning, which comes every time the first key is pressed, whether it elicits a sound or not.

We've tried a few different things, and now we've set it up this way (the cue tone logic is inside the audioFrames loop, starting from line 245):

  1. If cueTonePlayCount is less than 3, it plays the tone for 0.5 sec. and then silence for 0.5 sec.
  2. Every frame, cueToneCounter is increased by 1.
  3. Every second, cueToneCounter is reset to 0 and cueTonePlayCount is increased by 1.
  4. Then we've attempted to make it play the fourth tone in another if-statement, one that does not have the 0.5 sec. of silence.

Here it is in the code:

// Cue Tones 
	for (unsigned int n = 0; n < context->audioFrames; n++) {
    frameCounter++;
    float value = 0;

    if (playingCueTone) {
        if (cueTonePlayCount < 3) { // Play 4 tones
            if (cueToneCounter < audioSampleRate / 2) {
                gFrequency = 440.0;
                gAmplitude = 0.5;  // Play tone
            } else {
                gAmplitude = 0.0;  // Silence
            }
            cueToneCounter++;
            
            if (cueToneCounter >= audioSampleRate) { // Cycle complete
                cueToneCounter = 0;
                cueTonePlayCount++;
            }

            value = sin(gPhase) * gAmplitude;
            gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
            if (gPhase > 2.0 * M_PI) gPhase -= 2.0 * M_PI;
            
        if (cueTonePlayCount == 3) { // Play 4 tones
            if (cueToneCounter < audioSampleRate / 2) {
                gFrequency = 440.0;
                gAmplitude = 0.5;  // Play tone
            }
            
            if (cueToneCounter >= audioSampleRate / 2) { // Cycle complete
                cueToneCounter = 0;
                cueTonePlayCount++;
            }
            
        }

        } else {
            // Disable cue tone mode immediately after the last tone
            playingCueTone = false;
            cueToneCounter = 0;
            //gAmplitude = 0.0;  // Silence ensures no residual tone
        }
    }

    if (!playingCueTone && gActiveNoteCount > 0) {
        // Transition quickly to note playback
        gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
        if (gPhase > 2.0 * M_PI) gPhase -= 2.0 * M_PI;
        value = sin(gPhase) * gAmplitude;
    }
    
        	// Write audio output for each channel
        	for (unsigned int ch = 0; ch < context->audioOutChannels; ++ch) {
            	audioWrite(context, n, ch, value);

        	}
    	}

However, this results in only 3 cue tones being played.
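For clarity, this stripped-down sketch (not our actual code, just the target behaviour written with the same globals) is what we're aiming for: four 0.5 s tones at one-second spacing, with cue mode ending right after the fourth tone instead of after its trailing silence.

// Returns this frame's cue amplitude; oscillator/phase handling stays as before.
float cueToneAmplitude()
{
    if(!playingCueTone)
        return 0.0f;
    const int kNumCueTones = 4;
    bool inTone = cueToneCounter < audioSampleRate / 2; // first half of each second
    cueToneCounter++;

    bool lastTone = (cueTonePlayCount == kNumCueTones - 1);
    if((lastTone && !inTone) || cueToneCounter >= audioSampleRate) {
        cueToneCounter = 0;
        cueTonePlayCount++;
        if(cueTonePlayCount >= kNumCueTones)
            playingCueTone = false; // no trailing silence after the last tone
    }
    return inTone ? 0.5f : 0.0f;
}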

Here is the full updated code, in case context is needed:

#include <Bela.h>
#include <libraries/Midi/Midi.h>
#include <stdlib.h>
#include <cmath>
#include <fstream>
#include <libraries/WriteFile/WriteFile.h>
#include <string>
#include <iomanip>
#include <set>
#include <algorithm>
#include <vector>

std::set<int> activeParticipants;
std::set<int> requiredParticipants = {1, 2, 3}; //all colours 
std::set<int> lonerParticipants = {1}; //blue 
std::set<int> subgroupParticipants = {2, 3}; //black and pink 

// Global variables
WriteFile gDataFile; // Use WriteFile instead of std::ofstream
int gCurrentGroup = 0;
std::string gCurrentCondition = "Simultaneous"; //change later - to not a const 
int gCurrentTrial = 1;

int participant = 0; // participant id (1-3) derived from the MIDI note number

// MIDI setup
Midi gMidi;
const char* gMidiPort0 = "hw:1,0,0";

std::string gCurrentPhase;

unsigned long gStartTime;
int gTapCount = 0;

// Oscillator state
float gPhase = 0; 
float gFrequency = 0;
float gAmplitude = 0;
double freq[6] = {440.0, 493.88, 554.37, 659.26, 739.99, 880.0};
int tone_nr = 0;

// List of active notes
const int kMaxNotes = 16;
int gActiveNotes[kMaxNotes];
int gActiveNoteCount = 0;

// Cue tone variable
int CueTone = 0; 
int cueTonePlayCount = 0;
int cueToneCounter = 0;
bool playingCueTone = true;

int gTotalTaps = 0;
bool gTrialComplete = false;

float audioSampleRate = 0;
int frameCounter = 0; // audio frames elapsed since start (used for timestamps)

// Vector to hold log data
std::vector<float> logs(2); // Adjust size as needed

unsigned int writeFileSize = logs.size() + 5000;

bool setup(BelaContext *context, void *userData)
{
    rt_printf("Audio sample rate: %f\n", context->audioSampleRate);
    audioSampleRate = context->audioSampleRate;

    // Initialise the MIDI device
    if(gMidi.readFrom(gMidiPort0) < 0) {
        rt_printf("Unable to read from MIDI port %s\n", gMidiPort0);
        return false;
    }
    gMidi.writeTo(gMidiPort0);
    gMidi.enableParser(true);
 

    
    // Read parameters
    std::ifstream params("/root/Bela/projects/mainB/current_params");
    if(!params) {
        rt_printf("Warning: Could not read parameters file\n");
        gCurrentGroup = 0;
        gCurrentCondition = "Simultaneous";
        gCurrentTrial = 1;
    } else {
        params >> gCurrentGroup >> gCurrentCondition >> gCurrentTrial;
        rt_printf("Parameters loaded: group=%d, condition=%s, trial=%d\n", 
                 gCurrentGroup, gCurrentCondition.c_str(), gCurrentTrial);
    }
    
    if (strcmp(gCurrentCondition.c_str(),"1-23") == 0 || strcmp(gCurrentCondition.c_str(),"23-1") == 0) { // From https://stackoverflow.com/questions/2603039/warning-comparison-with-string-literals-results-in-unspecified-behaviour 
    	requiredParticipants = {1, 2, 3, 4}; //create a set that can never be fulfilled by adding 4 
    	lonerParticipants = {1};
    	subgroupParticipants = {2, 3};
    } 
    
    if (strcmp(gCurrentCondition.c_str(),"Simultaneous") == 0) { // From https://stackoverflow.com/questions/2603039/warning-comparison-with-string-literals-results-in-unspecified-behaviour 
    	requiredParticipants = {1, 2, 3}; //create sets that can never be fulfilled by adding 4 
    	lonerParticipants = {1, 4};
    	subgroupParticipants = {2, 3, 4};
    }
    
    if (strcmp(gCurrentCondition.c_str(),"123") == 0 || strcmp(gCurrentCondition.c_str(),"231") == 0 || strcmp(gCurrentCondition.c_str(),"312") == 0) { // From https://stackoverflow.com/questions/2603039/warning-comparison-with-string-literals-results-in-unspecified-behaviour 
    	requiredParticipants = {2}; //just making all sets "achievable" 
    	lonerParticipants = {1};
    	subgroupParticipants = {3};
    }
    
    char filename[100];
    sprintf(filename, "/root/Bela/projects/mainB/data/trial_%d_%s_%d.csv", 
            gCurrentGroup, gCurrentCondition.c_str(), gCurrentTrial);
            
    // Setup data logging with WriteFile
    gDataFile.setup(filename);
    gDataFile.setFormat("%f,%f\n"); // Set the format for logging
    gDataFile.setFileType(kText); // Set file type to text
    
    // Note: with WriteFile we don't write a CSV header row here
    rt_printf("Data logging setup for file: %s\n", filename);
    
    gTotalTaps = 0;
    gTrialComplete = false;
    
    gDataFile.setBufferSize(writeFileSize);
    
    return true;
}

unsigned int logIdx = 0;

void render(BelaContext *context, void *userData)
{		
	// Reset the log buffer index at the start of each audio callback
	logIdx = 0;

    // At the beginning of each callback, look for available MIDI messages
    while(gMidi.getParser()->numAvailableMessages() > 0) {
        MidiChannelMessage message;
        message = gMidi.getParser()->getNextChannelMessage();
        message.prettyPrint(); // Print the message data
        
        if(message.getType() == kmmNoteOn) {
            int noteNumber = message.getDataByte(0);
            int velocity = message.getDataByte(1); 
            
            if(velocity > 0) {
                if (!gTrialComplete && gActiveNoteCount < kMaxNotes) {
                    // Determine participant based on note number
                    if (noteNumber == 48) participant = 1;
                    else if (noteNumber == 59) participant = 2;
                    else if (noteNumber == 71) participant = 3;
                    else participant = 0; // Invalid participant key

                    if (participant > 0) {
                        // Log the keypress immediately
                        float timestamp = frameCounter / audioSampleRate;
                        
                        // Store the pair at the current position in the log
                        // buffer, so a second keypress in the same block does
                        // not overwrite the first
                        if (logIdx + 1 < logs.size()) {
                            logs[logIdx] = timestamp;        // Timestamp
                            logs[logIdx + 1] = participant;  // Participant number
                            logIdx += 2;
                        }

                        // Flush to the file once the buffer is full
                        if (logIdx >= logs.size()) {
                            gDataFile.log(logs.data(), logIdx);
                            logIdx = 0; // Reset log index
                        }

                        rt_printf("Logged: time=%.3f, participant=%d\n",
                                  timestamp, participant);

                        // Add participant to activeParticipants for synchronization tracking
                        activeParticipants.insert(participant); // real-time safe? replace with ?? Giuliomoro 
                        // Check if all required participants have pressed their keys
                        if (std::includes(activeParticipants.begin(), activeParticipants.end(),
                                          subgroupParticipants.begin(), subgroupParticipants.end()) ||
                            std::includes(activeParticipants.begin(), activeParticipants.end(),
                                          lonerParticipants.begin(), lonerParticipants.end()) ||
                            std::includes(activeParticipants.begin(), activeParticipants.end(),
                                          requiredParticipants.begin(), requiredParticipants.end())
                                        ) {
                            
                            // All required participants pressed keys simultaneously
                            gActiveNotes[gActiveNoteCount] = noteNumber;
                            gActiveNoteCount++;
                            gFrequency = freq[tone_nr];
                            gAmplitude = 0.5;

                            // Reset activeParticipants for the next synchronization event
                            activeParticipants.clear();

                            // Increment total taps only for synchronized events
                            gTotalTaps++;
                            if (tone_nr < 5) {
                                tone_nr++;
                            } else {
                                tone_nr = 0;
                            }
                            
                            rt_printf("Tap %d/24: participant=%d\n", gTotalTaps, participant);

                            if (gTotalTaps >= 24) {
                                gTrialComplete = true;
                                rt_printf("\nTrial complete! 24 taps recorded.\n");
                            }
                        }
                    }
                }
            }
        } else if(message.getType() == kmmNoteOff) {
            int noteNumber = message.getDataByte(0);
            bool activeNoteChanged = false;

            // Go through all the active notes and remove any with this number
            for(int i = gActiveNoteCount - 1; i >= 0; i--) {
                if(gActiveNotes[i] == noteNumber) {
                    if (i == gActiveNoteCount-1) {
                        activeNoteChanged = true;
                    }
                    for (int j = i; j < gActiveNoteCount-1; j++) {
                        gActiveNotes[j] = gActiveNotes[j + 1];
                    }
                    gActiveNoteCount--;
                }
            }

            rt_printf("Note off: %d notes remaining\n", gActiveNoteCount);
            
            if(gActiveNoteCount == 0) {
                gAmplitude = 0;
            }
        }
    }

    // Cue Tones
    for (unsigned int n = 0; n < context->audioFrames; n++) {
        frameCounter++;
        float value = 0;

        if (playingCueTone) {
            if (cueTonePlayCount < 3) { // Play 4 tones
                if (cueToneCounter < audioSampleRate / 2) {
                    gFrequency = 440.0;
                    gAmplitude = 0.5;  // Play tone
                } else {
                    gAmplitude = 0.0;  // Silence
                }
                cueToneCounter++;

                if (cueToneCounter >= audioSampleRate) { // Cycle complete
                    cueToneCounter = 0;
                    cueTonePlayCount++;
                }

                value = sin(gPhase) * gAmplitude;
                gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
                if (gPhase > 2.0 * M_PI) gPhase -= 2.0 * M_PI;

                if (cueTonePlayCount == 3) { // Play 4 tones
                    if (cueToneCounter < audioSampleRate / 2) {
                        gFrequency = 440.0;
                        gAmplitude = 0.5;  // Play tone
                    }

                    if (cueToneCounter >= audioSampleRate / 2) { // Cycle complete
                        cueToneCounter = 0;
                        cueTonePlayCount++;
                    }
                }
            } else {
                // Disable cue tone mode immediately after the last tone
                playingCueTone = false;
                cueToneCounter = 0;
                //gAmplitude = 0.0;  // Silence ensures no residual tone
            }
        }

        if (!playingCueTone && gActiveNoteCount > 0) {
            // Transition quickly to note playback
            gPhase += 2.0 * M_PI * gFrequency / audioSampleRate;
            if (gPhase > 2.0 * M_PI) gPhase -= 2.0 * M_PI;
            value = sin(gPhase) * gAmplitude;
        }

        // Write audio output for each channel
        for (unsigned int ch = 0; ch < context->audioOutChannels; ++ch) {
            audioWrite(context, n, ch, value);
        }
    }

    // Write any leftover logs to the file
    if (logIdx > 0) {
        gDataFile.log(logs.data(), logIdx);
    }
}

void cleanup(BelaContext *context, void *userData)
{
    gDataFile.log(logs.data(), logIdx); // Ensure any remaining logs are written
    rt_printf("Data file closed\n");
} 

Will you be our saviour once again?