bootshorn_investigation/process
2025-06-01 19:31:52 +02:00

#!/usr/bin/env python3
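"""Detect Bootshorn events in FLAC recordings.

Scans RECORDINGS_DIR for .flac files, searches each one block-by-block for a
sustained tone around DETECT_FREQUENCY (211 Hz), and writes a padded FLAC clip
plus a spectrogram PNG to DETECTIONS_DIR for every detected event.
"""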
import os
import datetime
import shutil
import traceback

import numpy as np
import matplotlib.pyplot as plt
import soundfile
from scipy.fft import rfft, rfftfreq
RECORDINGS_DIR = "recordings"
PROCESSED_RECORDINGS_DIR = "recordings/processed"
DETECTIONS_DIR = "events"

DETECT_FREQUENCY = 211  # Hz
DETECT_FREQUENCY_TOLERANCE = 2  # Hz
DETECT_FREQUENCY_FROM = DETECT_FREQUENCY - DETECT_FREQUENCY_TOLERANCE  # Hz
DETECT_FREQUENCY_TO = DETECT_FREQUENCY + DETECT_FREQUENCY_TOLERANCE  # Hz
ADJACENCY_FACTOR = 2  # area to look for the frequency (e.g. 2 means 100Hz to 400Hz for 200Hz detection)
BLOCK_SECONDS = 3  # seconds (longer means more frequency resolution, but less time resolution)
DETECTION_DISTANCE_SECONDS = 30  # seconds (minimum time between detections)
DETECTION_DISTANCE_BLOCKS = DETECTION_DISTANCE_SECONDS // BLOCK_SECONDS  # number of blocks to skip after a detection
BLOCK_OVERLAP_FACTOR = 0.9  # overlap between consecutive blocks (0.9 means 90% overlap)
MIN_SIGNAL_QUALITY = 1000.0  # minimum ratio of peak amplitude to mean off-band amplitude for a valid detection
PLOT_PADDING_START_SECONDS = 2  # seconds of padding before the event in the plot/clip
PLOT_PADDING_END_SECONDS = 3  # seconds of padding after the event in the plot/clip


def process_recording(filename):
    print('processing', filename)
    # parse the recording start date from the filename,
    # e.g. "2025-06-01_19-31-52.123456+0200.flac" (microsecond precision)
    date_string_from_filename = os.path.splitext(filename)[0]
    recording_date = datetime.datetime.strptime(date_string_from_filename, "%Y-%m-%d_%H-%M-%S.%f%z")
    # get data and metadata from recording
    path = os.path.join(RECORDINGS_DIR, filename)
    sound, samplerate = soundfile.read(path)
    # the analysis below assumes mono audio; mix down defensively if the file is multi-channel
    if sound.ndim > 1:
        sound = sound.mean(axis=1)
    samples_per_block = int(BLOCK_SECONDS * samplerate)
    overlapping_samples = int(samples_per_block * BLOCK_OVERLAP_FACTOR)
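    # consecutive blocks advance by samples_per_block - overlapping_samples,
    # i.e. 10% of a block (0.3 s at the settings above), which sets the detector's time resolution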
    # cache data about the current event
    current_event = None
    # read blocks of audio data with overlap from sound variable
    sample_num = 0
    while sample_num < len(sound):
        # get block of audio data
        block_start = sample_num
        block_end = min(sample_num + samples_per_block, len(sound))
        block = sound[block_start:block_end]
        # calculate FFT
        labels = rfftfreq(len(block), d=1/samplerate)
        complex_amplitudes = rfft(block)
        amplitudes = np.abs(complex_amplitudes)
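        # FFT bin spacing is samplerate/len(block), i.e. 1/BLOCK_SECONDS Hz (~0.33 Hz)
        # for a full block, comfortably finer than the ±2 Hz detection tolerance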
        # get the frequency with the highest amplitude within the search range
        search_mask = (labels >= DETECT_FREQUENCY_FROM/ADJACENCY_FACTOR) & (labels <= DETECT_FREQUENCY_TO*ADJACENCY_FACTOR)
        search_amplitudes = amplitudes[search_mask]
        search_labels = labels[search_mask]
        max_amplitude_index = np.argmax(search_amplitudes)
        max_amplitude = search_amplitudes[max_amplitude_index]
        max_freq = search_labels[max_amplitude_index]
        max_freq_detected = DETECT_FREQUENCY_FROM <= max_freq <= DETECT_FREQUENCY_TO
        # calculate signal quality: peak amplitude relative to the mean amplitude outside the tolerance band
        adjacent_amplitudes = amplitudes[(labels < DETECT_FREQUENCY_FROM) | (labels > DETECT_FREQUENCY_TO)]
        signal_quality = max_amplitude / np.mean(adjacent_amplitudes)
        good_signal_quality = signal_quality > MIN_SIGNAL_QUALITY
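        # note: amplitudes are raw FFT magnitudes, not decibels, so the "rDB" values
        # logged below are relative units rather than calibrated dB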
# conclude detection
if (
max_freq_detected and
good_signal_quality
):
block_date = recording_date + datetime.timedelta(seconds=sample_num / samplerate)
# detecting an event
if not current_event:
current_event = {
'start_at': block_date,
'end_at': block_date,
'start_sample': sample_num,
'end_sample': sample_num + samples_per_block,
'start_freq': max_freq,
'end_freq': max_freq,
'max_amplitude': max_amplitude,
}
else:
current_event.update({
'end_at': block_date,
'end_freq': max_freq,
'end_sample': sample_num + samples_per_block,
'max_amplitude': max(max_amplitude, current_event['max_amplitude']),
})
print(f'- {block_date.strftime('%Y-%m-%d %H:%M:%S')}: {max_amplitude:.1f}rDB @ {max_freq:.1f}Hz (signal {signal_quality:.3f}x)')
        else:
            # not detecting an event
            if current_event:
                duration = (current_event['end_at'] - current_event['start_at']).total_seconds()
                print(f"🔊 {current_event['start_at'].strftime('%Y-%m-%d %H:%M:%S')} ({duration:.1f}s): {current_event['start_freq']:.1f}Hz->{current_event['end_freq']:.1f}Hz @{current_event['max_amplitude']:.0f}rDB")
                write_event(current_event=current_event, sound=sound, samplerate=samplerate)
                current_event = None
                # enforce the minimum distance between detections
                sample_num += DETECTION_DISTANCE_BLOCKS * samples_per_block
        sample_num += samples_per_block - overlapping_samples
    # close out an event that is still open when the recording ends
    if current_event:
        write_event(current_event=current_event, sound=sound, samplerate=samplerate)


# write a FLAC clip and a spectrogram using the sound from start to end of the event
def write_event(current_event, sound, samplerate):
    # clamp the padded range so the slice stays inside the recording
    event_start_sample = max(0, current_event['start_sample'] - samplerate * PLOT_PADDING_START_SECONDS)
    event_end_sample = min(len(sound), current_event['end_sample'] + samplerate * PLOT_PADDING_END_SECONDS)
    event_clip = sound[event_start_sample:event_end_sample]
    event = current_event['start_at'] - datetime.timedelta(seconds=PLOT_PADDING_START_SECONDS)
    filename_prefix = current_event['start_at'].strftime('%Y-%m-%d_%H-%M-%S.%f%z')
    # write flac
    flac_path = os.path.join(DETECTIONS_DIR, f"{filename_prefix}.flac")
    soundfile.write(flac_path, event_clip, samplerate, format='FLAC')
    # write spectrogram
    plt.figure(figsize=(8, 6))
    plt.specgram(event_clip, Fs=samplerate, NFFT=samplerate, noverlap=samplerate//2, cmap='inferno', vmin=-100, vmax=-10)
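    # NFFT=samplerate yields 1 Hz frequency bins; the half-window noverlap gives a 0.5 s hop on the time axis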
    plt.title(f"Bootshorn @{event.strftime('%Y-%m-%d %H:%M:%S')}")
    plt.xlabel("Time (s)")
    plt.ylabel("Frequency (Hz)")
    plt.colorbar(label="Intensity (dB)")
    plt.ylim(50, 1000)  # limit the plot to the band around the target frequency
    spectrogram_path = os.path.join(DETECTIONS_DIR, f"{filename_prefix}.png")
    plt.savefig(spectrogram_path)
    plt.close()


def main():
    os.makedirs(RECORDINGS_DIR, exist_ok=True)
    os.makedirs(PROCESSED_RECORDINGS_DIR, exist_ok=True)
    os.makedirs(DETECTIONS_DIR, exist_ok=True)  # write_event writes here
    for filename in sorted(os.listdir(RECORDINGS_DIR)):
        if filename.endswith(".flac"):
            try:
                process_recording(filename)
                # move finished recordings aside so they are not picked up again on the next run
                shutil.move(os.path.join(RECORDINGS_DIR, filename), os.path.join(PROCESSED_RECORDINGS_DIR, filename))
            except Exception as e:
                print(f"Error processing {filename}: {e}")
                traceback.print_exc()


if __name__ == "__main__":
    main()