#!/usr/bin/env python3
"""
ACOUSTIC FINGERPRINT ANALYSIS — FOLLOWTHEEPICENTER.COM
State of Utah v. Tyler Robinson | UCCU Center, Sept 10, 2025
REPRODUCIBLE ANALYSIS PIPELINE

This script loads REAL AUDIO from the source video files, performs all signal
processing from scratch, and outputs independently verifiable results.
No hardcoded waveforms. No synthetic signals. Every result is derived from
the actual recorded audio data.

Requirements:
    pip install numpy scipy matplotlib
    ffmpeg must be installed and on PATH

Usage:
    1. Place source video files in a folder called 'videos/'
       Expected files: 2.MOV, 7.mp4, 1.mp4, 13.mp4, IMG_6368.MOV, Video2_1.mp4
    2. Run: python acoustic_fingerprint_analysis.py
    3. Results are printed to console and saved as PNG plots in 'results/'

Optional flags:
    --video-dir PATH     Path to video folder (default: videos/)
    --output-dir PATH    Path for output plots (default: results/)
    --camera NAME        Run analysis on a single camera only
    --verbose            Print detailed per-sample diagnostics

What this script does:
    1. Extracts stereo audio from each video file via ffmpeg
    2. Identifies the event frame onset in each recording
    3. Bandpass-filters into five signature frequency bands
    4. Detects onset times for each signature in each camera
    5. Computes stereo ILD (interaural level difference) per band
    6. Performs TDOA multilateration to localize each signature's source
    7. Searches for the 4940 Hz tonal peak and measures its statistics
    8. Generates diagnostic plots for independent verification

Author: Jon Bray — followtheepicenter.com
License: Public domain. Use freely. Verify independently.
"""

import numpy as np
from scipy.signal import butter, filtfilt, find_peaks, spectrogram as scipy_spectrogram
from scipy.optimize import least_squares

# Plotting is optional: the numeric pipeline (filtering, onset detection,
# TDOA, tonal statistics) depends only on numpy/scipy.  A missing matplotlib
# install therefore only disables the diagnostic PNGs instead of killing the
# whole analysis at import time.
try:
    import matplotlib
    matplotlib.use('Agg')  # headless backend — we only ever write PNG files
    import matplotlib.pyplot as plt
except ImportError:
    plt = None

import subprocess
import wave
import os
import sys
import argparse
import json
from datetime import datetime

# ============================================================================
# CONFIGURATION — Physical layout of cameras and scene
# ============================================================================

# Camera positions from GPS measurements (meters, tent = origin)
# Verified via Google Earth: 40°16'39.07"N, 111°42'50.54"W = tent center
CAMERA_POSITIONS = {
    '2.MOV':        np.array([3.77, -8.34]),   # Southwest, closest to van
    '7.mp4':        np.array([-2.12, 16.06]),  # North side
    '1.mp4':        np.array([5.18, 2.16]),    # East of tent
    '13.mp4':       np.array([4.01, 0.00]),    # Due east, closest to tent
    'IMG_6368.MOV': np.array([8.25, -5.87]),   # Southeast
    'Video2_1.mp4': np.array([8.25, 0.62]),    # East
}

# Event frames — visually identified frame where the event is first visible
# (frame_number, recording_fps)
EVENT_FRAMES = {
    '2.MOV':        (507, 59.94),
    '7.mp4':        (759, 29.97),
    '1.mp4':        (52, 30.0),
    '13.mp4':       (252, 30.0),
    'IMG_6368.MOV': (55, 29.97),
    'Video2_1.mp4': (68, 30.0),
}

# Reference positions (meters, tent = origin)
TENT_ORIGIN = np.array([0.0, 0.0])
VAN_POSITION = np.array([-1.41, -4.63])
SHOOTER_POSITION = np.array([106.98, 69.19])  # 127.4m from tent

SPEED_OF_SOUND = 343.0  # m/s at ~20°C

# Five acoustic signatures — frequency bands and expected arrival windows
# Search windows are in milliseconds relative to the event frame
SIGNATURES = {
    'mach_cone': {
        'low': 500, 'high': 8000, 'search_ms': (-20, 80),
        'name': 'Mach Cone (N-wave)',
        'description': 'Supersonic projectile shock wave, broadband impulse'
    },
    'rode_detonation': {
        'low': 200, 'high': 6000, 'search_ms': (10, 120),
        'name': 'RØDE Detonation',
        'description': 'RØDE wireless transmitter detonation impulse'
    },
    'chest_resonance': {
        'low': 20, 'high': 200, 'search_ms': (30, 280),
        'name': 'Chest Cavity Resonance',
        'description': 'Low-frequency Helmholtz-mode resonance of thoracic cavity'
    },
    'muzzle_blast': {
        'low': 30, 'high': 500, 'search_ms': (150, 420),
        'name': 'Muzzle Blast',
        'description': 'Propellant gas expansion at muzzle, arrives after projectile'
    },
    'tone_4940': {
        'low': 4700, 'high': 5200, 'search_ms': (-20, 150),
        'name': '4940 Hz Strouhal Tone',
        'description': 'Ballistic gel venting resonance from .30 cal cavity'
    },
}


# ============================================================================
# AUDIO I/O — Extract and load real audio from video files
# ============================================================================

def extract_audio(video_path, wav_path):
    """
    Extract stereo PCM audio from a video file using ffmpeg.
    Preserves the native sample rate of the recording device.

    Returns True on success (or cached result), False on ffmpeg failure.
    """
    if os.path.exists(wav_path):
        print(f"  [cached] {os.path.basename(wav_path)}")
        return True
    print(f"  Extracting: {os.path.basename(video_path)} -> {os.path.basename(wav_path)}")
    # List-form argv (shell=False): filenames with spaces are safe.
    result = subprocess.run(
        ['ffmpeg', '-y', '-i', video_path, '-vn', '-acodec', 'pcm_s16le',
         '-ac', '2', wav_path],
        capture_output=True, text=True
    )
    if result.returncode != 0:
        print(f"  ERROR: ffmpeg failed for {video_path}")
        print(f"  stderr: {result.stderr[:500]}")
        return False
    return os.path.exists(wav_path)


def load_wav(path):
    """
    Load a WAV file and return normalized float64 arrays.

    Returns: (left, right, mono, sample_rate)

    Raises:
        ValueError: if the file is not 16-bit PCM.  Our ffmpeg extraction
        always emits pcm_s16le; anything else means the WAV came from a
        different source and the int16 decode below would silently produce
        garbage samples.
    """
    with wave.open(path, 'r') as w:
        sr = w.getframerate()
        nch = w.getnchannels()
        n = w.getnframes()
        sampwidth = w.getsampwidth()
        raw = w.readframes(n)
    if sampwidth != 2:
        raise ValueError(
            f"Expected 16-bit PCM audio, got {8 * sampwidth}-bit: {path}")
    data = np.frombuffer(raw, dtype=np.int16).astype(np.float64) / 32768.0
    if nch >= 2:
        # Interleaved frames: channel k is every nch-th sample offset by k.
        left = data[0::nch]
        right = data[1::nch]
    else:
        left = data
        right = data.copy()
    mono = (left + right) / 2.0
    return left, right, mono, sr


# ============================================================================
# SIGNAL PROCESSING — All filters operate on the loaded audio
# ============================================================================

def bandpass_filter(data, lo_hz, hi_hz, sample_rate, order=4):
    """
    Apply a Butterworth bandpass filter to the signal.
    Returns the filtered signal (same length as input).

    Falls back to returning the input unchanged (with a warning) when the
    band is invalid at this sample rate or the signal is too short for
    zero-phase filtering.
    """
    nyq = 0.5 * sample_rate
    # Clamp normalized edges into (0, 1) — butter() rejects 0 and 1 exactly.
    lo_norm = max(lo_hz / nyq, 0.001)
    hi_norm = min(hi_hz / nyq, 0.999)
    if lo_norm >= hi_norm:
        print(f"  WARNING: Invalid filter band {lo_hz}-{hi_hz} Hz at {sample_rate} Hz SR")
        return data
    b, a = butter(order, [lo_norm, hi_norm], btype='band')
    # filtfilt needs padlen < len(data); also skip hopeless short signals.
    pad_len = min(3 * max(len(b), len(a)), len(data) - 1)
    if pad_len < 10 or len(data) < pad_len * 3:
        print(f"  WARNING: Signal too short for filter ({len(data)} samples)")
        return data
    return filtfilt(b, a, data, padlen=pad_len)


def compute_envelope(signal, sample_rate, window_ms=2.0):
    """
    Compute RMS envelope of a signal with a sliding window.

    Returns: (times_s, envelope) — window-center times in seconds and the
    RMS value of each (50%-overlapping) window.
    """
    win_samples = max(int(sample_rate * window_ms / 1000), 4)
    step = max(1, win_samples // 2)
    n_windows = (len(signal) - win_samples) // step + 1
    if n_windows <= 0:
        return np.array([0.0]), np.array([0.0])
    envelope = np.zeros(n_windows)
    times = np.zeros(n_windows)
    for i in range(n_windows):
        start = i * step
        segment = signal[start:start + win_samples]
        envelope[i] = np.sqrt(np.mean(segment ** 2))
        times[i] = (start + win_samples / 2) / sample_rate
    return times, envelope


def detect_onset(signal, sample_rate, threshold_factor=4.0):
    """
    Detect the onset time of an impulsive event using envelope derivative
    with adaptive noise-floor thresholding.

    Returns: onset time in seconds relative to start of signal.
    Falls back to the envelope-peak time when no derivative sample exceeds
    the adaptive threshold (or the noise floor is degenerate).
    """
    times, envelope = compute_envelope(signal, sample_rate, window_ms=2.0)
    if len(envelope) < 5:
        return 0.0
    deriv = np.diff(envelope)
    # Estimate noise floor from first quarter of the derivative — the search
    # windows are chosen so the event never starts that early.
    noise_region = max(5, len(deriv) // 4)
    noise_std = np.std(deriv[:noise_region])
    if noise_std < 1e-12:
        return times[np.argmax(envelope)]
    threshold = noise_std * threshold_factor
    crossings = np.nonzero(deriv > threshold)[0]
    if crossings.size:
        return times[crossings[0]]
    # Fallback: return peak time
    return times[np.argmax(envelope)]


def compute_ild(left_signal, right_signal, lo_hz, hi_hz, sample_rate):
    """
    Compute Interaural Level Difference (ILD) in dB for a frequency band.
    Positive = louder in left channel, negative = louder in right.
    """
    left_filtered = bandpass_filter(left_signal, lo_hz, hi_hz, sample_rate)
    right_filtered = bandpass_filter(right_signal, lo_hz, hi_hz, sample_rate)
    # +1e-15 guards the log against an exactly-silent channel.
    left_energy = np.mean(left_filtered ** 2) + 1e-15
    right_energy = np.mean(right_filtered ** 2) + 1e-15
    return 10.0 * np.log10(left_energy / right_energy)


def compute_running_ild(left_signal, right_signal, sample_rate,
                        window_ms=10.0, step_ms=3.0):
    """
    Compute time-varying ILD with sliding windows.

    Returns: (times_ms, ild_values_dB)
    """
    win = int(sample_rate * window_ms / 1000)
    step = int(sample_rate * step_ms / 1000)
    ilds = []
    times = []
    for j in range(0, len(left_signal) - win, step):
        l_e = np.mean(left_signal[j:j + win] ** 2) + 1e-15
        r_e = np.mean(right_signal[j:j + win] ** 2) + 1e-15
        ilds.append(10.0 * np.log10(l_e / r_e))
        times.append((j + win / 2) / sample_rate * 1000)
    return np.array(times), np.array(ilds)


# ============================================================================
# TDOA MULTILATERATION — Localize acoustic source from onset delays
# ============================================================================

def multilaterate(onset_times, camera_positions, speed_of_sound=343.0,
                  rng_seed=0):
    """
    Solve for source location (x, y) and emission time using TDOA
    (time difference of arrival) from multiple microphones.

    Uses nonlinear least-squares with multiple initializations (fixed
    reference points plus seeded random perturbations) to avoid local
    minima.  The random restarts are drawn from a seeded generator so that
    repeated runs of this "reproducible pipeline" give identical results;
    pass a different `rng_seed` to vary the restarts.

    Returns: (location_xy, residual_cost) or (None, inf)
    """
    cameras = sorted(onset_times.keys())
    if len(cameras) < 3:
        # 3 unknowns (x, y, t_emit) need at least 3 arrival times.
        return None, float('inf')
    positions = np.array([camera_positions[c] for c in cameras])
    times = np.array([onset_times[c] for c in cameras])

    def residuals(params):
        x, y, t_emit = params
        source = np.array([x, y])
        predicted = t_emit + np.linalg.norm(positions - source, axis=1) / speed_of_sound
        return predicted - times

    best_result = None
    best_cost = float('inf')

    # Try multiple initializations
    init_points = [
        [0, 0, np.min(times) - 0.5],                                           # Tent origin
        [VAN_POSITION[0], VAN_POSITION[1], np.min(times) - 0.5],               # Van
        [np.mean(positions[:, 0]), np.mean(positions[:, 1]), np.min(times) - 0.3],  # Centroid
    ]
    # Add seeded random perturbations (deterministic across runs)
    rng = np.random.default_rng(rng_seed)
    for _ in range(10):
        init_points.append([
            rng.uniform(-20, 120),
            rng.uniform(-20, 80),
            np.min(times) - rng.uniform(0, 1)
        ])

    for init in init_points:
        try:
            result = least_squares(residuals, init, method='lm', max_nfev=5000)
            if result.cost < best_cost:
                best_cost = result.cost
                best_result = result.x
        except Exception:
            # A single divergent start must not abort the search; the
            # remaining initializations are still tried.
            continue

    if best_result is not None:
        return best_result[:2], best_cost
    return None, float('inf')


# ============================================================================
# 4940 Hz TONAL ANALYSIS — Statistical detection of the Strouhal peak
# ============================================================================

def analyze_4940hz(mono, sample_rate, event_sample, window_ms=100):
    """
    Search for a tonal peak near 4940 Hz in the post-event audio.
    Computes SNR, deviation from expected frequency, and statistical
    significance against the noise floor.

    Returns a dict of measurements; key 'detected' is always present.
    """
    # Extract post-event segment
    start = event_sample
    end = min(event_sample + int(sample_rate * window_ms / 1000), len(mono))
    segment = mono[start:end]
    if len(segment) < 1024:
        return {'detected': False, 'reason': 'segment too short'}

    # Compute FFT (Hann window to suppress leakage from nearby content)
    n_fft = len(segment)
    window = np.hanning(n_fft)
    spectrum = np.abs(np.fft.rfft(segment * window))
    freqs = np.fft.rfftfreq(n_fft, 1.0 / sample_rate)

    # Convert to dB
    spectrum_db = 20 * np.log10(spectrum + 1e-12)

    # Find peak near 4940 Hz (search 4700-5200 Hz)
    band_mask = (freqs >= 4700) & (freqs <= 5200)
    if not np.any(band_mask):
        return {'detected': False, 'reason': 'frequency range not covered'}
    band_spectrum = spectrum[band_mask]
    band_freqs = freqs[band_mask]
    peak_idx = np.argmax(band_spectrum)
    peak_freq = band_freqs[peak_idx]
    peak_amplitude = band_spectrum[peak_idx]

    # Compute noise floor (exclude the 4700-5200 Hz band)
    noise_mask = ~band_mask & (freqs > 100) & (freqs < sample_rate / 2 * 0.9)
    noise_spectrum = spectrum[noise_mask]
    if len(noise_spectrum) < 10:
        return {'detected': False, 'reason': 'insufficient noise samples'}
    noise_mean = np.mean(noise_spectrum)
    noise_std = np.std(noise_spectrum)

    # Statistical significance
    if noise_std > 0:
        sigma_above = (peak_amplitude - noise_mean) / noise_std
    else:
        sigma_above = 0

    # SNR in dB (amplitude ratio, hence 20*log10)
    snr_db = 20 * np.log10(peak_amplitude / (noise_mean + 1e-12))

    # Deviation from expected 4940 Hz
    freq_deviation_hz = peak_freq - 4940.0
    freq_deviation_pct = (freq_deviation_hz / 4940.0) * 100

    return {
        'detected': True,
        'peak_freq_hz': float(peak_freq),
        'peak_amplitude': float(peak_amplitude),
        'snr_db': float(snr_db),
        'sigma_above_noise': float(sigma_above),
        'noise_mean': float(noise_mean),
        'noise_std': float(noise_std),
        'freq_deviation_hz': float(freq_deviation_hz),
        'freq_deviation_pct': float(freq_deviation_pct),
    }


# ============================================================================
# PLOTTING — Diagnostic output for independent verification
# ============================================================================

def plot_camera_diagnostics(cam_name, mono, left, right, sample_rate,
                            event_sample, output_dir):
    """
    Generate a multi-panel diagnostic plot for a single camera showing:
    - Raw waveform around event
    - Spectrogram
    - Per-band filtered waveforms
    - Running ILD
    - 4940 Hz narrowband trace
    """
    if plt is None:
        print("  matplotlib not installed — skipping diagnostic plot")
        return
    fig, axes = plt.subplots(5, 1, figsize=(16, 20))
    fig.suptitle(f'Acoustic Diagnostics: {cam_name}', fontsize=16, fontweight='bold')

    # Time window: -50ms to +300ms around event
    pre_ms, post_ms = 50, 300
    s = max(0, event_sample - int(sample_rate * pre_ms / 1000))
    e = min(len(mono), event_sample + int(sample_rate * post_ms / 1000))
    seg = mono[s:e]
    t_ms = (np.arange(len(seg)) - (event_sample - s)) / sample_rate * 1000

    # Panel 1: Raw waveform
    axes[0].plot(t_ms, seg, linewidth=0.5, color='white', alpha=0.8)
    axes[0].axvline(0, color='red', linestyle='--', alpha=0.7, label='Event frame')
    axes[0].set_ylabel('Amplitude')
    axes[0].set_title('Raw Mono Waveform')
    axes[0].legend()
    axes[0].set_facecolor('black')

    # Panel 2: Spectrogram
    if len(seg) > 256:
        nperseg = min(1024, len(seg) // 4)
        f_spec, t_spec, Sxx = scipy_spectrogram(seg, fs=sample_rate, nperseg=nperseg,
                                                noverlap=nperseg - nperseg // 8,
                                                window='hann')
        t_spec_ms = t_spec * 1000 - pre_ms
        Sxx_db = 10 * np.log10(Sxx + 1e-12)
        axes[1].pcolormesh(t_spec_ms, f_spec, Sxx_db, shading='gouraud',
                           cmap='inferno',
                           vmin=np.percentile(Sxx_db, 5),
                           vmax=np.percentile(Sxx_db, 99))
        axes[1].set_ylabel('Frequency (Hz)')
        axes[1].set_title('Spectrogram (dB)')
        axes[1].set_ylim(0, min(10000, sample_rate / 2))
    else:
        axes[1].text(0.5, 0.5, 'Segment too short', transform=axes[1].transAxes,
                     ha='center', fontsize=14)

    # Panel 3: Bandpass-filtered signatures (stacked, each normalized)
    colors = ['#00ff88', '#ff4444', '#4488ff', '#ffaa00', '#ff44ff']
    for idx, (sig_name, sig_info) in enumerate(SIGNATURES.items()):
        filtered = bandpass_filter(seg, sig_info['low'], sig_info['high'], sample_rate)
        # Normalize for display
        fmax = np.max(np.abs(filtered)) + 1e-12
        axes[2].plot(t_ms, filtered / fmax + idx * 2.5, linewidth=0.5,
                     color=colors[idx],
                     label=f"{sig_info['name']} ({sig_info['low']}-{sig_info['high']}Hz)")
    axes[2].axvline(0, color='red', linestyle='--', alpha=0.5)
    axes[2].set_ylabel('Normalized (stacked)')
    axes[2].set_title('Five Signature Bands (bandpass filtered from audio)')
    axes[2].legend(fontsize=8, loc='upper right')
    axes[2].set_facecolor('#111111')

    # Panel 4: Running ILD per band
    seg_l = left[s:e]
    seg_r = right[s:e]
    for idx, (sig_name, sig_info) in enumerate(SIGNATURES.items()):
        l_filt = bandpass_filter(seg_l, sig_info['low'], sig_info['high'], sample_rate)
        r_filt = bandpass_filter(seg_r, sig_info['low'], sig_info['high'], sample_rate)
        t_ild, ild_vals = compute_running_ild(l_filt, r_filt, sample_rate)
        t_ild_shifted = t_ild - pre_ms
        axes[3].plot(t_ild_shifted, ild_vals, linewidth=1, color=colors[idx],
                     label=sig_info['name'], alpha=0.8)
    axes[3].axhline(0, color='gray', linestyle='-', alpha=0.3)
    axes[3].axvline(0, color='red', linestyle='--', alpha=0.5)
    axes[3].set_ylabel('ILD (dB, +L/-R)')
    axes[3].set_title('Stereo ILD per Signature Band')
    axes[3].legend(fontsize=8, loc='upper right')
    axes[3].set_ylim(-15, 15)

    # Panel 5: 4940 Hz narrowband
    tone_filt = bandpass_filter(seg, 4700, 5200, sample_rate)
    axes[4].plot(t_ms, tone_filt, linewidth=0.5, color='#ff44ff')
    axes[4].axvline(0, color='red', linestyle='--', alpha=0.5)
    axes[4].set_ylabel('Amplitude')
    axes[4].set_xlabel('Time (ms relative to event frame)')
    axes[4].set_title('4940 Hz Band (4700-5200 Hz bandpass)')
    axes[4].set_facecolor('#111111')

    plt.tight_layout()
    outpath = os.path.join(output_dir, f'diagnostics_{cam_name.replace(".", "_")}.png')
    plt.savefig(outpath, dpi=150, facecolor='#1a1a1a')
    plt.close()
    print(f"  Saved: {outpath}")


def plot_tdoa_map(results, output_dir):
    """
    Plot the TDOA multilateration results showing localized source positions
    for each acoustic signature overlaid on the scene geometry.
    """
    if plt is None:
        print("  matplotlib not installed — skipping TDOA map")
        return
    fig, ax = plt.subplots(1, 1, figsize=(14, 12))
    fig.patch.set_facecolor('#1a1a1a')
    ax.set_facecolor('#111111')

    # Plot camera positions
    for cam, pos in CAMERA_POSITIONS.items():
        ax.plot(pos[0], pos[1], 'ws', markersize=8)
        ax.annotate(cam, (pos[0], pos[1]), textcoords="offset points",
                    xytext=(5, 5), fontsize=7, color='white')

    # Plot reference points
    ax.plot(*TENT_ORIGIN, 'g^', markersize=15, label='Tent (origin)', zorder=5)
    ax.plot(*VAN_POSITION, 'rv', markersize=15, label='Van', zorder=5)

    # Arrow to shooter (off-scale, so only the bearing is drawn)
    angle = np.arctan2(SHOOTER_POSITION[1], SHOOTER_POSITION[0])
    arrow_len = 20
    ax.annotate(f'Shooter ({np.linalg.norm(SHOOTER_POSITION):.0f}m)',
                xy=(arrow_len * np.cos(angle), arrow_len * np.sin(angle)),
                xytext=(15 * np.cos(angle), 15 * np.sin(angle)),
                fontsize=9, color='orange',
                arrowprops=dict(arrowstyle='->', color='orange'))

    # Plot localized signature sources (skip divergent fits far off-scene)
    colors = {'mach_cone': '#00ff88', 'rode_detonation': '#ff4444',
              'chest_resonance': '#4488ff', 'muzzle_blast': '#ffaa00',
              'tone_4940': '#ff44ff'}
    for sig_name, sig_result in results.items():
        loc = sig_result.get('location')
        if loc is not None and np.linalg.norm(loc) < 200:
            ax.plot(loc[0], loc[1], 'o', color=colors.get(sig_name, 'white'),
                    markersize=12, markeredgecolor='white', markeredgewidth=1,
                    label=f"{SIGNATURES[sig_name]['name']}: ({loc[0]:.1f}, {loc[1]:.1f})m",
                    zorder=10)

    ax.set_xlabel('X (meters, East+)', color='white')
    ax.set_ylabel('Y (meters, North+)', color='white')
    ax.set_title('TDOA Source Localization — Five Acoustic Signatures',
                 fontsize=14, fontweight='bold', color='white')
    ax.legend(fontsize=9, loc='upper left', facecolor='#222222',
              edgecolor='gray', labelcolor='white')
    ax.grid(True, alpha=0.2)
    ax.set_aspect('equal')
    ax.tick_params(colors='white')

    plt.tight_layout()
    outpath = os.path.join(output_dir, 'tdoa_localization_map.png')
    plt.savefig(outpath, dpi=150, facecolor='#1a1a1a')
    plt.close()
    print(f"  Saved TDOA map: {outpath}")


def plot_4940hz_analysis(tone_results, output_dir):
    """
    Plot 4940 Hz detection results across all cameras.
    """
    if plt is None:
        print("  matplotlib not installed — skipping 4940 Hz plot")
        return
    detected = {cam: r for cam, r in tone_results.items() if r.get('detected')}
    if not detected:
        print("  No 4940 Hz detections to plot.")
        return

    fig, axes = plt.subplots(1, 2, figsize=(14, 6))
    fig.patch.set_facecolor('#1a1a1a')

    cameras = sorted(detected.keys())
    peak_freqs = [detected[c]['peak_freq_hz'] for c in cameras]
    snrs = [detected[c]['snr_db'] for c in cameras]
    sigmas = [detected[c]['sigma_above_noise'] for c in cameras]

    # Panel 1: Peak frequencies
    ax = axes[0]
    ax.set_facecolor('#111111')
    ax.barh(range(len(cameras)), peak_freqs, color='#ff44ff', alpha=0.8)
    ax.axvline(4940, color='white', linestyle='--', alpha=0.5, label='Expected: 4940 Hz')
    ax.set_yticks(range(len(cameras)))
    ax.set_yticklabels(cameras, color='white', fontsize=9)
    ax.set_xlabel('Peak Frequency (Hz)', color='white')
    ax.set_title('Detected Peak Frequency', color='white')
    ax.legend(facecolor='#222222', edgecolor='gray', labelcolor='white')
    ax.tick_params(colors='white')

    # Panel 2: SNR
    ax = axes[1]
    ax.set_facecolor('#111111')
    ax.barh(range(len(cameras)), snrs, color='#00ff88', alpha=0.8)
    ax.set_yticks(range(len(cameras)))
    ax.set_yticklabels(cameras, color='white', fontsize=9)
    ax.set_xlabel('SNR (dB)', color='white')
    ax.set_title('Signal-to-Noise Ratio in 4940 Hz Band', color='white')
    ax.tick_params(colors='white')

    plt.tight_layout()
    outpath = os.path.join(output_dir, '4940hz_analysis.png')
    plt.savefig(outpath, dpi=150, facecolor='#1a1a1a')
    plt.close()
    print(f"  Saved 4940 Hz plot: {outpath}")


# ============================================================================
# MAIN ANALYSIS PIPELINE
# ============================================================================

def run_analysis(video_dir='videos', output_dir='results', single_camera=None,
                 verbose=False):
    """
    Main entry point. Runs the complete five-signature acoustic analysis.

    Parameters:
        video_dir     — folder containing the source video files
        output_dir    — folder for WAV cache, plots, and JSON results
        single_camera — restrict analysis to one camera (multilateration is
                        then skipped, since it needs >= 3 cameras)
        verbose       — print per-camera onset/ILD diagnostics
    """
    print("=" * 80)
    print("  ACOUSTIC FINGERPRINT ANALYSIS — followtheepicenter.com")
    print("  State of Utah v. Tyler Robinson | UCCU Center, Sept 10, 2025")
    print("=" * 80)
    print(f"  Video directory:  {os.path.abspath(video_dir)}")
    print(f"  Output directory: {os.path.abspath(output_dir)}")
    print(f"  Analysis started: {datetime.now().isoformat()}")
    print()

    os.makedirs(output_dir, exist_ok=True)
    wav_dir = os.path.join(output_dir, 'wav_extracted')
    os.makedirs(wav_dir, exist_ok=True)

    # ----------------------------------------------------------------
    # Step 1: Load audio from all available video files
    # ----------------------------------------------------------------
    print("STEP 1: EXTRACTING & LOADING AUDIO FROM VIDEO FILES")
    print("-" * 60)

    audio_data = {}
    cameras_to_process = [single_camera] if single_camera else list(CAMERA_POSITIONS.keys())

    for cam_name in cameras_to_process:
        video_path = os.path.join(video_dir, cam_name)
        if not os.path.exists(video_path):
            print(f"  MISSING: {video_path} — skipping")
            continue
        wav_path = os.path.join(wav_dir, cam_name.rsplit('.', 1)[0] + '.wav')
        if not extract_audio(video_path, wav_path):
            continue
        left, right, mono, sr = load_wav(wav_path)
        # Compute event sample from frame number and fps
        frame_num, fps = EVENT_FRAMES[cam_name]
        event_sample = int((frame_num / fps) * sr)
        file_size_mb = os.path.getsize(video_path) / (1024 * 1024)
        duration_s = len(mono) / sr
        print(f"  {cam_name}: {sr} Hz, {duration_s:.1f}s, "
              f"{len(mono):,} samples, event @ sample {event_sample:,} "
              f"(frame {frame_num} @ {fps} fps), file: {file_size_mb:.1f} MB")
        audio_data[cam_name] = {
            'left': left, 'right': right, 'mono': mono,
            'sr': sr, 'event_sample': event_sample,
        }

    # The 3-camera minimum applies only to multilateration; a deliberate
    # --camera run proceeds (localization is skipped automatically because
    # multilaterate() returns None for < 3 cameras).
    if not audio_data:
        print("\n  ERROR: No audio could be loaded.")
        print("  Place video files in the 'videos/' folder and try again.")
        sys.exit(1)
    if len(audio_data) < 3 and single_camera is None:
        print(f"\n  ERROR: Need at least 3 cameras for multilateration, "
              f"found {len(audio_data)}.")
        print("  Place video files in the 'videos/' folder and try again.")
        sys.exit(1)

    print(f"\n  Loaded {len(audio_data)} cameras successfully.\n")

    # ----------------------------------------------------------------
    # Step 2: Five-signature onset detection & TDOA multilateration
    # ----------------------------------------------------------------
    print("STEP 2: FIVE-SIGNATURE ANALYSIS")
    print("-" * 60)

    all_results = {}
    for sig_name, sig_info in SIGNATURES.items():
        print(f"\n  [{sig_info['name']}] ({sig_info['low']}-{sig_info['high']} Hz)")
        print(f"  Search window: {sig_info['search_ms'][0]} to {sig_info['search_ms'][1]} ms")
        t_start_ms, t_end_ms = sig_info['search_ms']

        onsets = {}
        stereo_ilds = {}
        for cam_name, ad in audio_data.items():
            sr = ad['sr']
            es = ad['event_sample']
            mono = ad['mono']
            # Extract search window (clamped to the recording)
            s = max(0, es + int(sr * t_start_ms / 1000))
            e = min(len(mono), es + int(sr * t_end_ms / 1000))
            seg = mono[s:e]
            if len(seg) < 100:
                print(f"    {cam_name}: segment too short ({len(seg)} samples), skipping")
                continue
            # Bandpass filter and detect onset
            filtered = bandpass_filter(seg, sig_info['low'], sig_info['high'], sr)
            onset_relative = detect_onset(filtered, sr)
            # Onset relative to the event frame, in seconds
            onset_absolute = t_start_ms / 1000 + onset_relative
            onsets[cam_name] = onset_absolute
            # Stereo ILD
            seg_l = ad['left'][s:e]
            seg_r = ad['right'][s:e]
            ild = compute_ild(seg_l, seg_r, sig_info['low'], sig_info['high'], sr)
            stereo_ilds[cam_name] = ild
            dom = 'L' if ild > 0.5 else ('R' if ild < -0.5 else '~')
            if verbose:
                print(f"    {cam_name:18s}: onset {onset_absolute * 1000:+8.2f} ms, "
                      f"ILD {ild:+5.1f} dB ({dom})")

        # Multilaterate
        loc, cost = multilaterate(onsets, CAMERA_POSITIONS)
        if loc is not None and np.linalg.norm(loc) < 200:
            d_tent = np.linalg.norm(loc - TENT_ORIGIN)
            d_van = np.linalg.norm(loc - VAN_POSITION)
            d_shooter = np.linalg.norm(loc - SHOOTER_POSITION)
            print(f"    LOCALIZED: ({loc[0]:.2f}, {loc[1]:.2f}) m")
            print(f"      Distance to tent:    {d_tent:.2f} m")
            print(f"      Distance to van:     {d_van:.2f} m")
            print(f"      Distance to shooter: {d_shooter:.1f} m")
            print(f"      Residual cost: {cost:.6f}")
        else:
            print(f"    Could not localize (insufficient data or divergent solution)")

        all_results[sig_name] = {
            'location': loc,
            'cost': cost,
            'onsets': onsets,
            'stereo_ilds': stereo_ilds,
        }

    # ----------------------------------------------------------------
    # Step 3: 4940 Hz tonal analysis
    # ----------------------------------------------------------------
    print(f"\n\nSTEP 3: 4940 Hz TONAL ANALYSIS")
    print("-" * 60)

    tone_results = {}
    for cam_name, ad in audio_data.items():
        result = analyze_4940hz(ad['mono'], ad['sr'], ad['event_sample'])
        tone_results[cam_name] = result
        if result['detected']:
            print(f"  {cam_name:18s}: {result['peak_freq_hz']:.1f} Hz, "
                  f"SNR {result['snr_db']:.1f} dB, "
                  f"{result['sigma_above_noise']:.0f}σ above noise, "
                  f"deviation {result['freq_deviation_hz']:+.1f} Hz "
                  f"({result['freq_deviation_pct']:+.2f}%)")
        else:
            print(f"  {cam_name:18s}: not detected ({result.get('reason', 'unknown')})")

    # Cross-camera frequency agreement
    detected_freqs = [r['peak_freq_hz'] for r in tone_results.values() if r.get('detected')]
    if len(detected_freqs) >= 2:
        freq_spread = max(detected_freqs) - min(detected_freqs)
        freq_mean = np.mean(detected_freqs)
        freq_agreement_pct = (1 - freq_spread / freq_mean) * 100
        print(f"\n  Cross-camera agreement: {freq_agreement_pct:.2f}%")
        print(f"  Frequency spread: {freq_spread:.1f} Hz across {len(detected_freqs)} cameras")
        print(f"  Mean detected frequency: {freq_mean:.1f} Hz")

    # ----------------------------------------------------------------
    # Step 4: Generate diagnostic plots
    # ----------------------------------------------------------------
    print(f"\n\nSTEP 4: GENERATING DIAGNOSTIC PLOTS")
    print("-" * 60)

    for cam_name, ad in audio_data.items():
        print(f"  Plotting {cam_name}...")
        plot_camera_diagnostics(cam_name, ad['mono'], ad['left'], ad['right'],
                                ad['sr'], ad['event_sample'], output_dir)
    plot_tdoa_map(all_results, output_dir)
    plot_4940hz_analysis(tone_results, output_dir)

    # ----------------------------------------------------------------
    # Step 5: Summary report
    # ----------------------------------------------------------------
    print(f"\n\n{'=' * 80}")
    print("  ANALYSIS COMPLETE — SUMMARY")
    print(f"{'=' * 80}")
    print(f"\n  Cameras analyzed: {len(audio_data)}")
    print(f"  Signatures tested: {len(SIGNATURES)}")

    summary = {
        'timestamp': datetime.now().isoformat(),
        'cameras_analyzed': list(audio_data.keys()),
        'signatures': {},
        'tone_4940': {},
    }

    for sig_name, sig_result in all_results.items():
        loc = sig_result['location']
        loc_str = f"({loc[0]:.2f}, {loc[1]:.2f})" if loc is not None else "N/A"
        d_tent = np.linalg.norm(loc - TENT_ORIGIN) if loc is not None else float('nan')
        d_van = np.linalg.norm(loc - VAN_POSITION) if loc is not None else float('nan')
        print(f"\n  {SIGNATURES[sig_name]['name']}:")
        print(f"    Source location: {loc_str} m")
        if loc is not None:
            print(f"    Tent: {d_tent:.2f} m | Van: {d_van:.2f} m")
            print(f"    Residual: {sig_result['cost']:.6f}")
        summary['signatures'][sig_name] = {
            'location': loc.tolist() if loc is not None else None,
            'cost': float(sig_result['cost']),
            'onsets': {k: float(v) for k, v in sig_result['onsets'].items()},
            'stereo_ilds': {k: float(v) for k, v in sig_result['stereo_ilds'].items()},
        }

    for cam, r in tone_results.items():
        summary['tone_4940'][cam] = r

    # Save JSON results (default=str stringifies anything non-serializable)
    json_path = os.path.join(output_dir, 'analysis_results.json')
    with open(json_path, 'w') as f:
        json.dump(summary, f, indent=2, default=str)
    print(f"\n  Full results saved to: {json_path}")
    print(f"\n  Diagnostic plots saved to: {os.path.abspath(output_dir)}/")
    print(f"\n  Analysis completed: {datetime.now().isoformat()}")
    print("=" * 80)
    print("  Run this script yourself. Verify every result independently.")
    print("  If you get different results, we want to know: followtheepicenter.com")
    print("=" * 80)


# ============================================================================
# CLI
# ============================================================================

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Acoustic Fingerprint Analysis — followtheepicenter.com',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
This script loads REAL AUDIO from video files and performs all signal
processing from scratch. No hardcoded waveforms. No synthetic signals.

Place source video files in a 'videos/' folder and run:
    python acoustic_fingerprint_analysis.py

Requirements: numpy, scipy, matplotlib, ffmpeg
"""
    )
    parser.add_argument('--video-dir', default='videos',
                        help='Path to video folder (default: videos/)')
    parser.add_argument('--output-dir', default='results',
                        help='Path for output plots and data (default: results/)')
    parser.add_argument('--camera', default=None,
                        help='Analyze a single camera only (e.g., 2.MOV)')
    parser.add_argument('--verbose', action='store_true',
                        help='Print detailed per-camera diagnostics')
    args = parser.parse_args()

    run_analysis(
        video_dir=args.video_dir,
        output_dir=args.output_dir,
        single_camera=args.camera,
        verbose=args.verbose,
    )