import scipy.fftpack as fftpack
import numpy as np
import cv2
from scipy.signal import find_peaks

import pyramid
import video


def start(vidFile, alpha, low, high, chromAttenuation, fps, width, height):
    '''
    Performs color magnification on the video by applying an ideal bandpass
    filter: a discrete Fourier transform is applied to the Gaussian-downsampled
    video along the time axis, frequencies outside the passband are cut off,
    and the result is magnified and added back to the original video.
    Additionally, the heartbeat is detected and returned in BPM.

    :param vidFile: Video frames as an array of shape (num_frames, height, width, 3)
    :param alpha: Magnification factor
    :param low: Low frequency cut-off (Hz)
    :param high: High frequency cut-off (Hz)
    :param chromAttenuation: Chrominance attenuation factor
    :param fps: Frames per second of the video
    :param width: Width of the video frame
    :param height: Height of the video frame
    :return: final processed video, heart rate in BPM
    '''
    # Convert from RGB to YIQ for better processing of chrominance information
    t = video.rgb2yiq(vidFile)

    levels = 4

    # Build a Gaussian pyramid and use the highest (smallest) level
    gauss_video_list = pyramid.gaussian_video(t, levels)

    print('Apply ideal filter')
    # Apply the discrete Fourier transform (real input) along the time axis
    fft = fftpack.rfft(gauss_video_list, axis=0)
    frequencies = fftpack.rfftfreq(fft.shape[0], d=1.0 / fps)  # Sample frequencies
    mask = np.logical_and(frequencies > low, frequencies < high)  # True inside the passband
    fft[~mask] = 0  # Cut off frequencies outside the passband
    filtered = fftpack.irfft(fft, axis=0)  # Inverse Fourier transform

    filtered *= alpha  # Magnification

    # Chromatic attenuation: damp the I and Q (chrominance) channels
    filtered[:, :, :, 1] *= chromAttenuation
    filtered[:, :, :, 2] *= chromAttenuation

    # Resize the last Gaussian level back to the original frame size
    filtered_video_list = np.zeros(t.shape)
    for i in range(t.shape[0]):
        f = filtered[i]
        filtered_video_list[i] = cv2.resize(f, (t.shape[2], t.shape[1]))

    final = filtered_video_list

    # Add the magnified signal back to the original
    final += t

    # Convert back from YIQ to RGB
    final = video.yiq2rgb(final)

    # Clip values to the valid [0, 255] range
    final[final < 0] = 0
    final[final > 255] = 255

    # Detect the heartbeat and return BPM
    bpm = detect_heartbeat(filtered_video_list, fps)

    return final, bpm


def detect_heartbeat(video_frames, fps):
    '''
    Detects the heartbeat by analyzing pixel intensity variations in the
    filtered video over time.

    :param video_frames: Processed video frames (filtered and magnified), in YIQ
    :param fps: Frames per second of the video
    :return: Detected heart rate in BPM (beats per minute)
    '''
    # Use channel 1 of the YIQ frames (the I chrominance channel), which
    # carries the blood-flow-induced color variation after filtering
    chroma_channel = video_frames[:, :, :, 1]

    # Average intensity of the channel for each frame -> shape (num_frames,)
    avg_intensity = np.mean(chroma_channel, axis=(1, 2))

    # Normalize the intensity signal to zero mean and unit variance
    avg_intensity -= np.mean(avg_intensity)
    std = np.std(avg_intensity)
    if std > 0:
        avg_intensity /= std

    # Detect peaks in the intensity signal (peaks correspond to heartbeats);
    # enforce at least half a second between consecutive peaks
    peaks, _ = find_peaks(avg_intensity, distance=fps / 2)

    # Time differences between consecutive peaks, in seconds
    peak_intervals = np.diff(peaks) / fps

    if len(peak_intervals) > 0:
        avg_heartbeat_interval = np.mean(peak_intervals)
        bpm = 60 / avg_heartbeat_interval  # Convert to beats per minute
    else:
        bpm = 0  # No peaks detected

    return bpm
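
# ---------------------------------------------------------------------------
# The `pyramid` and `video` modules imported above are sibling modules that
# are not shown in this file. The commented sketch below is an assumption
# inferred from their call sites in start(); the project's real helpers may
# differ. rgb2yiq/yiq2rgb are assumed to apply the standard NTSC YIQ matrices
# per pixel, and gaussian_video is assumed to downsample each frame `levels`
# times with cv2.pyrDown and stack the smallest level into one array.
#
#   import cv2
#   import numpy as np
#
#   _RGB2YIQ = np.array([[0.299, 0.587, 0.114],
#                        [0.596, -0.274, -0.322],
#                        [0.211, -0.523, 0.312]])
#
#   def rgb2yiq(frames):
#       return frames @ _RGB2YIQ.T
#
#   def yiq2rgb(frames):
#       return frames @ np.linalg.inv(_RGB2YIQ).T
#
#   def gaussian_video(frames, levels):
#       out = []
#       for frame in frames:
#           small = frame
#           for _ in range(levels):
#               small = cv2.pyrDown(small)
#           out.append(small)
#       return np.asarray(out)
# ---------------------------------------------------------------------------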
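
# A minimal usage sketch, run as a script. The input file name 'face.mp4' and
# the 0.83-1.0 Hz passband (roughly 50-60 BPM) are illustrative assumptions,
# not values prescribed by this module; start() is assumed to receive the
# video as a float array of RGB frames, as inferred from its indexing above.
if __name__ == '__main__':
    cap = cv2.VideoCapture('face.mp4')
    fps_in = cap.get(cv2.CAP_PROP_FPS)
    frames = []
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # OpenCV reads frames as BGR; convert to the RGB layout start() expects
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    cap.release()

    frames = np.asarray(frames, dtype=np.float64)
    n, h, w, _ = frames.shape

    # Magnify color variations in the 0.83-1.0 Hz band by a factor of 50
    final, bpm = start(frames, alpha=50, low=0.83, high=1.0,
                       chromAttenuation=1.0, fps=fps_in,
                       width=w, height=h)
    print('Estimated heart rate: {:.1f} BPM'.format(bpm))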