update workflow, fix huge memory leak

Yonokid
2025-05-02 15:56:00 -04:00
parent 08394cf97a
commit 6bf264a555
13 changed files with 369 additions and 115 deletions


@@ -119,6 +119,8 @@ class TJAParser:
                 self.offset = float(item.split(':')[1])
             elif 'DEMOSTART' in item:
                 self.demo_start = float(item.split(':')[1])
+            elif 'BGMOVIE' in item:
+                self.bg_movie = self.folder_path / item.split(':')[1].strip()
             elif 'COURSE' in item:
                 # Determine which difficulty we're now processing
                 course = str(item.split(':')[1]).lower().strip()
@@ -149,7 +151,7 @@ class TJAParser:
             # Only process these items if we have a current difficulty
             elif current_diff is not None:
                 if 'LEVEL' in item:
-                    level = int(item.split(':')[1])
+                    level = int(float(item.split(':')[1]))
                     self.course_data[current_diff].append(level)
                 elif 'BALLOON' in item:
                     balloon_data = item.split(':')[1]
@@ -376,6 +378,9 @@ class TJAParser:
                 play_note_list.append(note)
                 self.get_moji(play_note_list, ms_per_measure)
                 index += 1
+            if len(play_note_list) > 3:
+                if isinstance(play_note_list[-2], Drumroll) and play_note_list[-1].type != 8:
+                    raise Exception(play_note_list[-2])
             # https://stackoverflow.com/questions/72899/how-to-sort-a-list-of-dictionaries-by-a-value-of-the-dictionary-in-python
             # Sorting by load_ms is necessary for drawing, as some notes appear on the
             # screen slower regardless of when they reach the judge circle
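Note on the parser changes: the new BGMOVIE header resolves a movie path relative to the chart's folder, the new drumroll check appears to guard against a Drumroll that is never closed by a type-8 end note, and the LEVEL change routes the value through float() first, presumably to tolerate charts that carry fractional difficulty ratings. A minimal sketch of why the LEVEL fix matters, using a hypothetical parse_level helper (not in the commit):

def parse_level(value: str) -> int:
    # int('8.5') raises ValueError; going through float() accepts
    # both '8' and '8.5' and truncates to an integer star rating.
    return int(float(value.strip()))

assert parse_level('8') == 8
assert parse_level('8.5') == 8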


@@ -1,119 +1,125 @@
+import moviepy
 import pyray as ray
-from cv2 import CAP_PROP_FPS, COLOR_BGR2RGB, VideoCapture, cvtColor
 from libs.audio import audio
 from libs.utils import get_current_ms

 class VideoPlayer:
-    def __init__(self, path: str):
+    def __init__(self, path):
         self.is_finished_list = [False, False]
         self.video_path = path
-        self.start_ms = None
-        self.current_frame = None
-        self.last_frame = self.current_frame
-        self.frame_index = 0
-        self.frames = []
-        self.cap = VideoCapture(self.video_path)
-        self.fps = self.cap.get(CAP_PROP_FPS)
-        self.is_finished_list = [False, False, False]  # Added third flag for frame conversion
-        self.all_frames_converted = False
+        self.video = moviepy.VideoFileClip(path)
         audio_path = path[:-4] + '.ogg'
         self.audio = audio.load_music_stream(audio_path)
-    def is_finished(self) -> bool:
-        return all(self.is_finished_list)
+        self.buffer_size = 10  # Number of frames to keep in memory
+        self.frame_buffer = {}  # Dictionary to store frames {timestamp: texture}
+        self.frame_timestamps = [(i * 1000) / self.video.fps for i in range(int(self.video.duration * self.video.fps) + 1)]
+        self.start_ms = None
+        self.frame_index = 0
+        self.current_frame = None
+        self.fps = self.video.fps
+        self.frame_duration = 1000 / self.fps

-    def _convert_frames(self):
-        """Legacy method that converts all frames at once"""
-        if not self.cap.isOpened():
-            raise ValueError("Error: Could not open video file.")
-        frame_count = 0
-        success, frame = self.cap.read()
-        while success:
-            timestamp = (frame_count / self.fps * 1000)
-            frame_rgb = cvtColor(frame, COLOR_BGR2RGB)
-            new_frame = ray.Image(frame_rgb.tobytes(), frame_rgb.shape[1], frame_rgb.shape[0], 1, ray.PixelFormat.PIXELFORMAT_UNCOMPRESSED_R8G8B8)
-            self.frames.append((timestamp, new_frame))
-            success, frame = self.cap.read()
-            frame_count += 1
-        self.cap.release()
-        print(f"Extracted {len(self.frames)} frames.")
-        self.start_ms = get_current_ms()
-        self.all_frames_converted = True
-        self.is_finished_list[2] = True
-    def convert_frames_background(self):
-        """Converts a single frame each time it's called"""
-        if self.all_frames_converted:
-            return
-        if not self.cap.isOpened():
-            self.cap = VideoCapture(self.video_path)
-            if not self.cap.isOpened():
-                raise ValueError("Error: Could not open video file.")
-        # Process one frame
-        success, frame = self.cap.read()
-        if success:
-            timestamp = (len(self.frames) / self.fps * 1000)
-            frame_rgb = cvtColor(frame, COLOR_BGR2RGB)
-            new_frame = ray.Image(frame_rgb.tobytes(), frame_rgb.shape[1], frame_rgb.shape[0], 1, ray.PixelFormat.PIXELFORMAT_UNCOMPRESSED_R8G8B8)
-            self.frames.append((timestamp, new_frame))
-        else:
-            # No more frames to convert
-            self.cap.release()
-            print(f"Extracted {len(self.frames)} frames.")
-            self.all_frames_converted = True
-            self.is_finished_list[2] = True
-    def _check_for_start(self):
-        # Start audio once we have at least one frame
-        if len(self.frames) > 0 and not audio.is_music_stream_playing(self.audio):
-            audio.play_music_stream(self.audio)

     def _audio_manager(self):
         if not audio.is_music_stream_playing(self.audio):
             audio.play_music_stream(self.audio)
         audio.update_music_stream(self.audio)
         time_played = audio.get_music_time_played(self.audio) / audio.get_music_time_length(self.audio)
         ending_lenience = 0.95
         if time_played > ending_lenience:
             self.is_finished_list[1] = True

+    def _load_frame(self, index):
+        """Load a specific frame into the buffer"""
+        if index >= len(self.frame_timestamps) or index < 0:
+            return None
+        timestamp = self.frame_timestamps[index]
+        if timestamp in self.frame_buffer:
+            return self.frame_buffer[timestamp]
+        try:
+            time_sec = timestamp / 1000
+            frame_data = self.video.get_frame(time_sec)
+            image = ray.Image(frame_data, self.video.w, self.video.h, 1, ray.PixelFormat.PIXELFORMAT_UNCOMPRESSED_R8G8B8)
+            texture = ray.load_texture_from_image(image)
+            self.frame_buffer[timestamp] = texture
+            self._manage_buffer()
+            return texture
+        except Exception as e:
+            print(f"Error loading frame at index {index}: {e}")
+            return None
+    def _manage_buffer(self):
+        if len(self.frame_buffer) > self.buffer_size:
+            keep_range = set()
+            half_buffer = self.buffer_size // 2
+            for i in range(max(0, self.frame_index - half_buffer),
+                           min(len(self.frame_timestamps), self.frame_index + half_buffer + 1)):
+                keep_range.add(self.frame_timestamps[i])
+            buffer_timestamps = list(self.frame_buffer.keys())
+            buffer_timestamps.sort()
+            for ts in buffer_timestamps:
+                if ts not in keep_range and len(self.frame_buffer) > self.buffer_size:
+                    texture = self.frame_buffer.pop(ts)
+                    ray.unload_texture(texture)
+    def is_started(self):
+        return self.start_ms is not None
+    def start(self, current_ms):
+        self.start_ms = current_ms
+        for i in range(min(self.buffer_size, len(self.frame_timestamps))):
+            self._load_frame(i)
+    def is_finished(self):
+        return all(self.is_finished_list)
     def set_volume(self, volume):
         audio.set_music_volume(self.audio, volume)
     def update(self):
-        self._check_for_start()
         self._audio_manager()
         # Check if we've reached the end of available frames
-        if self.frame_index == len(self.frames) - 1 and self.all_frames_converted:
+        if self.frame_index >= len(self.frame_timestamps) - 1:
             self.is_finished_list[0] = True
             return
         if self.start_ms is None:
             return
-        # Only proceed if we have frames to display
-        if self.frame_index < len(self.frames):
-            timestamp, frame = self.frames[self.frame_index][0], self.frames[self.frame_index][1]
-            elapsed_time = get_current_ms() - self.start_ms
-            if elapsed_time >= timestamp:
-                self.current_frame = ray.load_texture_from_image(frame)
-                if self.last_frame != self.current_frame and self.last_frame is not None:
-                    ray.unload_texture(self.last_frame)
-                self.frame_index += 1
-                self.last_frame = self.current_frame
+        elapsed_time = get_current_ms() - self.start_ms
+        while (self.frame_index < len(self.frame_timestamps) and
+               elapsed_time >= self.frame_timestamps[self.frame_index]):
+            self.frame_index += 1
+        current_index = max(0, self.frame_index - 1)
+        self.current_frame = self._load_frame(current_index)
+        for i in range(1, 5):
+            if current_index + i < len(self.frame_timestamps):
+                self._load_frame(current_index + i)
     def draw(self):
         if self.current_frame is not None:
             ray.draw_texture(self.current_frame, 0, 0, ray.WHITE)
     def stop(self):
-        if hasattr(self, 'current_frame') and self.current_frame:
-            ray.unload_texture(self.current_frame)
-        if hasattr(self, 'last_frame') and self.last_frame:
-            ray.unload_texture(self.last_frame)
+        for timestamp, texture in self.frame_buffer.items():
+            ray.unload_texture(texture)
+        self.frame_buffer.clear()
         if audio.is_music_stream_playing(self.audio):
             audio.stop_music_stream(self.audio)
-        if hasattr(self, 'cap') and self.cap.isOpened():
-            self.cap.release()
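Note on the memory-leak fix: the old path decoded every frame of the clip into a ray.Image up front and created a fresh texture per displayed frame, which grows without bound on long videos. The new path decodes frames on demand from the moviepy clip, caches them as textures keyed by timestamp, and unloads anything outside a small window around the playhead. A minimal sketch of that buffering idea, decoupled from moviepy/pyray; decode and unload are hypothetical stand-ins for video.get_frame + load_texture_from_image and unload_texture:

class SlidingFrameCache:
    """Keep at most `size` decoded frames, centered on the playhead."""

    def __init__(self, decode, unload, size=10):
        self.decode = decode    # timestamp_ms -> texture (expensive)
        self.unload = unload    # texture -> None (frees GPU memory)
        self.size = size
        self.cache = {}         # {timestamp_ms: texture}

    def get(self, timestamp_ms):
        if timestamp_ms not in self.cache:
            self.cache[timestamp_ms] = self.decode(timestamp_ms)
            self._evict(timestamp_ms)
        return self.cache[timestamp_ms]

    def _evict(self, playhead_ms):
        # Drop the frames farthest from the playhead once over budget,
        # releasing each one so texture memory stays bounded.
        while len(self.cache) > self.size:
            farthest = max(self.cache, key=lambda ts: abs(ts - playhead_ms))
            self.unload(self.cache.pop(farthest))

With a ten-frame budget at 30 fps, resident textures stay near ten regardless of video length, whereas the removed code held every decoded frame in self.frames for the lifetime of the player and never unloaded the texture it created on each update.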