From 9c8a51118e5ca9b3c2eed1034f3724b9ecd768d3 Mon Sep 17 00:00:00 2001 From: Yonokid <37304577+Yonokid@users.noreply.github.com> Date: Tue, 24 Jun 2025 11:48:21 -0400 Subject: [PATCH] improving --- .gitignore | 2 +- PyTaiko.py | 33 ++++- README.md | 1 + config.toml | 10 +- libs/animation.py | 28 ++-- libs/audio.py | 333 ++++++++++++++++++++++++++---------------- libs/video.py | 28 ++-- pyproject.toml | 2 + scenes/game.py | 12 +- scenes/song_select.py | 37 +++-- scenes/title.py | 35 ++--- uv.lock | 46 ++++++ 12 files changed, 373 insertions(+), 194 deletions(-) diff --git a/.gitignore b/.gitignore index ad5006f..d85c758 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,8 @@ Songs2 __pycache__ .venv -.ruff_cache scores.db cache full.csv dev-config.toml +.env diff --git a/PyTaiko.py b/PyTaiko.py index 83361d6..6817175 100644 --- a/PyTaiko.py +++ b/PyTaiko.py @@ -2,16 +2,24 @@ import sqlite3 from pathlib import Path import pyray as ray +import sentry_sdk +from dotenv import dotenv_values from raylib.defines import ( RL_FUNC_ADD, RL_ONE, RL_ONE_MINUS_SRC_ALPHA, RL_SRC_ALPHA, ) +from sentry_sdk import profiler from libs import song_hash from libs.audio import audio -from libs.utils import get_config, global_data, load_all_textures_from_zip +from libs.utils import ( + get_config, + get_current_ms, + global_data, + load_all_textures_from_zip, +) from scenes.entry import EntryScreen from scenes.game import GameScreen from scenes.result import ResultScreen @@ -51,6 +59,7 @@ def create_song_db(): print("Scores database created successfully") def main(): + env_config = dotenv_values(".env") create_song_db() song_hash.song_hashes = song_hash.build_song_hashes() global_data.config = get_config() @@ -69,7 +78,6 @@ def main(): if global_data.config["video"]["vsync"]: ray.set_config_flags(ray.ConfigFlags.FLAG_VSYNC_HINT) ray.set_config_flags(ray.ConfigFlags.FLAG_MSAA_4X_HINT) - ray.hide_cursor() ray.set_trace_log_level(ray.TraceLogLevel.LOG_WARNING) ray.init_window(screen_width, screen_height, "PyTaiko") @@ -79,7 +87,6 @@ def main(): ray.maximize_window() current_screen = Screens.TITLE - _frames_counter = 0 audio.init_audio_device() @@ -104,7 +111,26 @@ def main(): ray.rl_set_blend_factors_separate(RL_SRC_ALPHA, RL_ONE_MINUS_SRC_ALPHA, RL_ONE, RL_ONE_MINUS_SRC_ALPHA, RL_FUNC_ADD, RL_FUNC_ADD) ray.set_exit_key(ray.KeyboardKey.KEY_A) global_data.textures = load_all_textures_from_zip(Path('Graphics/lumendata/intermission.zip')) + prev_ms = get_current_ms() + sentry_sdk.init( + dsn=env_config["SENTRY_URL"], + # Add data like request headers and IP for users, + # see https://docs.sentry.io/platforms/python/data-management/data-collected/ for more info + send_default_pii=True, + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for tracing. + traces_sample_rate=1.0, + # Set profile_session_sample_rate to 1.0 to profile 100% + # of profile sessions. 
+ profile_session_sample_rate=1.0, + ) + if global_data.config['general']['send_diagnostic_data']: + profiler.start_profiler() while not ray.window_should_close(): + current_ms = get_current_ms() + if current_ms >= prev_ms + 100: + print("LAG SPIKE DETECTED") + prev_ms = current_ms ray.begin_texture_mode(target) ray.begin_blend_mode(ray.BlendMode.BLEND_CUSTOM_SEPARATE) @@ -138,6 +164,7 @@ def main(): ray.end_drawing() ray.close_window() audio.close_audio_device() + profiler.stop_profiler() if __name__ == "__main__": main() diff --git a/README.md b/README.md index 9f761e8..62df431 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ https://linear.app/pytaiko ## Known Issues +- Everything - See linear page. ## Run Locally diff --git a/config.toml b/config.toml index d2b1c17..faf474e 100644 --- a/config.toml +++ b/config.toml @@ -5,17 +5,25 @@ visual_offset = 0 autoplay = false sfx = true language = "ja" +send_diagnostic_data = false +hard_judge = 108 [paths] tja_path = ['Songs'] video_path = 'Videos' -[keybinds] +[keys] left_kat = ['D'] left_don = ['F'] right_don = ['J'] right_kat = ['K'] +[gamepad] +left_kat = [10] +left_don = [16] +right_don = [17] +right_kat = [12] + [audio] device_type = "Windows WASAPI" buffer_size = 64 diff --git a/libs/animation.py b/libs/animation.py index 8644bf3..21210be 100644 --- a/libs/animation.py +++ b/libs/animation.py @@ -4,7 +4,7 @@ from libs.utils import get_current_ms class BaseAnimation(): - def __init__(self, duration: float, delay: float = 0.0): + def __init__(self, duration: float, delay: float = 0.0) -> None: """ Initialize a base animation. @@ -57,7 +57,7 @@ class FadeAnimation(BaseAnimation): def __init__(self, duration: float, initial_opacity: float = 1.0, final_opacity: float = 0.0, delay: float = 0.0, ease_in: Optional[str] = None, ease_out: Optional[str] = None, - reverse_delay: Optional[float] = None): + reverse_delay: Optional[float] = None) -> None: super().__init__(duration, delay) self.initial_opacity = initial_opacity self.final_opacity = final_opacity @@ -68,13 +68,13 @@ class FadeAnimation(BaseAnimation): self.reverse_delay = reverse_delay self.reverse_delay_saved = reverse_delay - def restart(self): + def restart(self) -> None: super().restart() self.reverse_delay = self.reverse_delay_saved self.initial_opacity = self.initial_opacity_saved self.final_opacity = self.final_opacity_saved - def update(self, current_time_ms: float): + def update(self, current_time_ms: float) -> None: elapsed_time = current_time_ms - self.start_ms if elapsed_time <= self.delay: @@ -100,7 +100,7 @@ class MoveAnimation(BaseAnimation): def __init__(self, duration: float, total_distance: int = 0, start_position: int = 0, delay: float = 0.0, reverse_delay: Optional[float] = None, - ease_in: Optional[str] = None, ease_out: Optional[str] = None): + ease_in: Optional[str] = None, ease_out: Optional[str] = None) -> None: super().__init__(duration, delay) self.reverse_delay = reverse_delay self.reverse_delay_saved = reverse_delay @@ -111,13 +111,13 @@ class MoveAnimation(BaseAnimation): self.ease_in = ease_in self.ease_out = ease_out - def restart(self): + def restart(self) -> None: super().restart() self.reverse_delay = self.reverse_delay_saved self.total_distance = self.total_distance_saved self.start_position = self.start_position_saved - def update(self, current_time_ms: float): + def update(self, current_time_ms: float) -> None: elapsed_time = current_time_ms - self.start_ms if elapsed_time < self.delay: self.attribute = self.start_position @@ -138,12 
+138,12 @@ class MoveAnimation(BaseAnimation): self.attribute = self.start_position + (self.total_distance * progress) class TextureChangeAnimation(BaseAnimation): - def __init__(self, duration: float, textures: list[tuple[float, float, int]], delay: float = 0.0): + def __init__(self, duration: float, textures: list[tuple[float, float, int]], delay: float = 0.0) -> None: super().__init__(duration) self.textures = textures self.delay = delay - def update(self, current_time_ms: float): + def update(self, current_time_ms: float) -> None: elapsed_time = current_time_ms - self.start_ms - self.delay if elapsed_time <= self.duration: for start, end, index in self.textures: @@ -153,9 +153,9 @@ class TextureChangeAnimation(BaseAnimation): self.is_finished = True class TextStretchAnimation(BaseAnimation): - def __init__(self, duration: float): + def __init__(self, duration: float) -> None: super().__init__(duration) - def update(self, current_time_ms: float): + def update(self, current_time_ms: float) -> None: elapsed_time = current_time_ms - self.start_ms if elapsed_time <= self.duration: self.attribute = 2 + 5 * (elapsed_time // 25) @@ -169,7 +169,7 @@ class TextStretchAnimation(BaseAnimation): class TextureResizeAnimation(BaseAnimation): def __init__(self, duration: float, initial_size: float = 1.0, final_size: float = 0.0, delay: float = 0.0, - reverse_delay: Optional[float] = None): + reverse_delay: Optional[float] = None) -> None: super().__init__(duration, delay) self.initial_size = initial_size self.final_size = final_size @@ -178,14 +178,14 @@ class TextureResizeAnimation(BaseAnimation): self.final_size_saved = final_size self.reverse_delay_saved = reverse_delay - def restart(self): + def restart(self) -> None: super().restart() self.reverse_delay = self.reverse_delay_saved self.initial_size = self.initial_size_saved self.final_size = self.final_size_saved - def update(self, current_time_ms: float): + def update(self, current_time_ms: float) -> None: elapsed_time = current_time_ms - self.start_ms if elapsed_time <= self.delay: diff --git a/libs/audio.py b/libs/audio.py index 7123085..901eed4 100644 --- a/libs/audio.py +++ b/libs/audio.py @@ -91,7 +91,7 @@ def get_average_volume_rms(data): return rms class Sound: - def __init__(self, file_path: Path, data=None, target_sample_rate=44100): + def __init__(self, file_path: Path, data: Optional[ndarray]=None, target_sample_rate: int=44100): self.file_path = file_path self.data = data self.channels = 0 @@ -103,10 +103,10 @@ class Sound: self.pan = 0.5 # 0.0 = left, 0.5 = center, 1.0 = right self.normalize: Optional[float] = None - if file_path: + if file_path.exists(): self.load() - def load(self): + def load(self) -> None: """Load and prepare the sound file data""" data, original_sample_rate = sf.read(str(self.file_path)) @@ -129,33 +129,33 @@ class Sound: self.data = data - def play(self): + def play(self) -> None: self.position = 0 self.is_playing = True self.is_paused = False - def stop(self): + def stop(self) -> None: self.is_playing = False self.is_paused = False self.position = 0 - def pause(self): + def pause(self) -> None: if self.is_playing: self.is_paused = True self.is_playing = False - def resume(self): + def resume(self) -> None: if self.is_paused: self.is_playing = True self.is_paused = False - def normalize_vol(self, rms: float): + def normalize_vol(self, rms: float) -> None: self.normalize = rms if self.data is not None: self.data = None self.load() - def get_frames(self, num_frames): + def get_frames(self, num_frames: int) -> 
Optional[ndarray]: """Get the next num_frames of audio data, applying volume, pitch, and pan""" if self.data is None: return @@ -203,21 +203,22 @@ class Sound: return output class Music: - def __init__(self, file_path: Path, data=None, file_type=None, target_sample_rate=44100): + def __init__(self, file_path: Path, data: Optional[ndarray]=None, target_sample_rate: int=44100, sample_rate: int =44100, preview: Optional[float]=None, normalize: Optional[float]=None): self.file_path = file_path - self.file_type = file_type self.data = data self.target_sample_rate = target_sample_rate - self.sample_rate = target_sample_rate + self.sample_rate = sample_rate self.channels = 0 - self.position = 0 # In frames + self.position = 0 # In frames (original sample rate) self.is_playing = False self.is_paused = False self.volume = 0.75 self.pan = 0.5 # Center self.total_frames = 0 self.valid = False - self.normalize = None + self.normalize = normalize + self.preview = preview # Preview start time in seconds + self.is_preview_mode = preview is not None self.file_buffer_size = int(target_sample_rate * 5) # 5 seconds buffer self.buffer = None @@ -225,25 +226,97 @@ class Music: # Thread-safe updates self.lock = Lock() + self.sound_file = None + if self.file_path.exists(): + self.load_from_file() + else: + self.load_from_memory() - self.load_from_file() + def load_from_memory(self) -> None: + """Load music from in-memory numpy array""" + try: + if self.data is None: + raise Exception("No data provided for memory loading") - def load_from_file(self): + # Convert to float32 if needed + if self.data.dtype != float32: + self.data = self.data.astype(float32) + + if self.sample_rate != self.target_sample_rate: + print(f"Resampling {self.file_path} from {self.sample_rate}Hz to {self.target_sample_rate}Hz") + self.data = resample(self.data, self.sample_rate, self.target_sample_rate) + + if self.normalize is not None: + current_rms = get_average_volume_rms(self.data) + if current_rms > 0: # Avoid division by zero + target_rms = self.normalize + rms_scale_factor = target_rms / current_rms + self.data *= rms_scale_factor + + # Determine channels and total frames + if self.data.ndim == 1: + self.channels = 1 + self.total_frames = len(self.data) + # Reshape for consistency + self.data = self.data.reshape(-1, 1) + else: + self.channels = self.data.shape[1] + self.total_frames = self.data.shape[0] + + self.sample_width = 4 # float32 + self._fill_buffer() + self.valid = True + print(f"Music loaded from memory: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames") + + except Exception as e: + print(f"Error loading music from memory: {e}") + self.valid = False + + def load_from_file(self) -> None: """Load music from file""" try: - # soundfile handles OGG, WAV, FLAC, etc. 
natively self.sound_file = sf.SoundFile(str(self.file_path)) # Get file properties self.channels = self.sound_file.channels self.sample_width = 2 if self.sound_file.subtype in ['PCM_16', 'VORBIS'] else 4 # Most common self.sample_rate = self.sound_file.samplerate - self.total_frames = len(self.sound_file) + original_total_frames = self.sound_file.frames - # Initialize buffer with some initial data - self._fill_buffer() + if self.is_preview_mode: + # Calculate preview start and end frames + preview_start_frame = int(self.preview * self.sample_rate) + preview_duration_frames = original_total_frames - preview_start_frame + preview_end_frame = min(preview_start_frame + preview_duration_frames, original_total_frames) + + # Ensure preview start is within bounds + if preview_start_frame >= original_total_frames: + preview_start_frame = max(0, original_total_frames - preview_duration_frames) + preview_end_frame = original_total_frames + + # Seek to preview start position + self.sound_file.seek(preview_start_frame) + + # Read only the preview segment + frames_to_read = preview_end_frame - preview_start_frame + self.data = self.sound_file.read(frames_to_read) + + # Update total frames to reflect the preview segment + self.total_frames = len(self.data) if self.data.ndim == 1 else self.data.shape[0] + + print(f"Preview mode: Loading {frames_to_read} frames ({frames_to_read/self.sample_rate:.2f}s) starting at {self.preview:.2f}s") + else: + # Load entire file + self.data = self.sound_file.read() + self.total_frames = original_total_frames + + self.load_from_memory() self.valid = True - print(f"Music loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames") + + if self.is_preview_mode: + print(f"Music preview loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames ({self.get_time_length():.2f}s)") + else: + print(f"Music loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames") except Exception as e: print(f"Error loading music file: {e}") @@ -252,50 +325,31 @@ class Music: self.sound_file = None self.valid = False - def _fill_buffer(self): - """Fill the streaming buffer from file""" - if not self.sound_file: - return False - - # Read a chunk of frames from file + def _fill_buffer(self) -> bool: + """Fill buffer from in-memory data""" try: - frames_to_read = min(self.file_buffer_size, self.total_frames - self.position) - if frames_to_read <= 0: + if self.data is None: return False - # Read data directly as numpy array (float64 by default) - data = self.sound_file.read(frames_to_read) + start_frame = self.position + self.buffer_position + end_frame = min(start_frame + self.file_buffer_size, self.total_frames) - # Convert to float32 if needed (soundfile returns float64 by default) - if data.dtype != float32: - data = data.astype(float32) + if start_frame >= self.total_frames: + return False - # Ensure proper shape for mono audio - if self.channels == 1 and data.ndim == 1: - data = data.reshape(-1, 1) - elif self.channels == 1 and data.ndim == 2: - data = data[:, 0].reshape(-1, 1) # Take first channel if stereo file but expecting mono + # Extract the chunk of data + data_chunk = self.data[start_frame:end_frame] - # Resample if needed - if self.sample_rate != self.target_sample_rate: - print(f"Resampling {self.file_path} from {self.sample_rate}Hz to {self.target_sample_rate}Hz") - data = resample(data, self.sample_rate, self.target_sample_rate) - - if self.normalize is not None: - current_rms = get_average_volume_rms(data) - if 
current_rms > 0: # Avoid division by zero - target_rms = self.normalize - rms_scale_factor = target_rms / current_rms - data *= rms_scale_factor - - self.buffer = data + self.buffer = data_chunk + self.position += self.buffer_position self.buffer_position = 0 return True + except Exception as e: - print(f"Error filling buffer: {e}") + print(f"Error filling buffer from memory: {e}") return False - def update(self): + def update(self) -> None: """Update music stream buffers""" if not self.is_playing or self.is_paused: return @@ -303,25 +357,27 @@ class Music: with self.lock: # Check if we need to refill the buffer if self.buffer is None: - raise Exception("buffer is None") - if self.sound_file and self.buffer_position >= len(self.buffer): - if not self._fill_buffer(): - self.is_playing = False + return + if self.buffer_position >= len(self.buffer): + self.is_playing = self._fill_buffer() - def play(self): + def play(self) -> None: """Start playing the music stream""" with self.lock: # Reset position if at the end - if self.sound_file and self.position >= self.total_frames: - self.sound_file.seek(0) # Reset to beginning + if self.position >= self.total_frames: self.position = 0 self.buffer_position = 0 - self._fill_buffer() + if self.sound_file: + # For preview mode, seek to the preview start position + seek_pos = int(self.preview * self.sample_rate) if self.is_preview_mode else 0 + self.sound_file.seek(seek_pos) + self._fill_buffer() self.is_playing = True self.is_paused = False - def stop(self): + def stop(self) -> None: """Stop playing the music stream""" with self.lock: self.is_playing = False @@ -329,49 +385,62 @@ class Music: self.position = 0 self.buffer_position = 0 if self.sound_file: - self.sound_file.seek(0) # Reset to beginning + # For preview mode, seek to the preview start position + seek_pos = int(self.preview * self.sample_rate) if self.is_preview_mode else 0 + self.sound_file.seek(seek_pos) self._fill_buffer() - def pause(self): + def pause(self) -> None: """Pause the music playback""" with self.lock: if self.is_playing: self.is_paused = True self.is_playing = False - def resume(self): + def resume(self) -> None: """Resume the music playback""" with self.lock: if self.is_paused: self.is_playing = True self.is_paused = False - def seek(self, position_seconds): - """Seek to a specific position in seconds""" + def seek(self, position_seconds) -> None: + """Seek to a specific position in seconds (relative to preview start if in preview mode)""" with self.lock: # Convert seconds to frames - frame_position = int(position_seconds * self.sample_rate) + frame_position = int(position_seconds * self.target_sample_rate) # Clamp position to valid range frame_position = max(0, min(frame_position, self.total_frames - 1)) # Update file position if streaming from file if self.sound_file: - self.sound_file.seek(frame_position) - self._fill_buffer() + # For preview mode, add the preview offset + actual_file_position = frame_position + if self.is_preview_mode: + actual_file_position += int(self.preview * self.sample_rate) + self.sound_file.seek(actual_file_position) self.position = frame_position self.buffer_position = 0 + self._fill_buffer() - def get_time_length(self): - """Get the total length of the music in seconds""" - return self.total_frames / self.sample_rate + def get_time_length(self) -> float: + """Get the total length of the music in seconds (preview length if in preview mode)""" + return self.total_frames / self.target_sample_rate - def get_time_played(self): - """Get the current 
playback position in seconds""" - return (self.position + self.buffer_position) / self.sample_rate + def get_time_played(self) -> float: + """Get the current playback position in seconds (relative to preview start if in preview mode)""" + return (self.position + self.buffer_position) / self.target_sample_rate - def get_frames(self, num_frames): + def get_actual_time_played(self) -> float: + """Get the actual playback position in the original file (including preview offset)""" + base_time = (self.position + self.buffer_position) / self.target_sample_rate + if self.is_preview_mode: + return base_time + self.preview + return base_time + + def get_frames(self, num_frames) -> ndarray: """Get the next num_frames of music data, applying volume, pitch, and pan""" if not self.is_playing: # Return silence if not playing @@ -382,11 +451,12 @@ class Music: with self.lock: if self.buffer is None: - raise Exception("buffer is None") + return zeros(num_frames, dtype=float32) + # Check if we need more data if self.buffer_position >= len(self.buffer): - # If no more data available and streaming from file - if self.sound_file and not self._fill_buffer(): + # Try to fill buffer again + if not self._fill_buffer(): self.is_playing = False if self.channels == 1: return zeros(num_frames, dtype=float32) @@ -409,7 +479,6 @@ class Music: # Update buffer position self.buffer_position += frames_to_get - self.position += frames_to_get # Apply volume output *= self.volume @@ -425,7 +494,7 @@ class Music: return output - def __del__(self): + def __del__(self) -> None: """Cleanup when the music object is deleted""" if hasattr(self, 'sound_file') and self.sound_file: try: @@ -434,7 +503,7 @@ class Music: raise Exception("unable to close music stream") class AudioEngine: - def __init__(self, type: str): + def __init__(self, type: str) -> None: self.target_sample_rate = 44100 self.buffer_size = 10 self.sounds: dict[str, Sound] = {} @@ -453,20 +522,20 @@ class AudioEngine: self.update_thread_running = False self.type = type - def _initialize_asio(self): - """Set up ASIO device""" - # Find ASIO API and use its default device + def _initialize_api(self) -> bool: + """Set up API device""" + # Find API and use its default device hostapis = sd.query_hostapis() - asio_api_index = -1 + api_index = -1 for i, api in enumerate(hostapis): if isinstance(api, dict) and 'name' in api and api['name'] == self.type: - asio_api_index = i + api_index = i break if isinstance(hostapis, tuple): - asio_api = hostapis[asio_api_index] - if isinstance(asio_api, dict) and 'default_output_device' in asio_api: - default_asio_device = asio_api['default_output_device'] + api = hostapis[api_index] + if isinstance(api, dict) and 'default_output_device' in api: + default_asio_device = api['default_output_device'] else: raise Exception("Warning: 'default_output_device' key not found in ASIO API info.") if default_asio_device >= 0: @@ -500,7 +569,7 @@ class AudioEngine: self.output_channels = min(2, device_info['max_output_channels']) return True - def _audio_callback(self, outdata, frames, time, status): + def _audio_callback(self, outdata: ndarray, frames: int, time: int, status: str) -> None: """Callback function for the sounddevice stream""" if status: print(f"Status: {status}") @@ -589,31 +658,36 @@ class AudioEngine: outdata[:] = output - def _start_update_thread(self): + def _start_update_thread(self) -> None: """Start a thread to update music streams""" self.update_thread_running = True self.update_thread = Thread(target=self._update_music_thread) 
self.update_thread.daemon = True self.update_thread.start() - def _update_music_thread(self): + def _update_music_thread(self) -> None: """Thread function to update all music streams""" while self.update_thread_running: - # Update all active music streams - for music_name, music in self.music_streams.items(): - if music.is_playing: - music.update() + active_streams = [music for music in self.music_streams.values() if music.is_playing] - # Sleep to not consume too much CPU - time.sleep(0.1) + if not active_streams: + # Sleep longer when no streams are active + time.sleep(0.5) + continue + + for music in active_streams: + music.update() + + # Adjust sleep based on number of active streams + sleep_time = max(0.05, 0.1 / len(active_streams)) + time.sleep(sleep_time) def init_audio_device(self): if self.audio_device_ready: return True try: - # Try to use ASIO if available - self._initialize_asio() + self._initialize_api() # Set up and start the stream extra_settings = None @@ -630,7 +704,6 @@ class AudioEngine: self.stream.start() self.running = True self.audio_device_ready = True - print(self.stream.samplerate, self.stream.blocksize, self.stream.latency*1000) # Start update thread for music streams self._start_update_thread() @@ -642,7 +715,7 @@ class AudioEngine: self.audio_device_ready = False return False - def close_audio_device(self): + def close_audio_device(self) -> None: self.update_thread_running = False if self.update_thread: self.update_thread.join(timeout=1.0) @@ -673,27 +746,27 @@ class AudioEngine: print(f"Loaded sound from {fileName} as {sound_id}") return sound_id - def play_sound(self, sound): + def play_sound(self, sound) -> None: if sound in self.sounds: self.sound_queue.put(sound) - def stop_sound(self, sound): + def stop_sound(self, sound) -> None: if sound in self.sounds: self.sounds[sound].stop() - def pause_sound(self, sound: str): + def pause_sound(self, sound: str) -> None: if sound in self.sounds: self.sounds[sound].pause() - def resume_sound(self, sound: str): + def resume_sound(self, sound: str) -> None: if sound in self.sounds: self.sounds[sound].resume() - def unload_sound(self, sound: str): + def unload_sound(self, sound: str) -> None: if sound in self.sounds: del self.sounds[sound] - def normalize_sound(self, sound: str, rms: float): + def normalize_sound(self, sound: str, rms: float) -> None: if sound in self.sounds: self.sounds[sound].normalize_vol(rms) @@ -705,31 +778,41 @@ class AudioEngine: return self.sounds[sound].is_playing return False - def set_sound_volume(self, sound: str, volume: float): + def set_sound_volume(self, sound: str, volume: float) -> None: if sound in self.sounds: self.sounds[sound].volume = max(0.0, min(1.0, volume)) - def set_sound_pan(self, sound: str, pan: float): + def set_sound_pan(self, sound: str, pan: float) -> None: if sound in self.sounds: self.sounds[sound].pan = max(0.0, min(1.0, pan)) - def load_music_stream(self, fileName: Path) -> str: - music = Music(file_path=fileName, target_sample_rate=self.target_sample_rate) + def load_music_stream(self, fileName: Path, preview: float=0, normalize: Optional[float] = None) -> str: + music = Music(file_path=fileName, target_sample_rate=self.target_sample_rate, preview=preview, normalize=normalize) music_id = f"music_{len(self.music_streams)}" self.music_streams[music_id] = music print(f"Loaded music stream from {fileName} as {music_id}") return music_id + def load_music_stream_from_data(self, audio_array: ndarray, sample_rate: int=44100) -> str: + """Load music stream from numpy 
array data""" + # Create a dummy path since Music class expects one + dummy_path = Path("memory_audio") + music = Music(file_path=dummy_path, data=audio_array, target_sample_rate=self.target_sample_rate, sample_rate=sample_rate) + music_id = f"music_{len(self.music_streams)}" + self.music_streams[music_id] = music + print(f"Loaded music stream from memory data as {music_id}") + return music_id + def is_music_valid(self, music: str) -> bool: if music in self.music_streams: return self.music_streams[music].valid return False - def unload_music_stream(self, music: str): + def unload_music_stream(self, music: str) -> None: if music in self.music_streams: del self.music_streams[music] - def play_music_stream(self, music: str): + def play_music_stream(self, music: str) -> None: if music in self.music_streams: self.music_queue.put((music, 'play')) @@ -738,35 +821,35 @@ class AudioEngine: return self.music_streams[music].is_playing return False - def update_music_stream(self, music: str): + def update_music_stream(self, music: str) -> None: if music in self.music_streams: self.music_streams[music].update() - def stop_music_stream(self, music: str): + def stop_music_stream(self, music: str) -> None: if music in self.music_streams: self.music_queue.put((music, 'stop')) - def pause_music_stream(self, music: str): + def pause_music_stream(self, music: str) -> None: if music in self.music_streams: self.music_queue.put((music, 'pause')) - def resume_music_stream(self, music: str): + def resume_music_stream(self, music: str) -> None: if music in self.music_streams: self.music_queue.put((music, 'resume')) - def seek_music_stream(self, music: str, position: float): + def seek_music_stream(self, music: str, position: float) -> None: if music in self.music_streams: self.music_queue.put((music, 'seek', position)) - def set_music_volume(self, music: str, volume: float): + def set_music_volume(self, music: str, volume: float) -> None: if music in self.music_streams: self.music_streams[music].volume = max(0.0, min(1.0, volume)) - def set_music_pan(self, music: str, pan: float): + def set_music_pan(self, music: str, pan: float) -> None: if music in self.music_streams: self.music_streams[music].pan = max(0.0, min(1.0, pan)) - def normalize_music_stream(self, music: str, rms: float): + def normalize_music_stream(self, music: str, rms: float) -> None: if music in self.music_streams: self.music_streams[music].normalize = rms diff --git a/libs/video.py b/libs/video.py index 8fb9da8..0c1df54 100644 --- a/libs/video.py +++ b/libs/video.py @@ -9,14 +9,11 @@ from libs.utils import get_current_ms class VideoPlayer: def __init__(self, path: Path): - """Initialize a video player instance. Audio must have the same name and an ogg extension. 
- Todo: extract audio from video directly - """ + """Initialize a video player instance""" self.is_finished_list = [False, False] - self.video_path = path self.video = VideoFileClip(path) - audio_path = path.with_suffix('.ogg') - self.audio = audio.load_music_stream(audio_path) + if self.video.audio is not None: + self.audio = audio.load_music_stream_from_data(self.video.audio.to_soundarray(), sample_rate=self.video.audio.fps) self.buffer_size = 10 # Number of frames to keep in memory self.frame_buffer = {} # Dictionary to store frames {timestamp: texture} @@ -27,15 +24,18 @@ class VideoPlayer: self.current_frame = None self.fps = self.video.fps self.frame_duration = 1000 / self.fps + self.audio_played = False def _audio_manager(self): - if not audio.is_music_stream_playing(self.audio): + if self.audio is None: + return + if self.is_finished_list[1]: + return + if not audio.is_music_stream_playing(self.audio) and not self.audio_played: audio.play_music_stream(self.audio) + self.audio_played = True audio.update_music_stream(self.audio) - time_played = audio.get_music_time_played(self.audio) / audio.get_music_time_length(self.audio) - ending_lenience = 0.95 - if time_played > ending_lenience: - self.is_finished_list[1] = True + self.is_finished_list[1] = not audio.is_music_stream_playing(self.audio) def _load_frame(self, index: int): """Load a specific frame into the buffer""" @@ -102,7 +102,7 @@ class VideoPlayer: """Updates video playback, advancing frames and audio""" self._audio_manager() - if self.frame_index >= len(self.frame_timestamps) - 1: + if self.frame_index >= len(self.frame_timestamps): self.is_finished_list[0] = True return @@ -130,11 +130,11 @@ class VideoPlayer: def stop(self): """Stops the video, audio, and clears its buffer""" + self.video.close() for timestamp, texture in self.frame_buffer.items(): ray.unload_texture(texture) self.frame_buffer.clear() if audio.is_music_stream_playing(self.audio): audio.stop_music_stream(self.audio) - - self.video.close() + audio.unload_music_stream(self.audio) diff --git a/pyproject.toml b/pyproject.toml index 4ec9169..0055108 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,4 +11,6 @@ dependencies = [ "raylib-sdl>=5.5.0.2", "soundfile>=0.13.1", "tomlkit>=0.13.3", + "sentry-sdk>=2.30.0", + "dotenv>=0.9.9", ] diff --git a/scenes/game.py b/scenes/game.py index ac2af1e..d28d76b 100644 --- a/scenes/game.py +++ b/scenes/game.py @@ -728,6 +728,11 @@ class Judgement: index = int(self.texture_animation.attribute) hit_color = ray.fade(ray.WHITE, self.fade_animation_1.attribute) color = ray.fade(ray.WHITE, self.fade_animation_2.attribute) + if self.curr_hit_ms is not None: + if float(self.curr_hit_ms) < -(global_data.config['general']['hard_judge']): + color = ray.fade(ray.BLUE, self.fade_animation_2.attribute) + elif float(self.curr_hit_ms) > (global_data.config['general']['hard_judge']): + color = ray.fade(ray.RED, self.fade_animation_2.attribute) if self.type == 'GOOD': if self.big: ray.draw_texture(textures_1[21], 342, 184, color) @@ -736,9 +741,6 @@ class Judgement: ray.draw_texture(textures_1[19], 342, 184, color) ray.draw_texture(textures_2[index+5], 304, 143, hit_color) ray.draw_texture(textures_2[9], 370, int(y), color) - if self.curr_hit_ms is not None: - pass - #ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.YELLOW, self.fade_animation_1.attribute)) elif self.type == 'OK': if self.big: ray.draw_texture(textures_1[20], 342, 184, color) @@ -747,12 +749,8 @@ class Judgement: ray.draw_texture(textures_1[18], 342, 
184, color) ray.draw_texture(textures_2[index], 304, 143, hit_color) ray.draw_texture(textures_2[4], 370, int(y), color) - if self.curr_hit_ms is not None: - ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.WHITE, self.fade_animation_1.attribute)) elif self.type == 'BAD': ray.draw_texture(textures_2[10], 370, int(y), color) - if self.curr_hit_ms is not None: - ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.BLUE, self.fade_animation_1.attribute)) class LaneHitEffect: def __init__(self, type: str): diff --git a/scenes/song_select.py b/scenes/song_select.py index 5045615..7607eb9 100644 --- a/scenes/song_select.py +++ b/scenes/song_select.py @@ -223,9 +223,7 @@ class SongSelectScreen: if not isinstance(song, Directory) and song.box.is_open: if self.demo_song is None and get_current_ms() >= song.box.wait + (83.33*3): song.box.get_scores() - self.demo_song = audio.load_music_stream(song.tja.metadata.wave) - audio.normalize_music_stream(self.demo_song, 0.1935) - audio.seek_music_stream(self.demo_song, song.tja.metadata.demostart) + self.demo_song = audio.load_music_stream(song.tja.metadata.wave, preview=song.tja.metadata.demostart, normalize=0.1935) audio.play_music_stream(self.demo_song) audio.stop_sound(self.sound_bgm) if song.box.is_open: @@ -450,7 +448,7 @@ class SongBox: direction = -1 if abs(self.target_position - self.position) > 250: direction *= -1 - self.move = Animation.create_move(66.67, start_position=0, total_distance=100 * direction) + self.move = Animation.create_move(83.3, start_position=0, total_distance=100 * direction, ease_out='cubic') if self.is_open or self.target_position == SongSelectScreen.BOX_CENTER + 150: self.move.total_distance = 250 * direction self.start_position = self.position @@ -929,6 +927,7 @@ class FileNavigator: self.items: list[Directory | SongFile] = [] self.selected_index = 0 self.history = [] + self.box_open = False # Generate all objects upfront self._generate_all_objects() @@ -1257,16 +1256,23 @@ class FileNavigator: self.selected_index = 0 if self.items else -1 self.calculate_box_positions() - def load_current_directory(self): + def load_current_directory(self, selected_item=None): """Load pre-generated items for the current directory""" - self.items = [] + has_children = any(item.is_dir() and (item / "box.def").exists() for item in self.current_dir.iterdir()) + if has_children: + self.items = [] + else: + if selected_item in self.items: + self.items.remove(selected_item) + self.box_open = True dir_key = str(self.current_dir) # Add back/to_root navigation items if self.current_dir != self.current_root_dir: back_dir = Directory(self.current_dir.parent, "", 552, back=True) - self.items.append(back_dir) + if has_children: + self.items.append(back_dir) elif not self.in_root_selection: to_root_dir = Directory(Path(), "", 552, to_root=True) self.items.append(to_root_dir) @@ -1275,7 +1281,6 @@ class FileNavigator: if dir_key in self.directory_contents: content_items = self.directory_contents[dir_key] - # Handle the every-10-songs navigation logic song_count = 0 for item in content_items: if isinstance(item, SongFile): @@ -1283,13 +1288,20 @@ class FileNavigator: # Add navigation item if self.current_dir != self.current_root_dir: back_dir = Directory(self.current_dir.parent, "", 552, back=True) - self.items.append(back_dir) + if not has_children: + self.items.insert(self.selected_index+song_count, back_dir) + else: + self.items.append(back_dir) elif not self.in_root_selection: to_root_dir = Directory(Path(), "", 552, 
to_root=True) - self.items.append(to_root_dir) + if has_children: + self.items.append(to_root_dir) song_count += 1 - self.items.append(item) + if not has_children: + self.items.insert(self.selected_index+song_count, item) + else: + self.items.append(item) # OPTIMIZED: Use cached crowns (calculated on-demand) for item in self.items: @@ -1361,7 +1373,7 @@ class FileNavigator: self.current_root_dir = selected_item.path self.in_root_selection = False self.selected_index = 0 - self.load_current_directory() + self.load_current_directory(selected_item=selected_item) elif isinstance(selected_item, SongFile): return selected_item @@ -1378,6 +1390,7 @@ class FileNavigator: self.load_root_directories() else: self.load_current_directory() + self.box_open = False def get_current_item(self): """Get the currently selected item""" diff --git a/scenes/title.py b/scenes/title.py index 8a96b2b..f0985ee 100644 --- a/scenes/title.py +++ b/scenes/title.py @@ -33,9 +33,6 @@ class TitleScreen: self.screen_init = False self.fade_out = None - def get_videos(self): - return self.op_video, self.attract_video - def load_sounds(self): sounds_dir = Path("Sounds") title_dir = sounds_dir / "title" @@ -55,13 +52,15 @@ class TitleScreen: self.screen_init = True self.load_textures() self.state = State.OP_VIDEO - self.op_video = VideoPlayer(random.choice(self.op_video_list)) - self.attract_video = VideoPlayer(random.choice(self.attract_video_list)) + self.op_video = None + self.attract_video = None self.warning_board = None def on_screen_end(self) -> str: - self.op_video.stop() - self.attract_video.stop() + if self.op_video is not None: + self.op_video.stop() + if self.attract_video is not None: + self.attract_video.stop() for sound in self.sounds: if audio.is_sound_playing(sound): audio.stop_sound(sound) @@ -73,28 +72,30 @@ class TitleScreen: def scene_manager(self): if self.state == State.OP_VIDEO: - if not self.op_video.is_started(): + if self.op_video is None: + self.op_video = VideoPlayer(random.choice(self.op_video_list)) self.op_video.start(get_current_ms()) self.op_video.update() if self.op_video.is_finished(): self.op_video.stop() - self.op_video = VideoPlayer(random.choice(self.op_video_list)) - + self.op_video = None self.state = State.WARNING + elif self.state == State.WARNING: + if self.warning_board is None: self.warning_board = WarningScreen(get_current_ms(), self) - elif self.state == State.WARNING and self.warning_board is not None: self.warning_board.update(get_current_ms(), self) if self.warning_board.is_finished: self.state = State.ATTRACT_VIDEO - self.attract_video.start(get_current_ms()) + self.warning_board = None elif self.state == State.ATTRACT_VIDEO: + if self.attract_video is None: + self.attract_video = VideoPlayer(random.choice(self.attract_video_list)) + self.attract_video.start(get_current_ms()) self.attract_video.update() if self.attract_video.is_finished(): self.attract_video.stop() - self.attract_video = VideoPlayer(random.choice(self.attract_video_list)) - + self.attract_video = None self.state = State.OP_VIDEO - self.op_video.start(get_current_ms()) def update(self): @@ -111,14 +112,14 @@ class TitleScreen: audio.play_sound(self.sound_don) def draw(self): - if self.state == State.OP_VIDEO: + if self.state == State.OP_VIDEO and self.op_video is not None: self.op_video.draw() elif self.state == State.WARNING and self.warning_board is not None: bg_source = ray.Rectangle(0, 0, self.textures['keikoku'][0].width, self.textures['keikoku'][0].height) bg_dest = ray.Rectangle(0, 0, self.width, 
self.height) ray.draw_texture_pro(self.textures['keikoku'][0], bg_source, bg_dest, ray.Vector2(0,0), 0, ray.WHITE) self.warning_board.draw(self) - elif self.state == State.ATTRACT_VIDEO: + elif self.state == State.ATTRACT_VIDEO and self.attract_video is not None: self.attract_video.draw() if self.fade_out is not None: diff --git a/uv.lock b/uv.lock index 2da1ea5..1591bde 100644 --- a/uv.lock +++ b/uv.lock @@ -2,6 +2,15 @@ version = 1 revision = 2 requires-python = ">=3.11" +[[package]] +name = "certifi" +version = "2025.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, +] + [[package]] name = "cffi" version = "1.17.1" @@ -65,6 +74,17 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, ] +[[package]] +name = "dotenv" +version = "0.9.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dotenv" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/b7/545d2c10c1fc15e48653c91efde329a790f2eecfbbf2bd16003b5db2bab0/dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9", size = 1892, upload-time = "2025-02-19T22:15:01.647Z" }, +] + [[package]] name = "imageio" version = "2.37.0" @@ -253,9 +273,11 @@ name = "pytaiko" version = "0.1.0" source = { virtual = "." 
} dependencies = [ + { name = "dotenv" }, { name = "moviepy" }, { name = "numpy" }, { name = "raylib-sdl" }, + { name = "sentry-sdk" }, { name = "sounddevice" }, { name = "soundfile" }, { name = "tomlkit" }, @@ -263,9 +285,11 @@ dependencies = [ [package.metadata] requires-dist = [ + { name = "dotenv", specifier = ">=0.9.9" }, { name = "moviepy", specifier = ">=2.1.2" }, { name = "numpy", specifier = ">=2.2.5" }, { name = "raylib-sdl", specifier = ">=5.5.0.2" }, + { name = "sentry-sdk", specifier = ">=2.30.0" }, { name = "sounddevice", specifier = ">=0.5.1" }, { name = "soundfile", specifier = ">=0.13.1" }, { name = "tomlkit", specifier = ">=0.13.3" }, @@ -307,6 +331,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b0/4f/8ba71611c74d6e3ff5e95d2935c1f5f98fc61183ebf70d4dfb09547a5767/raylib_sdl-5.5.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fd3841ab8d8d6ca24e4fbffc9514c58c0cf5bd29e3f2406006eba872416325b3", size = 1499920, upload-time = "2025-02-12T04:22:16.996Z" }, ] +[[package]] +name = "sentry-sdk" +version = "2.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/4c/af31e0201b48469786ddeb1bf6fd3dfa3a291cc613a0fe6a60163a7535f9/sentry_sdk-2.30.0.tar.gz", hash = "sha256:436369b02afef7430efb10300a344fb61a11fe6db41c2b11f41ee037d2dd7f45", size = 326767, upload-time = "2025-06-12T10:34:34.733Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/99/31ac6faaae33ea698086692638f58d14f121162a8db0039e68e94135e7f1/sentry_sdk-2.30.0-py2.py3-none-any.whl", hash = "sha256:59391db1550662f746ea09b483806a631c3ae38d6340804a1a4c0605044f6877", size = 343149, upload-time = "2025-06-12T10:34:32.896Z" }, +] + [[package]] name = "sounddevice" version = "0.5.2" @@ -361,3 +398,12 @@ sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e wheels = [ { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, ] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +]
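Usage note (illustrative, not part of the patch): a minimal sketch of how the new `preview=` / `normalize=` parameters on `AudioEngine.load_music_stream` and the new `load_music_stream_from_data` helper introduced above are exercised, mirroring the calls made in scenes/song_select.py and libs/video.py. The file path and the silent sample buffer below are placeholders for illustration only; `audio` is assumed to be the module-level AudioEngine instance imported as `from libs.audio import audio` throughout the patch.

    from pathlib import Path

    import numpy as np

    from libs.audio import audio  # module-level AudioEngine instance used by the scenes

    audio.init_audio_device()

    # Song-select style demo playback: start at the TJA demostart offset and
    # normalize to the same RMS target song_select.py passes (0.1935).
    demo_id = audio.load_music_stream(
        Path("Songs/example/song.ogg"),  # placeholder path
        preview=30.0,
        normalize=0.1935,
    )
    audio.play_music_stream(demo_id)

    # Video-player style in-memory stream: a float32 stereo buffer at 44.1 kHz,
    # as VideoPlayer now builds from clip.audio.to_soundarray().
    samples = np.zeros((44100, 2), dtype=np.float32)  # one second of silence as dummy data
    clip_id = audio.load_music_stream_from_data(samples, sample_rate=44100)
    audio.play_music_stream(clip_id)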