improving

Yonokid
2025-06-24 11:48:21 -04:00
parent c1081d255b
commit 9c8a51118e
12 changed files with 373 additions and 194 deletions

.gitignore (vendored, 2 lines changed)
View File

@@ -1,8 +1,8 @@
Songs2 Songs2
__pycache__ __pycache__
.venv .venv
.ruff_cache
scores.db scores.db
cache cache
full.csv full.csv
dev-config.toml dev-config.toml
.env

View File

@@ -2,16 +2,24 @@ import sqlite3
from pathlib import Path from pathlib import Path
import pyray as ray import pyray as ray
import sentry_sdk
from dotenv import dotenv_values
from raylib.defines import ( from raylib.defines import (
RL_FUNC_ADD, RL_FUNC_ADD,
RL_ONE, RL_ONE,
RL_ONE_MINUS_SRC_ALPHA, RL_ONE_MINUS_SRC_ALPHA,
RL_SRC_ALPHA, RL_SRC_ALPHA,
) )
from sentry_sdk import profiler
from libs import song_hash from libs import song_hash
from libs.audio import audio from libs.audio import audio
from libs.utils import get_config, global_data, load_all_textures_from_zip from libs.utils import (
get_config,
get_current_ms,
global_data,
load_all_textures_from_zip,
)
from scenes.entry import EntryScreen from scenes.entry import EntryScreen
from scenes.game import GameScreen from scenes.game import GameScreen
from scenes.result import ResultScreen from scenes.result import ResultScreen
@@ -51,6 +59,7 @@ def create_song_db():
print("Scores database created successfully") print("Scores database created successfully")
def main(): def main():
env_config = dotenv_values(".env")
create_song_db() create_song_db()
song_hash.song_hashes = song_hash.build_song_hashes() song_hash.song_hashes = song_hash.build_song_hashes()
global_data.config = get_config() global_data.config = get_config()
@@ -69,7 +78,6 @@ def main():
if global_data.config["video"]["vsync"]: if global_data.config["video"]["vsync"]:
ray.set_config_flags(ray.ConfigFlags.FLAG_VSYNC_HINT) ray.set_config_flags(ray.ConfigFlags.FLAG_VSYNC_HINT)
ray.set_config_flags(ray.ConfigFlags.FLAG_MSAA_4X_HINT) ray.set_config_flags(ray.ConfigFlags.FLAG_MSAA_4X_HINT)
ray.hide_cursor()
ray.set_trace_log_level(ray.TraceLogLevel.LOG_WARNING) ray.set_trace_log_level(ray.TraceLogLevel.LOG_WARNING)
ray.init_window(screen_width, screen_height, "PyTaiko") ray.init_window(screen_width, screen_height, "PyTaiko")
@@ -79,7 +87,6 @@ def main():
ray.maximize_window() ray.maximize_window()
current_screen = Screens.TITLE current_screen = Screens.TITLE
_frames_counter = 0
audio.init_audio_device() audio.init_audio_device()
@@ -104,7 +111,26 @@ def main():
ray.rl_set_blend_factors_separate(RL_SRC_ALPHA, RL_ONE_MINUS_SRC_ALPHA, RL_ONE, RL_ONE_MINUS_SRC_ALPHA, RL_FUNC_ADD, RL_FUNC_ADD) ray.rl_set_blend_factors_separate(RL_SRC_ALPHA, RL_ONE_MINUS_SRC_ALPHA, RL_ONE, RL_ONE_MINUS_SRC_ALPHA, RL_FUNC_ADD, RL_FUNC_ADD)
ray.set_exit_key(ray.KeyboardKey.KEY_A) ray.set_exit_key(ray.KeyboardKey.KEY_A)
global_data.textures = load_all_textures_from_zip(Path('Graphics/lumendata/intermission.zip')) global_data.textures = load_all_textures_from_zip(Path('Graphics/lumendata/intermission.zip'))
prev_ms = get_current_ms()
sentry_sdk.init(
dsn=env_config["SENTRY_URL"],
# Add data like request headers and IP for users,
# see https://docs.sentry.io/platforms/python/data-management/data-collected/ for more info
send_default_pii=True,
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for tracing.
traces_sample_rate=1.0,
# Set profile_session_sample_rate to 1.0 to profile 100%
# of profile sessions.
profile_session_sample_rate=1.0,
)
if global_data.config['general']['send_diagnostic_data']:
profiler.start_profiler()
while not ray.window_should_close(): while not ray.window_should_close():
current_ms = get_current_ms()
if current_ms >= prev_ms + 100:
print("LAG SPIKE DETECTED")
prev_ms = current_ms
ray.begin_texture_mode(target) ray.begin_texture_mode(target)
ray.begin_blend_mode(ray.BlendMode.BLEND_CUSTOM_SEPARATE) ray.begin_blend_mode(ray.BlendMode.BLEND_CUSTOM_SEPARATE)
@@ -138,6 +164,7 @@ def main():
ray.end_drawing() ray.end_drawing()
ray.close_window() ray.close_window()
audio.close_audio_device() audio.close_audio_device()
profiler.stop_profiler()
if __name__ == "__main__": if __name__ == "__main__":
main() main()
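For reference, the Sentry wiring added above reads SENTRY_URL from a .env file via dotenv_values and passes it straight to sentry_sdk.init. A minimal defensive sketch of the same wiring (the helper name and the fallback behaviour are illustrative, not part of the commit):

import sentry_sdk
from dotenv import dotenv_values

def init_sentry_from_env(env_path: str = ".env") -> bool:
    # Initialize Sentry only when a DSN is configured; return True on success.
    env_config = dotenv_values(env_path)
    dsn = env_config.get("SENTRY_URL")
    if not dsn:
        # No DSN in .env: run without crash reporting instead of raising KeyError.
        return False
    sentry_sdk.init(
        dsn=dsn,
        send_default_pii=True,
        traces_sample_rate=1.0,
        profile_session_sample_rate=1.0,
    )
    return True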

View File

@@ -25,6 +25,7 @@ https://linear.app/pytaiko
## Known Issues ## Known Issues
- Everything
- See linear page. - See linear page.
## Run Locally ## Run Locally

View File

@@ -5,17 +5,25 @@ visual_offset = 0
autoplay = false autoplay = false
sfx = true sfx = true
language = "ja" language = "ja"
send_diagnostic_data = false
hard_judge = 108
[paths] [paths]
tja_path = ['Songs'] tja_path = ['Songs']
video_path = 'Videos' video_path = 'Videos'
[keybinds] [keys]
left_kat = ['D'] left_kat = ['D']
left_don = ['F'] left_don = ['F']
right_don = ['J'] right_don = ['J']
right_kat = ['K'] right_kat = ['K']
[gamepad]
left_kat = [10]
left_don = [16]
right_don = [17]
right_kat = [12]
[audio] [audio]
device_type = "Windows WASAPI" device_type = "Windows WASAPI"
buffer_size = 64 buffer_size = 64
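The new general keys and input tables above are consumed through the existing get_config helper; a sketch of the expected lookups, assuming get_config returns this TOML file as a nested dict (key names mirror the config, values in the comments are the defaults shown above):

from libs.utils import get_config

config = get_config()
hard_judge_ms = config['general']['hard_judge']         # 108 ms window for early/late tinting
send_diag = config['general']['send_diagnostic_data']   # opt-in flag gating the Sentry profiler
left_don_keys = config['keys']['left_don']               # ['F']
left_don_pad = config['gamepad']['left_don']              # [16]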

View File

@@ -4,7 +4,7 @@ from libs.utils import get_current_ms
class BaseAnimation(): class BaseAnimation():
def __init__(self, duration: float, delay: float = 0.0): def __init__(self, duration: float, delay: float = 0.0) -> None:
""" """
Initialize a base animation. Initialize a base animation.
@@ -57,7 +57,7 @@ class FadeAnimation(BaseAnimation):
def __init__(self, duration: float, initial_opacity: float = 1.0, def __init__(self, duration: float, initial_opacity: float = 1.0,
final_opacity: float = 0.0, delay: float = 0.0, final_opacity: float = 0.0, delay: float = 0.0,
ease_in: Optional[str] = None, ease_out: Optional[str] = None, ease_in: Optional[str] = None, ease_out: Optional[str] = None,
reverse_delay: Optional[float] = None): reverse_delay: Optional[float] = None) -> None:
super().__init__(duration, delay) super().__init__(duration, delay)
self.initial_opacity = initial_opacity self.initial_opacity = initial_opacity
self.final_opacity = final_opacity self.final_opacity = final_opacity
@@ -68,13 +68,13 @@ class FadeAnimation(BaseAnimation):
self.reverse_delay = reverse_delay self.reverse_delay = reverse_delay
self.reverse_delay_saved = reverse_delay self.reverse_delay_saved = reverse_delay
def restart(self): def restart(self) -> None:
super().restart() super().restart()
self.reverse_delay = self.reverse_delay_saved self.reverse_delay = self.reverse_delay_saved
self.initial_opacity = self.initial_opacity_saved self.initial_opacity = self.initial_opacity_saved
self.final_opacity = self.final_opacity_saved self.final_opacity = self.final_opacity_saved
def update(self, current_time_ms: float): def update(self, current_time_ms: float) -> None:
elapsed_time = current_time_ms - self.start_ms elapsed_time = current_time_ms - self.start_ms
if elapsed_time <= self.delay: if elapsed_time <= self.delay:
@@ -100,7 +100,7 @@ class MoveAnimation(BaseAnimation):
def __init__(self, duration: float, total_distance: int = 0, def __init__(self, duration: float, total_distance: int = 0,
start_position: int = 0, delay: float = 0.0, start_position: int = 0, delay: float = 0.0,
reverse_delay: Optional[float] = None, reverse_delay: Optional[float] = None,
ease_in: Optional[str] = None, ease_out: Optional[str] = None): ease_in: Optional[str] = None, ease_out: Optional[str] = None) -> None:
super().__init__(duration, delay) super().__init__(duration, delay)
self.reverse_delay = reverse_delay self.reverse_delay = reverse_delay
self.reverse_delay_saved = reverse_delay self.reverse_delay_saved = reverse_delay
@@ -111,13 +111,13 @@ class MoveAnimation(BaseAnimation):
self.ease_in = ease_in self.ease_in = ease_in
self.ease_out = ease_out self.ease_out = ease_out
def restart(self): def restart(self) -> None:
super().restart() super().restart()
self.reverse_delay = self.reverse_delay_saved self.reverse_delay = self.reverse_delay_saved
self.total_distance = self.total_distance_saved self.total_distance = self.total_distance_saved
self.start_position = self.start_position_saved self.start_position = self.start_position_saved
def update(self, current_time_ms: float): def update(self, current_time_ms: float) -> None:
elapsed_time = current_time_ms - self.start_ms elapsed_time = current_time_ms - self.start_ms
if elapsed_time < self.delay: if elapsed_time < self.delay:
self.attribute = self.start_position self.attribute = self.start_position
@@ -138,12 +138,12 @@ class MoveAnimation(BaseAnimation):
self.attribute = self.start_position + (self.total_distance * progress) self.attribute = self.start_position + (self.total_distance * progress)
class TextureChangeAnimation(BaseAnimation): class TextureChangeAnimation(BaseAnimation):
def __init__(self, duration: float, textures: list[tuple[float, float, int]], delay: float = 0.0): def __init__(self, duration: float, textures: list[tuple[float, float, int]], delay: float = 0.0) -> None:
super().__init__(duration) super().__init__(duration)
self.textures = textures self.textures = textures
self.delay = delay self.delay = delay
def update(self, current_time_ms: float): def update(self, current_time_ms: float) -> None:
elapsed_time = current_time_ms - self.start_ms - self.delay elapsed_time = current_time_ms - self.start_ms - self.delay
if elapsed_time <= self.duration: if elapsed_time <= self.duration:
for start, end, index in self.textures: for start, end, index in self.textures:
@@ -153,9 +153,9 @@ class TextureChangeAnimation(BaseAnimation):
self.is_finished = True self.is_finished = True
class TextStretchAnimation(BaseAnimation): class TextStretchAnimation(BaseAnimation):
def __init__(self, duration: float): def __init__(self, duration: float) -> None:
super().__init__(duration) super().__init__(duration)
def update(self, current_time_ms: float): def update(self, current_time_ms: float) -> None:
elapsed_time = current_time_ms - self.start_ms elapsed_time = current_time_ms - self.start_ms
if elapsed_time <= self.duration: if elapsed_time <= self.duration:
self.attribute = 2 + 5 * (elapsed_time // 25) self.attribute = 2 + 5 * (elapsed_time // 25)
@@ -169,7 +169,7 @@ class TextStretchAnimation(BaseAnimation):
class TextureResizeAnimation(BaseAnimation): class TextureResizeAnimation(BaseAnimation):
def __init__(self, duration: float, initial_size: float = 1.0, def __init__(self, duration: float, initial_size: float = 1.0,
final_size: float = 0.0, delay: float = 0.0, final_size: float = 0.0, delay: float = 0.0,
reverse_delay: Optional[float] = None): reverse_delay: Optional[float] = None) -> None:
super().__init__(duration, delay) super().__init__(duration, delay)
self.initial_size = initial_size self.initial_size = initial_size
self.final_size = final_size self.final_size = final_size
@@ -178,14 +178,14 @@ class TextureResizeAnimation(BaseAnimation):
self.final_size_saved = final_size self.final_size_saved = final_size
self.reverse_delay_saved = reverse_delay self.reverse_delay_saved = reverse_delay
def restart(self): def restart(self) -> None:
super().restart() super().restart()
self.reverse_delay = self.reverse_delay_saved self.reverse_delay = self.reverse_delay_saved
self.initial_size = self.initial_size_saved self.initial_size = self.initial_size_saved
self.final_size = self.final_size_saved self.final_size = self.final_size_saved
def update(self, current_time_ms: float): def update(self, current_time_ms: float) -> None:
elapsed_time = current_time_ms - self.start_ms elapsed_time = current_time_ms - self.start_ms
if elapsed_time <= self.delay: if elapsed_time <= self.delay:
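Taken together, the annotated constructors above imply a simple drive loop for these animations. A sketch under the assumptions that restart() stamps the start time, the eased value is exposed as .attribute (as the game code reads it), and the module path is libs.animation:

from libs.animation import FadeAnimation
from libs.utils import get_current_ms

fade = FadeAnimation(duration=500, initial_opacity=1.0, final_opacity=0.0,
                     delay=100, ease_out='cubic')
fade.restart()                      # assumed to reset start_ms and the saved values
while not fade.is_finished:
    fade.update(get_current_ms())   # advances elapsed time past the delay
    opacity = fade.attribute        # feed into ray.fade(color, opacity) when drawing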

View File

@@ -91,7 +91,7 @@ def get_average_volume_rms(data):
return rms return rms
class Sound: class Sound:
def __init__(self, file_path: Path, data=None, target_sample_rate=44100): def __init__(self, file_path: Path, data: Optional[ndarray]=None, target_sample_rate: int=44100):
self.file_path = file_path self.file_path = file_path
self.data = data self.data = data
self.channels = 0 self.channels = 0
@@ -103,10 +103,10 @@ class Sound:
self.pan = 0.5 # 0.0 = left, 0.5 = center, 1.0 = right self.pan = 0.5 # 0.0 = left, 0.5 = center, 1.0 = right
self.normalize: Optional[float] = None self.normalize: Optional[float] = None
if file_path: if file_path.exists():
self.load() self.load()
def load(self): def load(self) -> None:
"""Load and prepare the sound file data""" """Load and prepare the sound file data"""
data, original_sample_rate = sf.read(str(self.file_path)) data, original_sample_rate = sf.read(str(self.file_path))
@@ -129,33 +129,33 @@ class Sound:
self.data = data self.data = data
def play(self): def play(self) -> None:
self.position = 0 self.position = 0
self.is_playing = True self.is_playing = True
self.is_paused = False self.is_paused = False
def stop(self): def stop(self) -> None:
self.is_playing = False self.is_playing = False
self.is_paused = False self.is_paused = False
self.position = 0 self.position = 0
def pause(self): def pause(self) -> None:
if self.is_playing: if self.is_playing:
self.is_paused = True self.is_paused = True
self.is_playing = False self.is_playing = False
def resume(self): def resume(self) -> None:
if self.is_paused: if self.is_paused:
self.is_playing = True self.is_playing = True
self.is_paused = False self.is_paused = False
def normalize_vol(self, rms: float): def normalize_vol(self, rms: float) -> None:
self.normalize = rms self.normalize = rms
if self.data is not None: if self.data is not None:
self.data = None self.data = None
self.load() self.load()
def get_frames(self, num_frames): def get_frames(self, num_frames: int) -> Optional[ndarray]:
"""Get the next num_frames of audio data, applying volume, pitch, and pan""" """Get the next num_frames of audio data, applying volume, pitch, and pan"""
if self.data is None: if self.data is None:
return return
@@ -203,21 +203,22 @@ class Sound:
return output return output
class Music: class Music:
def __init__(self, file_path: Path, data=None, file_type=None, target_sample_rate=44100): def __init__(self, file_path: Path, data: Optional[ndarray]=None, target_sample_rate: int=44100, sample_rate: int =44100, preview: Optional[float]=None, normalize: Optional[float]=None):
self.file_path = file_path self.file_path = file_path
self.file_type = file_type
self.data = data self.data = data
self.target_sample_rate = target_sample_rate self.target_sample_rate = target_sample_rate
self.sample_rate = target_sample_rate self.sample_rate = sample_rate
self.channels = 0 self.channels = 0
self.position = 0 # In frames self.position = 0 # In frames (original sample rate)
self.is_playing = False self.is_playing = False
self.is_paused = False self.is_paused = False
self.volume = 0.75 self.volume = 0.75
self.pan = 0.5 # Center self.pan = 0.5 # Center
self.total_frames = 0 self.total_frames = 0
self.valid = False self.valid = False
self.normalize = None self.normalize = normalize
self.preview = preview # Preview start time in seconds
self.is_preview_mode = preview is not None
self.file_buffer_size = int(target_sample_rate * 5) # 5 seconds buffer self.file_buffer_size = int(target_sample_rate * 5) # 5 seconds buffer
self.buffer = None self.buffer = None
@@ -225,24 +226,96 @@ class Music:
# Thread-safe updates # Thread-safe updates
self.lock = Lock() self.lock = Lock()
self.sound_file = None
if self.file_path.exists():
self.load_from_file() self.load_from_file()
else:
self.load_from_memory()
def load_from_file(self): def load_from_memory(self) -> None:
"""Load music from in-memory numpy array"""
try:
if self.data is None:
raise Exception("No data provided for memory loading")
# Convert to float32 if needed
if self.data.dtype != float32:
self.data = self.data.astype(float32)
if self.sample_rate != self.target_sample_rate:
print(f"Resampling {self.file_path} from {self.sample_rate}Hz to {self.target_sample_rate}Hz")
self.data = resample(self.data, self.sample_rate, self.target_sample_rate)
if self.normalize is not None:
current_rms = get_average_volume_rms(self.data)
if current_rms > 0: # Avoid division by zero
target_rms = self.normalize
rms_scale_factor = target_rms / current_rms
self.data *= rms_scale_factor
# Determine channels and total frames
if self.data.ndim == 1:
self.channels = 1
self.total_frames = len(self.data)
# Reshape for consistency
self.data = self.data.reshape(-1, 1)
else:
self.channels = self.data.shape[1]
self.total_frames = self.data.shape[0]
self.sample_width = 4 # float32
self._fill_buffer()
self.valid = True
print(f"Music loaded from memory: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames")
except Exception as e:
print(f"Error loading music from memory: {e}")
self.valid = False
def load_from_file(self) -> None:
"""Load music from file""" """Load music from file"""
try: try:
# soundfile handles OGG, WAV, FLAC, etc. natively
self.sound_file = sf.SoundFile(str(self.file_path)) self.sound_file = sf.SoundFile(str(self.file_path))
# Get file properties # Get file properties
self.channels = self.sound_file.channels self.channels = self.sound_file.channels
self.sample_width = 2 if self.sound_file.subtype in ['PCM_16', 'VORBIS'] else 4 # Most common self.sample_width = 2 if self.sound_file.subtype in ['PCM_16', 'VORBIS'] else 4 # Most common
self.sample_rate = self.sound_file.samplerate self.sample_rate = self.sound_file.samplerate
self.total_frames = len(self.sound_file) original_total_frames = self.sound_file.frames
# Initialize buffer with some initial data if self.is_preview_mode:
self._fill_buffer() # Calculate preview start and end frames
preview_start_frame = int(self.preview * self.sample_rate)
preview_duration_frames = original_total_frames - preview_start_frame
preview_end_frame = min(preview_start_frame + preview_duration_frames, original_total_frames)
# Ensure preview start is within bounds
if preview_start_frame >= original_total_frames:
preview_start_frame = max(0, original_total_frames - preview_duration_frames)
preview_end_frame = original_total_frames
# Seek to preview start position
self.sound_file.seek(preview_start_frame)
# Read only the preview segment
frames_to_read = preview_end_frame - preview_start_frame
self.data = self.sound_file.read(frames_to_read)
# Update total frames to reflect the preview segment
self.total_frames = len(self.data) if self.data.ndim == 1 else self.data.shape[0]
print(f"Preview mode: Loading {frames_to_read} frames ({frames_to_read/self.sample_rate:.2f}s) starting at {self.preview:.2f}s")
else:
# Load entire file
self.data = self.sound_file.read()
self.total_frames = original_total_frames
self.load_from_memory()
self.valid = True self.valid = True
if self.is_preview_mode:
print(f"Music preview loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames ({self.get_time_length():.2f}s)")
else:
print(f"Music loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames") print(f"Music loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames")
except Exception as e: except Exception as e:
@@ -252,50 +325,31 @@ class Music:
self.sound_file = None self.sound_file = None
self.valid = False self.valid = False
def _fill_buffer(self): def _fill_buffer(self) -> bool:
"""Fill the streaming buffer from file""" """Fill buffer from in-memory data"""
if not self.sound_file:
return False
# Read a chunk of frames from file
try: try:
frames_to_read = min(self.file_buffer_size, self.total_frames - self.position) if self.data is None:
if frames_to_read <= 0:
return False return False
# Read data directly as numpy array (float64 by default) start_frame = self.position + self.buffer_position
data = self.sound_file.read(frames_to_read) end_frame = min(start_frame + self.file_buffer_size, self.total_frames)
# Convert to float32 if needed (soundfile returns float64 by default) if start_frame >= self.total_frames:
if data.dtype != float32: return False
data = data.astype(float32)
# Ensure proper shape for mono audio # Extract the chunk of data
if self.channels == 1 and data.ndim == 1: data_chunk = self.data[start_frame:end_frame]
data = data.reshape(-1, 1)
elif self.channels == 1 and data.ndim == 2:
data = data[:, 0].reshape(-1, 1) # Take first channel if stereo file but expecting mono
# Resample if needed self.buffer = data_chunk
if self.sample_rate != self.target_sample_rate: self.position += self.buffer_position
print(f"Resampling {self.file_path} from {self.sample_rate}Hz to {self.target_sample_rate}Hz")
data = resample(data, self.sample_rate, self.target_sample_rate)
if self.normalize is not None:
current_rms = get_average_volume_rms(data)
if current_rms > 0: # Avoid division by zero
target_rms = self.normalize
rms_scale_factor = target_rms / current_rms
data *= rms_scale_factor
self.buffer = data
self.buffer_position = 0 self.buffer_position = 0
return True return True
except Exception as e: except Exception as e:
print(f"Error filling buffer: {e}") print(f"Error filling buffer from memory: {e}")
return False return False
def update(self): def update(self) -> None:
"""Update music stream buffers""" """Update music stream buffers"""
if not self.is_playing or self.is_paused: if not self.is_playing or self.is_paused:
return return
@@ -303,25 +357,27 @@ class Music:
with self.lock: with self.lock:
# Check if we need to refill the buffer # Check if we need to refill the buffer
if self.buffer is None: if self.buffer is None:
raise Exception("buffer is None") return
if self.sound_file and self.buffer_position >= len(self.buffer): if self.buffer_position >= len(self.buffer):
if not self._fill_buffer(): self.is_playing = self._fill_buffer()
self.is_playing = False
def play(self): def play(self) -> None:
"""Start playing the music stream""" """Start playing the music stream"""
with self.lock: with self.lock:
# Reset position if at the end # Reset position if at the end
if self.sound_file and self.position >= self.total_frames: if self.position >= self.total_frames:
self.sound_file.seek(0) # Reset to beginning
self.position = 0 self.position = 0
self.buffer_position = 0 self.buffer_position = 0
if self.sound_file:
# For preview mode, seek to the preview start position
seek_pos = int(self.preview * self.sample_rate) if self.is_preview_mode else 0
self.sound_file.seek(seek_pos)
self._fill_buffer() self._fill_buffer()
self.is_playing = True self.is_playing = True
self.is_paused = False self.is_paused = False
def stop(self): def stop(self) -> None:
"""Stop playing the music stream""" """Stop playing the music stream"""
with self.lock: with self.lock:
self.is_playing = False self.is_playing = False
@@ -329,49 +385,62 @@ class Music:
self.position = 0 self.position = 0
self.buffer_position = 0 self.buffer_position = 0
if self.sound_file: if self.sound_file:
self.sound_file.seek(0) # Reset to beginning # For preview mode, seek to the preview start position
seek_pos = int(self.preview * self.sample_rate) if self.is_preview_mode else 0
self.sound_file.seek(seek_pos)
self._fill_buffer() self._fill_buffer()
def pause(self): def pause(self) -> None:
"""Pause the music playback""" """Pause the music playback"""
with self.lock: with self.lock:
if self.is_playing: if self.is_playing:
self.is_paused = True self.is_paused = True
self.is_playing = False self.is_playing = False
def resume(self): def resume(self) -> None:
"""Resume the music playback""" """Resume the music playback"""
with self.lock: with self.lock:
if self.is_paused: if self.is_paused:
self.is_playing = True self.is_playing = True
self.is_paused = False self.is_paused = False
def seek(self, position_seconds): def seek(self, position_seconds) -> None:
"""Seek to a specific position in seconds""" """Seek to a specific position in seconds (relative to preview start if in preview mode)"""
with self.lock: with self.lock:
# Convert seconds to frames # Convert seconds to frames
frame_position = int(position_seconds * self.sample_rate) frame_position = int(position_seconds * self.target_sample_rate)
# Clamp position to valid range # Clamp position to valid range
frame_position = max(0, min(frame_position, self.total_frames - 1)) frame_position = max(0, min(frame_position, self.total_frames - 1))
# Update file position if streaming from file # Update file position if streaming from file
if self.sound_file: if self.sound_file:
self.sound_file.seek(frame_position) # For preview mode, add the preview offset
self._fill_buffer() actual_file_position = frame_position
if self.is_preview_mode:
actual_file_position += int(self.preview * self.sample_rate)
self.sound_file.seek(actual_file_position)
self.position = frame_position self.position = frame_position
self.buffer_position = 0 self.buffer_position = 0
self._fill_buffer()
def get_time_length(self): def get_time_length(self) -> float:
"""Get the total length of the music in seconds""" """Get the total length of the music in seconds (preview length if in preview mode)"""
return self.total_frames / self.sample_rate return self.total_frames / self.target_sample_rate
def get_time_played(self): def get_time_played(self) -> float:
"""Get the current playback position in seconds""" """Get the current playback position in seconds (relative to preview start if in preview mode)"""
return (self.position + self.buffer_position) / self.sample_rate return (self.position + self.buffer_position) / self.target_sample_rate
def get_frames(self, num_frames): def get_actual_time_played(self) -> float:
"""Get the actual playback position in the original file (including preview offset)"""
base_time = (self.position + self.buffer_position) / self.target_sample_rate
if self.is_preview_mode:
return base_time + self.preview
return base_time
def get_frames(self, num_frames) -> ndarray:
"""Get the next num_frames of music data, applying volume, pitch, and pan""" """Get the next num_frames of music data, applying volume, pitch, and pan"""
if not self.is_playing: if not self.is_playing:
# Return silence if not playing # Return silence if not playing
@@ -382,11 +451,12 @@ class Music:
with self.lock: with self.lock:
if self.buffer is None: if self.buffer is None:
raise Exception("buffer is None") return zeros(num_frames, dtype=float32)
# Check if we need more data # Check if we need more data
if self.buffer_position >= len(self.buffer): if self.buffer_position >= len(self.buffer):
# If no more data available and streaming from file # Try to fill buffer again
if self.sound_file and not self._fill_buffer(): if not self._fill_buffer():
self.is_playing = False self.is_playing = False
if self.channels == 1: if self.channels == 1:
return zeros(num_frames, dtype=float32) return zeros(num_frames, dtype=float32)
@@ -409,7 +479,6 @@ class Music:
# Update buffer position # Update buffer position
self.buffer_position += frames_to_get self.buffer_position += frames_to_get
self.position += frames_to_get
# Apply volume # Apply volume
output *= self.volume output *= self.volume
@@ -425,7 +494,7 @@ class Music:
return output return output
def __del__(self): def __del__(self) -> None:
"""Cleanup when the music object is deleted""" """Cleanup when the music object is deleted"""
if hasattr(self, 'sound_file') and self.sound_file: if hasattr(self, 'sound_file') and self.sound_file:
try: try:
@@ -434,7 +503,7 @@ class Music:
raise Exception("unable to close music stream") raise Exception("unable to close music stream")
class AudioEngine: class AudioEngine:
def __init__(self, type: str): def __init__(self, type: str) -> None:
self.target_sample_rate = 44100 self.target_sample_rate = 44100
self.buffer_size = 10 self.buffer_size = 10
self.sounds: dict[str, Sound] = {} self.sounds: dict[str, Sound] = {}
@@ -453,20 +522,20 @@ class AudioEngine:
self.update_thread_running = False self.update_thread_running = False
self.type = type self.type = type
def _initialize_asio(self): def _initialize_api(self) -> bool:
"""Set up ASIO device""" """Set up API device"""
# Find ASIO API and use its default device # Find API and use its default device
hostapis = sd.query_hostapis() hostapis = sd.query_hostapis()
asio_api_index = -1 api_index = -1
for i, api in enumerate(hostapis): for i, api in enumerate(hostapis):
if isinstance(api, dict) and 'name' in api and api['name'] == self.type: if isinstance(api, dict) and 'name' in api and api['name'] == self.type:
asio_api_index = i api_index = i
break break
if isinstance(hostapis, tuple): if isinstance(hostapis, tuple):
asio_api = hostapis[asio_api_index] api = hostapis[api_index]
if isinstance(asio_api, dict) and 'default_output_device' in asio_api: if isinstance(api, dict) and 'default_output_device' in api:
default_asio_device = asio_api['default_output_device'] default_asio_device = api['default_output_device']
else: else:
raise Exception("Warning: 'default_output_device' key not found in ASIO API info.") raise Exception("Warning: 'default_output_device' key not found in ASIO API info.")
if default_asio_device >= 0: if default_asio_device >= 0:
@@ -500,7 +569,7 @@ class AudioEngine:
self.output_channels = min(2, device_info['max_output_channels']) self.output_channels = min(2, device_info['max_output_channels'])
return True return True
def _audio_callback(self, outdata, frames, time, status): def _audio_callback(self, outdata: ndarray, frames: int, time: int, status: str) -> None:
"""Callback function for the sounddevice stream""" """Callback function for the sounddevice stream"""
if status: if status:
print(f"Status: {status}") print(f"Status: {status}")
@@ -589,31 +658,36 @@ class AudioEngine:
outdata[:] = output outdata[:] = output
def _start_update_thread(self): def _start_update_thread(self) -> None:
"""Start a thread to update music streams""" """Start a thread to update music streams"""
self.update_thread_running = True self.update_thread_running = True
self.update_thread = Thread(target=self._update_music_thread) self.update_thread = Thread(target=self._update_music_thread)
self.update_thread.daemon = True self.update_thread.daemon = True
self.update_thread.start() self.update_thread.start()
def _update_music_thread(self): def _update_music_thread(self) -> None:
"""Thread function to update all music streams""" """Thread function to update all music streams"""
while self.update_thread_running: while self.update_thread_running:
# Update all active music streams active_streams = [music for music in self.music_streams.values() if music.is_playing]
for music_name, music in self.music_streams.items():
if music.is_playing: if not active_streams:
# Sleep longer when no streams are active
time.sleep(0.5)
continue
for music in active_streams:
music.update() music.update()
# Sleep to not consume too much CPU # Adjust sleep based on number of active streams
time.sleep(0.1) sleep_time = max(0.05, 0.1 / len(active_streams))
time.sleep(sleep_time)
def init_audio_device(self): def init_audio_device(self):
if self.audio_device_ready: if self.audio_device_ready:
return True return True
try: try:
# Try to use ASIO if available self._initialize_api()
self._initialize_asio()
# Set up and start the stream # Set up and start the stream
extra_settings = None extra_settings = None
@@ -630,7 +704,6 @@ class AudioEngine:
self.stream.start() self.stream.start()
self.running = True self.running = True
self.audio_device_ready = True self.audio_device_ready = True
print(self.stream.samplerate, self.stream.blocksize, self.stream.latency*1000)
# Start update thread for music streams # Start update thread for music streams
self._start_update_thread() self._start_update_thread()
@@ -642,7 +715,7 @@ class AudioEngine:
self.audio_device_ready = False self.audio_device_ready = False
return False return False
def close_audio_device(self): def close_audio_device(self) -> None:
self.update_thread_running = False self.update_thread_running = False
if self.update_thread: if self.update_thread:
self.update_thread.join(timeout=1.0) self.update_thread.join(timeout=1.0)
@@ -673,27 +746,27 @@ class AudioEngine:
print(f"Loaded sound from {fileName} as {sound_id}") print(f"Loaded sound from {fileName} as {sound_id}")
return sound_id return sound_id
def play_sound(self, sound): def play_sound(self, sound) -> None:
if sound in self.sounds: if sound in self.sounds:
self.sound_queue.put(sound) self.sound_queue.put(sound)
def stop_sound(self, sound): def stop_sound(self, sound) -> None:
if sound in self.sounds: if sound in self.sounds:
self.sounds[sound].stop() self.sounds[sound].stop()
def pause_sound(self, sound: str): def pause_sound(self, sound: str) -> None:
if sound in self.sounds: if sound in self.sounds:
self.sounds[sound].pause() self.sounds[sound].pause()
def resume_sound(self, sound: str): def resume_sound(self, sound: str) -> None:
if sound in self.sounds: if sound in self.sounds:
self.sounds[sound].resume() self.sounds[sound].resume()
def unload_sound(self, sound: str): def unload_sound(self, sound: str) -> None:
if sound in self.sounds: if sound in self.sounds:
del self.sounds[sound] del self.sounds[sound]
def normalize_sound(self, sound: str, rms: float): def normalize_sound(self, sound: str, rms: float) -> None:
if sound in self.sounds: if sound in self.sounds:
self.sounds[sound].normalize_vol(rms) self.sounds[sound].normalize_vol(rms)
@@ -705,31 +778,41 @@ class AudioEngine:
return self.sounds[sound].is_playing return self.sounds[sound].is_playing
return False return False
def set_sound_volume(self, sound: str, volume: float): def set_sound_volume(self, sound: str, volume: float) -> None:
if sound in self.sounds: if sound in self.sounds:
self.sounds[sound].volume = max(0.0, min(1.0, volume)) self.sounds[sound].volume = max(0.0, min(1.0, volume))
def set_sound_pan(self, sound: str, pan: float): def set_sound_pan(self, sound: str, pan: float) -> None:
if sound in self.sounds: if sound in self.sounds:
self.sounds[sound].pan = max(0.0, min(1.0, pan)) self.sounds[sound].pan = max(0.0, min(1.0, pan))
def load_music_stream(self, fileName: Path) -> str: def load_music_stream(self, fileName: Path, preview: float=0, normalize: Optional[float] = None) -> str:
music = Music(file_path=fileName, target_sample_rate=self.target_sample_rate) music = Music(file_path=fileName, target_sample_rate=self.target_sample_rate, preview=preview, normalize=normalize)
music_id = f"music_{len(self.music_streams)}" music_id = f"music_{len(self.music_streams)}"
self.music_streams[music_id] = music self.music_streams[music_id] = music
print(f"Loaded music stream from {fileName} as {music_id}") print(f"Loaded music stream from {fileName} as {music_id}")
return music_id return music_id
def load_music_stream_from_data(self, audio_array: ndarray, sample_rate: int=44100) -> str:
"""Load music stream from numpy array data"""
# Create a dummy path since Music class expects one
dummy_path = Path("memory_audio")
music = Music(file_path=dummy_path, data=audio_array, target_sample_rate=self.target_sample_rate, sample_rate=sample_rate)
music_id = f"music_{len(self.music_streams)}"
self.music_streams[music_id] = music
print(f"Loaded music stream from memory data as {music_id}")
return music_id
def is_music_valid(self, music: str) -> bool: def is_music_valid(self, music: str) -> bool:
if music in self.music_streams: if music in self.music_streams:
return self.music_streams[music].valid return self.music_streams[music].valid
return False return False
def unload_music_stream(self, music: str): def unload_music_stream(self, music: str) -> None:
if music in self.music_streams: if music in self.music_streams:
del self.music_streams[music] del self.music_streams[music]
def play_music_stream(self, music: str): def play_music_stream(self, music: str) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_queue.put((music, 'play')) self.music_queue.put((music, 'play'))
@@ -738,35 +821,35 @@ class AudioEngine:
return self.music_streams[music].is_playing return self.music_streams[music].is_playing
return False return False
def update_music_stream(self, music: str): def update_music_stream(self, music: str) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_streams[music].update() self.music_streams[music].update()
def stop_music_stream(self, music: str): def stop_music_stream(self, music: str) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_queue.put((music, 'stop')) self.music_queue.put((music, 'stop'))
def pause_music_stream(self, music: str): def pause_music_stream(self, music: str) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_queue.put((music, 'pause')) self.music_queue.put((music, 'pause'))
def resume_music_stream(self, music: str): def resume_music_stream(self, music: str) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_queue.put((music, 'resume')) self.music_queue.put((music, 'resume'))
def seek_music_stream(self, music: str, position: float): def seek_music_stream(self, music: str, position: float) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_queue.put((music, 'seek', position)) self.music_queue.put((music, 'seek', position))
def set_music_volume(self, music: str, volume: float): def set_music_volume(self, music: str, volume: float) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_streams[music].volume = max(0.0, min(1.0, volume)) self.music_streams[music].volume = max(0.0, min(1.0, volume))
def set_music_pan(self, music: str, pan: float): def set_music_pan(self, music: str, pan: float) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_streams[music].pan = max(0.0, min(1.0, pan)) self.music_streams[music].pan = max(0.0, min(1.0, pan))
def normalize_music_stream(self, music: str, rms: float): def normalize_music_stream(self, music: str, rms: float) -> None:
if music in self.music_streams: if music in self.music_streams:
self.music_streams[music].normalize = rms self.music_streams[music].normalize = rms
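A usage sketch for the reworked streaming API above, matching how song select and the video player drive it (the audio object is the module-level AudioEngine instance; paths and values here are illustrative):

from pathlib import Path
from libs.audio import audio

# Stream only the demo section of a song: start 30.0 s in, normalized to the
# same target RMS that song select uses.
demo = audio.load_music_stream(Path('Songs/example/song.ogg'),
                               preview=30.0, normalize=0.1935)
audio.play_music_stream(demo)

# Feed already-decoded samples (e.g. a video's soundtrack) straight to the engine.
# track = audio.load_music_stream_from_data(samples, sample_rate=44100)
# audio.play_music_stream(track)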

View File

@@ -9,14 +9,11 @@ from libs.utils import get_current_ms
class VideoPlayer: class VideoPlayer:
def __init__(self, path: Path): def __init__(self, path: Path):
"""Initialize a video player instance. Audio must have the same name and an ogg extension. """Initialize a video player instance"""
Todo: extract audio from video directly
"""
self.is_finished_list = [False, False] self.is_finished_list = [False, False]
self.video_path = path
self.video = VideoFileClip(path) self.video = VideoFileClip(path)
audio_path = path.with_suffix('.ogg') if self.video.audio is not None:
self.audio = audio.load_music_stream(audio_path) self.audio = audio.load_music_stream_from_data(self.video.audio.to_soundarray(), sample_rate=self.video.audio.fps)
self.buffer_size = 10 # Number of frames to keep in memory self.buffer_size = 10 # Number of frames to keep in memory
self.frame_buffer = {} # Dictionary to store frames {timestamp: texture} self.frame_buffer = {} # Dictionary to store frames {timestamp: texture}
@@ -27,15 +24,18 @@ class VideoPlayer:
self.current_frame = None self.current_frame = None
self.fps = self.video.fps self.fps = self.video.fps
self.frame_duration = 1000 / self.fps self.frame_duration = 1000 / self.fps
self.audio_played = False
def _audio_manager(self): def _audio_manager(self):
if not audio.is_music_stream_playing(self.audio): if self.audio is None:
return
if self.is_finished_list[1]:
return
if not audio.is_music_stream_playing(self.audio) and not self.audio_played:
audio.play_music_stream(self.audio) audio.play_music_stream(self.audio)
self.audio_played = True
audio.update_music_stream(self.audio) audio.update_music_stream(self.audio)
time_played = audio.get_music_time_played(self.audio) / audio.get_music_time_length(self.audio) self.is_finished_list[1] = not audio.is_music_stream_playing(self.audio)
ending_lenience = 0.95
if time_played > ending_lenience:
self.is_finished_list[1] = True
def _load_frame(self, index: int): def _load_frame(self, index: int):
"""Load a specific frame into the buffer""" """Load a specific frame into the buffer"""
@@ -102,7 +102,7 @@ class VideoPlayer:
"""Updates video playback, advancing frames and audio""" """Updates video playback, advancing frames and audio"""
self._audio_manager() self._audio_manager()
if self.frame_index >= len(self.frame_timestamps) - 1: if self.frame_index >= len(self.frame_timestamps):
self.is_finished_list[0] = True self.is_finished_list[0] = True
return return
@@ -130,11 +130,11 @@ class VideoPlayer:
def stop(self): def stop(self):
"""Stops the video, audio, and clears its buffer""" """Stops the video, audio, and clears its buffer"""
self.video.close()
for timestamp, texture in self.frame_buffer.items(): for timestamp, texture in self.frame_buffer.items():
ray.unload_texture(texture) ray.unload_texture(texture)
self.frame_buffer.clear() self.frame_buffer.clear()
if audio.is_music_stream_playing(self.audio): if audio.is_music_stream_playing(self.audio):
audio.stop_music_stream(self.audio) audio.stop_music_stream(self.audio)
audio.unload_music_stream(self.audio)
self.video.close()
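The audio hand-off above leans on moviepy decoding the clip's soundtrack into a numpy array; a standalone sketch of that pattern (file path is illustrative; import style assumes the moviepy 2.x dependency declared by the project):

from moviepy import VideoFileClip

clip = VideoFileClip('Videos/opening.mp4')
if clip.audio is not None:
    samples = clip.audio.to_soundarray()  # float samples, shape (frames, channels)
    sample_rate = clip.audio.fps          # audio sample rate, despite the attribute name
    # samples + sample_rate can now go to AudioEngine.load_music_stream_from_data(...)
clip.close()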

View File

@@ -11,4 +11,6 @@ dependencies = [
"raylib-sdl>=5.5.0.2", "raylib-sdl>=5.5.0.2",
"soundfile>=0.13.1", "soundfile>=0.13.1",
"tomlkit>=0.13.3", "tomlkit>=0.13.3",
"sentry-sdk>=2.30.0",
"dotenv>=0.9.9",
] ]

View File

@@ -728,6 +728,11 @@ class Judgement:
index = int(self.texture_animation.attribute) index = int(self.texture_animation.attribute)
hit_color = ray.fade(ray.WHITE, self.fade_animation_1.attribute) hit_color = ray.fade(ray.WHITE, self.fade_animation_1.attribute)
color = ray.fade(ray.WHITE, self.fade_animation_2.attribute) color = ray.fade(ray.WHITE, self.fade_animation_2.attribute)
if self.curr_hit_ms is not None:
if float(self.curr_hit_ms) < -(global_data.config['general']['hard_judge']):
color = ray.fade(ray.BLUE, self.fade_animation_2.attribute)
elif float(self.curr_hit_ms) > (global_data.config['general']['hard_judge']):
color = ray.fade(ray.RED, self.fade_animation_2.attribute)
if self.type == 'GOOD': if self.type == 'GOOD':
if self.big: if self.big:
ray.draw_texture(textures_1[21], 342, 184, color) ray.draw_texture(textures_1[21], 342, 184, color)
@@ -736,9 +741,6 @@ class Judgement:
ray.draw_texture(textures_1[19], 342, 184, color) ray.draw_texture(textures_1[19], 342, 184, color)
ray.draw_texture(textures_2[index+5], 304, 143, hit_color) ray.draw_texture(textures_2[index+5], 304, 143, hit_color)
ray.draw_texture(textures_2[9], 370, int(y), color) ray.draw_texture(textures_2[9], 370, int(y), color)
if self.curr_hit_ms is not None:
pass
#ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.YELLOW, self.fade_animation_1.attribute))
elif self.type == 'OK': elif self.type == 'OK':
if self.big: if self.big:
ray.draw_texture(textures_1[20], 342, 184, color) ray.draw_texture(textures_1[20], 342, 184, color)
@@ -747,12 +749,8 @@ class Judgement:
ray.draw_texture(textures_1[18], 342, 184, color) ray.draw_texture(textures_1[18], 342, 184, color)
ray.draw_texture(textures_2[index], 304, 143, hit_color) ray.draw_texture(textures_2[index], 304, 143, hit_color)
ray.draw_texture(textures_2[4], 370, int(y), color) ray.draw_texture(textures_2[4], 370, int(y), color)
if self.curr_hit_ms is not None:
ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.WHITE, self.fade_animation_1.attribute))
elif self.type == 'BAD': elif self.type == 'BAD':
ray.draw_texture(textures_2[10], 370, int(y), color) ray.draw_texture(textures_2[10], 370, int(y), color)
if self.curr_hit_ms is not None:
ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.BLUE, self.fade_animation_1.attribute))
class LaneHitEffect: class LaneHitEffect:
def __init__(self, type: str): def __init__(self, type: str):
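The early/late tint added above reduces to comparing the stored hit offset against the configured hard_judge window; the rule in isolation (hypothetical helper, threshold mirrors the default config value):

def hard_judge_tint(hit_offset_ms: float, hard_judge_ms: float = 108) -> str:
    # Hits earlier than the window tint blue, later than it tint red, otherwise white.
    if hit_offset_ms < -hard_judge_ms:
        return 'blue'
    if hit_offset_ms > hard_judge_ms:
        return 'red'
    return 'white'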

View File

@@ -223,9 +223,7 @@ class SongSelectScreen:
if not isinstance(song, Directory) and song.box.is_open: if not isinstance(song, Directory) and song.box.is_open:
if self.demo_song is None and get_current_ms() >= song.box.wait + (83.33*3): if self.demo_song is None and get_current_ms() >= song.box.wait + (83.33*3):
song.box.get_scores() song.box.get_scores()
self.demo_song = audio.load_music_stream(song.tja.metadata.wave) self.demo_song = audio.load_music_stream(song.tja.metadata.wave, preview=song.tja.metadata.demostart, normalize=0.1935)
audio.normalize_music_stream(self.demo_song, 0.1935)
audio.seek_music_stream(self.demo_song, song.tja.metadata.demostart)
audio.play_music_stream(self.demo_song) audio.play_music_stream(self.demo_song)
audio.stop_sound(self.sound_bgm) audio.stop_sound(self.sound_bgm)
if song.box.is_open: if song.box.is_open:
@@ -450,7 +448,7 @@ class SongBox:
direction = -1 direction = -1
if abs(self.target_position - self.position) > 250: if abs(self.target_position - self.position) > 250:
direction *= -1 direction *= -1
self.move = Animation.create_move(66.67, start_position=0, total_distance=100 * direction) self.move = Animation.create_move(83.3, start_position=0, total_distance=100 * direction, ease_out='cubic')
if self.is_open or self.target_position == SongSelectScreen.BOX_CENTER + 150: if self.is_open or self.target_position == SongSelectScreen.BOX_CENTER + 150:
self.move.total_distance = 250 * direction self.move.total_distance = 250 * direction
self.start_position = self.position self.start_position = self.position
@@ -929,6 +927,7 @@ class FileNavigator:
self.items: list[Directory | SongFile] = [] self.items: list[Directory | SongFile] = []
self.selected_index = 0 self.selected_index = 0
self.history = [] self.history = []
self.box_open = False
# Generate all objects upfront # Generate all objects upfront
self._generate_all_objects() self._generate_all_objects()
@@ -1257,15 +1256,22 @@ class FileNavigator:
self.selected_index = 0 if self.items else -1 self.selected_index = 0 if self.items else -1
self.calculate_box_positions() self.calculate_box_positions()
def load_current_directory(self): def load_current_directory(self, selected_item=None):
"""Load pre-generated items for the current directory""" """Load pre-generated items for the current directory"""
has_children = any(item.is_dir() and (item / "box.def").exists() for item in self.current_dir.iterdir())
if has_children:
self.items = [] self.items = []
else:
if selected_item in self.items:
self.items.remove(selected_item)
self.box_open = True
dir_key = str(self.current_dir) dir_key = str(self.current_dir)
# Add back/to_root navigation items # Add back/to_root navigation items
if self.current_dir != self.current_root_dir: if self.current_dir != self.current_root_dir:
back_dir = Directory(self.current_dir.parent, "", 552, back=True) back_dir = Directory(self.current_dir.parent, "", 552, back=True)
if has_children:
self.items.append(back_dir) self.items.append(back_dir)
elif not self.in_root_selection: elif not self.in_root_selection:
to_root_dir = Directory(Path(), "", 552, to_root=True) to_root_dir = Directory(Path(), "", 552, to_root=True)
@@ -1275,7 +1281,6 @@ class FileNavigator:
if dir_key in self.directory_contents: if dir_key in self.directory_contents:
content_items = self.directory_contents[dir_key] content_items = self.directory_contents[dir_key]
# Handle the every-10-songs navigation logic
song_count = 0 song_count = 0
for item in content_items: for item in content_items:
if isinstance(item, SongFile): if isinstance(item, SongFile):
@@ -1283,12 +1288,19 @@ class FileNavigator:
# Add navigation item # Add navigation item
if self.current_dir != self.current_root_dir: if self.current_dir != self.current_root_dir:
back_dir = Directory(self.current_dir.parent, "", 552, back=True) back_dir = Directory(self.current_dir.parent, "", 552, back=True)
if not has_children:
self.items.insert(self.selected_index+song_count, back_dir)
else:
self.items.append(back_dir) self.items.append(back_dir)
elif not self.in_root_selection: elif not self.in_root_selection:
to_root_dir = Directory(Path(), "", 552, to_root=True) to_root_dir = Directory(Path(), "", 552, to_root=True)
if has_children:
self.items.append(to_root_dir) self.items.append(to_root_dir)
song_count += 1 song_count += 1
if not has_children:
self.items.insert(self.selected_index+song_count, item)
else:
self.items.append(item) self.items.append(item)
# OPTIMIZED: Use cached crowns (calculated on-demand) # OPTIMIZED: Use cached crowns (calculated on-demand)
@@ -1361,7 +1373,7 @@ class FileNavigator:
self.current_root_dir = selected_item.path self.current_root_dir = selected_item.path
self.in_root_selection = False self.in_root_selection = False
self.selected_index = 0 self.selected_index = 0
self.load_current_directory() self.load_current_directory(selected_item=selected_item)
elif isinstance(selected_item, SongFile): elif isinstance(selected_item, SongFile):
return selected_item return selected_item
@@ -1378,6 +1390,7 @@ class FileNavigator:
self.load_root_directories() self.load_root_directories()
else: else:
self.load_current_directory() self.load_current_directory()
self.box_open = False
def get_current_item(self): def get_current_item(self):
"""Get the currently selected item""" """Get the currently selected item"""

View File

@@ -33,9 +33,6 @@ class TitleScreen:
self.screen_init = False self.screen_init = False
self.fade_out = None self.fade_out = None
def get_videos(self):
return self.op_video, self.attract_video
def load_sounds(self): def load_sounds(self):
sounds_dir = Path("Sounds") sounds_dir = Path("Sounds")
title_dir = sounds_dir / "title" title_dir = sounds_dir / "title"
@@ -55,12 +52,14 @@ class TitleScreen:
self.screen_init = True self.screen_init = True
self.load_textures() self.load_textures()
self.state = State.OP_VIDEO self.state = State.OP_VIDEO
self.op_video = VideoPlayer(random.choice(self.op_video_list)) self.op_video = None
self.attract_video = VideoPlayer(random.choice(self.attract_video_list)) self.attract_video = None
self.warning_board = None self.warning_board = None
def on_screen_end(self) -> str: def on_screen_end(self) -> str:
if self.op_video is not None:
self.op_video.stop() self.op_video.stop()
if self.attract_video is not None:
self.attract_video.stop() self.attract_video.stop()
for sound in self.sounds: for sound in self.sounds:
if audio.is_sound_playing(sound): if audio.is_sound_playing(sound):
@@ -73,28 +72,30 @@ class TitleScreen:
def scene_manager(self): def scene_manager(self):
if self.state == State.OP_VIDEO: if self.state == State.OP_VIDEO:
if not self.op_video.is_started(): if self.op_video is None:
self.op_video = VideoPlayer(random.choice(self.op_video_list))
self.op_video.start(get_current_ms()) self.op_video.start(get_current_ms())
self.op_video.update() self.op_video.update()
if self.op_video.is_finished(): if self.op_video.is_finished():
self.op_video.stop() self.op_video.stop()
self.op_video = VideoPlayer(random.choice(self.op_video_list)) self.op_video = None
self.state = State.WARNING self.state = State.WARNING
elif self.state == State.WARNING:
if self.warning_board is None:
self.warning_board = WarningScreen(get_current_ms(), self) self.warning_board = WarningScreen(get_current_ms(), self)
elif self.state == State.WARNING and self.warning_board is not None:
self.warning_board.update(get_current_ms(), self) self.warning_board.update(get_current_ms(), self)
if self.warning_board.is_finished: if self.warning_board.is_finished:
self.state = State.ATTRACT_VIDEO self.state = State.ATTRACT_VIDEO
self.attract_video.start(get_current_ms()) self.warning_board = None
elif self.state == State.ATTRACT_VIDEO: elif self.state == State.ATTRACT_VIDEO:
if self.attract_video is None:
self.attract_video = VideoPlayer(random.choice(self.attract_video_list))
self.attract_video.start(get_current_ms())
self.attract_video.update() self.attract_video.update()
if self.attract_video.is_finished(): if self.attract_video.is_finished():
self.attract_video.stop() self.attract_video.stop()
self.attract_video = VideoPlayer(random.choice(self.attract_video_list)) self.attract_video = None
self.state = State.OP_VIDEO self.state = State.OP_VIDEO
self.op_video.start(get_current_ms())
def update(self): def update(self):
@@ -111,14 +112,14 @@ class TitleScreen:
audio.play_sound(self.sound_don) audio.play_sound(self.sound_don)
def draw(self): def draw(self):
if self.state == State.OP_VIDEO: if self.state == State.OP_VIDEO and self.op_video is not None:
self.op_video.draw() self.op_video.draw()
elif self.state == State.WARNING and self.warning_board is not None: elif self.state == State.WARNING and self.warning_board is not None:
bg_source = ray.Rectangle(0, 0, self.textures['keikoku'][0].width, self.textures['keikoku'][0].height) bg_source = ray.Rectangle(0, 0, self.textures['keikoku'][0].width, self.textures['keikoku'][0].height)
bg_dest = ray.Rectangle(0, 0, self.width, self.height) bg_dest = ray.Rectangle(0, 0, self.width, self.height)
ray.draw_texture_pro(self.textures['keikoku'][0], bg_source, bg_dest, ray.Vector2(0,0), 0, ray.WHITE) ray.draw_texture_pro(self.textures['keikoku'][0], bg_source, bg_dest, ray.Vector2(0,0), 0, ray.WHITE)
self.warning_board.draw(self) self.warning_board.draw(self)
elif self.state == State.ATTRACT_VIDEO: elif self.state == State.ATTRACT_VIDEO and self.attract_video is not None:
self.attract_video.draw() self.attract_video.draw()
if self.fade_out is not None: if self.fade_out is not None:

uv.lock (generated, 46 lines changed)
View File

@@ -2,6 +2,15 @@ version = 1
revision = 2 revision = 2
requires-python = ">=3.11" requires-python = ">=3.11"
[[package]]
name = "certifi"
version = "2025.6.15"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" },
]
[[package]] [[package]]
name = "cffi" name = "cffi"
version = "1.17.1" version = "1.17.1"
@@ -65,6 +74,17 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" },
] ]
[[package]]
name = "dotenv"
version = "0.9.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "python-dotenv" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/b2/b7/545d2c10c1fc15e48653c91efde329a790f2eecfbbf2bd16003b5db2bab0/dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9", size = 1892, upload-time = "2025-02-19T22:15:01.647Z" },
]
[[package]] [[package]]
name = "imageio" name = "imageio"
version = "2.37.0" version = "2.37.0"
@@ -253,9 +273,11 @@ name = "pytaiko"
version = "0.1.0" version = "0.1.0"
source = { virtual = "." } source = { virtual = "." }
dependencies = [ dependencies = [
{ name = "dotenv" },
{ name = "moviepy" }, { name = "moviepy" },
{ name = "numpy" }, { name = "numpy" },
{ name = "raylib-sdl" }, { name = "raylib-sdl" },
{ name = "sentry-sdk" },
{ name = "sounddevice" }, { name = "sounddevice" },
{ name = "soundfile" }, { name = "soundfile" },
{ name = "tomlkit" }, { name = "tomlkit" },
@@ -263,9 +285,11 @@ dependencies = [
[package.metadata] [package.metadata]
requires-dist = [ requires-dist = [
{ name = "dotenv", specifier = ">=0.9.9" },
{ name = "moviepy", specifier = ">=2.1.2" }, { name = "moviepy", specifier = ">=2.1.2" },
{ name = "numpy", specifier = ">=2.2.5" }, { name = "numpy", specifier = ">=2.2.5" },
{ name = "raylib-sdl", specifier = ">=5.5.0.2" }, { name = "raylib-sdl", specifier = ">=5.5.0.2" },
{ name = "sentry-sdk", specifier = ">=2.30.0" },
{ name = "sounddevice", specifier = ">=0.5.1" }, { name = "sounddevice", specifier = ">=0.5.1" },
{ name = "soundfile", specifier = ">=0.13.1" }, { name = "soundfile", specifier = ">=0.13.1" },
{ name = "tomlkit", specifier = ">=0.13.3" }, { name = "tomlkit", specifier = ">=0.13.3" },
@@ -307,6 +331,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b0/4f/8ba71611c74d6e3ff5e95d2935c1f5f98fc61183ebf70d4dfb09547a5767/raylib_sdl-5.5.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fd3841ab8d8d6ca24e4fbffc9514c58c0cf5bd29e3f2406006eba872416325b3", size = 1499920, upload-time = "2025-02-12T04:22:16.996Z" }, { url = "https://files.pythonhosted.org/packages/b0/4f/8ba71611c74d6e3ff5e95d2935c1f5f98fc61183ebf70d4dfb09547a5767/raylib_sdl-5.5.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fd3841ab8d8d6ca24e4fbffc9514c58c0cf5bd29e3f2406006eba872416325b3", size = 1499920, upload-time = "2025-02-12T04:22:16.996Z" },
] ]
[[package]]
name = "sentry-sdk"
version = "2.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/04/4c/af31e0201b48469786ddeb1bf6fd3dfa3a291cc613a0fe6a60163a7535f9/sentry_sdk-2.30.0.tar.gz", hash = "sha256:436369b02afef7430efb10300a344fb61a11fe6db41c2b11f41ee037d2dd7f45", size = 326767, upload-time = "2025-06-12T10:34:34.733Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5a/99/31ac6faaae33ea698086692638f58d14f121162a8db0039e68e94135e7f1/sentry_sdk-2.30.0-py2.py3-none-any.whl", hash = "sha256:59391db1550662f746ea09b483806a631c3ae38d6340804a1a4c0605044f6877", size = 343149, upload-time = "2025-06-12T10:34:32.896Z" },
]
[[package]] [[package]]
name = "sounddevice" name = "sounddevice"
version = "0.5.2" version = "0.5.2"
@@ -361,3 +398,12 @@ sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
] ]
[[package]]
name = "urllib3"
version = "2.5.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
]