diff --git a/.gitignore b/.gitignore index 7583399..36c3a6d 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,8 @@ __pycache__ .venv .ruff_cache scores.db +cache +pytaiko.build +pytaiko.dist +pytaiko.onefile-build +pytaiko.exe diff --git a/PyTaiko.py b/PyTaiko.py index 163a9d8..a6aaeb4 100644 --- a/PyTaiko.py +++ b/PyTaiko.py @@ -55,6 +55,7 @@ def main(): if get_config()["video"]["vsync"]: ray.set_config_flags(ray.ConfigFlags.FLAG_VSYNC_HINT) ray.set_config_flags(ray.ConfigFlags.FLAG_MSAA_4X_HINT) + ray.set_trace_log_level(ray.TraceLogLevel.LOG_ERROR) ray.set_window_max_size(screen_width, screen_height) ray.set_window_min_size(screen_width, screen_height) diff --git a/config.toml b/config.toml index bab6e05..c094bc6 100644 --- a/config.toml +++ b/config.toml @@ -1,11 +1,12 @@ [general] -fps_counter = true +fps_counter = false judge_offset = 0 -autoplay = false +autoplay = true sfx = true +language = 'ja' [paths] -tja_path = 'Songs' +tja_path = ['E:/Taiko/ESE', 'Songs', 'E:/Taiko/VersionSort'] video_path = 'Videos' [keybinds] @@ -15,8 +16,9 @@ right_don = ['J'] right_kat = ['K'] [audio] -device_type = 'Windows WASAPI' -buffer_size = 22 +device_type = 'ASIO' +buffer_size = 6 +sample_rate = 48000 exclusive = false [video] @@ -24,4 +26,4 @@ screen_width = 1280 screen_height = 720 fullscreen = false borderless = false -vsync = true +vsync = false diff --git a/libs/audio.py b/libs/audio.py index 459bce3..74e8f85 100644 --- a/libs/audio.py +++ b/libs/audio.py @@ -1,10 +1,11 @@ -import io import os import queue import time -import wave +from pathlib import Path from threading import Lock, Thread +from typing import Optional +import soundfile as sf from numpy import abs as np_abs from numpy import ( arange, @@ -15,6 +16,8 @@ from numpy import ( int32, interp, mean, + ndarray, + sqrt, uint8, zeros, ) @@ -22,7 +25,6 @@ from numpy import max as np_max os.environ["SD_ENABLE_ASIO"] = "1" import sounddevice as sd -from pydub import AudioSegment from libs.utils import get_config, rounded @@ -83,8 +85,13 @@ def get_np_array(sample_width, raw_data): else: raise ValueError(f"Unsupported sample width: {sample_width}") +def get_average_volume_rms(data): + """Calculate average volume using RMS method""" + rms = sqrt(mean(data ** 2)) + return rms + class Sound: - def __init__(self, file_path, data=None, target_sample_rate=44100): + def __init__(self, file_path: Path, data=None, target_sample_rate=44100): self.file_path = file_path self.data = data self.channels = 0 @@ -94,42 +101,33 @@ class Sound: self.is_paused = False self.volume = 1.0 self.pan = 0.5 # 0.0 = left, 0.5 = center, 1.0 = right + self.normalize: Optional[float] = None if file_path: self.load() def load(self): """Load and prepare the sound file data""" - if self.file_path.endswith('.ogg'): - audio = AudioSegment.from_ogg(self.file_path) - wav_io = io.BytesIO() - audio.export(wav_io, format="wav") - wav_io.seek(0) - file_path = wav_io + data, original_sample_rate = sf.read(str(self.file_path)) + + if data.ndim == 1: + self.channels = 1 + data = data.reshape(-1, 1) else: - file_path = self.file_path - with wave.open(file_path, 'rb') as wf: - # Get file properties - self.channels = wf.getnchannels() - sample_width = wf.getsampwidth() - original_sample_rate = wf.getframerate() - frames = wf.getnframes() + self.channels = data.shape[1] - # Read all frames from the file - raw_data = wf.readframes(frames) + if original_sample_rate != self.sample_rate: + print(f"Resampling {self.file_path} from {original_sample_rate}Hz to {self.sample_rate}Hz") + data = 
resample(data, original_sample_rate, self.sample_rate) - data = get_np_array(sample_width, raw_data) + if self.normalize is not None: + current_rms = get_average_volume_rms(data) + if current_rms > 0: # Avoid division by zero + target_rms = self.normalize + rms_scale_factor = target_rms / current_rms + data *= rms_scale_factor - # Reshape for multi-channel audio - if self.channels > 1: - data = data.reshape(-1, self.channels) - - # Resample if needed - if original_sample_rate != self.sample_rate: - print(f"Resampling {self.file_path} from {original_sample_rate}Hz to {self.sample_rate}Hz") - data = resample(data, original_sample_rate, self.sample_rate) - - self.data = data + self.data = data def play(self): self.position = 0 @@ -151,6 +149,12 @@ class Sound: self.is_playing = True self.is_paused = False + def normalize_vol(self, rms: float): + self.normalize = rms + if self.data is not None: + self.data = None + self.load() + def get_frames(self, num_frames): """Get the next num_frames of audio data, applying volume, pitch, and pan""" if self.data is None: @@ -180,7 +184,7 @@ class Sound: if self.channels == 1: output = zeros(num_frames, dtype=float32) - output[:frames_to_get] = self.data[self.position:self.position+frames_to_get] + output[:frames_to_get] = self.data[self.position:self.position+frames_to_get].flatten() else: output = zeros((num_frames, self.channels), dtype=float32) output[:frames_to_get] = self.data[self.position:self.position+frames_to_get] @@ -199,7 +203,7 @@ class Sound: return output class Music: - def __init__(self, file_path, data=None, file_type=None, target_sample_rate=44100): + def __init__(self, file_path: Path, data=None, file_type=None, target_sample_rate=44100): self.file_path = file_path self.file_type = file_type self.data = data @@ -209,12 +213,12 @@ class Music: self.position = 0 # In frames self.is_playing = False self.is_paused = False - self.volume = 1.0 + self.volume = 0.75 self.pan = 0.5 # Center self.total_frames = 0 self.valid = False + self.normalize = None - self.wave_file = None self.file_buffer_size = int(target_sample_rate * 5) # 5 seconds buffer self.buffer = None self.buffer_position = 0 @@ -226,39 +230,31 @@ class Music: def load_from_file(self): """Load music from file""" - if self.file_path.endswith('.ogg'): - audio = AudioSegment.from_ogg(self.file_path) - wav_io = io.BytesIO() - audio.export(wav_io, format="wav") - wav_io.seek(0) - file_path = wav_io - else: - file_path = self.file_path try: - # Keep the file open for streaming - self.wave_file = wave.open(file_path, 'rb') + # soundfile handles OGG, WAV, FLAC, etc. 
natively + self.sound_file = sf.SoundFile(str(self.file_path)) # Get file properties - self.channels = self.wave_file.getnchannels() - self.sample_width = self.wave_file.getsampwidth() - self.sample_rate = self.wave_file.getframerate() - self.total_frames = self.wave_file.getnframes() + self.channels = self.sound_file.channels + self.sample_width = 2 if self.sound_file.subtype in ['PCM_16', 'VORBIS'] else 4 # Most common + self.sample_rate = self.sound_file.samplerate + self.total_frames = len(self.sound_file) # Initialize buffer with some initial data self._fill_buffer() - self.valid = True print(f"Music loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames") + except Exception as e: print(f"Error loading music file: {e}") - if self.wave_file: - self.wave_file.close() - self.wave_file = None + if hasattr(self, 'sound_file') and self.sound_file: + self.sound_file.close() + self.sound_file = None self.valid = False def _fill_buffer(self): """Fill the streaming buffer from file""" - if not self.wave_file: + if not self.sound_file: return False # Read a chunk of frames from file @@ -267,18 +263,31 @@ class Music: if frames_to_read <= 0: return False - raw_data = self.wave_file.readframes(frames_to_read) + # Read data directly as numpy array (float64 by default) + data = self.sound_file.read(frames_to_read) - data = get_np_array(self.sample_width, raw_data) + # Convert to float32 if needed (soundfile returns float64 by default) + if data.dtype != float32: + data = data.astype(float32) - # Reshape for multi-channel audio - if self.channels > 1: - data = data.reshape(-1, self.channels) + # Ensure proper shape for mono audio + if self.channels == 1 and data.ndim == 1: + data = data.reshape(-1, 1) + elif self.channels == 1 and data.ndim == 2: + data = data[:, 0].reshape(-1, 1) # Take first channel if stereo file but expecting mono + # Resample if needed if self.sample_rate != self.target_sample_rate: print(f"Resampling {self.file_path} from {self.sample_rate}Hz to {self.target_sample_rate}Hz") data = resample(data, self.sample_rate, self.target_sample_rate) + if self.normalize is not None: + current_rms = get_average_volume_rms(data) + if current_rms > 0: # Avoid division by zero + target_rms = self.normalize + rms_scale_factor = target_rms / current_rms + data *= rms_scale_factor + self.buffer = data self.buffer_position = 0 return True @@ -295,7 +304,7 @@ class Music: # Check if we need to refill the buffer if self.buffer is None: raise Exception("buffer is None") - if self.wave_file and self.buffer_position >= len(self.buffer): + if self.sound_file and self.buffer_position >= len(self.buffer): if not self._fill_buffer(): self.is_playing = False @@ -303,8 +312,8 @@ class Music: """Start playing the music stream""" with self.lock: # Reset position if at the end - if self.wave_file and self.position >= self.total_frames: - self.wave_file.rewind() + if self.sound_file and self.position >= self.total_frames: + self.sound_file.seek(0) # Reset to beginning self.position = 0 self.buffer_position = 0 self._fill_buffer() @@ -319,8 +328,8 @@ class Music: self.is_paused = False self.position = 0 self.buffer_position = 0 - if self.wave_file: - self.wave_file.rewind() + if self.sound_file: + self.sound_file.seek(0) # Reset to beginning self._fill_buffer() def pause(self): @@ -347,8 +356,8 @@ class Music: frame_position = max(0, min(frame_position, self.total_frames - 1)) # Update file position if streaming from file - if self.wave_file: - self.wave_file.setpos(frame_position) + 
if self.sound_file: + self.sound_file.seek(frame_position) self._fill_buffer() self.position = frame_position @@ -377,7 +386,7 @@ class Music: # Check if we need more data if self.buffer_position >= len(self.buffer): # If no more data available and streaming from file - if self.wave_file and not self._fill_buffer(): + if self.sound_file and not self._fill_buffer(): self.is_playing = False if self.channels == 1: return zeros(num_frames, dtype=float32) @@ -393,7 +402,7 @@ class Music: if self.channels == 1: output = zeros(num_frames, dtype=float32) - output[:frames_to_get] = self.buffer[self.buffer_position:self.buffer_position+frames_to_get] + output[:frames_to_get] = self.buffer[self.buffer_position:self.buffer_position+frames_to_get].flatten() else: output = zeros((num_frames, self.channels), dtype=float32) output[:frames_to_get] = self.buffer[self.buffer_position:self.buffer_position+frames_to_get] @@ -418,9 +427,9 @@ class Music: def __del__(self): """Cleanup when the music object is deleted""" - if self.wave_file: + if hasattr(self, 'sound_file') and self.sound_file: try: - self.wave_file.close() + self.sound_file.close() except Exception: raise Exception("unable to close music stream") @@ -428,12 +437,12 @@ class AudioEngine: def __init__(self, type: str): self.target_sample_rate = 44100 self.buffer_size = 10 - self.sounds = {} + self.sounds: dict[str, Sound] = {} self.music_streams = {} self.stream = None self.device_id = None self.running = False - self.sound_queue = queue.Queue() + self.sound_queue: queue.Queue[str] = queue.Queue() self.music_queue = queue.Queue() self.master_volume = 1.0 self.output_channels = 2 # Default to stereo @@ -532,20 +541,21 @@ class AudioEngine: sound_data = sound.get_frames(frames) # If mono sound but stereo output, duplicate to both channels - if sound.channels == 1 and self.output_channels > 1: - sound_data = column_stack([sound_data] * self.output_channels) + if isinstance(sound_data, ndarray): + if sound.channels == 1 and self.output_channels > 1: + sound_data = column_stack([sound_data] * self.output_channels) - # Ensure sound_data matches the output format - if sound.channels > self.output_channels: - # Down-mix if needed - if self.output_channels == 1: - sound_data = mean(sound_data, axis=1) - else: - # Keep only the first output_channels - sound_data = sound_data[:, :self.output_channels] + # Ensure sound_data matches the output format + if sound.channels > self.output_channels: + # Down-mix if needed + if self.output_channels == 1: + sound_data = mean(sound_data, axis=1) + else: + # Keep only the first output_channels + sound_data = sound_data[:, :self.output_channels] - # Add to the mix (simple additive mixing) - output += sound_data + # Add to the mix (simple additive mixing) + output += sound_data # Mix music streams for music_name, music in self.music_streams.items(): @@ -655,7 +665,7 @@ class AudioEngine: def get_master_volume(self) -> float: return self.master_volume - def load_sound(self, fileName: str) -> str: + def load_sound(self, fileName: Path) -> str: sound = Sound(fileName, target_sample_rate=self.target_sample_rate) sound_id = f"sound_{len(self.sounds)}" self.sounds[sound_id] = sound @@ -678,6 +688,17 @@ class AudioEngine: if sound in self.sounds: self.sounds[sound].resume() + def unload_sound(self, sound: str): + if sound in self.sounds: + del self.sounds[sound] + + def normalize_sound(self, sound: str, rms: float): + if sound in self.sounds: + self.sounds[sound].normalize_vol(rms) + + def is_sound_valid(self, sound: str) -> 
bool: + return sound in self.music_streams + def is_sound_playing(self, sound: str) -> bool: if sound in self.sounds: return self.sounds[sound].is_playing @@ -691,7 +712,7 @@ class AudioEngine: if sound in self.sounds: self.sounds[sound].pan = max(0.0, min(1.0, pan)) - def load_music_stream(self, fileName: str) -> str: + def load_music_stream(self, fileName: Path) -> str: music = Music(file_path=fileName, target_sample_rate=self.target_sample_rate) music_id = f"music_{len(self.music_streams)}" self.music_streams[music_id] = music @@ -744,6 +765,10 @@ class AudioEngine: if music in self.music_streams: self.music_streams[music].pan = max(0.0, min(1.0, pan)) + def normalize_music_stream(self, music: str, rms: float): + if music in self.music_streams: + self.music_streams[music].normalize = rms + def get_music_time_length(self, music: str) -> float: if music in self.music_streams: return self.music_streams[music].get_time_length() @@ -755,4 +780,3 @@ class AudioEngine: raise ValueError(f"Music stream {music} not initialized") audio = AudioEngine(get_config()["audio"]["device_type"]) -audio.set_master_volume(0.75) diff --git a/libs/tja.py b/libs/tja.py index be9efa9..86b0b00 100644 --- a/libs/tja.py +++ b/libs/tja.py @@ -1,6 +1,5 @@ import hashlib import math -import os from collections import deque from dataclasses import dataclass, field, fields from pathlib import Path @@ -8,36 +7,127 @@ from pathlib import Path from libs.utils import get_pixels_per_frame, strip_comments -@dataclass +@dataclass() class Note: type: int = field(init=False) hit_ms: float = field(init=False) load_ms: float = field(init=False) pixels_per_frame: float = field(init=False) + display: bool = field(init=False) index: int = field(init=False) + bpm: float = field(init=False) + gogo_time: bool = field(init=False) moji: int = field(init=False) + def __le__(self, other): + return self.hit_ms <= other.hit_ms + + def _get_hash_data(self) -> bytes: + """Get deterministic byte representation for hashing""" + field_values = [] + for f in sorted([f.name for f in fields(self)]): # Sort for consistency + value = getattr(self, f, None) + field_values.append((f, value)) + field_values.append(('__class__', self.__class__.__name__)) + hash_string = str(field_values) + return hash_string.encode('utf-8') + + def get_hash(self, algorithm='sha256') -> str: + """Generate hash of the note""" + hash_obj = hashlib.new(algorithm) + hash_obj.update(self._get_hash_data()) + return hash_obj.hexdigest() + + def __hash__(self) -> int: + """Make instances hashable for use in sets/dicts""" + return int(self.get_hash('md5')[:8], 16) # Use first 8 chars of MD5 as int + + def __repr__(self): + return str(self.__dict__) + @dataclass class Drumroll(Note): _source_note: Note color: int = field(init=False) + def __repr__(self): + return str(self.__dict__) + def __post_init__(self): for field_name in [f.name for f in fields(Note)]: if hasattr(self._source_note, field_name): setattr(self, field_name, getattr(self._source_note, field_name)) + def _get_hash_data(self) -> bytes: + """Override to include source note and drumroll-specific data""" + field_values = [] + for f in sorted([f.name for f in fields(Note)]): + value = getattr(self, f, None) + field_values.append((f, value)) + + field_values.append(('color', getattr(self, 'color', None))) + field_values.append(('__class__', self.__class__.__name__)) + field_values.append(('_source_note_hash', self._source_note.get_hash())) + + hash_string = str(field_values) + return hash_string.encode('utf-8') + @dataclass 
class Balloon(Note): _source_note: Note count: int = field(init=False) popped: bool = False + def __repr__(self): + return str(self.__dict__) + def __post_init__(self): for field_name in [f.name for f in fields(Note)]: if hasattr(self._source_note, field_name): setattr(self, field_name, getattr(self._source_note, field_name)) + def _get_hash_data(self) -> bytes: + """Override to include source note and balloon-specific data""" + field_values = [] + for f in sorted([f.name for f in fields(Note)]): + value = getattr(self, f, None) + field_values.append((f, value)) + field_values.append(('count', getattr(self, 'count', None))) + field_values.append(('popped', self.popped)) + field_values.append(('__class__', self.__class__.__name__)) + field_values.append(('_source_note_hash', self._source_note.get_hash())) + + hash_string = str(field_values) + return hash_string.encode('utf-8') + +@dataclass +class CourseData: + level: int = 0 + balloon: list[int] = field(default_factory=lambda: []) + scoreinit: list[int] = field(default_factory=lambda: []) + scorediff: int = 0 + +@dataclass +class TJAMetadata: + title: dict[str, str] = field(default_factory= lambda: {'en': ''}) + subtitle: dict[str, str] = field(default_factory= lambda: {'en': ''}) + genre: str = '' + wave: Path = Path() + demostart: float = 0.0 + offset: float = 0.0 + bpm: float = 120.0 + bgmovie: Path = Path() + movieoffset: float = 0.0 + course_data: dict[int, CourseData] = field(default_factory=dict) + +@dataclass +class TJAEXData: + new_audio: bool = False + old_audio: bool = False + limited_time: bool = False + new: bool = False + + def calculate_base_score(play_note_list: deque[Note | Drumroll | Balloon]) -> int: total_notes = 0 balloon_num = 0 @@ -60,117 +150,114 @@ def calculate_base_score(play_note_list: deque[Note | Drumroll | Balloon]) -> in return math.ceil(total_score / 10) * 10 class TJAParser: - def __init__(self, path: str, start_delay: int = 0): - #Defined on startup - self.folder_path = Path(path) - self.folder_name = self.folder_path.name - for _, _, files in os.walk(self.folder_path): - for file in files: - if file.endswith('tja'): - self.file_path = self.folder_path / f'{file}' + def __init__(self, path: Path, start_delay: int = 0, distance: int = 866): + self.file_path: Path = path - #Defined on file_to_data() - self.data = [] - with open(self.file_path, 'rt', encoding='utf-8-sig') as tja_file: - for line in tja_file: - line = strip_comments(line).strip() - if line != '': - self.data.append(str(line)) + lines = self.file_path.read_text(encoding='utf-8-sig').splitlines() + self.data = [cleaned for line in lines + if (cleaned := strip_comments(line).strip())] - #Defined on get_metadata() - self.title = '' - self.title_ja = '' - self.subtitle = '' - self.subtitle_ja = '' - self.wave = self.folder_path / "" - self.offset = 0 - self.demo_start = 0 - self.course_data = dict() + self.metadata = TJAMetadata() + self.ex_data = TJAEXData() + self.get_metadata() - #Defined in metadata but can change throughout the chart - self.bpm = 120 - self.time_signature = 4/4 - - self.distance = 0 - self.scroll_modifier = 1 - self.current_ms = start_delay - self.barline_display = True - self.gogo_time = False + self.distance = distance + self.current_ms: float = start_delay def get_metadata(self): current_diff = None # Track which difficulty we're currently processing for item in self.data: - if item[0] == '#': + if item.startswith("#") or item[0].isdigit(): continue - elif 'SUBTITLEJA' in item: - self.subtitle_ja = 
str(item.split('SUBTITLEJA:')[1]) - elif 'TITLEJA' in item: - self.title_ja = str(item.split('TITLEJA:')[1]) - elif 'SUBTITLE' in item: - self.subtitle = str(item.split('SUBTITLE:')[1][2:]) - elif 'TITLE' in item: - self.title = str(item.split('TITLE:')[1]) - elif 'BPM' in item: - self.bpm = float(item.split(':')[1]) - elif 'WAVE' in item: - filename = item.split(':')[1].strip() - self.wave = self.folder_path / filename - elif 'OFFSET' in item: - self.offset = float(item.split(':')[1]) - elif 'DEMOSTART' in item: - self.demo_start = float(item.split(':')[1]) - elif 'BGMOVIE' in item: - self.bg_movie = self.folder_path / item.split(':')[1].strip() - elif 'COURSE' in item: - # Determine which difficulty we're now processing + elif item.startswith('SUBTITLE'): + region_code = 'en' + if item[len('SUBTITLE')] != ':': + region_code = (item[len('SUBTITLE'):len('SUBTITLE')+2]).lower() + self.metadata.subtitle[region_code] = ''.join(item.split(':')[1:]) + if '限定' in self.metadata.subtitle: + self.ex_data.limited_time = True + elif item.startswith('TITLE'): + region_code = 'en' + if item[len('TITLE')] != ':': + region_code = (item[len('TITLE'):len('TITLE')+2]).lower() + self.metadata.title[region_code] = ''.join(item.split(':')[1:]) + elif item.startswith('BPM'): + self.metadata.bpm = float(item.split(':')[1]) + elif item.startswith('WAVE'): + self.metadata.wave = self.file_path.parent / item.split(':')[1].strip() + elif item.startswith('OFFSET'): + self.metadata.offset = float(item.split(':')[1]) + elif item.startswith('DEMOSTART'): + self.metadata.demostart = float(item.split(':')[1]) + elif item.startswith('BGMOVIE'): + self.metadata.bgmovie = self.file_path.parent / item.split(':')[1].strip() + elif item.startswith('MOVIEOFFSET'): + self.metadata.movieoffset = float(item.split(':')[1]) + elif item.startswith('COURSE'): course = str(item.split(':')[1]).lower().strip() - # Map the course string to its corresponding index - if course == 'dan' or course == '6': + if course == '6' or course == 'dan': current_diff = 6 - self.course_data[6] = [] - elif course == 'tower' or course == '5': + elif course == '5' or course == 'tower': current_diff = 5 - self.course_data[5] = [] - elif course == 'edit' or course == '4': + elif course == '4' or course == 'edit' or course == 'ura': current_diff = 4 - self.course_data[4] = [] - elif course == 'oni' or course == '3': + elif course == '3' or course == 'oni': current_diff = 3 - self.course_data[3] = [] - elif course == 'hard' or course == '2': + elif course == '2' or course == 'hard': current_diff = 2 - self.course_data[2] = [] - elif course == 'normal' or course == '1': + elif course == '1' or course == 'normal': current_diff = 1 - self.course_data[1] = [] - elif course == 'easy' or course == '0': + elif course == '0' or course == 'easy': current_diff = 0 - self.course_data[0] = [] - - # Only process these items if we have a current difficulty + else: + raise Exception("course level empty") + self.metadata.course_data[current_diff] = CourseData() elif current_diff is not None: - if 'LEVEL' in item: - level = int(float(item.split(':')[1])) - self.course_data[current_diff].append(level) - elif 'BALLOON' in item: + if item.startswith('LEVEL'): + self.metadata.course_data[current_diff].level = int(float(item.split(':')[1])) + elif item.startswith('BALLOONNOR'): balloon_data = item.split(':')[1] if balloon_data == '': continue - self.course_data[current_diff].append([int(x) for x in balloon_data.split(',')]) - elif 'SCOREINIT' in item: + 
self.metadata.course_data[current_diff].balloon.extend([int(x) for x in balloon_data.split(',')]) + elif item.startswith('BALLOONEXP'): + balloon_data = item.split(':')[1] + if balloon_data == '': + continue + self.metadata.course_data[current_diff].balloon.extend([int(x) for x in balloon_data.split(',')]) + elif item.startswith('BALLOONMAS'): + balloon_data = item.split(':')[1] + if balloon_data == '': + continue + self.metadata.course_data[current_diff].balloon = ([int(x) for x in balloon_data.split(',')]) + elif item.startswith('BALLOON'): + balloon_data = item.split(':')[1] + if balloon_data == '': + continue + self.metadata.course_data[current_diff].balloon = [int(x) for x in balloon_data.split(',')] + elif item.startswith('SCOREINIT'): score_init = item.split(':')[1] if score_init == '': continue - self.course_data[current_diff].append([int(x) for x in score_init.split(',')]) - elif 'SCOREDIFF' in item: + self.metadata.course_data[current_diff].scoreinit = [int(x) for x in score_init.split(',')] + elif item.startswith('SCOREDIFF'): score_diff = item.split(':')[1] if score_diff == '': continue - self.course_data[current_diff].append(int(score_diff)) - return [self.title, self.title_ja, self.subtitle, self.subtitle_ja, - self.bpm, self.wave, self.offset, self.demo_start, self.course_data] + self.metadata.course_data[current_diff].scorediff = int(score_diff) + for region_code in self.metadata.title: + if '-New Audio-' in self.metadata.title[region_code] or '-新曲-' in self.metadata.title[region_code]: + self.metadata.title[region_code] = self.metadata.title[region_code].strip('-New Audio-') + self.metadata.title[region_code] = self.metadata.title[region_code].strip('-新曲-') + self.ex_data.new_audio = True + elif '-Old Audio-' in self.metadata.title[region_code] or '-旧曲-' in self.metadata.title[region_code]: + self.metadata.title[region_code] = self.metadata.title[region_code].strip('-Old Audio-') + self.metadata.title[region_code] = self.metadata.title[region_code].strip('-旧曲-') + self.ex_data.old_audio = True + elif '限定' in self.metadata.title[region_code]: + self.ex_data.limited_time = True def data_to_notes(self, diff): note_start = -1 @@ -223,9 +310,7 @@ class TJAParser: if item != line: notes.append(bar) bar = [] - if len(self.course_data[diff]) < 2: - return notes, None - return notes, self.course_data[diff][1] + return notes def get_moji(self, play_note_list: deque[Note], ms_per_measure: float) -> None: se_notes = { @@ -287,66 +372,91 @@ class TJAParser: else: play_note_list[-3].moji = se_notes[play_note_list[-3].moji][2] - def notes_to_position(self, diff): + def notes_to_position(self, diff: int): play_note_list: deque[Note | Drumroll | Balloon] = deque() bar_list: deque[Note] = deque() draw_note_list: deque[Note | Drumroll | Balloon] = deque() - notes, balloon = self.data_to_notes(diff) - balloon_index = 0 + notes = self.data_to_notes(diff) + balloon = self.metadata.course_data[diff].balloon.copy() + count = 0 index = 0 + time_signature = 4/4 + bpm = self.metadata.bpm + scroll_modifier = 1 + barline_display = True + gogo_time = False + skip_branch = False for bar in notes: #Length of the bar is determined by number of notes excluding commands bar_length = sum(len(part) for part in bar if '#' not in part) - + barline_added = False for part in bar: + if part.startswith('#BRANCHSTART'): + skip_branch = True + continue if '#JPOSSCROLL' in part: continue elif '#NMSCROLL' in part: continue elif '#MEASURE' in part: divisor = part.find('/') - self.time_signature = float(part[9:divisor]) 
/ float(part[divisor+1:]) + time_signature = float(part[9:divisor]) / float(part[divisor+1:]) continue elif '#SCROLL' in part: - self.scroll_modifier = float(part[7:]) + scroll_modifier = float(part[7:]) continue elif '#BPMCHANGE' in part: - self.bpm = float(part[11:]) + bpm = float(part[11:]) continue elif '#BARLINEOFF' in part: - self.barline_display = False + barline_display = False continue elif '#BARLINEON' in part: - self.barline_display = True + barline_display = True continue elif '#GOGOSTART' in part: - self.gogo_time = True + gogo_time = True continue elif '#GOGOEND' in part: - self.gogo_time = False + gogo_time = False continue elif '#LYRIC' in part: continue + elif part.startswith('#M'): + skip_branch = False + continue #Unrecognized commands will be skipped for now - elif '#' in part: + elif len(part) > 0 and not part[0].isdigit(): + continue + if skip_branch: continue - #https://gist.github.com/KatieFrogs/e000f406bbc70a12f3c34a07303eec8b#measure - ms_per_measure = 60000 * (self.time_signature*4) / self.bpm + if bpm == 0: + ms_per_measure = 0 + else: + #https://gist.github.com/KatieFrogs/e000f406bbc70a12f3c34a07303eec8b#measure + ms_per_measure = 60000 * (time_signature*4) / bpm #Create note object - bar = Note() + bar_line = Note() #Determines how quickly the notes need to move across the screen to reach the judgment circle in time - bar.pixels_per_frame = get_pixels_per_frame(self.bpm * self.time_signature * self.scroll_modifier, self.time_signature*4, self.distance) - pixels_per_ms = bar.pixels_per_frame / (1000 / 60) + bar_line.pixels_per_frame = get_pixels_per_frame(bpm * time_signature * scroll_modifier, time_signature*4, self.distance) + pixels_per_ms = bar_line.pixels_per_frame / (1000 / 60) - bar.hit_ms = self.current_ms - bar.load_ms = bar.hit_ms - (self.distance / pixels_per_ms) - bar.type = 0 + bar_line.hit_ms = self.current_ms + if pixels_per_ms == 0: + bar_line.load_ms = bar_line.hit_ms + else: + bar_line.load_ms = bar_line.hit_ms - (self.distance / pixels_per_ms) + bar_line.type = 0 + bar_line.display = barline_display + bar_line.bpm = bpm + if barline_added: + bar_line.display = False - if self.barline_display: - bar_list.append(bar) + bar_list.append(bar_line) + barline_added = True #Empty bar is still a bar, otherwise start increment if len(part) == 0: @@ -355,29 +465,40 @@ class TJAParser: else: increment = ms_per_measure / bar_length - for item in (part): + for item in part: if item == '0': self.current_ms += increment continue note = Note() note.hit_ms = self.current_ms - note.load_ms = note.hit_ms - (self.distance / pixels_per_ms) + if pixels_per_ms == 0: + note.load_ms = note.hit_ms + else: + note.load_ms = note.hit_ms - (self.distance / pixels_per_ms) note.type = int(item) - note.pixels_per_frame = bar.pixels_per_frame + note.pixels_per_frame = bar_line.pixels_per_frame note.index = index + note.bpm = bpm + note.gogo_time = gogo_time note.moji = -1 if item in {'5', '6'}: note = Drumroll(note) note.color = 255 - elif item in {'7', '9'}: + elif item in {'7'}: + count += 1 if balloon is None: raise Exception("Balloon note found, but no count was specified") note = Balloon(note) - note.count = int(balloon[balloon_index]) - balloon_index += 1 + if not balloon: + note.count = 1 + else: + note.count = balloon.pop(0) elif item == '8': new_pixels_per_ms = play_note_list[-1].pixels_per_frame / (1000 / 60) - note.load_ms = note.hit_ms - (self.distance / new_pixels_per_ms) + if new_pixels_per_ms == 0: + note.load_ms = note.hit_ms + else: + note.load_ms = 
note.hit_ms - (self.distance / new_pixels_per_ms) note.pixels_per_frame = play_note_list[-1].pixels_per_frame self.current_ms += increment play_note_list.append(note) @@ -385,7 +506,9 @@ class TJAParser: index += 1 if len(play_note_list) > 3: if isinstance(play_note_list[-2], Drumroll) and play_note_list[-1].type != 8: - raise Exception(play_note_list[-2]) + print(self.file_path, diff) + print(bar) + raise Exception(f"{play_note_list[-2]}") # https://stackoverflow.com/questions/72899/how-to-sort-a-list-of-dictionaries-by-a-value-of-the-dictionary-in-python # Sorting by load_ms is necessary for drawing, as some notes appear on the # screen slower regardless of when they reach the judge circle @@ -394,9 +517,23 @@ class TJAParser: bar_list = deque(sorted(bar_list, key=lambda b: b.load_ms)) return play_note_list, draw_note_list, bar_list - def hash_note_data(self, notes: list): + def hash_note_data(self, play_notes: deque[Note | Drumroll | Balloon], bars: deque[Note]): n = hashlib.sha256() - for bar in notes: - for part in bar: - n.update(part.encode('utf-8')) + list1 = list(play_notes) + list2 = list(bars) + merged: list[Note | Drumroll | Balloon] = [] + i = 0 + j = 0 + while i < len(list1) and j < len(list2): + if list1[i] <= list2[j]: + merged.append(list1[i]) + i += 1 + else: + merged.append(list2[j]) + j += 1 + merged.extend(list1[i:]) + merged.extend(list2[j:]) + for item in merged: + n.update(item.get_hash().encode('utf-8')) + return n.hexdigest() diff --git a/libs/utils.py b/libs/utils.py index 00e8a3e..2dca94a 100644 --- a/libs/utils.py +++ b/libs/utils.py @@ -1,3 +1,4 @@ +import hashlib import os import tempfile import time @@ -85,6 +86,8 @@ def strip_comments(code: str) -> str: return result def get_pixels_per_frame(bpm: float, time_signature: float, distance: float) -> float: + if bpm == 0: + return 0 beat_duration = 60 / bpm total_time = time_signature * beat_duration total_frames = 60 * total_time @@ -119,38 +122,164 @@ def reset_session(): @dataclass class GlobalData: - selected_song: str = '' #Path + selected_song: Path = Path() textures: dict[str, list[ray.Texture]] = field(default_factory=lambda: dict()) songs_played: int = 0 global_data = GlobalData() +rotation_cache = dict() +char_size_cache = dict() +horizontal_cache = dict() +text_cache = set() +for file in Path('cache/image').iterdir(): + text_cache.add(file.stem) + @dataclass class OutlinedText: - font: ray.Font text: str font_size: int text_color: ray.Color outline_color: ray.Color + font: ray.Font = ray.Font() outline_thickness: int = 2 vertical: bool = False line_spacing: float = 1.0 # Line spacing for vertical text + horizontal_spacing: float = 1.0 # Character spacing for horizontal text lowercase_spacing_factor: float = 0.85 # Adjust spacing for lowercase letters and whitespace - vertical_chars: set = field(default_factory=lambda: {'-', '|', '/', '\\', 'ー'}) + vertical_chars: set = field(default_factory=lambda: {'-', '‐', '|', '/', '\\', 'ー', '~', '~', '(', ')', '(', ')', + '「', '」', '[', ']', '[', ']', '【', '】', '…', '→', '→', ':', ':'}) no_space_chars: set = field(default_factory=lambda: { 'ぁ', 'ア','ぃ', 'イ','ぅ', 'ウ','ぇ', 'エ','ぉ', 'オ', 'ゃ', 'ャ','ゅ', 'ュ','ょ', 'ョ','っ', 'ッ','ゎ', 'ヮ', 'ヶ', 'ヵ','ㇰ','ㇱ','ㇲ','ㇳ','ㇴ','ㇵ','ㇶ','ㇷ','ㇸ', 'ㇹ','ㇺ','ㇻ','ㇼ','ㇽ','ㇾ','ㇿ' }) + # New field for horizontal exception strings + horizontal_exceptions: set = field(default_factory=lambda: {'!!!!', '!!!', '!!', '!!','!!!','!?', '!?', '??', '??', '†††', '(°∀°)', '(°∀°)'}) + # New field for adjacent punctuation characters + 
adjacent_punctuation: set = field(default_factory=lambda: {'.', ',', '。', '、', "'", '"', '´', '`'}) def __post_init__(self): # Cache for rotated characters - self._rotation_cache = {} + self._rotation_cache = rotation_cache # Cache for character measurements - self._char_size_cache = {} + self._char_size_cache = char_size_cache + # Cache for horizontal exception measurements + self._horizontal_cache = horizontal_cache + self.hash = self._get_hash() self.texture = self._create_texture() + def _load_font_for_text(self, text: str) -> ray.Font: + codepoint_count = ray.ffi.new('int *', 0) + unique_codepoints = set(text) + codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count) + return ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), self.font_size, codepoints, 0) + + def _get_hash(self): + n = hashlib.sha256() + n.update(self.text.encode('utf-8')) + n.update(str(self.vertical).encode('utf-8')) + n.update(str(self.horizontal_spacing).encode('utf-8')) # Include horizontal spacing in hash + n.update(str(self.outline_color.a).encode('utf-8')) + n.update(str(self.outline_color.r).encode('utf-8')) + n.update(str(self.outline_color.g).encode('utf-8')) + n.update(str(self.outline_color.b).encode('utf-8')) + n.update(str(self.text_color.a).encode('utf-8')) + n.update(str(self.text_color.r).encode('utf-8')) + n.update(str(self.text_color.g).encode('utf-8')) + n.update(str(self.text_color.b).encode('utf-8')) + n.update(str(self.font_size).encode('utf-8')) + return n.hexdigest() + + def _parse_text_segments(self): + """Parse text into segments, identifying horizontal exceptions""" + if not self.vertical: + return [{'text': self.text, 'is_horizontal': False}] + + segments = [] + i = 0 + current_segment = "" + + while i < len(self.text): + # Check if any horizontal exception starts at current position + found_exception = None + for exception in self.horizontal_exceptions: + if self.text[i:].startswith(exception): + found_exception = exception + break + + if found_exception: + # Save current segment if it exists + if current_segment: + segments.append({'text': current_segment, 'is_horizontal': False}) + current_segment = "" + + # Add horizontal exception as separate segment + segments.append({'text': found_exception, 'is_horizontal': True}) + i += len(found_exception) + else: + # Add character to current segment + current_segment += self.text[i] + i += 1 + + # Add remaining segment + if current_segment: + segments.append({'text': current_segment, 'is_horizontal': False}) + + return segments + + def _group_characters_with_punctuation(self, text): + """Group characters with their adjacent punctuation""" + groups = [] + i = 0 + + while i < len(text): + current_char = text[i] + group = {'main_char': current_char, 'adjacent_punct': []} + + # Look ahead for adjacent punctuation + j = i + 1 + while j < len(text) and text[j] in self.adjacent_punctuation: + group['adjacent_punct'].append(text[j]) + j += 1 + + groups.append(group) + i = j # Move to next non-punctuation character + + return groups + + def _get_horizontal_exception_texture(self, text: str, color): + """Get or create a texture for horizontal exception text""" + cache_key = (text, color.r, color.g, color.b, color.a, 'horizontal') + + if cache_key in self._horizontal_cache: + return self._horizontal_cache[cache_key] + + # Measure the text + text_size = ray.measure_text_ex(self.font, text, self.font_size, 1.0) + padding = int(self.outline_thickness * 3) + + # Create image with proper dimensions + img_width = 
int(text_size.x + padding * 2) + img_height = int(text_size.y + padding * 2) + temp_image = ray.gen_image_color(img_width, img_height, ray.Color(0, 0, 0, 0)) + + # Draw the text centered + ray.image_draw_text_ex( + temp_image, + self.font, + text, + ray.Vector2(padding, padding), + self.font_size, + 1.0, + color + ) + + # Cache the image + self._horizontal_cache[cache_key] = temp_image + return temp_image + def _get_char_size(self, char): """Cache character size measurements""" if char not in self._char_size_cache: @@ -165,8 +294,7 @@ class OutlinedText: """Calculate vertical spacing between characters""" # Check if current char is lowercase, whitespace or a special character is_spacing_char = (current_char.islower() or - current_char.isspace() or - current_char in self.no_space_chars) + current_char.isspace()) # Additional check for capitalization transition if next_char and ((current_char.isupper() and next_char.islower()) or @@ -177,33 +305,27 @@ class OutlinedText: spacing = self.line_spacing * (self.lowercase_spacing_factor if is_spacing_char else 1.0) return self.font_size * spacing - def _get_rotated_char(self, char, color): + def _get_rotated_char(self, char: str, color): """Get or create a rotated character texture from cache""" - cache_key = (char, color[0], color[1], color[2], color[3]) + cache_key = (char, color.r, color.g, color.b, color.a) if cache_key in self._rotation_cache: return self._rotation_cache[cache_key] char_size = self._get_char_size(char) - - # For rotated text, we need extra padding to prevent cutoff - padding = max(int(self.font_size * 0.2), 2) # Add padding proportional to font size - temp_width = int(char_size.y) + padding * 2 - temp_height = int(char_size.x) + padding * 2 - - # Create a temporary image with padding to ensure characters aren't cut off + padding = int(self.outline_thickness * 3) # Increased padding + temp_width = max(int(char_size.y) + padding, self.font_size + padding) + temp_height = max(int(char_size.x) + padding, self.font_size + padding) temp_image = ray.gen_image_color(temp_width, temp_height, ray.Color(0, 0, 0, 0)) - # Calculate centering offsets - x_offset = padding - y_offset = padding + center_x = (temp_width - char_size.y) // 2 + center_y = (temp_height - char_size.x) // 2 - # Draw the character centered in the temporary image ray.image_draw_text_ex( temp_image, self.font, char, - ray.Vector2(x_offset-5, y_offset), + ray.Vector2(center_x-5, center_y), # Centered placement with padding self.font_size, 1.0, color @@ -223,191 +345,379 @@ class OutlinedText: self._rotation_cache[cache_key] = rotated_image return rotated_image + def _calculate_horizontal_text_width(self): + """Calculate the total width of horizontal text with custom spacing""" + if not self.text: + return 0 + + total_width = 0 + for i, char in enumerate(self.text): + char_size = ray.measure_text_ex(self.font, char, self.font_size, 1.0) + total_width += char_size.x + + # Add spacing between characters (except for the last character) + if i < len(self.text) - 1: + total_width += (char_size.x * (self.horizontal_spacing - 1.0)) + + return total_width + def _calculate_dimensions(self): - """Calculate dimensions based on orientation""" + padding = int(self.outline_thickness * 3) + if not self.vertical: - # Horizontal text - text_size = ray.measure_text_ex(self.font, self.text, self.font_size, 1.0) - - # Add extra padding to prevent cutoff - extra_padding = max(int(self.font_size * 0.15), 2) - width = int(text_size.x + self.outline_thickness * 4 + extra_padding * 2) - height = 
int(text_size.y + self.outline_thickness * 4 + extra_padding * 2) - padding_x = self.outline_thickness * 2 + extra_padding - padding_y = self.outline_thickness * 2 + extra_padding - - return width, height, padding_x, padding_y + if self.horizontal_spacing == 1.0: + # Use default raylib measurement for normal spacing + text_size = ray.measure_text_ex(self.font, self.text, self.font_size, 1.0) + return int(text_size.x + padding * 2), int(text_size.y + padding * 2) + else: + # Calculate custom spacing width + text_width = self._calculate_horizontal_text_width() + text_height = ray.measure_text_ex(self.font, "Ag", self.font_size, 1.0).y # Use sample chars for height + return int(text_width + padding * 2), int(text_height + padding * 2) else: - # For vertical text, pre-calculate all character heights and widths + # Parse text into segments + segments = self._parse_text_segments() + char_heights = [] char_widths = [] - for i, char in enumerate(self.text): - next_char = self.text[i+1] if i+1 < len(self.text) else None - char_heights.append(self._calculate_vertical_spacing(char, next_char)) - - # For vertical characters, consider rotated dimensions - if char in self.vertical_chars: - # Use padded width for rotated characters - padding = max(int(self.font_size * 0.2), 2) * 2 - char_widths.append(self._get_char_size(char).x + padding) + for segment in segments: + if segment['is_horizontal']: + # For horizontal exceptions, add their height as spacing + text_size = ray.measure_text_ex(self.font, segment['text'], self.font_size, 1.0) + char_heights.append(text_size.y * self.line_spacing) + char_widths.append(text_size.x) else: - char_widths.append(self._get_char_size(char).x) + # Process vertical text with character grouping + char_groups = self._group_characters_with_punctuation(segment['text']) + + for i, group in enumerate(char_groups): + main_char = group['main_char'] + adjacent_punct = group['adjacent_punct'] + + # Get next group's main character for spacing calculation + next_char = char_groups[i+1]['main_char'] if i+1 < len(char_groups) else None + char_heights.append(self._calculate_vertical_spacing(main_char, next_char)) + + # Calculate width considering main char + adjacent punctuation + main_char_size = self._get_char_size(main_char) + group_width = main_char_size.x + + # Add width for adjacent punctuation + for punct in adjacent_punct: + punct_size = self._get_char_size(punct) + group_width += punct_size.x + + # For vertical characters, consider rotated dimensions + if main_char in self.vertical_chars: + char_widths.append(group_width + padding) + else: + char_widths.append(group_width) max_char_width = max(char_widths) if char_widths else 0 total_height = sum(char_heights) if char_heights else 0 - # Add extra padding for vertical text - extra_padding = max(int(self.font_size * 0.15), 2) - width = int(max_char_width + self.outline_thickness * 4 + extra_padding * 2) - height = int(total_height + self.outline_thickness * 4 + extra_padding * 2) - padding_x = self.outline_thickness * 2 + extra_padding - padding_y = self.outline_thickness * 2 + extra_padding + width = int(max_char_width + padding * 2) # Padding on both sides + height = int(total_height + padding * 2) # Padding on top and bottom - return width, height, padding_x, padding_y + return width, height - def _draw_horizontal_text(self, image, padding_x, padding_y): - """Draw horizontal text with outline""" - # Draw outline - for dx in range(-self.outline_thickness, self.outline_thickness + 1): - for dy in range(-self.outline_thickness, 
self.outline_thickness + 1): - if dx == 0 and dy == 0: - continue - ray.image_draw_text_ex( - image, - self.font, - self.text, - ray.Vector2(padding_x + dx, padding_y + dy), - self.font_size, - 1.0, - self.outline_color - ) + def _draw_horizontal_text(self, image): + if self.horizontal_spacing == 1.0: + # Use original method for normal spacing + text_size = ray.measure_text_ex(self.font, self.text, self.font_size, 1.0) + position = ray.Vector2((image.width - text_size.x) / 2, (image.height - text_size.y) / 2) - # Draw main text - ray.image_draw_text_ex( - image, - self.font, - self.text, - ray.Vector2(padding_x, padding_y), - self.font_size, - 1.0, - self.text_color - ) + for dx in range(-self.outline_thickness, self.outline_thickness + 1): + for dy in range(-self.outline_thickness, self.outline_thickness + 1): + # Skip the center position (will be drawn as main text) + if dx == 0 and dy == 0: + continue - def _draw_vertical_text(self, image, width, padding_x, padding_y): - """Draw vertical text with outline""" - # Precalculate positions and spacings to avoid redundant calculations - positions = [] - current_y = padding_y + # Calculate outline distance + dist = (dx*dx + dy*dy) ** 0.5 - for i, char in enumerate(self.text): - char_size = self._get_char_size(char) - char_height = self._calculate_vertical_spacing( - char, - self.text[i+1] if i+1 < len(self.text) else None - ) - - # Calculate center position for each character - if char in self.vertical_chars: - # For vertical characters, we need to use the rotated image dimensions - rotated_img = self._get_rotated_char(char, self.text_color) - char_width = rotated_img.width - center_offset = (width - char_width) // 2 - else: - char_width = char_size.x - center_offset = (width - char_width) // 2 - - positions.append((char, center_offset, current_y, char_height, char in self.vertical_chars)) - current_y += char_height - - # First draw all outlines - for dx in range(-self.outline_thickness, self.outline_thickness + 1): - for dy in range(-self.outline_thickness, self.outline_thickness + 1): - if dx == 0 and dy == 0: - continue - - for char, center_offset, y_pos, _, is_vertical in positions: - if is_vertical: - rotated_img = self._get_rotated_char(char, self.outline_color) - ray.image_draw( - image, - rotated_img, - ray.Rectangle(0, 0, rotated_img.width, rotated_img.height), - ray.Rectangle( - int(center_offset + dx), - int(y_pos + dy), - rotated_img.width, - rotated_img.height - ), - ray.WHITE - ) - else: + # Only draw outline positions that are near the outline thickness + if dist <= self.outline_thickness + 0.5: ray.image_draw_text_ex( image, self.font, - char, - ray.Vector2(center_offset + dx, y_pos + dy), + self.text, + ray.Vector2(position.x + dx, position.y + dy), self.font_size, 1.0, self.outline_color ) - # Then draw all main text - for char, center_offset, y_pos, _, is_vertical in positions: - if is_vertical: - rotated_img = self._get_rotated_char(char, self.text_color) - ray.image_draw( - image, - rotated_img, - ray.Rectangle(0, 0, rotated_img.width, rotated_img.height), - ray.Rectangle( - int(center_offset), - int(y_pos), - rotated_img.width, - rotated_img.height - ), - ray.WHITE - ) - else: + # Draw main text + ray.image_draw_text_ex( + image, + self.font, + self.text, + position, + self.font_size, + 1.0, + self.text_color + ) + else: + # Draw text with custom character spacing + text_width = self._calculate_horizontal_text_width() + text_height = ray.measure_text_ex(self.font, "Ag", self.font_size, 1.0).y + + start_x = (image.width - 
text_width) / 2 + start_y = (image.height - text_height) / 2 + + # First draw all outlines + current_x = start_x + for i, char in enumerate(self.text): + char_size = ray.measure_text_ex(self.font, char, self.font_size, 1.0) + + for dx in range(-self.outline_thickness, self.outline_thickness + 1): + for dy in range(-self.outline_thickness, self.outline_thickness + 1): + if dx == 0 and dy == 0: + continue + + dist = (dx*dx + dy*dy) ** 0.5 + if dist <= self.outline_thickness + 0.5: + ray.image_draw_text_ex( + image, + self.font, + char, + ray.Vector2(current_x + dx, start_y + dy), + self.font_size, + 1.0, + self.outline_color + ) + + # Move to next character position + current_x += char_size.x + if i < len(self.text) - 1: # Add spacing except for last character + current_x += (char_size.x * (self.horizontal_spacing - 1.0)) + + # Then draw all main text + current_x = start_x + for i, char in enumerate(self.text): + char_size = ray.measure_text_ex(self.font, char, self.font_size, 1.0) + ray.image_draw_text_ex( image, self.font, char, - ray.Vector2(center_offset, y_pos), + ray.Vector2(current_x, start_y), self.font_size, 1.0, self.text_color ) - def _create_texture(self): - """Create a texture with outlined text""" - # Calculate dimensions - width, height, padding_x, padding_y = self._calculate_dimensions() + # Move to next character position + current_x += char_size.x + if i < len(self.text) - 1: # Add spacing except for last character + current_x += (char_size.x * (self.horizontal_spacing - 1.0)) + + def _draw_vertical_text(self, image, width): + padding = int(self.outline_thickness * 2) + segments = self._parse_text_segments() + + positions = [] + current_y = padding # Start with padding at the top + + for segment in segments: + if segment['is_horizontal']: + # Handle horizontal exception + text_size = ray.measure_text_ex(self.font, segment['text'], self.font_size, 1.0) + center_offset = (width - text_size.x) // 2 + char_height = text_size.y * self.line_spacing + + positions.append({ + 'type': 'horizontal', + 'text': segment['text'], + 'x': center_offset, + 'y': current_y, + 'height': char_height + }) + current_y += char_height + else: + # Handle vertical text with character grouping + char_groups = self._group_characters_with_punctuation(segment['text']) + + for i, group in enumerate(char_groups): + main_char = group['main_char'] + adjacent_punct = group['adjacent_punct'] + + # Get next group for spacing calculation + next_char = char_groups[i+1]['main_char'] if i+1 < len(char_groups) else None + char_height = self._calculate_vertical_spacing(main_char, next_char) + + # Calculate positioning for main character + main_char_size = self._get_char_size(main_char) + + if main_char in self.vertical_chars: + rotated_img = self._get_rotated_char(main_char, self.text_color) + main_char_width = rotated_img.width + center_offset = (width - main_char_width) // 2 + else: + main_char_width = main_char_size.x + center_offset = (width - main_char_width) // 2 + + # Add main character position + positions.append({ + 'type': 'vertical', + 'char': main_char, + 'x': center_offset, + 'y': current_y, + 'height': char_height, + 'is_vertical_char': main_char in self.vertical_chars + }) + + # Add adjacent punctuation positions + punct_x_offset = center_offset + main_char_width + for punct in adjacent_punct: + punct_size = self._get_char_size(punct) + + positions.append({ + 'type': 'vertical', + 'char': punct, + 'x': punct_x_offset, + 'y': current_y+5, + 'height': 0, # No additional height for punctuation + 
'is_vertical_char': punct in self.vertical_chars, + 'is_adjacent': True + }) + + punct_x_offset += punct_size.x + + current_y += char_height + + # First draw all outlines + outline_thickness = int(self.outline_thickness) + + for pos in positions: + if pos['type'] == 'horizontal': + # Draw horizontal text outline + for dx in range(-outline_thickness, outline_thickness + 1): + for dy in range(-outline_thickness, outline_thickness + 1): + if dx == 0 and dy == 0: + continue + + dist = (dx*dx + dy*dy) ** 0.5 + if dist <= outline_thickness + 0.5: + ray.image_draw_text_ex( + image, + self.font, + pos['text'], + ray.Vector2(pos['x'] + dx, pos['y'] + dy), + self.font_size, + 1.0, + self.outline_color + ) + else: + # Draw vertical character outline + for dx in range(-outline_thickness, outline_thickness + 1): + for dy in range(-outline_thickness, outline_thickness + 1): + if dx == 0 and dy == 0: + continue + + dist = (dx*dx + dy*dy) ** 0.5 + if dist <= outline_thickness + 0.5: + if pos['is_vertical_char']: + rotated_img = self._get_rotated_char(pos['char'], self.outline_color) + ray.image_draw( + image, + rotated_img, + ray.Rectangle(0, 0, rotated_img.width, rotated_img.height), + ray.Rectangle( + int(pos['x'] + dx), + int(pos['y'] + dy), + rotated_img.width, + rotated_img.height + ), + ray.WHITE + ) + else: + ray.image_draw_text_ex( + image, + self.font, + pos['char'], + ray.Vector2(pos['x'] + dx, pos['y'] + dy), + self.font_size, + 1.0, + self.outline_color + ) + + # Then draw all main text + for pos in positions: + if pos['type'] == 'horizontal': + # Draw horizontal text + ray.image_draw_text_ex( + image, + self.font, + pos['text'], + ray.Vector2(pos['x'], pos['y']), + self.font_size, + 1.0, + self.text_color + ) + else: + # Draw vertical character + if pos['is_vertical_char']: + rotated_img = self._get_rotated_char(pos['char'], self.text_color) + ray.image_draw( + image, + rotated_img, + ray.Rectangle(0, 0, rotated_img.width, rotated_img.height), + ray.Rectangle( + int(pos['x']), + int(pos['y']), + rotated_img.width, + rotated_img.height + ), + ray.WHITE + ) + else: + ray.image_draw_text_ex( + image, + self.font, + pos['char'], + ray.Vector2(pos['x'], pos['y']), + self.font_size, + 1.0, + self.text_color + ) + + def _create_texture(self): + if self.hash in text_cache: + texture = ray.load_texture(f'cache/image/{self.hash}.png') + return texture + + self.font = self._load_font_for_text(self.text) + + width, height = self._calculate_dimensions() + + width += int(self.outline_thickness * 1.5) + height += int(self.outline_thickness * 1.5) - # Create transparent image image = ray.gen_image_color(width, height, ray.Color(0, 0, 0, 0)) - # Draw text based on orientation if not self.vertical: - self._draw_horizontal_text(image, padding_x, padding_y) + self._draw_horizontal_text(image) else: - self._draw_vertical_text(image, width, padding_x, padding_y) + self._draw_vertical_text(image, width) - # Create texture from image + ray.export_image(image, f'cache/image/{self.hash}.png') texture = ray.load_texture_from_image(image) ray.unload_image(image) return texture def draw(self, src: ray.Rectangle, dest: ray.Rectangle, origin: ray.Vector2, rotation: float, color: ray.Color): - """Draw the outlined text""" ray.draw_texture_pro(self.texture, src, dest, origin, rotation, color) def unload(self): - """Clean up resources""" - # Unload all cached rotated images for img in self._rotation_cache.values(): ray.unload_image(img) self._rotation_cache.clear() - # Unload texture + for img in 
self._horizontal_cache.values(): + ray.unload_image(img) + self._horizontal_cache.clear() + ray.unload_texture(self.texture) diff --git a/libs/video.py b/libs/video.py index 75082f6..ee0d4fa 100644 --- a/libs/video.py +++ b/libs/video.py @@ -1,3 +1,5 @@ +from pathlib import Path + import pyray as ray from moviepy import VideoFileClip @@ -6,14 +8,14 @@ from libs.utils import get_current_ms class VideoPlayer: - def __init__(self, path: str): + def __init__(self, path: Path): """Initialize a video player instance. Audio must have the same name and an ogg extension. Todo: extract audio from video directly """ self.is_finished_list = [False, False] self.video_path = path self.video = VideoFileClip(path) - audio_path = path[:-4] + '.ogg' + audio_path = path.with_suffix('.ogg') self.audio = audio.load_music_stream(audio_path) self.buffer_size = 10 # Number of frames to keep in memory diff --git a/scenes/entry.py b/scenes/entry.py index f632d37..8b2ea65 100644 --- a/scenes/entry.py +++ b/scenes/entry.py @@ -2,7 +2,7 @@ from pathlib import Path import pyray as ray -from libs.utils import load_texture_from_zip +from libs.utils import get_config, load_texture_from_zip class EntryScreen: @@ -24,8 +24,10 @@ class EntryScreen: def update(self): self.on_screen_start() - if ray.is_key_pressed(ray.KeyboardKey.KEY_ENTER): - return self.on_screen_end() + keys = get_config()["keybinds"]["left_don"] + get_config()["keybinds"]["right_don"] + for key in keys: + if ray.is_key_pressed(ord(key)): + return self.on_screen_end() def draw(self): ray.draw_texture(self.texture_footer, 0, self.height - 151, ray.WHITE) diff --git a/scenes/game.py b/scenes/game.py index a14fffe..567a094 100644 --- a/scenes/game.py +++ b/scenes/game.py @@ -24,10 +24,12 @@ from libs.video import VideoPlayer class GameScreen: + JUDGE_X = 414 + SCREEN_WIDTH = 1280 + SCREEN_HEIGHT = 720 def __init__(self, width: int, height: int): self.width = width self.height = height - self.judge_x = 414 self.current_ms = 0 self.result_transition = None self.song_info = None @@ -82,16 +84,14 @@ class GameScreen: def load_sounds(self): sounds_dir = Path("Sounds") - self.sound_don = audio.load_sound(str(sounds_dir / "inst_00_don.wav")) - self.sound_kat = audio.load_sound(str(sounds_dir / "inst_00_katsu.wav")) - self.sound_balloon_pop = audio.load_sound(str(sounds_dir / "balloon_pop.wav")) - self.sound_result_transition = audio.load_sound(str(sounds_dir / "result" / "VO_RESULT [1].ogg")) + self.sound_don = audio.load_sound(sounds_dir / "inst_00_don.wav") + self.sound_kat = audio.load_sound(sounds_dir / "inst_00_katsu.wav") + self.sound_restart = audio.load_sound(sounds_dir / 'song_select' / 'Skip.ogg') + self.sound_balloon_pop = audio.load_sound(sounds_dir / "balloon_pop.wav") + self.sound_result_transition = audio.load_sound(sounds_dir / "result" / "VO_RESULT [1].ogg") self.sounds = [self.sound_don, self.sound_kat, self.sound_balloon_pop, self.sound_result_transition] - def init_tja(self, song: str, difficulty: int): - self.load_textures() - self.load_sounds() - + def init_tja(self, song: Path, difficulty: int): #Map notes to textures self.note_type_list = [self.textures['lane_syousetsu'][0], self.textures['onp_don'], self.textures['onp_katsu'], @@ -103,26 +103,27 @@ class GameScreen: self.textures['onp_renda_dai'][0], self.textures['onp_renda_dai'][1], self.textures['onp_fusen'][0]] - self.tja = TJAParser(song, start_delay=self.start_delay) - metadata = self.tja.get_metadata() - if hasattr(self.tja, 'bg_movie'): - if Path(self.tja.bg_movie).exists(): - 
self.movie = VideoPlayer(str(Path(self.tja.bg_movie))) - self.movie.set_volume(0.0) + self.tja = TJAParser(song, start_delay=self.start_delay, distance=self.width - GameScreen.JUDGE_X) + if self.tja.metadata.bgmovie != Path() and self.tja.metadata.bgmovie.exists(): + self.movie = VideoPlayer(self.tja.metadata.bgmovie) + self.movie.set_volume(0.0) else: self.movie = None - self.tja.distance = self.width - self.judge_x - session_data.song_title = self.tja.title + session_data.song_title = self.tja.metadata.title.get(get_config()['general']['language'].lower(), self.tja.metadata.title['en']) - self.player_1 = Player(self, 1, difficulty, metadata) - self.song_music = audio.load_sound(str(Path(self.tja.wave))) - self.start_ms = (get_current_ms() - self.tja.offset*1000) + self.player_1 = Player(self, 1, difficulty) + if not hasattr(self, 'song_music'): + self.song_music = audio.load_sound(self.tja.metadata.wave) + audio.normalize_sound(self.song_music, 0.1935) + self.start_ms = (get_current_ms() - self.tja.metadata.offset*1000) def on_screen_start(self): if not self.screen_init: self.screen_init = True + self.load_textures() + self.load_sounds() self.init_tja(global_data.selected_song, session_data.selected_difficulty) - self.song_info = SongInfo(self.tja.title, 'TEST') + self.song_info = SongInfo(session_data.song_title, 'TEST') self.result_transition = None def on_screen_end(self): @@ -130,6 +131,8 @@ class GameScreen: for zip in self.textures: for texture in self.textures[zip]: ray.unload_texture(texture) + audio.unload_sound(self.song_music) + del self.song_music self.song_started = False self.end_ms = 0 self.movie = None @@ -140,7 +143,8 @@ class GameScreen: return with sqlite3.connect('scores.db') as con: cursor = con.cursor() - hash = self.tja.hash_note_data(self.tja.data_to_notes(self.player_1.difficulty)[0]) + notes, _, bars = TJAParser.notes_to_position(TJAParser(self.tja.file_path), self.player_1.difficulty) + hash = self.tja.hash_note_data(notes, bars) check_query = "SELECT score FROM Scores WHERE hash = ? 
LIMIT 1" cursor.execute(check_query, (hash,)) result = cursor.fetchone() @@ -149,8 +153,8 @@ class GameScreen: INSERT OR REPLACE INTO Scores (hash, en_name, jp_name, diff, score, good, ok, bad, drumroll, combo) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?); ''' - data = (hash, self.tja.title, - self.tja.title_ja, self.player_1.difficulty, + data = (hash, self.tja.metadata.title['en'], + self.tja.metadata.title['ja'], self.player_1.difficulty, session_data.result_score, session_data.result_good, session_data.result_ok, session_data.result_bad, session_data.result_total_drumroll, session_data.result_max_combo) @@ -160,10 +164,11 @@ class GameScreen: def update(self): self.on_screen_start() self.current_ms = get_current_ms() - self.start_ms - if (self.current_ms >= self.tja.offset*1000 + self.start_delay - get_config()["general"]["judge_offset"]) and not self.song_started: + if (self.current_ms >= self.tja.metadata.offset*1000 + self.start_delay - get_config()["general"]["judge_offset"]) and not self.song_started: if self.song_music is not None: if not audio.is_sound_playing(self.song_music): audio.play_sound(self.song_music) + print(f"Song started at {self.current_ms}") if self.movie is not None: self.movie.start(get_current_ms()) self.song_started = True @@ -182,15 +187,21 @@ class GameScreen: return self.on_screen_end() elif len(self.player_1.play_notes) == 0: session_data.result_score, session_data.result_good, session_data.result_ok, session_data.result_bad, session_data.result_max_combo, session_data.result_total_drumroll = self.player_1.get_result_score() - self.write_score() session_data.result_gauge_length = self.player_1.gauge.gauge_length if self.end_ms != 0: if get_current_ms() >= self.end_ms + 8533.34: self.result_transition = ResultTransition(self.height) audio.play_sound(self.sound_result_transition) else: + self.write_score() self.end_ms = get_current_ms() + if ray.is_key_pressed(ray.KeyboardKey.KEY_F1): + audio.stop_sound(self.song_music) + self.init_tja(global_data.selected_song, session_data.selected_difficulty) + audio.play_sound(self.sound_restart) + self.song_started = False + def draw(self): if self.movie is not None: self.movie.draw() @@ -207,7 +218,7 @@ class Player: TIMING_OK = 75.0750045776367 TIMING_BAD = 108.441665649414 - def __init__(self, game_screen: GameScreen, player_number: int, difficulty: int, metadata): + def __init__(self, game_screen: GameScreen, player_number: int, difficulty: int): self.player_number = player_number self.difficulty = difficulty @@ -248,7 +259,7 @@ class Player: self.input_log: dict[float, tuple] = dict() - self.gauge = Gauge(self.difficulty, metadata[-1][self.difficulty][0]) + self.gauge = Gauge(self.difficulty, game_screen.tja.metadata.course_data[self.difficulty].level) self.gauge_hit_effect: list[GaugeHitEffect] = [] self.autoplay_hit_side = 'L' @@ -282,7 +293,7 @@ class Player: for i in range(len(self.current_bars)-1, -1, -1): bar = self.current_bars[i] position = self.get_position(game_screen, bar.hit_ms, bar.pixels_per_frame) - if position < game_screen.judge_x + 650: + if position < GameScreen.JUDGE_X + 650: self.current_bars.pop(i) def play_note_manager(self, game_screen: GameScreen): @@ -335,7 +346,7 @@ class Player: if note.type in {5, 6, 7} and len(self.current_notes_draw) > 1: note = self.current_notes_draw[1] position = self.get_position(game_screen, note.hit_ms, note.pixels_per_frame) - if position < game_screen.judge_x + 650: + if position < GameScreen.JUDGE_X + 650: self.current_notes_draw.pop(0) def note_manager(self, 
game_screen: GameScreen): @@ -420,7 +431,7 @@ class Player: return big = curr_note.type == 3 or curr_note.type == 4 if (curr_note.hit_ms - Player.TIMING_GOOD) <= game_screen.current_ms <= (curr_note.hit_ms + Player.TIMING_GOOD): - self.draw_judge_list.append(Judgement('GOOD', big)) + self.draw_judge_list.append(Judgement('GOOD', big, ms_display=game_screen.current_ms - curr_note.hit_ms)) self.lane_hit_effect = LaneHitEffect('GOOD') self.good_count += 1 self.score += self.base_score @@ -428,14 +439,14 @@ class Player: self.note_correct(game_screen, curr_note) elif (curr_note.hit_ms - Player.TIMING_OK) <= game_screen.current_ms <= (curr_note.hit_ms + Player.TIMING_OK): - self.draw_judge_list.append(Judgement('OK', big)) + self.draw_judge_list.append(Judgement('OK', big, ms_display=game_screen.current_ms - curr_note.hit_ms)) self.ok_count += 1 self.score += 10 * math.floor(self.base_score / 2 / 10) self.base_score_list.append(ScoreCounterAnimation(10 * math.floor(self.base_score / 2 / 10))) self.note_correct(game_screen, curr_note) elif (curr_note.hit_ms - Player.TIMING_BAD) <= game_screen.current_ms <= (curr_note.hit_ms + Player.TIMING_BAD): - self.draw_judge_list.append(Judgement('BAD', big)) + self.draw_judge_list.append(Judgement('BAD', big, ms_display=game_screen.current_ms - curr_note.hit_ms)) self.bad_count += 1 self.combo = 0 self.play_notes.popleft() @@ -484,7 +495,10 @@ class Player: return note = self.play_notes[0] if self.is_drumroll or self.is_balloon: - subdivision_in_ms = game_screen.current_ms // ((60000 * 4 / game_screen.tja.bpm) / 24) + if self.play_notes[0].bpm == 0: + subdivision_in_ms = 0 + else: + subdivision_in_ms = game_screen.current_ms // ((60000 * 4 / self.play_notes[0].bpm) / 24) if subdivision_in_ms > self.last_subdivision: self.last_subdivision = subdivision_in_ms hit_type = 'DON' @@ -522,7 +536,6 @@ class Player: self.check_note(game_screen, type) if len(self.play_notes) > 0: note = self.play_notes[0] - print(note) else: break @@ -596,6 +609,8 @@ class Player: return for bar in reversed(self.current_bars): + if not bar.display: + continue position = self.get_position(game_screen, bar.load_ms, bar.pixels_per_frame) ray.draw_texture(game_screen.note_type_list[bar.type], position+60, 190, ray.WHITE) @@ -603,9 +618,18 @@ class Player: if len(self.current_notes_draw) <= 0: return - eighth_in_ms = (60000 * 4 / game_screen.tja.bpm) / 8 + if len(self.current_bars) > 0: + if self.current_bars[0].bpm == 0: + eighth_in_ms = 0 + else: + eighth_in_ms = (60000 * 4 / self.current_bars[0].bpm) / 8 + else: + if self.current_notes_draw[0].bpm == 0: + eighth_in_ms = 0 + else: + eighth_in_ms = (60000 * 4 / self.current_notes_draw[0].bpm) / 8 current_eighth = 0 - if self.combo >= 50: + if self.combo >= 50 and eighth_in_ms != 0: current_eighth = int((game_screen.current_ms - game_screen.start_ms) // eighth_in_ms) for note in reversed(self.current_notes_draw): @@ -664,10 +688,13 @@ class Player: anim.draw(game_screen) class Judgement: - def __init__(self, type: str, big: bool): + def __init__(self, type: str, big: bool, ms_display: Optional[float]=None): self.type = type self.big = big self.is_finished = False + self.curr_hit_ms = None + if ms_display is not None: + self.curr_hit_ms = str(round(ms_display, 2)) self.fade_animation_1 = Animation.create_fade(132, initial_opacity=0.5, delay=100) self.fade_animation_2 = Animation.create_fade(316 - 233.3, delay=233.3) @@ -696,6 +723,8 @@ class Judgement: ray.draw_texture(textures_1[19], 342, 184, color) ray.draw_texture(textures_2[index+5], 
304, 143, hit_color) ray.draw_texture(textures_2[9], 370, int(y), color) + if self.curr_hit_ms is not None: + ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.YELLOW, self.fade_animation_1.attribute)) elif self.type == 'OK': if self.big: ray.draw_texture(textures_1[20], 342, 184, color) @@ -704,8 +733,12 @@ class Judgement: ray.draw_texture(textures_1[18], 342, 184, color) ray.draw_texture(textures_2[index], 304, 143, hit_color) ray.draw_texture(textures_2[4], 370, int(y), color) + if self.curr_hit_ms is not None: + ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.WHITE, self.fade_animation_1.attribute)) elif self.type == 'BAD': ray.draw_texture(textures_2[10], 370, int(y), color) + if self.curr_hit_ms is not None: + ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.BLUE, self.fade_animation_1.attribute)) class LaneHitEffect: def __init__(self, type: str): @@ -1133,21 +1166,13 @@ class SongInfo: def __init__(self, song_name: str, genre: str): self.song_name = song_name self.genre = genre - - self.font = self._load_font_for_text(song_name) self.song_title = OutlinedText( - self.font, song_name, 40, ray.WHITE, ray.BLACK, outline_thickness=4 + song_name, 40, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5 ) self.fade_in = Animation.create_fade(self.FADE_DURATION, initial_opacity=0.0, final_opacity=1.0) self.fade_out = Animation.create_fade(self.FADE_DURATION, delay=self.DISPLAY_DURATION) self.fade_fake = Animation.create_fade(0, delay=self.DISPLAY_DURATION*2 + self.FADE_DURATION) - def _load_font_for_text(self, text: str) -> ray.Font: - codepoint_count = ray.ffi.new('int *', 0) - unique_codepoints = set(text) - codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count) - return ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0) - def update(self, current_ms: float): self.fade_in.update(current_ms) self.fade_out.update(current_ms) diff --git a/scenes/result.py b/scenes/result.py index ae32c17..64f9c80 100644 --- a/scenes/result.py +++ b/scenes/result.py @@ -29,10 +29,10 @@ class ResultScreen: def load_sounds(self): sounds_dir = Path("Sounds") - self.sound_don = audio.load_sound(str(sounds_dir / "inst_00_don.wav")) - self.sound_kat = audio.load_sound(str(sounds_dir / "inst_00_katsu.wav")) - self.sound_num_up = audio.load_sound(str(sounds_dir / "result" / "SE_RESULT [4].ogg")) - self.bgm = audio.load_sound(str(sounds_dir / "result" / "JINGLE_SEISEKI [1].ogg")) + self.sound_don = audio.load_sound(sounds_dir / "inst_00_don.wav") + self.sound_kat = audio.load_sound(sounds_dir / "inst_00_katsu.wav") + self.sound_num_up = audio.load_sound(sounds_dir / "result" / "SE_RESULT [4].ogg") + self.bgm = audio.load_sound(sounds_dir / "result" / "JINGLE_SEISEKI [1].ogg") def on_screen_start(self): if not self.screen_init: @@ -211,12 +211,7 @@ class FadeIn: class FontText: def __init__(self, text, font_size): - codepoint_count = ray.ffi.new('int *', 0) - codepoints_no_dup = set() - codepoints_no_dup.update(session_data.song_title) - codepoints = ray.load_codepoints(''.join(codepoints_no_dup), codepoint_count) - self.font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0) - self.text = OutlinedText(self.font, str(text), font_size, ray.WHITE, ray.BLACK, outline_thickness=4) + self.text = OutlinedText(str(text), font_size, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5) self.texture = self.text.texture diff --git 
a/scenes/song_select.py b/scenes/song_select.py index e1f9f3a..8cb737c 100644 --- a/scenes/song_select.py +++ b/scenes/song_select.py @@ -1,9 +1,10 @@ -import os import sqlite3 from pathlib import Path +from typing import Optional import pyray as ray +from libs import song_hash from libs.animation import Animation from libs.audio import audio from libs.tja import TJAParser @@ -19,99 +20,138 @@ from libs.utils import ( class SongSelectScreen: BOX_CENTER = 444 - def __init__(self, width: int, height: int): - self.width = width - self.height = height - self.song_name_textures: list[OutlinedText] = [] - self.selected_song = 0 - self.selected_difficulty = 0 - self.song_boxes: list[SongBox] = [] + def __init__(self, screen_width: int, screen_height: int): self.screen_init = False + self.root_dir = get_config()["paths"]["tja_path"] + self.screen_width = screen_width + self.screen_height = screen_height - i = 0 - for dirpath, dirnames, filenames in os.walk(f'{get_config()["paths"]["tja_path"]}'): - for filename in filenames: - if filename.endswith(".tja"): - position = -56 + (100*i) - if position == SongSelectScreen.BOX_CENTER: - position += 150 - elif position > SongSelectScreen.BOX_CENTER: - position += 300 - self.song_boxes.append(SongBox(dirpath, position)) - i += 1 - + self.navigator = FileNavigator(self.root_dir) def load_textures(self): self.textures = load_all_textures_from_zip(Path('Graphics/lumendata/song_select.zip')) + self.textures['custom'] = [ray.load_texture('1.png'), ray.load_texture('2.png')] def load_sounds(self): sounds_dir = Path("Sounds") - self.sound_don = audio.load_sound(str(sounds_dir / "inst_00_don.wav")) - self.sound_kat = audio.load_sound(str(sounds_dir / "inst_00_katsu.wav")) + self.sound_don = audio.load_sound(sounds_dir / "inst_00_don.wav") + self.sound_kat = audio.load_sound(sounds_dir / "inst_00_katsu.wav") + self.sound_skip = audio.load_sound(sounds_dir / 'song_select' / 'Skip.ogg') + #self.sound_select = audio.load_sound(sounds_dir / "song_select.wav") + #self.sound_cancel = audio.load_sound(sounds_dir / "cancel.wav") def on_screen_start(self): if not self.screen_init: self.load_textures() self.load_sounds() + self.selected_song = None + self.selected_difficulty = 0 self.game_transition = None self.move_away = Animation.create_move(float('inf')) self.diff_fade_out = Animation.create_fade(0, final_opacity=1.0) + self.background_move = Animation.create_move(15000, start_position=0, total_distance=1280) + self.state = "BROWSING" self.text_fade_out = None self.text_fade_in = None + self.texture_index = 784 + self.last_texture_index = 784 + self.background_fade_change = None + self.demo_song = None + for item in self.navigator.items: + item.box.reset() + self.navigator.get_current_item().box.get_scores() self.screen_init = True - self.is_difficulty_select = False - self.background_move = Animation.create_move(15000, start_position=0, total_distance=1280) def on_screen_end(self): self.screen_init = False - curr_box = self.song_boxes[0] - for box in self.song_boxes: - if box.is_open: - curr_box = box - box.reset() - global_data.selected_song = curr_box.tja_path + global_data.selected_song = self.navigator.get_current_item().path session_data.selected_difficulty = self.selected_difficulty + self.reset_demo_music() for zip in self.textures: for texture in self.textures[zip]: ray.unload_texture(texture) return "GAME" - def update_song_select(self): - if ray.is_key_pressed(ray.KeyboardKey.KEY_ENTER): - audio.play_sound(self.sound_don) - self.move_away = 
Animation.create_move(233, total_distance=500) - self.diff_fade_out = Animation.create_fade(83) - elif ray.is_key_pressed(ray.KeyboardKey.KEY_LEFT): - audio.play_sound(self.sound_kat) - for box in self.song_boxes: - box.move_left() - elif ray.is_key_pressed(ray.KeyboardKey.KEY_RIGHT): - audio.play_sound(self.sound_kat) - for box in self.song_boxes: - box.move_right() + def reset_demo_music(self): + if self.demo_song is not None: + audio.stop_music_stream(self.demo_song) + audio.unload_music_stream(self.demo_song) + self.demo_song = None + self.navigator.get_current_item().box.wait = get_current_ms() + def handle_input(self): + if self.state == "BROWSING": + # Up/Down navigation + keys = get_config()["keybinds"]["left_kat"] + for key in keys: + if ray.is_key_pressed(ord(key)): + self.reset_demo_music() + self.navigator.navigate_left() + audio.play_sound(self.sound_kat) - def update_difficulty_select(self): - if ray.is_key_pressed(ray.KeyboardKey.KEY_ENTER): - if self.selected_difficulty == -1: - self.is_difficulty_select = False - self.move_away = Animation.create_move(float('inf')) - self.diff_fade_out = Animation.create_fade(0, final_opacity=1.0) - self.text_fade_out = None - self.text_fade_in = None - for box in self.song_boxes: - if box.yellow_box is not None: - box.yellow_box.reset_animations() - else: - audio.play_sound(self.sound_don) - self.game_transition = Transition(self.height) - elif ray.is_key_pressed(ray.KeyboardKey.KEY_LEFT): - audio.play_sound(self.sound_kat) - if self.selected_difficulty >= 0: - self.selected_difficulty = (self.selected_difficulty - 1) - elif ray.is_key_pressed(ray.KeyboardKey.KEY_RIGHT): - audio.play_sound(self.sound_kat) - if self.selected_difficulty < 4: - self.selected_difficulty = (self.selected_difficulty + 1) + keys = get_config()["keybinds"]["right_kat"] + for key in keys: + if ray.is_key_pressed(ord(key)): + self.reset_demo_music() + self.navigator.navigate_right() + audio.play_sound(self.sound_kat) + if ray.is_key_pressed(ray.KeyboardKey.KEY_LEFT_CONTROL): + self.reset_demo_music() + self.wait = get_current_ms() + for i in range(10): + self.navigator.navigate_left() + audio.play_sound(self.sound_skip) + + if ray.is_key_pressed(ray.KeyboardKey.KEY_RIGHT_CONTROL): + self.reset_demo_music() + for i in range(10): + self.navigator.navigate_right() + audio.play_sound(self.sound_skip) + + # Select/Enter + keys = get_config()["keybinds"]["left_don"] + get_config()["keybinds"]["right_don"] + for key in keys: + if ray.is_key_pressed(ord(key)): + selected_item = self.navigator.items[self.navigator.selected_index] + if selected_item is not None and selected_item.box.name == "Back": + self.navigator.go_back() + #audio.play_sound(self.sound_cancel) + else: + selected_song = self.navigator.select_current_item() + if selected_song: + self.state = "SONG_SELECTED" + audio.play_sound(self.sound_don) + self.move_away = Animation.create_move(233, total_distance=500) + self.diff_fade_out = Animation.create_fade(83) + + elif self.state == "SONG_SELECTED": + # Handle song selection confirmation or cancel + keys = get_config()["keybinds"]["left_don"] + get_config()["keybinds"]["right_don"] + for key in keys: + if ray.is_key_pressed(ord(key)): + if self.selected_difficulty == -1: + self.selected_song = None + self.move_away = Animation.create_move(float('inf')) + self.diff_fade_out = Animation.create_fade(0, final_opacity=1.0) + self.text_fade_out = None + self.text_fade_in = None + self.state = "BROWSING" + for item in self.navigator.items: + item.box.reset() + else: + 
audio.play_sound(self.sound_don) + self.game_transition = Transition(self.screen_height) + keys = get_config()["keybinds"]["left_kat"] + for key in keys: + if ray.is_key_pressed(ord(key)): + audio.play_sound(self.sound_kat) + if self.selected_difficulty >= 0: + self.selected_difficulty = (self.selected_difficulty - 1) + keys = get_config()["keybinds"]["right_kat"] + for key in keys: + if ray.is_key_pressed(ord(key)): + audio.play_sound(self.sound_kat) + if self.selected_difficulty < 4: + self.selected_difficulty = (self.selected_difficulty + 1) def update(self): self.on_screen_start() @@ -119,40 +159,55 @@ class SongSelectScreen: self.background_move = Animation.create_move(15000, start_position=0, total_distance=1280) self.background_move.update(get_current_ms()) - if self.move_away.is_finished and self.text_fade_out is None: - self.text_fade_out = Animation.create_fade(33) - self.text_fade_in = Animation.create_fade(33, initial_opacity=0.0, final_opacity=1.0, delay=self.text_fade_out.duration) - self.move_away.update(get_current_ms()) - - if self.text_fade_out is not None: - self.text_fade_out.update(get_current_ms()) - if self.text_fade_out.is_finished: - self.is_difficulty_select = True - - if self.text_fade_in is not None: - self.text_fade_in.update(get_current_ms()) - - self.diff_fade_out.update(get_current_ms()) - - if self.is_difficulty_select: - self.update_difficulty_select() - else: - self.update_song_select() - for box in self.song_boxes: - box.update(self.is_difficulty_select) - if self.game_transition is not None: self.game_transition.update(get_current_ms()) if self.game_transition.is_finished: return self.on_screen_end() + self.handle_input() - def draw_song_select(self): - for box in self.song_boxes: - if box.position <= 500: - box.draw(box.position - int(self.move_away.attribute), 95, 620, self.textures, int(self.diff_fade_out.attribute)) - else: - box.draw(box.position + int(self.move_away.attribute), 95, 620, self.textures, int(self.diff_fade_out.attribute)) + if self.demo_song is not None: + audio.update_music_stream(self.demo_song) + + if self.background_fade_change is None: + self.last_texture_index = self.texture_index + for song in self.navigator.items: + song.box.update(self.state == "SONG_SELECTED") + song.box.is_open = song.box.position == SongSelectScreen.BOX_CENTER + 150 + if not isinstance(song, Directory) and song.box.is_open: + if self.demo_song is None and get_current_ms() >= song.box.wait + (83.33*3): + song.box.get_scores() + self.demo_song = audio.load_music_stream(song.tja.metadata.wave) + audio.normalize_music_stream(self.demo_song, 0.1935) + audio.seek_music_stream(self.demo_song, song.tja.metadata.demostart) + audio.play_music_stream(self.demo_song) + if song.box.is_open: + current_box = song.box + if current_box.texture_index != 552 and get_current_ms() >= song.box.wait + (83.33*3): + self.texture_index = SongBox.BACKGROUND_MAP[current_box.texture_index] + + if self.last_texture_index != self.texture_index and self.background_fade_change is None: + self.background_fade_change = Animation.create_fade(200) + + self.move_away.update(get_current_ms()) + self.diff_fade_out.update(get_current_ms()) + + if self.background_fade_change is not None: + self.background_fade_change.update(get_current_ms()) + if self.background_fade_change.is_finished: + self.background_fade_change = None + + if self.move_away.is_finished and self.text_fade_out is None: + self.text_fade_out = Animation.create_fade(33) + self.text_fade_in = Animation.create_fade(33, 
initial_opacity=0.0, final_opacity=1.0, delay=self.text_fade_out.duration) + + if self.text_fade_out is not None: + self.text_fade_out.update(get_current_ms()) + if self.text_fade_out.is_finished: + self.selected_song = True + + if self.text_fade_in is not None: + self.text_fade_in.update(get_current_ms()) def draw_selector(self): if self.selected_difficulty == -1: @@ -161,137 +216,321 @@ class SongSelectScreen: ray.draw_texture(self.textures['song_select'][140], 450 + (self.selected_difficulty * 115), 7, ray.WHITE) ray.draw_texture(self.textures['song_select'][131], 461 + (self.selected_difficulty * 115), 132, ray.WHITE) - def draw_difficulty_select(self): - for box in self.song_boxes: - if box.is_open: - box.draw(box.position, 95, 620, self.textures, int(self.diff_fade_out.attribute)) - self.draw_selector() - def draw(self): - texture = self.textures['song_select'][784] + # Draw file/directory list + texture_back = self.textures['song_select'][self.last_texture_index] + texture = self.textures['song_select'][self.texture_index] for i in range(0, texture.width * 4, texture.width): - ray.draw_texture(self.textures['song_select'][784], i - int(self.background_move.attribute), 0, ray.WHITE) - if self.is_difficulty_select: - self.draw_difficulty_select() + if self.background_fade_change is not None: + color = ray.fade(ray.WHITE, self.background_fade_change.attribute) + ray.draw_texture(texture_back, i - int(self.background_move.attribute), 0, color) + reverse_color = ray.fade(ray.WHITE, 1 - self.background_fade_change.attribute) + ray.draw_texture(texture, i - int(self.background_move.attribute), 0, reverse_color) + else: + ray.draw_texture(texture, i - int(self.background_move.attribute), 0, ray.WHITE) + + for item in self.navigator.get_items(): + box = item.box + if -156 <= box.position <= self.screen_width + 144: + if box.position <= 500: + box.draw(box.position - int(self.move_away.attribute), 95, self.textures, self.diff_fade_out.attribute) + else: + box.draw(box.position + int(self.move_away.attribute), 95, self.textures, self.diff_fade_out.attribute) + + if self.selected_song and self.state == "SONG_SELECTED": + self.draw_selector() fade = ray.WHITE if self.text_fade_in is not None: fade = ray.fade(ray.WHITE, self.text_fade_in.attribute) ray.draw_texture(self.textures['song_select'][192], 5, 5, fade) else: - self.draw_song_select() fade = ray.WHITE if self.text_fade_out is not None: fade = ray.fade(ray.WHITE, self.text_fade_out.attribute) ray.draw_texture(self.textures['song_select'][244], 5, 5, fade) - ray.draw_texture(self.textures['song_select'][394], 0, self.height - self.textures['song_select'][394].height, ray.WHITE) + + ray.draw_texture(self.textures['song_select'][394], 0, self.screen_height - self.textures['song_select'][394].height, ray.WHITE) if self.game_transition is not None: - self.game_transition.draw(self.height) + self.game_transition.draw(self.screen_height) + class SongBox: - def __init__(self, tja_path: str, position: int): - self.tja_path = tja_path + OUTLINE_MAP = { + 555: ray.Color(0, 77, 104, 255), + 560: ray.Color(156, 64, 2, 255), + 565: ray.Color(153, 4, 46, 255), + 570: ray.Color(60, 104, 0, 255), + 575: ray.Color(134, 88, 0, 255), + 580: ray.Color(79, 40, 134, 255), + 585: ray.Color(148, 24, 0, 255), + 615: ray.Color(84, 101, 126, 255) + } + FOLDER_HEADER_MAP = { + 555: 643, + 560: 645, + 565: 647, + 570: 649, + 575: 651, + 580: 653, + 585: 655, + 615: 667, + 620: 670 + } + FULL_FOLDER_HEADER_MAP = { + 555: 736, + 560: 738, + 565: 740, + 570: 742, + 575: 
744, + 580: 746, + 585: 748, + 615: 760, + 620: 762, + } + BACKGROUND_MAP = { + 555: 772, + 560: 773, + 565: 774, + 570: 775, + 575: 776, + 580: 777, + 585: 778, + 615: 783, + 620: 784 + } + GENRE_CHAR_MAP = { + 555: 507, + 560: 509, + 565: 511, + 570: 513, + 575: 515, + 580: 517, + 585: 519, + 615: 532, + } + def __init__(self, name: str, texture_index: int, is_dir: bool, tja: Optional[TJAParser] = None, tja_count: Optional[int] = None): + self.text_name = name + self.texture_index = texture_index self.scores = dict() - self.position = position - self.start_position = position - tja = TJAParser(tja_path) - self.course_data = tja.get_metadata() - for diff in self.course_data[8].keys(): - self.scores[diff] = self._get_scores(tja, diff) + self.position = -11111 + self.start_position = -1 + self.target_position = -1 self.is_open = False self.name = None + self.black_name = None + self.hori_name = None self.yellow_box = None - self.move = Animation.create_move(0) + self.open_anim = None + self.open_fade = None + self.move = None self.wait = 0 + self.is_dir = is_dir + self.tja_count = tja_count + self.tja_count_text = None + if self.tja_count is not None and self.tja_count != 0: + self.tja_count_text = OutlinedText(str(self.tja_count), 35, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5, horizontal_spacing=1.2) + self.tja = tja + self.hash = dict() self.update(False) def reset(self): - self.yellow_box = YellowBox(self.name) + if self.black_name is not None: + self.yellow_box = YellowBox(self.black_name, self.texture_index == 552, tja=self.tja) + self.open_anim = None + self.open_fade = None - def _load_font_for_text(self, text: str) -> ray.Font: - codepoint_count = ray.ffi.new('int *', 0) - unique_codepoints = set(text) - codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count) - return ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0) + def get_scores(self): + if self.tja is None: + return - def _get_scores(self, tja: TJAParser, difficulty: int): with sqlite3.connect('scores.db') as con: cursor = con.cursor() - hash = tja.hash_note_data(tja.data_to_notes(difficulty)[0]) - check_query = "SELECT score, good, ok, bad FROM Scores WHERE hash = ? 
LIMIT 1" - cursor.execute(check_query, (hash,)) - result = cursor.fetchone() - return result - def move_left(self): - if not self.move.is_finished: - self.position = self.start_position - self.move = Animation.create_move(0) - return - self.start_position = self.position - self.move = Animation.create_move(66.67, start_position=0, total_distance=100) - if self.is_open: - self.move.total_distance = 250 - elif self.position + self.move.total_distance == SongSelectScreen.BOX_CENTER: - self.move.total_distance = 250 - elif SongSelectScreen.BOX_CENTER < self.position + self.move.total_distance < SongSelectScreen.BOX_CENTER + 300: - self.move.total_distance = 400 + diffs_to_compute = [] + for diff in self.tja.metadata.course_data: + if diff not in self.hash: + diffs_to_compute.append(diff) - def move_right(self): - if not self.move.is_finished: - self.position = self.start_position - self.move = Animation.create_move(0) - return - self.start_position = self.position - self.move = Animation.create_move(66.67, start_position=0, total_distance=-100) - if self.is_open: - self.move.total_distance = -250 - elif self.position + (self.move.total_distance - 300) == SongSelectScreen.BOX_CENTER: - self.move.total_distance = -250 - elif SongSelectScreen.BOX_CENTER < self.position + self.move.total_distance < SongSelectScreen.BOX_CENTER + 300: - self.move.total_distance = -400 + if diffs_to_compute: + for diff in diffs_to_compute: + notes, _, bars = self.tja.notes_to_position(diff) + self.hash[diff] = self.tja.hash_note_data(notes, bars) + + # Batch database query for all diffs at once + if self.tja.metadata.course_data: + hash_values = [self.hash[diff] for diff in self.tja.metadata.course_data] + placeholders = ','.join('?' * len(hash_values)) + + batch_query = f""" + SELECT hash, score, good, ok, bad + FROM Scores + WHERE hash IN ({placeholders}) + """ + cursor.execute(batch_query, hash_values) + + hash_to_score = {row[0]: row[1:] for row in cursor.fetchall()} + + for diff in self.tja.metadata.course_data: + diff_hash = self.hash[diff] + self.scores[diff] = hash_to_score.get(diff_hash) def update(self, is_diff_select): self.is_diff_select = is_diff_select if self.yellow_box is not None: self.yellow_box.update(is_diff_select) - self.move.update(get_current_ms()) - self.position = self.start_position + int(self.move.attribute) is_open_prev = self.is_open + if self.position != self.target_position and self.move is None: + if self.position < self.target_position: + direction = 1 + else: + direction = -1 + if abs(self.target_position - self.position) > 250: + direction *= -1 + self.move = Animation.create_move(66.67, start_position=0, total_distance=100 * direction) + if self.is_open or self.target_position == SongSelectScreen.BOX_CENTER + 150: + self.move.total_distance = 250 * direction + self.start_position = self.position + if self.move is not None: + self.move.update(get_current_ms()) + self.position = self.start_position + int(self.move.attribute) + if self.move.is_finished: + self.position = self.target_position + self.move = None self.is_open = self.position == SongSelectScreen.BOX_CENTER + 150 if not is_open_prev and self.is_open: - if self.yellow_box is not None: - self.wait = get_current_ms() + if self.black_name is None: + self.black_name = OutlinedText(self.text_name, 40, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5, vertical=True) + #print(f"loaded black name {self.text_name}") + if self.tja is not None or self.texture_index == 552: + self.yellow_box = 
YellowBox(self.black_name, self.texture_index == 552, tja=self.tja) self.yellow_box.create_anim() + else: + self.hori_name = OutlinedText(self.text_name, 40, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5) + #print(f"loaded hori name {self.text_name}") + self.open_anim = Animation.create_move(133, start_position=0, total_distance=150, delay=83.33) + self.open_fade = Animation.create_fade(200, initial_opacity=0, final_opacity=1.0) + self.wait = get_current_ms() - if self.name is None and 0 <= self.position <= 1280: - name = self.course_data[0] - font = self._load_font_for_text(name) - self.name = OutlinedText(font, name, 40, ray.WHITE, ray.BLACK, outline_thickness=4, vertical=True) - self.yellow_box = YellowBox(self.name) - def _draw_closed(self, x: int, y: int, texture_index: int, textures): - ray.draw_texture(textures['song_select'][texture_index+1], x, y, ray.WHITE) - for i in range(0, textures['song_select'][texture_index].width * 4, textures['song_select'][texture_index].width): - ray.draw_texture(textures['song_select'][texture_index], (x+32)+i, y, ray.WHITE) - ray.draw_texture(textures['song_select'][texture_index+2], x+64, y, ray.WHITE) - ray.draw_texture(textures['song_select'][texture_index+3], x+12, y+16, ray.WHITE) + elif not self.is_open: + if self.black_name is not None: + self.black_name.unload() + self.black_name = None + if self.yellow_box is not None: + self.yellow_box = None + if self.hori_name is not None: + self.hori_name.unload() + self.hori_name = None - if self.name is not None: + if self.open_anim is not None: + self.open_anim.update(get_current_ms()) + if self.open_fade is not None: + self.open_fade.update(get_current_ms()) + + ''' + if self.black_name is None: + self.black_name = OutlinedText(self.text_name, 40, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5, vertical=True) + if self.name is None: + self.name = OutlinedText(self.text_name, 40, ray.Color(255, 255, 255, 255), SongBox.OUTLINE_MAP.get(self.texture_index, ray.Color(101, 0, 82, 255)), outline_thickness=5, vertical=True) + ''' + + if self.name is None and -56 <= self.position <= 1280: + self.name = OutlinedText(self.text_name, 40, ray.Color(255, 255, 255, 255), SongBox.OUTLINE_MAP.get(self.texture_index, ray.Color(101, 0, 82, 255)), outline_thickness=5, vertical=True) + #print(f"loaded {self.text_name}") + elif self.name is not None and (self.position < -56 or self.position > 1280): + self.name.unload() + self.name = None + + + def _draw_closed(self, x: int, y: int, textures): + ray.draw_texture(textures['song_select'][self.texture_index+1], x, y, ray.WHITE) + offset = 0 + if 555 <= self.texture_index <= 600: + offset = 1 + for i in range(0, textures['song_select'][self.texture_index].width * 4, textures['song_select'][self.texture_index].width): + ray.draw_texture(textures['song_select'][self.texture_index], (x+32)+i, y - offset, ray.WHITE) + ray.draw_texture(textures['song_select'][self.texture_index+2], x+64, y, ray.WHITE) + if self.texture_index == 620: + ray.draw_texture(textures['song_select'][self.texture_index+3], x+12, y+16, ray.WHITE) + if self.texture_index != 552 and self.is_dir: + ray.draw_texture(textures['song_select'][SongBox.FOLDER_HEADER_MAP[self.texture_index]], x+4 - offset, y-6, ray.WHITE) + + + if self.texture_index == 552: + ray.draw_texture(textures['song_select'][422], x + 47 - int(textures['song_select'][422].width / 2), y+35, ray.WHITE) + elif self.name is not None: src = ray.Rectangle(0, 0, self.name.texture.width, 
self.name.texture.height) dest = ray.Rectangle(x + 47 - int(self.name.texture.width / 2), y+35, self.name.texture.width, min(self.name.texture.height, 417)) self.name.draw(src, dest, ray.Vector2(0, 0), 0, ray.WHITE) + #ray.draw_text(str(self.position), x, y-25, 25, ray.GREEN) - def draw(self, x: int, y: int, texture_index: int, textures, fade_override=None): + def _draw_open(self, x: int, y: int, textures, fade_override): + if self.open_anim is not None: + color = ray.WHITE + if fade_override is not None: + color = ray.fade(ray.WHITE, fade_override) + if self.hori_name is not None and self.open_anim.attribute >= 100: + texture = textures['song_select'][SongBox.FULL_FOLDER_HEADER_MAP[self.texture_index]] + src = ray.Rectangle(0, 0, texture.width, texture.height) + dest = ray.Rectangle(x-115+48, (y-56) + 150 - int(self.open_anim.attribute), texture.width+220, texture.height) + ray.draw_texture_pro(texture, src, dest, ray.Vector2(0,0), 0, color) + + texture = textures['song_select'][SongBox.FULL_FOLDER_HEADER_MAP[self.texture_index]+1] + src = ray.Rectangle(0, 0, -texture.width, texture.height) + dest = ray.Rectangle(x-115, y-56 + 150 - int(self.open_anim.attribute), texture.width, texture.height) + ray.draw_texture(texture, x+160, y-56 + 150 - int(self.open_anim.attribute), color) + ray.draw_texture_pro(texture, src, dest, ray.Vector2(0,0), 0, color) + + src = ray.Rectangle(0, 0, self.hori_name.texture.width, self.hori_name.texture.height) + dest_width = min(300, self.hori_name.texture.width) + dest = ray.Rectangle((x + 48) - (dest_width//2), y-50 + 150 - int(self.open_anim.attribute), dest_width, self.hori_name.texture.height) + self.hori_name.draw(src, dest, ray.Vector2(0, 0), 0, color) + + + ray.draw_texture(textures['song_select'][self.texture_index+1], x - int(self.open_anim.attribute), y, ray.WHITE) + + offset = 0 + if 555 <= self.texture_index <= 600: + offset = 1 + for i in range(0, textures['song_select'][self.texture_index].width * (5+int(self.open_anim.attribute / 4)), textures['song_select'][self.texture_index].width): + ray.draw_texture(textures['song_select'][self.texture_index], ((x- int(self.open_anim.attribute))+32)+i, y - offset, ray.WHITE) + + ray.draw_texture(textures['song_select'][self.texture_index+2], x+64 + int(self.open_anim.attribute), y, ray.WHITE) + + color = ray.WHITE + if self.texture_index == 620: + ray.draw_texture(textures['song_select'][self.texture_index+4], x+12 - 150, y+16, color) + if fade_override is not None: + color = ray.fade(ray.WHITE, min(0.5, fade_override)) + ray.draw_texture(textures['song_select'][492], 470, 125, color) + + color = ray.WHITE + if fade_override is not None: + color = ray.fade(ray.WHITE, fade_override) + if self.tja_count_text is not None: + ray.draw_texture(textures['song_select'][493], 475, 125, color) + ray.draw_texture(textures['song_select'][494], 600, 125, color) + src = ray.Rectangle(0, 0, self.tja_count_text.texture.width, self.tja_count_text.texture.height) + dest_width = min(124, self.tja_count_text.texture.width) + dest = ray.Rectangle(560 - (dest_width//2), 118, dest_width, self.tja_count_text.texture.height) + self.tja_count_text.draw(src, dest, ray.Vector2(0, 0), 0, color) + if self.texture_index in SongBox.GENRE_CHAR_MAP: + ray.draw_texture(textures['song_select'][SongBox.GENRE_CHAR_MAP[self.texture_index]+1], 650, 125, color) + ray.draw_texture(textures['song_select'][SongBox.GENRE_CHAR_MAP[self.texture_index]], 470, 180, color) + + def draw(self, x: int, y: int, textures, fade_override=None): if self.is_open and 
get_current_ms() >= self.wait + 83.33: if self.yellow_box is not None: self.yellow_box.draw(textures, self, fade_override) + else: + if self.open_fade is not None: + self._draw_open(x, y, textures, self.open_fade.attribute) else: - self._draw_closed(x, y, texture_index, textures) - + self._draw_closed(x, y, textures) class YellowBox: - def __init__(self, name): + def __init__(self, name: OutlinedText, is_back: bool, tja: Optional[TJAParser] = None): self.is_diff_select = False self.right_x = 803 self.left_x = 443 @@ -301,6 +540,8 @@ class YellowBox: self.center_height = 422 self.edge_height = 32 self.name = name + self.is_back = is_back + self.tja = tja self.anim_created = False self.left_out = Animation.create_move(83.33, total_distance=-152, delay=83.33) self.right_out = Animation.create_move(83.33, total_distance=145, delay=83.33) @@ -333,7 +574,7 @@ class YellowBox: self.fade_in = Animation.create_fade(116.67, initial_opacity=0.0, final_opacity=1.0, delay=self.left_out_2.duration + self.top_y_out.duration + 16.67) - def update(self, is_diff_select): + def update(self, is_diff_select: bool): self.left_out.update(get_current_ms()) self.right_out.update(get_current_ms()) self.center_out.update(get_current_ms()) @@ -362,8 +603,7 @@ class YellowBox: self.center_width = 32 + int(self.center_out.attribute) self.center_height = 422 - def draw(self, textures: dict[str, list[ray.Texture]], song_box: SongBox, fade_override): - + def draw(self, textures: dict[str, list[ray.Texture]], song_box: SongBox, fade_override: Optional[float]): # Draw corners ray.draw_texture(textures['song_select'][235], self.right_x, self.bottom_y, ray.WHITE) # Bottom right ray.draw_texture(textures['song_select'][236], self.left_x, self.bottom_y, ray.WHITE) # Bottom left @@ -413,12 +653,10 @@ class YellowBox: ray.draw_texture(textures['song_select'][185], 680, 90, color) ray.draw_texture(textures['song_select'][188], 795, 90, color) - for i in range(4): - try: - for j in range(song_box.course_data[8][i][0]): - ray.draw_texture(textures['song_select'][155], 482+(i*115), 471+(j*-20), color) - except: - pass + if self.tja is not None: + for course in self.tja.metadata.course_data: + for j in range(self.tja.metadata.course_data[course].level): + ray.draw_texture(textures['song_select'][155], 482+(course*115), 471+(j*-20), color) else: #Crowns @@ -426,33 +664,49 @@ class YellowBox: if fade_override is not None: fade = min(self.fade.attribute, fade_override) color = ray.fade(ray.WHITE, fade) - for i in range(4): - if i in song_box.scores and song_box.scores[i] is not None and song_box.scores[i][3] == 0: - ray.draw_texture(textures['song_select'][160], 473 + (i*60), 175, color) - ray.draw_texture(textures['song_select'][158], 473 + (i*60), 175, ray.fade(color, min(fade, 0.25))) + if self.is_back: + ray.draw_texture(textures['song_select'][421], 498, 250, color) + elif self.tja is not None: + for diff in self.tja.metadata.course_data: + if diff in song_box.scores and song_box.scores[diff] is not None and song_box.scores[diff][3] == 0: + ray.draw_texture(textures['song_select'][160], 473 + (diff*60), 175, color) + ray.draw_texture(textures['song_select'][158], 473 + (diff*60), 175, ray.fade(color, min(fade, 0.25))) - #Difficulties - ray.draw_texture(textures['song_select'][395], 458, 210, color) - ray.draw_texture(textures['song_select'][401], 518, 210, color) - ray.draw_texture(textures['song_select'][403], 578, 210, color) - ray.draw_texture(textures['song_select'][406], 638, 210, color) + #EX Data + if 
self.tja.ex_data.new_audio: + ray.draw_texture(textures['custom'][0], 458, 120, color) + elif self.tja.ex_data.old_audio: + ray.draw_texture(textures['custom'][1], 458, 120, color) + elif self.tja.ex_data.limited_time: + ray.draw_texture(textures['song_select'][418], 458, 120, color) - #Stars - for i in range(4): - try: - for j in range(song_box.course_data[8][i][0]): - ray.draw_texture(textures['song_select'][396], 474+(i*60), 490+(j*-17), color) - except: - pass + #Difficulties + ray.draw_texture(textures['song_select'][395], 458, 210, color) + ray.draw_texture(textures['song_select'][401], 518, 210, color) + ray.draw_texture(textures['song_select'][403], 578, 210, color) + ray.draw_texture(textures['song_select'][406], 638, 210, color) - if self.name is not None: + #Stars + for course in self.tja.metadata.course_data: + for j in range(self.tja.metadata.course_data[course].level): + ray.draw_texture(textures['song_select'][396], 474+(course*60), 490+(j*-17), color) + else: + pass + if self.is_back: + texture = textures['song_select'][422] + x = int(((song_box.position + 47) - texture.width / 2) + (int(self.right_out.attribute)*0.85) + (int(self.right_out_2.attribute))) + y = self.top_y+35 + ray.draw_texture(texture, x, y, ray.WHITE) + elif self.name is not None: texture = self.name.texture + x = int(((song_box.position + 47) - texture.width / 2) + (int(self.right_out.attribute)*0.85) + (int(self.right_out_2.attribute))) + y = self.top_y+35 src = ray.Rectangle(0, 0, texture.width, texture.height) - dest = ray.Rectangle(((song_box.position + 47) - texture.width / 2) + (int(self.right_out.attribute)*0.85) + (int(self.right_out_2.attribute)), self.top_y+35, texture.width, min(texture.height, 417)) + dest = ray.Rectangle(x, y, texture.width, min(texture.height, 417)) self.name.draw(src, dest, ray.Vector2(0, 0), 0, ray.WHITE) class Transition: - def __init__(self, screen_height) -> None: + def __init__(self, screen_height: int) -> None: self.is_finished = False self.rainbow_up = Animation.create_move(266, start_position=0, total_distance=screen_height + global_data.textures['scene_change_rainbow'][2].height, ease_in='cubic') self.chara_down = None @@ -465,7 +719,7 @@ class Transition: self.chara_down.update(current_time_ms) self.is_finished = self.chara_down.is_finished - def draw(self, screen_height): + def draw(self, screen_height: int): ray.draw_texture(global_data.textures['scene_change_rainbow'][2], 0, screen_height - int(self.rainbow_up.attribute), ray.WHITE) texture = global_data.textures['scene_change_rainbow'][0] src = ray.Rectangle(0, 0, texture.width, texture.height) @@ -476,3 +730,341 @@ class Transition: if self.chara_down is not None: offset = int(self.chara_down.attribute) ray.draw_texture(texture, 76, 816 - int(self.rainbow_up.attribute) + offset, ray.WHITE) + +class FileSystemItem: + GENRE_MAP = { + 'J-POP': 555, + 'アニメ': 560, + 'どうよう': 565, + 'バラエティー': 570, + 'クラシック': 575, + 'ゲームミュージック': 580, + 'ナムコオリジナル': 585, + 'VOCALOID': 615, + } + """Base class for files and directories in the navigation system""" + def __init__(self, path: Path, name: str): + self.path = path + self.selected = False + + def is_selectable(self): + return True + + +class Directory(FileSystemItem): + """Represents a directory in the navigation system""" + def __init__(self, path: Path, name: str, texture_index: int, has_box_def=False, to_root=False, back=False): + super().__init__(path, name) + self.has_box_def = has_box_def + self.to_root = to_root + self.back = back + if self.to_root or self.back: + 
texture_index = 552 + tja_count = 0 + if self.has_box_def: + tja_count = self.count_tja_files(path) + if (path / "song_list.txt").exists(): + with open(path / "song_list.txt", 'r', encoding='utf-8-sig') as song_list_file: + tja_count += len(song_list_file.readlines()) + self.box = SongBox(name, texture_index, True, tja_count=tja_count) + + def count_tja_files(self, folder_path: Path): + tja_count = 0 + + #print(f"Scanning {folder_path}") + try: + items = folder_path.iterdir() + + for item in items: + item_path = folder_path / item + + if item_path.is_file(): + if item.suffix == '.tja': + tja_count += 1 + #print(f"Found: {item_path}") + + elif item_path.is_dir(): + tja_count += self.count_tja_files(item_path) + + except PermissionError: + print(f"Permission denied accessing '{folder_path}'") + except Exception as e: + print(f"Error accessing '{folder_path}': {e}") + + return tja_count + + def get_display_name(self): + return self.box + + +class SongFile(FileSystemItem): + """Represents a song file (TJA) in the navigation system""" + def __init__(self, path: Path, name: str, texture_index: int): + super().__init__(path, name) + self.tja = TJAParser(path) + title = self.tja.metadata.title.get(get_config()['general']['language'].lower(), self.tja.metadata.title['en']) + self.box = SongBox(title, texture_index, False, tja=self.tja) + + + def get_display_name(self): + return self.box + + +class FileNavigator: + """Manages navigation through the file system""" + def __init__(self, root_dirs: list[str]): + # Handle both single path and list of paths + if isinstance(root_dirs, (list, tuple)): + self.root_dirs = [Path(p) if not isinstance(p, Path) else p for p in root_dirs] + else: + self.root_dirs = [Path(root_dirs) if not isinstance(root_dirs, Path) else root_dirs] + + self.in_root_selection = True # Whether we're showing the root directory selection screen + self.current_dir = Path() + self.current_root_dir = Path() + self.items: list[Directory | SongFile] = [] + self.selected_index = 0 + self.history = [] # For tracking directory navigation history + self.load_root_directories() + + def check_for_box_def(self, dir_path: Path): + """Check if the directory contains a box.def file""" + box_def_path = dir_path / "box.def" + return box_def_path.exists() + + def get_tja_folder_count(self, directory: Path): + return len(self.find_tja_files_recursive(directory)) + + def find_tja_files_recursive(self, directory: Path, box_def_dirs_only=True): + tja_files = [] + + try: + has_box_def = self.check_for_box_def(directory) + if box_def_dirs_only and has_box_def and directory != self.current_dir: + return [] + for path in directory.iterdir(): + if path.is_file() and path.suffix.lower() == ".tja": + tja_files.append(path) + elif path.is_dir(): + sub_dir_has_box_def = self.check_for_box_def(path) + if not sub_dir_has_box_def: + tja_files.extend(self.find_tja_files_recursive(path, box_def_dirs_only)) + except (PermissionError, OSError): + pass + + return tja_files + + def parse_box_def(self, path): + texture_index = 620 + name = path.name + with open(path / "box.def", 'r', encoding='utf-8') as box_def: + for line in box_def: + if line.strip().startswith("#GENRE:"): + texture_index = FileSystemItem.GENRE_MAP[line.split(":")[1].strip()] + if line.strip().startswith("#TITLE:"): + name = line.split(":")[1].strip() + if line.strip().startswith("#TITLEJA:"): + if get_config()['general']['language'] == 'ja': + name = line.split(":")[1].strip() + return name, texture_index + + def calculate_box_positions(self): + 
"""Dynamically calculate box positions based on current selection with wrap-around support""" + if not self.items: + return + + num_items = len(self.items) + + # Calculate positions for each item relative to the selected item + for i, item in enumerate(self.items): + # Calculate the circular distance from selected index + offset = i - self.selected_index + + # Handle wrap-around by choosing the shortest circular distance + if offset > num_items // 2: + offset -= num_items + elif offset < -num_items // 2: + offset += num_items + + # Calculate position based on offset + position = SongSelectScreen.BOX_CENTER + (100 * offset) + + # Apply the same position adjustments as before + if position == SongSelectScreen.BOX_CENTER: + position += 150 + elif position > SongSelectScreen.BOX_CENTER: + position += 300 + else: + position -= 0 + + if item.box.position == -11111: + item.box.position = position + item.box.target_position = position + else: + item.box.target_position = position + + def set_base_positions(self): + """Set initial positions for all items""" + self.calculate_box_positions() + + def load_root_directories(self): + """Load the list of root directories as selectable items""" + self.items = [] + self.in_root_selection = True + self.current_dir = Path() + self.current_root_dir = Path() + + # Create directory items for each root + for root_path in self.root_dirs: + name = root_path.name if root_path.name else str(root_path) + has_box_def = self.check_for_box_def(root_path) + # Only add roots with box.def as directories + if has_box_def: + name, texture_index = self.parse_box_def(root_path) + self.items.append(Directory(root_path, name, texture_index, has_box_def=True)) + else: + # For roots without box.def, add their TJA files directly to the root selection + tja_files = self.find_tja_files_recursive(root_path) + for tja_path in sorted(tja_files): + self.items.append(SongFile(tja_path, tja_path.name, 620)) + + # Reset selection + self.selected_index = 0 if self.items else -1 + + self.calculate_box_positions() + + def load_current_directory(self): + """Load all directories and TJA files in the current directory""" + self.items = [] + self.selected_index = 0 + + if self.current_dir != self.current_root_dir: + self.items.append(Directory(self.current_dir.parent, "", 552, back=True)) + elif not self.in_root_selection: + self.items.append(Directory(Path(), "", 552, to_root=True)) + # Add only directories that contain box.def files + for path in sorted(self.current_dir.iterdir()): + if path.is_dir(): + has_box_def = self.check_for_box_def(path) + if has_box_def: + name, texture_index = self.parse_box_def(path) + self.items.append(Directory(path, name, texture_index, has_box_def=True)) + + tja_files = [] + if (self.current_dir / 'song_list.txt').exists(): + updated_lines = [] + file_updated = False + + with open(self.current_dir / 'song_list.txt', 'r', encoding='utf-8-sig') as song_list: + for line in song_list: + hash, title, subtitle = line.strip().split('|') + original_hash = hash + + if song_hash.song_hashes is not None: + if hash in song_hash.song_hashes: + tja_files.append(Path(song_hash.song_hashes[hash]["file_path"])) + else: + for key, value in song_hash.song_hashes.items(): + if value["title"]["en"] == title and value["subtitle"]["en"][2:] == subtitle and Path(value["file_path"]).exists(): + hash = key + tja_files.append(Path(song_hash.song_hashes[hash]["file_path"])) + break + if hash != original_hash: + file_updated = True + updated_lines.append(f"{hash}|{title}|{subtitle}") + + if 
file_updated: + with open(self.current_dir / 'song_list.txt', 'w', encoding='utf-8-sig') as song_list: + for line in updated_lines: + song_list.write(line + '\n') + + else: + tja_files = self.find_tja_files_recursive(self.current_dir) + + # Then add TJA files found + for i, tja_path in enumerate(sorted(tja_files)): + if i % 10 == 0 and i != 0: + if self.current_dir != self.current_root_dir: + self.items.append(Directory(self.current_dir.parent, "", 552, back=True)) + elif not self.in_root_selection: + self.items.append(Directory(Path(), "", 552, to_root=True)) + texture_index = 620 + _, texture_index = self.parse_box_def(self.current_dir) + self.items.append(SongFile(tja_path, tja_path.name, texture_index)) + + self.calculate_box_positions() + + def navigate_left(self): + """Move selection left with wrap-around""" + if self.items: + self.selected_index = (self.selected_index - 1) % len(self.items) + self.calculate_box_positions() + + def navigate_right(self): + """Move selection right with wrap-around""" + if self.items: + self.selected_index = (self.selected_index + 1) % len(self.items) + self.calculate_box_positions() + + def get_items(self): + """Get visible items on screen - now returns all items since positions are dynamic""" + # With wrap-around, we might want to show all items or filter based on visibility + # For now, return all items since their positions are dynamically calculated + return self.items + + def get_visible_items(self, screen_width=1280): + """Get only the items that would be visible on screen""" + if not self.items: + return [] + + visible_items = [] + center = SongSelectScreen.BOX_CENTER + half_screen = screen_width // 2 + + for item in self.items: + # Check if item's position is within the visible screen area + if abs(item.box.position - center) <= half_screen: + visible_items.append(item) + + return visible_items + + def select_current_item(self): + """Select the currently highlighted item""" + if not self.items or self.selected_index >= len(self.items): + return + + selected_item = self.items[self.selected_index] + + if isinstance(selected_item, Directory): + if selected_item.to_root: + self.load_root_directories() + else: + if self.current_dir is not None: + self.history.append((self.current_dir, self.selected_index, self.in_root_selection, self.current_root_dir)) + self.current_dir = selected_item.path + if self.in_root_selection: + self.current_root_dir = selected_item.path + self.in_root_selection = False + self.selected_index = 0 + self.load_current_directory() + elif isinstance(selected_item, SongFile): + return selected_item + + def go_back(self): + """Navigate back to the previous directory""" + if self.history: + previous_dir, previous_index, previous_in_root, previous_root_dir = self.history.pop() + self.current_dir = previous_dir + self.selected_index = previous_index + self.in_root_selection = previous_in_root + self.current_root_dir = previous_root_dir + self.load_current_directory() + elif not self.in_root_selection: + # If we're not in history but also not in root selection, go back to root selection + self.load_root_directories() + + def get_current_item(self): + """Get the currently selected item""" + if self.items and 0 <= self.selected_index < len(self.items): + return self.items[self.selected_index] + raise Exception() diff --git a/scenes/title.py b/scenes/title.py index 569c792..442fdd7 100644 --- a/scenes/title.py +++ b/scenes/title.py @@ -3,6 +3,7 @@ from pathlib import Path import pyray as ray +from libs import song_hash from 
libs.animation import Animation from libs.audio import audio from libs.utils import ( @@ -19,9 +20,9 @@ class TitleScreen: self.width = width self.height = height video_dir = Path(get_config()["paths"]["video_path"]) / "op_videos" - self.op_video_list = [str(file) for file in video_dir.glob("**/*.mp4")] + self.op_video_list = [file for file in video_dir.glob("**/*.mp4")] video_dir = Path(get_config()["paths"]["video_path"]) / "attract_videos" - self.attract_video_list = [str(file) for file in video_dir.glob("**/*.mp4")] + self.attract_video_list = [file for file in video_dir.glob("**/*.mp4")] self.load_sounds() self.screen_init = False @@ -32,10 +33,10 @@ class TitleScreen: sounds_dir = Path("Sounds") title_dir = sounds_dir / "title" - self.sound_bachi_swipe = audio.load_sound(str(title_dir / "SE_ATTRACT_2.ogg")) - self.sound_bachi_hit = audio.load_sound(str(title_dir / "SE_ATTRACT_3.ogg")) - self.sound_warning_message = audio.load_sound(str(title_dir / "VO_ATTRACT_3.ogg")) - self.sound_warning_error = audio.load_sound(str(title_dir / "SE_ATTRACT_1.ogg")) + self.sound_bachi_swipe = audio.load_sound(title_dir / "SE_ATTRACT_2.ogg") + self.sound_bachi_hit = audio.load_sound(title_dir / "SE_ATTRACT_3.ogg") + self.sound_warning_message = audio.load_sound(title_dir / "VO_ATTRACT_3.ogg") + self.sound_warning_error = audio.load_sound(title_dir / "SE_ATTRACT_1.ogg") self.sounds = [self.sound_bachi_swipe, self.sound_bachi_hit, self.sound_warning_message, self.sound_warning_error] def load_textures(self): @@ -47,6 +48,8 @@ class TitleScreen: self.screen_init = True self.load_textures() + song_hash.song_hashes = song_hash.build_song_hashes() + self.scene = 'Opening Video' self.op_video = VideoPlayer(random.choice(self.op_video_list)) self.attract_video = VideoPlayer(random.choice(self.attract_video_list)) @@ -94,8 +97,10 @@ class TitleScreen: self.on_screen_start() self.scene_manager() - if ray.is_key_pressed(ray.KeyboardKey.KEY_ENTER): - return self.on_screen_end() + keys = get_config()["keybinds"]["left_don"] + get_config()["keybinds"]["right_don"] + for key in keys: + if ray.is_key_pressed(ord(key)): + return self.on_screen_end() def draw(self): if self.scene == 'Opening Video':
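

Two hedged sketches of patterns introduced in this patch, pulled out of the diff for readability; both are illustrative standalone versions under stated assumptions, not code that ships with the project.

First, SongBox.get_scores() now batches the per-difficulty score lookups into a single query with an IN (...) clause instead of issuing one SELECT per chart hash. A minimal sketch, assuming the Scores schema used by GameScreen.write_score() (hash, score, good, ok, bad, ...); the function name and the db_path parameter are illustrative:

    import sqlite3

    def fetch_scores(db_path: str, hashes: list[str]) -> dict[str, tuple]:
        """Return {hash: (score, good, ok, bad)} for every hash in one round trip."""
        if not hashes:
            return {}
        placeholders = ','.join('?' * len(hashes))
        query = f"SELECT hash, score, good, ok, bad FROM Scores WHERE hash IN ({placeholders})"
        with sqlite3.connect(db_path) as con:
            rows = con.execute(query, hashes).fetchall()
        # Hashes with no stored score are simply absent from the result dict.
        return {row[0]: row[1:] for row in rows}

A single round trip per box avoids re-querying once per difficulty, and the diff only triggers it once a box reaches the centre of the carousel, so uncached boxes cost nothing while scrolling.

Second, FileNavigator.calculate_box_positions() lays the song boxes out on a ring centred on the current selection. A self-contained sketch of that offset math, reusing the BOX_CENTER value (444), the 100 px spacing, and the 150/300 px gaps around the opened box from the diff; the helper name is hypothetical:

    BOX_CENTER = 444

    def box_positions(count: int, selected: int) -> list[int]:
        """Return an x position for each of `count` boxes, wrapping around the selection."""
        positions = []
        for i in range(count):
            offset = i - selected
            # Take the shortest way around the ring so scrolling wraps seamlessly.
            if offset > count // 2:
                offset -= count
            elif offset < -count // 2:
                offset += count
            position = BOX_CENTER + 100 * offset
            # Leave extra room on either side of the opened (selected) box.
            if position == BOX_CENTER:
                position += 150
            elif position > BOX_CENTER:
                position += 300
            positions.append(position)
        return positions

In the patch these targets are then animated toward (target_position vs. position in SongBox.update), rather than applied directly, which is what produces the sliding carousel effect.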