the fire alarm went off so I need to commit

Yonokid
2025-06-01 16:08:45 -04:00
parent 2a6278943b
commit 190b8cf352
12 changed files with 1743 additions and 643 deletions

.gitignore vendored
View File

@@ -3,3 +3,8 @@ __pycache__
.venv
.ruff_cache
scores.db
+cache
+pytaiko.build
+pytaiko.dist
+pytaiko.onefile-build
+pytaiko.exe

View File

@@ -55,6 +55,7 @@ def main():
if get_config()["video"]["vsync"]:
ray.set_config_flags(ray.ConfigFlags.FLAG_VSYNC_HINT)
ray.set_config_flags(ray.ConfigFlags.FLAG_MSAA_4X_HINT)
+ray.set_trace_log_level(ray.TraceLogLevel.LOG_ERROR)
ray.set_window_max_size(screen_width, screen_height)
ray.set_window_min_size(screen_width, screen_height)

View File

@@ -1,11 +1,12 @@
[general]
-fps_counter = true
+fps_counter = false
judge_offset = 0
-autoplay = false
+autoplay = true
sfx = true
language = 'ja'
[paths]
-tja_path = 'Songs'
+tja_path = ['E:/Taiko/ESE', 'Songs', 'E:/Taiko/VersionSort']
video_path = 'Videos'
[keybinds]
@@ -15,8 +16,9 @@ right_don = ['J']
right_kat = ['K']
[audio]
-device_type = 'Windows WASAPI'
-buffer_size = 22
+device_type = 'ASIO'
+buffer_size = 6
sample_rate = 48000
exclusive = false
[video]
@@ -24,4 +26,4 @@ screen_width = 1280
screen_height = 720
fullscreen = false
borderless = false
-vsync = true
+vsync = false

View File

@@ -1,10 +1,11 @@
import io
import os
import queue
import time
import wave
from pathlib import Path
from threading import Lock, Thread
from typing import Optional
import soundfile as sf
from numpy import abs as np_abs
from numpy import (
arange,
@@ -15,6 +16,8 @@ from numpy import (
int32,
interp,
mean,
ndarray,
sqrt,
uint8,
zeros,
)
@@ -22,7 +25,6 @@ from numpy import max as np_max
os.environ["SD_ENABLE_ASIO"] = "1"
import sounddevice as sd
from pydub import AudioSegment
from libs.utils import get_config, rounded
@@ -83,8 +85,13 @@ def get_np_array(sample_width, raw_data):
else:
raise ValueError(f"Unsupported sample width: {sample_width}")
def get_average_volume_rms(data):
"""Calculate average volume using RMS method"""
rms = sqrt(mean(data ** 2))
return rms
class Sound:
def __init__(self, file_path, data=None, target_sample_rate=44100):
def __init__(self, file_path: Path, data=None, target_sample_rate=44100):
self.file_path = file_path
self.data = data
self.channels = 0
@@ -94,41 +101,32 @@ class Sound:
self.is_paused = False
self.volume = 1.0
self.pan = 0.5 # 0.0 = left, 0.5 = center, 1.0 = right
self.normalize: Optional[float] = None
if file_path:
self.load()
def load(self):
"""Load and prepare the sound file data"""
if self.file_path.endswith('.ogg'):
audio = AudioSegment.from_ogg(self.file_path)
wav_io = io.BytesIO()
audio.export(wav_io, format="wav")
wav_io.seek(0)
file_path = wav_io
data, original_sample_rate = sf.read(str(self.file_path))
if data.ndim == 1:
self.channels = 1
data = data.reshape(-1, 1)
else:
file_path = self.file_path
with wave.open(file_path, 'rb') as wf:
# Get file properties
self.channels = wf.getnchannels()
sample_width = wf.getsampwidth()
original_sample_rate = wf.getframerate()
frames = wf.getnframes()
self.channels = data.shape[1]
# Read all frames from the file
raw_data = wf.readframes(frames)
data = get_np_array(sample_width, raw_data)
# Reshape for multi-channel audio
if self.channels > 1:
data = data.reshape(-1, self.channels)
# Resample if needed
if original_sample_rate != self.sample_rate:
print(f"Resampling {self.file_path} from {original_sample_rate}Hz to {self.sample_rate}Hz")
data = resample(data, original_sample_rate, self.sample_rate)
if self.normalize is not None:
current_rms = get_average_volume_rms(data)
if current_rms > 0: # Avoid division by zero
target_rms = self.normalize
rms_scale_factor = target_rms / current_rms
data *= rms_scale_factor
self.data = data
def play(self):
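The normalization step above scales samples so a clip's average loudness hits a target RMS; a minimal standalone sketch of the same math (function name hypothetical):

import numpy as np

def normalize_rms(data: np.ndarray, target_rms: float) -> np.ndarray:
    """Scale samples so their RMS matches target_rms; silence is left untouched."""
    current_rms = float(np.sqrt(np.mean(data ** 2)))
    if current_rms > 0:  # avoid division by zero on silent clips
        data = data * (target_rms / current_rms)
    return data

# A full-scale sine has RMS ~0.707, so normalizing to 0.1935 scales it by ~0.27.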
@@ -151,6 +149,12 @@ class Sound:
self.is_playing = True
self.is_paused = False
def normalize_vol(self, rms: float):
self.normalize = rms
if self.data is not None:
self.data = None
self.load()
def get_frames(self, num_frames):
"""Get the next num_frames of audio data, applying volume, pitch, and pan"""
if self.data is None:
@@ -180,7 +184,7 @@ class Sound:
if self.channels == 1:
output = zeros(num_frames, dtype=float32)
output[:frames_to_get] = self.data[self.position:self.position+frames_to_get]
output[:frames_to_get] = self.data[self.position:self.position+frames_to_get].flatten()
else:
output = zeros((num_frames, self.channels), dtype=float32)
output[:frames_to_get] = self.data[self.position:self.position+frames_to_get]
@@ -199,7 +203,7 @@ class Sound:
return output
class Music:
def __init__(self, file_path, data=None, file_type=None, target_sample_rate=44100):
def __init__(self, file_path: Path, data=None, file_type=None, target_sample_rate=44100):
self.file_path = file_path
self.file_type = file_type
self.data = data
@@ -209,12 +213,12 @@ class Music:
self.position = 0 # In frames
self.is_playing = False
self.is_paused = False
self.volume = 1.0
self.volume = 0.75
self.pan = 0.5 # Center
self.total_frames = 0
self.valid = False
self.normalize = None
self.wave_file = None
self.file_buffer_size = int(target_sample_rate * 5) # 5 seconds buffer
self.buffer = None
self.buffer_position = 0
@@ -226,39 +230,31 @@ class Music:
def load_from_file(self):
"""Load music from file"""
if self.file_path.endswith('.ogg'):
audio = AudioSegment.from_ogg(self.file_path)
wav_io = io.BytesIO()
audio.export(wav_io, format="wav")
wav_io.seek(0)
file_path = wav_io
else:
file_path = self.file_path
try:
# Keep the file open for streaming
self.wave_file = wave.open(file_path, 'rb')
# soundfile handles OGG, WAV, FLAC, etc. natively
self.sound_file = sf.SoundFile(str(self.file_path))
# Get file properties
self.channels = self.wave_file.getnchannels()
self.sample_width = self.wave_file.getsampwidth()
self.sample_rate = self.wave_file.getframerate()
self.total_frames = self.wave_file.getnframes()
self.channels = self.sound_file.channels
self.sample_width = 2 if self.sound_file.subtype in ['PCM_16', 'VORBIS'] else 4 # Most common
self.sample_rate = self.sound_file.samplerate
self.total_frames = len(self.sound_file)
# Initialize buffer with some initial data
self._fill_buffer()
self.valid = True
print(f"Music loaded: {self.channels} channels, {self.sample_rate}Hz, {self.total_frames} frames")
except Exception as e:
print(f"Error loading music file: {e}")
if self.wave_file:
self.wave_file.close()
self.wave_file = None
if hasattr(self, 'sound_file') and self.sound_file:
self.sound_file.close()
self.sound_file = None
self.valid = False
def _fill_buffer(self):
"""Fill the streaming buffer from file"""
if not self.wave_file:
if not self.sound_file:
return False
# Read a chunk of frames from file
@@ -267,18 +263,31 @@ class Music:
if frames_to_read <= 0:
return False
raw_data = self.wave_file.readframes(frames_to_read)
# Read data directly as numpy array (float64 by default)
data = self.sound_file.read(frames_to_read)
data = get_np_array(self.sample_width, raw_data)
# Convert to float32 if needed (soundfile returns float64 by default)
if data.dtype != float32:
data = data.astype(float32)
# Reshape for multi-channel audio
if self.channels > 1:
data = data.reshape(-1, self.channels)
# Ensure proper shape for mono audio
if self.channels == 1 and data.ndim == 1:
data = data.reshape(-1, 1)
elif self.channels == 1 and data.ndim == 2:
data = data[:, 0].reshape(-1, 1) # Take first channel if stereo file but expecting mono
# Resample if needed
if self.sample_rate != self.target_sample_rate:
print(f"Resampling {self.file_path} from {self.sample_rate}Hz to {self.target_sample_rate}Hz")
data = resample(data, self.sample_rate, self.target_sample_rate)
if self.normalize is not None:
current_rms = get_average_volume_rms(data)
if current_rms > 0: # Avoid division by zero
target_rms = self.normalize
rms_scale_factor = target_rms / current_rms
data *= rms_scale_factor
self.buffer = data
self.buffer_position = 0
return True
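The move from wave to soundfile lets OGG, WAV, and FLAC all stream through one API instead of the pydub round-trip; a minimal sketch of the block-read pattern _fill_buffer relies on (file name hypothetical):

import soundfile as sf

with sf.SoundFile('song.ogg') as f:            # hypothetical file
    total = len(f)                             # total frames, as used for total_frames
    while True:
        block = f.read(4096, dtype='float32')  # request float32 up front instead of casting from float64
        if len(block) == 0:                    # end of file
            break
        # ... hand `block` to the mixer / streaming buffer ...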
@@ -295,7 +304,7 @@ class Music:
# Check if we need to refill the buffer
if self.buffer is None:
raise Exception("buffer is None")
if self.wave_file and self.buffer_position >= len(self.buffer):
if self.sound_file and self.buffer_position >= len(self.buffer):
if not self._fill_buffer():
self.is_playing = False
@@ -303,8 +312,8 @@ class Music:
"""Start playing the music stream"""
with self.lock:
# Reset position if at the end
if self.wave_file and self.position >= self.total_frames:
self.wave_file.rewind()
if self.sound_file and self.position >= self.total_frames:
self.sound_file.seek(0) # Reset to beginning
self.position = 0
self.buffer_position = 0
self._fill_buffer()
@@ -319,8 +328,8 @@ class Music:
self.is_paused = False
self.position = 0
self.buffer_position = 0
if self.wave_file:
self.wave_file.rewind()
if self.sound_file:
self.sound_file.seek(0) # Reset to beginning
self._fill_buffer()
def pause(self):
@@ -347,8 +356,8 @@ class Music:
frame_position = max(0, min(frame_position, self.total_frames - 1))
# Update file position if streaming from file
if self.wave_file:
self.wave_file.setpos(frame_position)
if self.sound_file:
self.sound_file.seek(frame_position)
self._fill_buffer()
self.position = frame_position
@@ -377,7 +386,7 @@ class Music:
# Check if we need more data
if self.buffer_position >= len(self.buffer):
# If no more data available and streaming from file
if self.wave_file and not self._fill_buffer():
if self.sound_file and not self._fill_buffer():
self.is_playing = False
if self.channels == 1:
return zeros(num_frames, dtype=float32)
@@ -393,7 +402,7 @@ class Music:
if self.channels == 1:
output = zeros(num_frames, dtype=float32)
output[:frames_to_get] = self.buffer[self.buffer_position:self.buffer_position+frames_to_get]
output[:frames_to_get] = self.buffer[self.buffer_position:self.buffer_position+frames_to_get].flatten()
else:
output = zeros((num_frames, self.channels), dtype=float32)
output[:frames_to_get] = self.buffer[self.buffer_position:self.buffer_position+frames_to_get]
@@ -418,9 +427,9 @@ class Music:
def __del__(self):
"""Cleanup when the music object is deleted"""
if self.wave_file:
if hasattr(self, 'sound_file') and self.sound_file:
try:
self.wave_file.close()
self.sound_file.close()
except Exception:
raise Exception("unable to close music stream")
@@ -428,12 +437,12 @@ class AudioEngine:
def __init__(self, type: str):
self.target_sample_rate = 44100
self.buffer_size = 10
self.sounds = {}
self.sounds: dict[str, Sound] = {}
self.music_streams = {}
self.stream = None
self.device_id = None
self.running = False
self.sound_queue = queue.Queue()
self.sound_queue: queue.Queue[str] = queue.Queue()
self.music_queue = queue.Queue()
self.master_volume = 1.0
self.output_channels = 2 # Default to stereo
@@ -532,6 +541,7 @@ class AudioEngine:
sound_data = sound.get_frames(frames)
# If mono sound but stereo output, duplicate to both channels
if isinstance(sound_data, ndarray):
if sound.channels == 1 and self.output_channels > 1:
sound_data = column_stack([sound_data] * self.output_channels)
@@ -655,7 +665,7 @@ class AudioEngine:
def get_master_volume(self) -> float:
return self.master_volume
def load_sound(self, fileName: str) -> str:
def load_sound(self, fileName: Path) -> str:
sound = Sound(fileName, target_sample_rate=self.target_sample_rate)
sound_id = f"sound_{len(self.sounds)}"
self.sounds[sound_id] = sound
@@ -678,6 +688,17 @@ class AudioEngine:
if sound in self.sounds:
self.sounds[sound].resume()
def unload_sound(self, sound: str):
if sound in self.sounds:
del self.sounds[sound]
def normalize_sound(self, sound: str, rms: float):
if sound in self.sounds:
self.sounds[sound].normalize_vol(rms)
def is_sound_valid(self, sound: str) -> bool:
return sound in self.sounds
def is_sound_playing(self, sound: str) -> bool:
if sound in self.sounds:
return self.sounds[sound].is_playing
@@ -691,7 +712,7 @@ class AudioEngine:
if sound in self.sounds:
self.sounds[sound].pan = max(0.0, min(1.0, pan))
def load_music_stream(self, fileName: str) -> str:
def load_music_stream(self, fileName: Path) -> str:
music = Music(file_path=fileName, target_sample_rate=self.target_sample_rate)
music_id = f"music_{len(self.music_streams)}"
self.music_streams[music_id] = music
@@ -744,6 +765,10 @@ class AudioEngine:
if music in self.music_streams:
self.music_streams[music].pan = max(0.0, min(1.0, pan))
def normalize_music_stream(self, music: str, rms: float):
if music in self.music_streams:
self.music_streams[music].normalize = rms
def get_music_time_length(self, music: str) -> float:
if music in self.music_streams:
return self.music_streams[music].get_time_length()
@@ -755,4 +780,3 @@ class AudioEngine:
raise ValueError(f"Music stream {music} not initialized")
audio = AudioEngine(get_config()["audio"]["device_type"])
audio.set_master_volume(0.75)
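The engine above mixes everything inside a PortAudio callback; a minimal sketch of that output pattern with sounddevice (mixing reduced to silence, parameters illustrative):

import numpy as np
import sounddevice as sd

def callback(outdata, frames, time, status):
    # fill the block in-place; a real mixer would sum active Sound/Music frames here
    outdata[:] = np.zeros((frames, 2), dtype=np.float32)

stream = sd.OutputStream(samplerate=44100, channels=2, dtype='float32', callback=callback)
stream.start()  # audio now runs on PortAudio's thread until stream.stop()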

View File

@@ -1,6 +1,5 @@
import hashlib
import math
import os
from collections import deque
from dataclasses import dataclass, field, fields
from pathlib import Path
@@ -8,36 +7,127 @@ from pathlib import Path
from libs.utils import get_pixels_per_frame, strip_comments
@dataclass
@dataclass()
class Note:
type: int = field(init=False)
hit_ms: float = field(init=False)
load_ms: float = field(init=False)
pixels_per_frame: float = field(init=False)
display: bool = field(init=False)
index: int = field(init=False)
bpm: float = field(init=False)
gogo_time: bool = field(init=False)
moji: int = field(init=False)
def __le__(self, other):
return self.hit_ms <= other.hit_ms
def _get_hash_data(self) -> bytes:
"""Get deterministic byte representation for hashing"""
field_values = []
for f in sorted([f.name for f in fields(self)]): # Sort for consistency
value = getattr(self, f, None)
field_values.append((f, value))
field_values.append(('__class__', self.__class__.__name__))
hash_string = str(field_values)
return hash_string.encode('utf-8')
def get_hash(self, algorithm='sha256') -> str:
"""Generate hash of the note"""
hash_obj = hashlib.new(algorithm)
hash_obj.update(self._get_hash_data())
return hash_obj.hexdigest()
def __hash__(self) -> int:
"""Make instances hashable for use in sets/dicts"""
return int(self.get_hash('md5')[:8], 16) # Use first 8 chars of MD5 as int
def __repr__(self):
return str(self.__dict__)
@dataclass
class Drumroll(Note):
_source_note: Note
color: int = field(init=False)
def __repr__(self):
return str(self.__dict__)
def __post_init__(self):
for field_name in [f.name for f in fields(Note)]:
if hasattr(self._source_note, field_name):
setattr(self, field_name, getattr(self._source_note, field_name))
def _get_hash_data(self) -> bytes:
"""Override to include source note and drumroll-specific data"""
field_values = []
for f in sorted([f.name for f in fields(Note)]):
value = getattr(self, f, None)
field_values.append((f, value))
field_values.append(('color', getattr(self, 'color', None)))
field_values.append(('__class__', self.__class__.__name__))
field_values.append(('_source_note_hash', self._source_note.get_hash()))
hash_string = str(field_values)
return hash_string.encode('utf-8')
@dataclass
class Balloon(Note):
_source_note: Note
count: int = field(init=False)
popped: bool = False
def __repr__(self):
return str(self.__dict__)
def __post_init__(self):
for field_name in [f.name for f in fields(Note)]:
if hasattr(self._source_note, field_name):
setattr(self, field_name, getattr(self._source_note, field_name))
def _get_hash_data(self) -> bytes:
"""Override to include source note and balloon-specific data"""
field_values = []
for f in sorted([f.name for f in fields(Note)]):
value = getattr(self, f, None)
field_values.append((f, value))
field_values.append(('count', getattr(self, 'count', None)))
field_values.append(('popped', self.popped))
field_values.append(('__class__', self.__class__.__name__))
field_values.append(('_source_note_hash', self._source_note.get_hash()))
hash_string = str(field_values)
return hash_string.encode('utf-8')
@dataclass
class CourseData:
level: int = 0
balloon: list[int] = field(default_factory=lambda: [])
scoreinit: list[int] = field(default_factory=lambda: [])
scorediff: int = 0
@dataclass
class TJAMetadata:
title: dict[str, str] = field(default_factory= lambda: {'en': ''})
subtitle: dict[str, str] = field(default_factory= lambda: {'en': ''})
genre: str = ''
wave: Path = Path()
demostart: float = 0.0
offset: float = 0.0
bpm: float = 120.0
bgmovie: Path = Path()
movieoffset: float = 0.0
course_data: dict[int, CourseData] = field(default_factory=dict)
@dataclass
class TJAEXData:
new_audio: bool = False
old_audio: bool = False
limited_time: bool = False
new: bool = False
def calculate_base_score(play_note_list: deque[Note | Drumroll | Balloon]) -> int:
total_notes = 0
balloon_num = 0
@@ -60,117 +150,114 @@ def calculate_base_score(play_note_list: deque[Note | Drumroll | Balloon]) -> in
return math.ceil(total_score / 10) * 10
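The trailing math.ceil(total_score / 10) * 10 rounds the base score up to the nearest multiple of 10, for example:

import math

math.ceil(987_654 / 10) * 10  # 987660
math.ceil(987_650 / 10) * 10  # 987650, already a multiple of 10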
class TJAParser:
def __init__(self, path: str, start_delay: int = 0):
#Defined on startup
self.folder_path = Path(path)
self.folder_name = self.folder_path.name
for _, _, files in os.walk(self.folder_path):
for file in files:
if file.endswith('tja'):
self.file_path = self.folder_path / f'{file}'
def __init__(self, path: Path, start_delay: int = 0, distance: int = 866):
self.file_path: Path = path
#Defined on file_to_data()
self.data = []
with open(self.file_path, 'rt', encoding='utf-8-sig') as tja_file:
for line in tja_file:
line = strip_comments(line).strip()
if line != '':
self.data.append(str(line))
lines = self.file_path.read_text(encoding='utf-8-sig').splitlines()
self.data = [cleaned for line in lines
if (cleaned := strip_comments(line).strip())]
#Defined on get_metadata()
self.title = ''
self.title_ja = ''
self.subtitle = ''
self.subtitle_ja = ''
self.wave = self.folder_path / ""
self.offset = 0
self.demo_start = 0
self.course_data = dict()
self.metadata = TJAMetadata()
self.ex_data = TJAEXData()
self.get_metadata()
#Defined in metadata but can change throughout the chart
self.bpm = 120
self.time_signature = 4/4
self.distance = 0
self.scroll_modifier = 1
self.current_ms = start_delay
self.barline_display = True
self.gogo_time = False
self.distance = distance
self.current_ms: float = start_delay
def get_metadata(self):
current_diff = None # Track which difficulty we're currently processing
for item in self.data:
if item[0] == '#':
if item.startswith("#") or item[0].isdigit():
continue
elif 'SUBTITLEJA' in item:
self.subtitle_ja = str(item.split('SUBTITLEJA:')[1])
elif 'TITLEJA' in item:
self.title_ja = str(item.split('TITLEJA:')[1])
elif 'SUBTITLE' in item:
self.subtitle = str(item.split('SUBTITLE:')[1][2:])
elif 'TITLE' in item:
self.title = str(item.split('TITLE:')[1])
elif 'BPM' in item:
self.bpm = float(item.split(':')[1])
elif 'WAVE' in item:
filename = item.split(':')[1].strip()
self.wave = self.folder_path / filename
elif 'OFFSET' in item:
self.offset = float(item.split(':')[1])
elif 'DEMOSTART' in item:
self.demo_start = float(item.split(':')[1])
elif 'BGMOVIE' in item:
self.bg_movie = self.folder_path / item.split(':')[1].strip()
elif 'COURSE' in item:
# Determine which difficulty we're now processing
elif item.startswith('SUBTITLE'):
region_code = 'en'
if item[len('SUBTITLE')] != ':':
region_code = (item[len('SUBTITLE'):len('SUBTITLE')+2]).lower()
self.metadata.subtitle[region_code] = ''.join(item.split(':')[1:])
if '限定' in self.metadata.subtitle[region_code]:
self.ex_data.limited_time = True
elif item.startswith('TITLE'):
region_code = 'en'
if item[len('TITLE')] != ':':
region_code = (item[len('TITLE'):len('TITLE')+2]).lower()
self.metadata.title[region_code] = ''.join(item.split(':')[1:])
elif item.startswith('BPM'):
self.metadata.bpm = float(item.split(':')[1])
elif item.startswith('WAVE'):
self.metadata.wave = self.file_path.parent / item.split(':')[1].strip()
elif item.startswith('OFFSET'):
self.metadata.offset = float(item.split(':')[1])
elif item.startswith('DEMOSTART'):
self.metadata.demostart = float(item.split(':')[1])
elif item.startswith('BGMOVIE'):
self.metadata.bgmovie = self.file_path.parent / item.split(':')[1].strip()
elif item.startswith('MOVIEOFFSET'):
self.metadata.movieoffset = float(item.split(':')[1])
elif item.startswith('COURSE'):
course = str(item.split(':')[1]).lower().strip()
# Map the course string to its corresponding index
if course == 'dan' or course == '6':
if course == '6' or course == 'dan':
current_diff = 6
self.course_data[6] = []
elif course == 'tower' or course == '5':
elif course == '5' or course == 'tower':
current_diff = 5
self.course_data[5] = []
elif course == 'edit' or course == '4':
elif course == '4' or course == 'edit' or course == 'ura':
current_diff = 4
self.course_data[4] = []
elif course == 'oni' or course == '3':
elif course == '3' or course == 'oni':
current_diff = 3
self.course_data[3] = []
elif course == 'hard' or course == '2':
elif course == '2' or course == 'hard':
current_diff = 2
self.course_data[2] = []
elif course == 'normal' or course == '1':
elif course == '1' or course == 'normal':
current_diff = 1
self.course_data[1] = []
elif course == 'easy' or course == '0':
elif course == '0' or course == 'easy':
current_diff = 0
self.course_data[0] = []
# Only process these items if we have a current difficulty
else:
raise Exception(f"Unrecognized COURSE value: {course}")
self.metadata.course_data[current_diff] = CourseData()
elif current_diff is not None:
if 'LEVEL' in item:
level = int(float(item.split(':')[1]))
self.course_data[current_diff].append(level)
elif 'BALLOON' in item:
if item.startswith('LEVEL'):
self.metadata.course_data[current_diff].level = int(float(item.split(':')[1]))
elif item.startswith('BALLOONNOR'):
balloon_data = item.split(':')[1]
if balloon_data == '':
continue
self.course_data[current_diff].append([int(x) for x in balloon_data.split(',')])
elif 'SCOREINIT' in item:
self.metadata.course_data[current_diff].balloon.extend([int(x) for x in balloon_data.split(',')])
elif item.startswith('BALLOONEXP'):
balloon_data = item.split(':')[1]
if balloon_data == '':
continue
self.metadata.course_data[current_diff].balloon.extend([int(x) for x in balloon_data.split(',')])
elif item.startswith('BALLOONMAS'):
balloon_data = item.split(':')[1]
if balloon_data == '':
continue
self.metadata.course_data[current_diff].balloon = ([int(x) for x in balloon_data.split(',')])
elif item.startswith('BALLOON'):
balloon_data = item.split(':')[1]
if balloon_data == '':
continue
self.metadata.course_data[current_diff].balloon = [int(x) for x in balloon_data.split(',')]
elif item.startswith('SCOREINIT'):
score_init = item.split(':')[1]
if score_init == '':
continue
self.course_data[current_diff].append([int(x) for x in score_init.split(',')])
elif 'SCOREDIFF' in item:
self.metadata.course_data[current_diff].scoreinit = [int(x) for x in score_init.split(',')]
elif item.startswith('SCOREDIFF'):
score_diff = item.split(':')[1]
if score_diff == '':
continue
self.course_data[current_diff].append(int(score_diff))
return [self.title, self.title_ja, self.subtitle, self.subtitle_ja,
self.bpm, self.wave, self.offset, self.demo_start, self.course_data]
self.metadata.course_data[current_diff].scorediff = int(score_diff)
for region_code in self.metadata.title:
if '-New Audio-' in self.metadata.title[region_code] or '-新曲-' in self.metadata.title[region_code]:
self.metadata.title[region_code] = self.metadata.title[region_code].replace('-New Audio-', '')
self.metadata.title[region_code] = self.metadata.title[region_code].replace('-新曲-', '')
self.ex_data.new_audio = True
elif '-Old Audio-' in self.metadata.title[region_code] or '-旧曲-' in self.metadata.title[region_code]:
self.metadata.title[region_code] = self.metadata.title[region_code].replace('-Old Audio-', '')
self.metadata.title[region_code] = self.metadata.title[region_code].replace('-旧曲-', '')
self.ex_data.old_audio = True
elif '限定' in self.metadata.title[region_code]:
self.ex_data.limited_time = True
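For reference, get_metadata() walks headers like the following (contents illustrative, not from this repo):

TITLE:Example Song
TITLEJA:サンプル曲
BPM:180
WAVE:example.ogg
OFFSET:-1.2
COURSE:Oni
LEVEL:9
BALLOON:5,10

With this input, metadata.course_data[3].level == 9 and metadata.course_data[3].balloon == [5, 10], since 'oni' maps to difficulty 3.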
def data_to_notes(self, diff):
note_start = -1
@@ -223,9 +310,7 @@ class TJAParser:
if item != line:
notes.append(bar)
bar = []
if len(self.course_data[diff]) < 2:
return notes, None
return notes, self.course_data[diff][1]
return notes
def get_moji(self, play_note_list: deque[Note], ms_per_measure: float) -> None:
se_notes = {
@@ -287,66 +372,91 @@ class TJAParser:
else:
play_note_list[-3].moji = se_notes[play_note_list[-3].moji][2]
def notes_to_position(self, diff):
def notes_to_position(self, diff: int):
play_note_list: deque[Note | Drumroll | Balloon] = deque()
bar_list: deque[Note] = deque()
draw_note_list: deque[Note | Drumroll | Balloon] = deque()
notes, balloon = self.data_to_notes(diff)
balloon_index = 0
notes = self.data_to_notes(diff)
balloon = self.metadata.course_data[diff].balloon.copy()
count = 0
index = 0
time_signature = 4/4
bpm = self.metadata.bpm
scroll_modifier = 1
barline_display = True
gogo_time = False
skip_branch = False
for bar in notes:
#Length of the bar is determined by number of notes excluding commands
bar_length = sum(len(part) for part in bar if '#' not in part)
barline_added = False
for part in bar:
if part.startswith('#BRANCHSTART'):
skip_branch = True
continue
if '#JPOSSCROLL' in part:
continue
elif '#NMSCROLL' in part:
continue
elif '#MEASURE' in part:
divisor = part.find('/')
self.time_signature = float(part[9:divisor]) / float(part[divisor+1:])
time_signature = float(part[9:divisor]) / float(part[divisor+1:])
continue
elif '#SCROLL' in part:
self.scroll_modifier = float(part[7:])
scroll_modifier = float(part[7:])
continue
elif '#BPMCHANGE' in part:
self.bpm = float(part[11:])
bpm = float(part[11:])
continue
elif '#BARLINEOFF' in part:
self.barline_display = False
barline_display = False
continue
elif '#BARLINEON' in part:
self.barline_display = True
barline_display = True
continue
elif '#GOGOSTART' in part:
self.gogo_time = True
gogo_time = True
continue
elif '#GOGOEND' in part:
self.gogo_time = False
gogo_time = False
continue
elif '#LYRIC' in part:
continue
elif part.startswith('#M'):
skip_branch = False
continue
#Unrecognized commands will be skipped for now
elif '#' in part:
elif len(part) > 0 and not part[0].isdigit():
continue
if skip_branch:
continue
if bpm == 0:
ms_per_measure = 0
else:
#https://gist.github.com/KatieFrogs/e000f406bbc70a12f3c34a07303eec8b#measure
ms_per_measure = 60000 * (self.time_signature*4) / self.bpm
ms_per_measure = 60000 * (time_signature*4) / bpm
#Create note object
bar = Note()
bar_line = Note()
#Determines how quickly the notes need to move across the screen to reach the judgment circle in time
bar.pixels_per_frame = get_pixels_per_frame(self.bpm * self.time_signature * self.scroll_modifier, self.time_signature*4, self.distance)
pixels_per_ms = bar.pixels_per_frame / (1000 / 60)
bar_line.pixels_per_frame = get_pixels_per_frame(bpm * time_signature * scroll_modifier, time_signature*4, self.distance)
pixels_per_ms = bar_line.pixels_per_frame / (1000 / 60)
bar.hit_ms = self.current_ms
bar.load_ms = bar.hit_ms - (self.distance / pixels_per_ms)
bar.type = 0
bar_line.hit_ms = self.current_ms
if pixels_per_ms == 0:
bar_line.load_ms = bar_line.hit_ms
else:
bar_line.load_ms = bar_line.hit_ms - (self.distance / pixels_per_ms)
bar_line.type = 0
bar_line.display = barline_display
bar_line.bpm = bpm
if barline_added:
bar_line.display = False
if self.barline_display:
bar_list.append(bar)
bar_list.append(bar_line)
barline_added = True
#Empty bar is still a bar, otherwise start increment
if len(part) == 0:
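A quick sanity check of the measure-length formula above:

# 4/4 at 120 BPM: one beat lasts 500 ms, one measure spans four beats
time_signature = 4 / 4
bpm = 120
ms_per_measure = 60000 * (time_signature * 4) / bpm  # 2000.0 ms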
@@ -355,28 +465,39 @@ class TJAParser:
else:
increment = ms_per_measure / bar_length
for item in (part):
for item in part:
if item == '0':
self.current_ms += increment
continue
note = Note()
note.hit_ms = self.current_ms
if pixels_per_ms == 0:
note.load_ms = note.hit_ms
else:
note.load_ms = note.hit_ms - (self.distance / pixels_per_ms)
note.type = int(item)
note.pixels_per_frame = bar.pixels_per_frame
note.pixels_per_frame = bar_line.pixels_per_frame
note.index = index
note.bpm = bpm
note.gogo_time = gogo_time
note.moji = -1
if item in {'5', '6'}:
note = Drumroll(note)
note.color = 255
elif item in {'7', '9'}:
elif item in {'7'}:
count += 1
if balloon is None:
raise Exception("Balloon note found, but no count was specified")
note = Balloon(note)
note.count = int(balloon[balloon_index])
balloon_index += 1
if not balloon:
note.count = 1
else:
note.count = balloon.pop(0)
elif item == '8':
new_pixels_per_ms = play_note_list[-1].pixels_per_frame / (1000 / 60)
if new_pixels_per_ms == 0:
note.load_ms = note.hit_ms
else:
note.load_ms = note.hit_ms - (self.distance / new_pixels_per_ms)
note.pixels_per_frame = play_note_list[-1].pixels_per_frame
self.current_ms += increment
@@ -385,7 +506,9 @@ class TJAParser:
index += 1
if len(play_note_list) > 3:
if isinstance(play_note_list[-2], Drumroll) and play_note_list[-1].type != 8:
raise Exception(play_note_list[-2])
print(self.file_path, diff)
print(bar)
raise Exception(f"{play_note_list[-2]}")
# https://stackoverflow.com/questions/72899/how-to-sort-a-list-of-dictionaries-by-a-value-of-the-dictionary-in-python
# Sorting by load_ms is necessary for drawing, as some notes appear on the
# screen more slowly regardless of when they reach the judge circle
@@ -394,9 +517,23 @@ class TJAParser:
bar_list = deque(sorted(bar_list, key=lambda b: b.load_ms))
return play_note_list, draw_note_list, bar_list
def hash_note_data(self, notes: list):
def hash_note_data(self, play_notes: deque[Note | Drumroll | Balloon], bars: deque[Note]):
n = hashlib.sha256()
for bar in notes:
for part in bar:
n.update(part.encode('utf-8'))
list1 = list(play_notes)
list2 = list(bars)
merged: list[Note | Drumroll | Balloon] = []
i = 0
j = 0
while i < len(list1) and j < len(list2):
if list1[i] <= list2[j]:
merged.append(list1[i])
i += 1
else:
merged.append(list2[j])
j += 1
merged.extend(list1[i:])
merged.extend(list2[j:])
for item in merged:
n.update(item.get_hash().encode('utf-8'))
return n.hexdigest()
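Note defines __le__ but not __lt__, so the explicit two-pointer merge is written out here; with an explicit key, the standard library's stable merge would produce the same ordering:

import heapq

merged = list(heapq.merge(play_notes, bars, key=lambda n: n.hit_ms))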

View File

@@ -1,3 +1,4 @@
import hashlib
import os
import tempfile
import time
@@ -85,6 +86,8 @@ def strip_comments(code: str) -> str:
return result
def get_pixels_per_frame(bpm: float, time_signature: float, distance: float) -> float:
if bpm == 0:
return 0
beat_duration = 60 / bpm
total_time = time_signature * beat_duration
total_frames = 60 * total_time
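With the zero-BPM guard in place, the rest of the calculation (assuming the function ends by returning distance / total_frames, which this hunk truncates) works out as:

bpm, time_signature, distance = 120.0, 4.0, 866.0
beat_duration = 60 / bpm                     # 0.5 s per beat
total_time = time_signature * beat_duration  # 2.0 s for a note to cross the lane
total_frames = 60 * total_time               # 120 frames at 60 fps
pixels_per_frame = distance / total_frames   # ~7.22 px per frame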
@@ -119,38 +122,164 @@ def reset_session():
@dataclass
class GlobalData:
selected_song: str = '' #Path
selected_song: Path = Path()
textures: dict[str, list[ray.Texture]] = field(default_factory=lambda: dict())
songs_played: int = 0
global_data = GlobalData()
rotation_cache = dict()
char_size_cache = dict()
horizontal_cache = dict()
text_cache = set()
Path('cache/image').mkdir(parents=True, exist_ok=True)
for file in Path('cache/image').iterdir():
text_cache.add(file.stem)
@dataclass
class OutlinedText:
font: ray.Font
text: str
font_size: int
text_color: ray.Color
outline_color: ray.Color
font: ray.Font = ray.Font()
outline_thickness: int = 2
vertical: bool = False
line_spacing: float = 1.0 # Line spacing for vertical text
horizontal_spacing: float = 1.0 # Character spacing for horizontal text
lowercase_spacing_factor: float = 0.85 # Adjust spacing for lowercase letters and whitespace
vertical_chars: set = field(default_factory=lambda: {'-', '|', '/', '\\', ''})
vertical_chars: set = field(default_factory=lambda: {'-', '', '|', '/', '\\', '', '', '~', '', '', '(', ')',
'', '', '[', ']', '', '', '', '', '', '', '', ':', ''})
no_space_chars: set = field(default_factory=lambda: {
'', '','', '','', '','', '','', '',
'', '','', '','', '','', '','', '',
'', '','','','','','','','','','',
'','','','','','',''
})
# New field for horizontal exception strings
horizontal_exceptions: set = field(default_factory=lambda: {'!!!!', '!!!', '!!', '','','!?', '', '??', '', '†††', '(°∀°)', '(°∀°)'})
# New field for adjacent punctuation characters
adjacent_punctuation: set = field(default_factory=lambda: {'.', ',', '', '', "'", '"', '´', '`'})
def __post_init__(self):
# Cache for rotated characters
self._rotation_cache = {}
self._rotation_cache = rotation_cache
# Cache for character measurements
self._char_size_cache = {}
self._char_size_cache = char_size_cache
# Cache for horizontal exception measurements
self._horizontal_cache = horizontal_cache
self.hash = self._get_hash()
self.texture = self._create_texture()
def _load_font_for_text(self, text: str) -> ray.Font:
codepoint_count = ray.ffi.new('int *', 0)
unique_codepoints = set(text)
codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count)
return ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), self.font_size, codepoints, 0)
def _get_hash(self):
n = hashlib.sha256()
n.update(self.text.encode('utf-8'))
n.update(str(self.vertical).encode('utf-8'))
n.update(str(self.horizontal_spacing).encode('utf-8')) # Include horizontal spacing in hash
n.update(str(self.outline_color.a).encode('utf-8'))
n.update(str(self.outline_color.r).encode('utf-8'))
n.update(str(self.outline_color.g).encode('utf-8'))
n.update(str(self.outline_color.b).encode('utf-8'))
n.update(str(self.text_color.a).encode('utf-8'))
n.update(str(self.text_color.r).encode('utf-8'))
n.update(str(self.text_color.g).encode('utf-8'))
n.update(str(self.text_color.b).encode('utf-8'))
n.update(str(self.font_size).encode('utf-8'))
return n.hexdigest()
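That hash keys a disk cache of rendered text (cache/image/<hash>.png), so identical strings skip rasterization on later runs; the lookup pattern, sketched standalone with a hypothetical render callback:

import hashlib
from pathlib import Path

cache_dir = Path('cache/image')
cache_dir.mkdir(parents=True, exist_ok=True)

def render_or_load(text: str, render) -> Path:
    """Return the cached PNG for `text`, rendering only on a miss."""
    key = hashlib.sha256(text.encode('utf-8')).hexdigest()
    png = cache_dir / f'{key}.png'
    if not png.exists():
        render(png)  # hypothetical: export the rasterized text to `png`
    return png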
def _parse_text_segments(self):
"""Parse text into segments, identifying horizontal exceptions"""
if not self.vertical:
return [{'text': self.text, 'is_horizontal': False}]
segments = []
i = 0
current_segment = ""
while i < len(self.text):
# Check if any horizontal exception starts at current position
found_exception = None
for exception in self.horizontal_exceptions:
if self.text[i:].startswith(exception):
found_exception = exception
break
if found_exception:
# Save current segment if it exists
if current_segment:
segments.append({'text': current_segment, 'is_horizontal': False})
current_segment = ""
# Add horizontal exception as separate segment
segments.append({'text': found_exception, 'is_horizontal': True})
i += len(found_exception)
else:
# Add character to current segment
current_segment += self.text[i]
i += 1
# Add remaining segment
if current_segment:
segments.append({'text': current_segment, 'is_horizontal': False})
return segments
def _group_characters_with_punctuation(self, text):
"""Group characters with their adjacent punctuation"""
groups = []
i = 0
while i < len(text):
current_char = text[i]
group = {'main_char': current_char, 'adjacent_punct': []}
# Look ahead for adjacent punctuation
j = i + 1
while j < len(text) and text[j] in self.adjacent_punctuation:
group['adjacent_punct'].append(text[j])
j += 1
groups.append(group)
i = j # Move to next non-punctuation character
return groups
def _get_horizontal_exception_texture(self, text: str, color):
"""Get or create a texture for horizontal exception text"""
cache_key = (text, color.r, color.g, color.b, color.a, 'horizontal')
if cache_key in self._horizontal_cache:
return self._horizontal_cache[cache_key]
# Measure the text
text_size = ray.measure_text_ex(self.font, text, self.font_size, 1.0)
padding = int(self.outline_thickness * 3)
# Create image with proper dimensions
img_width = int(text_size.x + padding * 2)
img_height = int(text_size.y + padding * 2)
temp_image = ray.gen_image_color(img_width, img_height, ray.Color(0, 0, 0, 0))
# Draw the text centered
ray.image_draw_text_ex(
temp_image,
self.font,
text,
ray.Vector2(padding, padding),
self.font_size,
1.0,
color
)
# Cache the image
self._horizontal_cache[cache_key] = temp_image
return temp_image
def _get_char_size(self, char):
"""Cache character size measurements"""
if char not in self._char_size_cache:
@@ -165,8 +294,7 @@ class OutlinedText:
"""Calculate vertical spacing between characters"""
# Check if current char is lowercase, whitespace or a special character
is_spacing_char = (current_char.islower() or
current_char.isspace() or
current_char in self.no_space_chars)
current_char.isspace())
# Additional check for capitalization transition
if next_char and ((current_char.isupper() and next_char.islower()) or
@@ -177,33 +305,27 @@ class OutlinedText:
spacing = self.line_spacing * (self.lowercase_spacing_factor if is_spacing_char else 1.0)
return self.font_size * spacing
def _get_rotated_char(self, char, color):
def _get_rotated_char(self, char: str, color):
"""Get or create a rotated character texture from cache"""
cache_key = (char, color[0], color[1], color[2], color[3])
cache_key = (char, color.r, color.g, color.b, color.a)
if cache_key in self._rotation_cache:
return self._rotation_cache[cache_key]
char_size = self._get_char_size(char)
# For rotated text, we need extra padding to prevent cutoff
padding = max(int(self.font_size * 0.2), 2) # Add padding proportional to font size
temp_width = int(char_size.y) + padding * 2
temp_height = int(char_size.x) + padding * 2
# Create a temporary image with padding to ensure characters aren't cut off
padding = int(self.outline_thickness * 3) # Increased padding
temp_width = max(int(char_size.y) + padding, self.font_size + padding)
temp_height = max(int(char_size.x) + padding, self.font_size + padding)
temp_image = ray.gen_image_color(temp_width, temp_height, ray.Color(0, 0, 0, 0))
# Calculate centering offsets
x_offset = padding
y_offset = padding
center_x = (temp_width - char_size.y) // 2
center_y = (temp_height - char_size.x) // 2
# Draw the character centered in the temporary image
ray.image_draw_text_ex(
temp_image,
self.font,
char,
ray.Vector2(x_offset-5, y_offset),
ray.Vector2(center_x-5, center_y), # Centered placement with padding
self.font_size,
1.0,
color
@@ -223,61 +345,105 @@ class OutlinedText:
self._rotation_cache[cache_key] = rotated_image
return rotated_image
def _calculate_horizontal_text_width(self):
"""Calculate the total width of horizontal text with custom spacing"""
if not self.text:
return 0
total_width = 0
for i, char in enumerate(self.text):
char_size = ray.measure_text_ex(self.font, char, self.font_size, 1.0)
total_width += char_size.x
# Add spacing between characters (except for the last character)
if i < len(self.text) - 1:
total_width += (char_size.x * (self.horizontal_spacing - 1.0))
return total_width
def _calculate_dimensions(self):
"""Calculate dimensions based on orientation"""
padding = int(self.outline_thickness * 3)
if not self.vertical:
# Horizontal text
if self.horizontal_spacing == 1.0:
# Use default raylib measurement for normal spacing
text_size = ray.measure_text_ex(self.font, self.text, self.font_size, 1.0)
# Add extra padding to prevent cutoff
extra_padding = max(int(self.font_size * 0.15), 2)
width = int(text_size.x + self.outline_thickness * 4 + extra_padding * 2)
height = int(text_size.y + self.outline_thickness * 4 + extra_padding * 2)
padding_x = self.outline_thickness * 2 + extra_padding
padding_y = self.outline_thickness * 2 + extra_padding
return width, height, padding_x, padding_y
return int(text_size.x + padding * 2), int(text_size.y + padding * 2)
else:
# For vertical text, pre-calculate all character heights and widths
# Calculate custom spacing width
text_width = self._calculate_horizontal_text_width()
text_height = ray.measure_text_ex(self.font, "Ag", self.font_size, 1.0).y # Use sample chars for height
return int(text_width + padding * 2), int(text_height + padding * 2)
else:
# Parse text into segments
segments = self._parse_text_segments()
char_heights = []
char_widths = []
for i, char in enumerate(self.text):
next_char = self.text[i+1] if i+1 < len(self.text) else None
char_heights.append(self._calculate_vertical_spacing(char, next_char))
for segment in segments:
if segment['is_horizontal']:
# For horizontal exceptions, add their height as spacing
text_size = ray.measure_text_ex(self.font, segment['text'], self.font_size, 1.0)
char_heights.append(text_size.y * self.line_spacing)
char_widths.append(text_size.x)
else:
# Process vertical text with character grouping
char_groups = self._group_characters_with_punctuation(segment['text'])
for i, group in enumerate(char_groups):
main_char = group['main_char']
adjacent_punct = group['adjacent_punct']
# Get next group's main character for spacing calculation
next_char = char_groups[i+1]['main_char'] if i+1 < len(char_groups) else None
char_heights.append(self._calculate_vertical_spacing(main_char, next_char))
# Calculate width considering main char + adjacent punctuation
main_char_size = self._get_char_size(main_char)
group_width = main_char_size.x
# Add width for adjacent punctuation
for punct in adjacent_punct:
punct_size = self._get_char_size(punct)
group_width += punct_size.x
# For vertical characters, consider rotated dimensions
if char in self.vertical_chars:
# Use padded width for rotated characters
padding = max(int(self.font_size * 0.2), 2) * 2
char_widths.append(self._get_char_size(char).x + padding)
if main_char in self.vertical_chars:
char_widths.append(group_width + padding)
else:
char_widths.append(self._get_char_size(char).x)
char_widths.append(group_width)
max_char_width = max(char_widths) if char_widths else 0
total_height = sum(char_heights) if char_heights else 0
# Add extra padding for vertical text
extra_padding = max(int(self.font_size * 0.15), 2)
width = int(max_char_width + self.outline_thickness * 4 + extra_padding * 2)
height = int(total_height + self.outline_thickness * 4 + extra_padding * 2)
padding_x = self.outline_thickness * 2 + extra_padding
padding_y = self.outline_thickness * 2 + extra_padding
width = int(max_char_width + padding * 2) # Padding on both sides
height = int(total_height + padding * 2) # Padding on top and bottom
return width, height, padding_x, padding_y
return width, height
def _draw_horizontal_text(self, image):
if self.horizontal_spacing == 1.0:
# Use original method for normal spacing
text_size = ray.measure_text_ex(self.font, self.text, self.font_size, 1.0)
position = ray.Vector2((image.width - text_size.x) / 2, (image.height - text_size.y) / 2)
def _draw_horizontal_text(self, image, padding_x, padding_y):
"""Draw horizontal text with outline"""
# Draw outline
for dx in range(-self.outline_thickness, self.outline_thickness + 1):
for dy in range(-self.outline_thickness, self.outline_thickness + 1):
# Skip the center position (will be drawn as main text)
if dx == 0 and dy == 0:
continue
# Calculate outline distance
dist = (dx*dx + dy*dy) ** 0.5
# Only draw outline positions that are near the outline thickness
if dist <= self.outline_thickness + 0.5:
ray.image_draw_text_ex(
image,
self.font,
self.text,
ray.Vector2(padding_x + dx, padding_y + dy),
ray.Vector2(position.x + dx, position.y + dy),
self.font_size,
1.0,
self.outline_color
@@ -288,54 +454,180 @@ class OutlinedText:
image,
self.font,
self.text,
ray.Vector2(padding_x, padding_y),
position,
self.font_size,
1.0,
self.text_color
)
def _draw_vertical_text(self, image, width, padding_x, padding_y):
"""Draw vertical text with outline"""
# Precalculate positions and spacings to avoid redundant calculations
positions = []
current_y = padding_y
for i, char in enumerate(self.text):
char_size = self._get_char_size(char)
char_height = self._calculate_vertical_spacing(
char,
self.text[i+1] if i+1 < len(self.text) else None
)
# Calculate center position for each character
if char in self.vertical_chars:
# For vertical characters, we need to use the rotated image dimensions
rotated_img = self._get_rotated_char(char, self.text_color)
char_width = rotated_img.width
center_offset = (width - char_width) // 2
else:
char_width = char_size.x
center_offset = (width - char_width) // 2
# Draw text with custom character spacing
text_width = self._calculate_horizontal_text_width()
text_height = ray.measure_text_ex(self.font, "Ag", self.font_size, 1.0).y
positions.append((char, center_offset, current_y, char_height, char in self.vertical_chars))
current_y += char_height
start_x = (image.width - text_width) / 2
start_y = (image.height - text_height) / 2
# First draw all outlines
current_x = start_x
for i, char in enumerate(self.text):
char_size = ray.measure_text_ex(self.font, char, self.font_size, 1.0)
for dx in range(-self.outline_thickness, self.outline_thickness + 1):
for dy in range(-self.outline_thickness, self.outline_thickness + 1):
if dx == 0 and dy == 0:
continue
for char, center_offset, y_pos, _, is_vertical in positions:
if is_vertical:
rotated_img = self._get_rotated_char(char, self.outline_color)
dist = (dx*dx + dy*dy) ** 0.5
if dist <= self.outline_thickness + 0.5:
ray.image_draw_text_ex(
image,
self.font,
char,
ray.Vector2(current_x + dx, start_y + dy),
self.font_size,
1.0,
self.outline_color
)
# Move to next character position
current_x += char_size.x
if i < len(self.text) - 1: # Add spacing except for last character
current_x += (char_size.x * (self.horizontal_spacing - 1.0))
# Then draw all main text
current_x = start_x
for i, char in enumerate(self.text):
char_size = ray.measure_text_ex(self.font, char, self.font_size, 1.0)
ray.image_draw_text_ex(
image,
self.font,
char,
ray.Vector2(current_x, start_y),
self.font_size,
1.0,
self.text_color
)
# Move to next character position
current_x += char_size.x
if i < len(self.text) - 1: # Add spacing except for last character
current_x += (char_size.x * (self.horizontal_spacing - 1.0))
def _draw_vertical_text(self, image, width):
padding = int(self.outline_thickness * 2)
segments = self._parse_text_segments()
positions = []
current_y = padding # Start with padding at the top
for segment in segments:
if segment['is_horizontal']:
# Handle horizontal exception
text_size = ray.measure_text_ex(self.font, segment['text'], self.font_size, 1.0)
center_offset = (width - text_size.x) // 2
char_height = text_size.y * self.line_spacing
positions.append({
'type': 'horizontal',
'text': segment['text'],
'x': center_offset,
'y': current_y,
'height': char_height
})
current_y += char_height
else:
# Handle vertical text with character grouping
char_groups = self._group_characters_with_punctuation(segment['text'])
for i, group in enumerate(char_groups):
main_char = group['main_char']
adjacent_punct = group['adjacent_punct']
# Get next group for spacing calculation
next_char = char_groups[i+1]['main_char'] if i+1 < len(char_groups) else None
char_height = self._calculate_vertical_spacing(main_char, next_char)
# Calculate positioning for main character
main_char_size = self._get_char_size(main_char)
if main_char in self.vertical_chars:
rotated_img = self._get_rotated_char(main_char, self.text_color)
main_char_width = rotated_img.width
center_offset = (width - main_char_width) // 2
else:
main_char_width = main_char_size.x
center_offset = (width - main_char_width) // 2
# Add main character position
positions.append({
'type': 'vertical',
'char': main_char,
'x': center_offset,
'y': current_y,
'height': char_height,
'is_vertical_char': main_char in self.vertical_chars
})
# Add adjacent punctuation positions
punct_x_offset = center_offset + main_char_width
for punct in adjacent_punct:
punct_size = self._get_char_size(punct)
positions.append({
'type': 'vertical',
'char': punct,
'x': punct_x_offset,
'y': current_y+5,
'height': 0, # No additional height for punctuation
'is_vertical_char': punct in self.vertical_chars,
'is_adjacent': True
})
punct_x_offset += punct_size.x
current_y += char_height
# First draw all outlines
outline_thickness = int(self.outline_thickness)
for pos in positions:
if pos['type'] == 'horizontal':
# Draw horizontal text outline
for dx in range(-outline_thickness, outline_thickness + 1):
for dy in range(-outline_thickness, outline_thickness + 1):
if dx == 0 and dy == 0:
continue
dist = (dx*dx + dy*dy) ** 0.5
if dist <= outline_thickness + 0.5:
ray.image_draw_text_ex(
image,
self.font,
pos['text'],
ray.Vector2(pos['x'] + dx, pos['y'] + dy),
self.font_size,
1.0,
self.outline_color
)
else:
# Draw vertical character outline
for dx in range(-outline_thickness, outline_thickness + 1):
for dy in range(-outline_thickness, outline_thickness + 1):
if dx == 0 and dy == 0:
continue
dist = (dx*dx + dy*dy) ** 0.5
if dist <= outline_thickness + 0.5:
if pos['is_vertical_char']:
rotated_img = self._get_rotated_char(pos['char'], self.outline_color)
ray.image_draw(
image,
rotated_img,
ray.Rectangle(0, 0, rotated_img.width, rotated_img.height),
ray.Rectangle(
int(center_offset + dx),
int(y_pos + dy),
int(pos['x'] + dx),
int(pos['y'] + dy),
rotated_img.width,
rotated_img.height
),
@@ -345,24 +637,37 @@ class OutlinedText:
ray.image_draw_text_ex(
image,
self.font,
char,
ray.Vector2(center_offset + dx, y_pos + dy),
pos['char'],
ray.Vector2(pos['x'] + dx, pos['y'] + dy),
self.font_size,
1.0,
self.outline_color
)
# Then draw all main text
for char, center_offset, y_pos, _, is_vertical in positions:
if is_vertical:
rotated_img = self._get_rotated_char(char, self.text_color)
for pos in positions:
if pos['type'] == 'horizontal':
# Draw horizontal text
ray.image_draw_text_ex(
image,
self.font,
pos['text'],
ray.Vector2(pos['x'], pos['y']),
self.font_size,
1.0,
self.text_color
)
else:
# Draw vertical character
if pos['is_vertical_char']:
rotated_img = self._get_rotated_char(pos['char'], self.text_color)
ray.image_draw(
image,
rotated_img,
ray.Rectangle(0, 0, rotated_img.width, rotated_img.height),
ray.Rectangle(
int(center_offset),
int(y_pos),
int(pos['x']),
int(pos['y']),
rotated_img.width,
rotated_img.height
),
@@ -372,42 +677,47 @@ class OutlinedText:
ray.image_draw_text_ex(
image,
self.font,
char,
ray.Vector2(center_offset, y_pos),
pos['char'],
ray.Vector2(pos['x'], pos['y']),
self.font_size,
1.0,
self.text_color
)
def _create_texture(self):
"""Create a texture with outlined text"""
# Calculate dimensions
width, height, padding_x, padding_y = self._calculate_dimensions()
if self.hash in text_cache:
texture = ray.load_texture(f'cache/image/{self.hash}.png')
return texture
self.font = self._load_font_for_text(self.text)
width, height = self._calculate_dimensions()
width += int(self.outline_thickness * 1.5)
height += int(self.outline_thickness * 1.5)
# Create transparent image
image = ray.gen_image_color(width, height, ray.Color(0, 0, 0, 0))
# Draw text based on orientation
if not self.vertical:
self._draw_horizontal_text(image, padding_x, padding_y)
self._draw_horizontal_text(image)
else:
self._draw_vertical_text(image, width, padding_x, padding_y)
self._draw_vertical_text(image, width)
# Create texture from image
ray.export_image(image, f'cache/image/{self.hash}.png')
texture = ray.load_texture_from_image(image)
ray.unload_image(image)
return texture
def draw(self, src: ray.Rectangle, dest: ray.Rectangle, origin: ray.Vector2, rotation: float, color: ray.Color):
"""Draw the outlined text"""
ray.draw_texture_pro(self.texture, src, dest, origin, rotation, color)
def unload(self):
"""Clean up resources"""
# Unload all cached rotated images
for img in self._rotation_cache.values():
ray.unload_image(img)
self._rotation_cache.clear()
# Unload texture
for img in self._horizontal_cache.values():
ray.unload_image(img)
self._horizontal_cache.clear()
ray.unload_texture(self.texture)

View File

@@ -1,3 +1,5 @@
from pathlib import Path
import pyray as ray
from moviepy import VideoFileClip
@@ -6,14 +8,14 @@ from libs.utils import get_current_ms
class VideoPlayer:
def __init__(self, path: str):
def __init__(self, path: Path):
"""Initialize a video player instance. Audio must have the same name and an ogg extension.
Todo: extract audio from video directly
"""
self.is_finished_list = [False, False]
self.video_path = path
self.video = VideoFileClip(path)
audio_path = path[:-4] + '.ogg'
audio_path = path.with_suffix('.ogg')
self.audio = audio.load_music_stream(audio_path)
self.buffer_size = 10 # Number of frames to keep in memory

View File

@@ -2,7 +2,7 @@ from pathlib import Path
import pyray as ray
from libs.utils import load_texture_from_zip
from libs.utils import get_config, load_texture_from_zip
class EntryScreen:
@@ -24,7 +24,9 @@ class EntryScreen:
def update(self):
self.on_screen_start()
if ray.is_key_pressed(ray.KeyboardKey.KEY_ENTER):
keys = get_config()["keybinds"]["left_don"] + get_config()["keybinds"]["right_don"]
for key in keys:
if ray.is_key_pressed(ord(key)):
return self.on_screen_end()
def draw(self):
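The loop above works because raylib's letter keycodes equal their uppercase ASCII values, so ord('F') == ray.KeyboardKey.KEY_F; a sketch of the config-driven check (config contents illustrative):

import pyray as ray

keybinds = {'left_don': ['F'], 'right_don': ['J']}  # illustrative config values
for key in keybinds['left_don'] + keybinds['right_don']:
    if ray.is_key_pressed(ord(key)):  # valid for single uppercase ASCII letters only
        print('don hit')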

View File

@@ -24,10 +24,12 @@ from libs.video import VideoPlayer
class GameScreen:
JUDGE_X = 414
SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 720
def __init__(self, width: int, height: int):
self.width = width
self.height = height
self.judge_x = 414
self.current_ms = 0
self.result_transition = None
self.song_info = None
@@ -82,16 +84,14 @@ class GameScreen:
def load_sounds(self):
sounds_dir = Path("Sounds")
self.sound_don = audio.load_sound(str(sounds_dir / "inst_00_don.wav"))
self.sound_kat = audio.load_sound(str(sounds_dir / "inst_00_katsu.wav"))
self.sound_balloon_pop = audio.load_sound(str(sounds_dir / "balloon_pop.wav"))
self.sound_result_transition = audio.load_sound(str(sounds_dir / "result" / "VO_RESULT [1].ogg"))
self.sound_don = audio.load_sound(sounds_dir / "inst_00_don.wav")
self.sound_kat = audio.load_sound(sounds_dir / "inst_00_katsu.wav")
self.sound_restart = audio.load_sound(sounds_dir / 'song_select' / 'Skip.ogg')
self.sound_balloon_pop = audio.load_sound(sounds_dir / "balloon_pop.wav")
self.sound_result_transition = audio.load_sound(sounds_dir / "result" / "VO_RESULT [1].ogg")
self.sounds = [self.sound_don, self.sound_kat, self.sound_balloon_pop, self.sound_result_transition]
def init_tja(self, song: str, difficulty: int):
self.load_textures()
self.load_sounds()
def init_tja(self, song: Path, difficulty: int):
#Map notes to textures
self.note_type_list = [self.textures['lane_syousetsu'][0],
self.textures['onp_don'], self.textures['onp_katsu'],
@@ -103,26 +103,27 @@ class GameScreen:
self.textures['onp_renda_dai'][0], self.textures['onp_renda_dai'][1],
self.textures['onp_fusen'][0]]
self.tja = TJAParser(song, start_delay=self.start_delay)
metadata = self.tja.get_metadata()
if hasattr(self.tja, 'bg_movie'):
if Path(self.tja.bg_movie).exists():
self.movie = VideoPlayer(str(Path(self.tja.bg_movie)))
self.tja = TJAParser(song, start_delay=self.start_delay, distance=self.width - GameScreen.JUDGE_X)
if self.tja.metadata.bgmovie != Path() and self.tja.metadata.bgmovie.exists():
self.movie = VideoPlayer(self.tja.metadata.bgmovie)
self.movie.set_volume(0.0)
else:
self.movie = None
self.tja.distance = self.width - self.judge_x
session_data.song_title = self.tja.title
session_data.song_title = self.tja.metadata.title.get(get_config()['general']['language'].lower(), self.tja.metadata.title['en'])
self.player_1 = Player(self, 1, difficulty, metadata)
self.song_music = audio.load_sound(str(Path(self.tja.wave)))
self.start_ms = (get_current_ms() - self.tja.offset*1000)
self.player_1 = Player(self, 1, difficulty)
if not hasattr(self, 'song_music'):
self.song_music = audio.load_sound(self.tja.metadata.wave)
audio.normalize_sound(self.song_music, 0.1935)
self.start_ms = (get_current_ms() - self.tja.metadata.offset*1000)
def on_screen_start(self):
if not self.screen_init:
self.screen_init = True
self.load_textures()
self.load_sounds()
self.init_tja(global_data.selected_song, session_data.selected_difficulty)
self.song_info = SongInfo(self.tja.title, 'TEST')
self.song_info = SongInfo(session_data.song_title, 'TEST')
self.result_transition = None
def on_screen_end(self):
@@ -130,6 +131,8 @@ class GameScreen:
for zip in self.textures:
for texture in self.textures[zip]:
ray.unload_texture(texture)
audio.unload_sound(self.song_music)
del self.song_music
self.song_started = False
self.end_ms = 0
self.movie = None
@@ -140,7 +143,8 @@ class GameScreen:
return
with sqlite3.connect('scores.db') as con:
cursor = con.cursor()
hash = self.tja.hash_note_data(self.tja.data_to_notes(self.player_1.difficulty)[0])
notes, _, bars = TJAParser.notes_to_position(TJAParser(self.tja.file_path), self.player_1.difficulty)
hash = self.tja.hash_note_data(notes, bars)
check_query = "SELECT score FROM Scores WHERE hash = ? LIMIT 1"
cursor.execute(check_query, (hash,))
result = cursor.fetchone()
@@ -149,8 +153,8 @@ class GameScreen:
INSERT OR REPLACE INTO Scores (hash, en_name, jp_name, diff, score, good, ok, bad, drumroll, combo)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
'''
data = (hash, self.tja.title,
self.tja.title_ja, self.player_1.difficulty,
data = (hash, self.tja.metadata.title['en'],
self.tja.metadata.title['ja'], self.player_1.difficulty,
session_data.result_score, session_data.result_good,
session_data.result_ok, session_data.result_bad,
session_data.result_total_drumroll, session_data.result_max_combo)
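The INSERT OR REPLACE upsert implies hash is the table's primary key; a schema consistent with that usage (hypothetical, since the CREATE TABLE is not part of this diff):

import sqlite3

with sqlite3.connect('scores.db') as con:
    con.execute('''
        CREATE TABLE IF NOT EXISTS Scores (
            hash TEXT PRIMARY KEY,
            en_name TEXT, jp_name TEXT, diff INTEGER,
            score INTEGER, good INTEGER, ok INTEGER, bad INTEGER,
            drumroll INTEGER, combo INTEGER
        )
    ''')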
@@ -160,10 +164,11 @@ class GameScreen:
def update(self):
self.on_screen_start()
self.current_ms = get_current_ms() - self.start_ms
if (self.current_ms >= self.tja.offset*1000 + self.start_delay - get_config()["general"]["judge_offset"]) and not self.song_started:
if (self.current_ms >= self.tja.metadata.offset*1000 + self.start_delay - get_config()["general"]["judge_offset"]) and not self.song_started:
if self.song_music is not None:
if not audio.is_sound_playing(self.song_music):
audio.play_sound(self.song_music)
print(f"Song started at {self.current_ms}")
if self.movie is not None:
self.movie.start(get_current_ms())
self.song_started = True
@@ -182,15 +187,21 @@ class GameScreen:
return self.on_screen_end()
elif len(self.player_1.play_notes) == 0:
session_data.result_score, session_data.result_good, session_data.result_ok, session_data.result_bad, session_data.result_max_combo, session_data.result_total_drumroll = self.player_1.get_result_score()
self.write_score()
session_data.result_gauge_length = self.player_1.gauge.gauge_length
if self.end_ms != 0:
if get_current_ms() >= self.end_ms + 8533.34:
self.result_transition = ResultTransition(self.height)
audio.play_sound(self.sound_result_transition)
else:
self.write_score()
self.end_ms = get_current_ms()
if ray.is_key_pressed(ray.KeyboardKey.KEY_F1):
audio.stop_sound(self.song_music)
self.init_tja(global_data.selected_song, session_data.selected_difficulty)
audio.play_sound(self.sound_restart)
self.song_started = False
def draw(self):
if self.movie is not None:
self.movie.draw()
@@ -207,7 +218,7 @@ class Player:
TIMING_OK = 75.0750045776367
TIMING_BAD = 108.441665649414
def __init__(self, game_screen: GameScreen, player_number: int, difficulty: int, metadata):
def __init__(self, game_screen: GameScreen, player_number: int, difficulty: int):
self.player_number = player_number
self.difficulty = difficulty
@@ -248,7 +259,7 @@ class Player:
self.input_log: dict[float, tuple] = dict()
self.gauge = Gauge(self.difficulty, metadata[-1][self.difficulty][0])
self.gauge = Gauge(self.difficulty, game_screen.tja.metadata.course_data[self.difficulty].level)
self.gauge_hit_effect: list[GaugeHitEffect] = []
self.autoplay_hit_side = 'L'
@@ -282,7 +293,7 @@ class Player:
for i in range(len(self.current_bars)-1, -1, -1):
bar = self.current_bars[i]
position = self.get_position(game_screen, bar.hit_ms, bar.pixels_per_frame)
if position < game_screen.judge_x + 650:
if position < GameScreen.JUDGE_X + 650:
self.current_bars.pop(i)
def play_note_manager(self, game_screen: GameScreen):
@@ -335,7 +346,7 @@ class Player:
if note.type in {5, 6, 7} and len(self.current_notes_draw) > 1:
note = self.current_notes_draw[1]
position = self.get_position(game_screen, note.hit_ms, note.pixels_per_frame)
if position < game_screen.judge_x + 650:
if position < GameScreen.JUDGE_X + 650:
self.current_notes_draw.pop(0)
def note_manager(self, game_screen: GameScreen):
@@ -420,7 +431,7 @@ class Player:
return
big = curr_note.type == 3 or curr_note.type == 4
if (curr_note.hit_ms - Player.TIMING_GOOD) <= game_screen.current_ms <= (curr_note.hit_ms + Player.TIMING_GOOD):
self.draw_judge_list.append(Judgement('GOOD', big))
self.draw_judge_list.append(Judgement('GOOD', big, ms_display=game_screen.current_ms - curr_note.hit_ms))
self.lane_hit_effect = LaneHitEffect('GOOD')
self.good_count += 1
self.score += self.base_score
@@ -428,14 +439,14 @@ class Player:
self.note_correct(game_screen, curr_note)
elif (curr_note.hit_ms - Player.TIMING_OK) <= game_screen.current_ms <= (curr_note.hit_ms + Player.TIMING_OK):
self.draw_judge_list.append(Judgement('OK', big))
self.draw_judge_list.append(Judgement('OK', big, ms_display=game_screen.current_ms - curr_note.hit_ms))
self.ok_count += 1
self.score += 10 * math.floor(self.base_score / 2 / 10)
self.base_score_list.append(ScoreCounterAnimation(10 * math.floor(self.base_score / 2 / 10)))
self.note_correct(game_screen, curr_note)
elif (curr_note.hit_ms - Player.TIMING_BAD) <= game_screen.current_ms <= (curr_note.hit_ms + Player.TIMING_BAD):
self.draw_judge_list.append(Judgement('BAD', big))
self.draw_judge_list.append(Judgement('BAD', big, ms_display=game_screen.current_ms - curr_note.hit_ms))
self.bad_count += 1
self.combo = 0
self.play_notes.popleft()
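The OK score above is half the GOOD base score, truncated down to a multiple of 10; a quick worked example:

import math

base_score = 990                                # hypothetical per-note base score
ok_score = 10 * math.floor(base_score / 2 / 10) # half is 495; floored to 490
assert ok_score == 490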
@@ -484,7 +495,10 @@ class Player:
return
note = self.play_notes[0]
if self.is_drumroll or self.is_balloon:
subdivision_in_ms = game_screen.current_ms // ((60000 * 4 / game_screen.tja.bpm) / 24)
if self.play_notes[0].bpm == 0:
subdivision_in_ms = 0
else:
subdivision_in_ms = game_screen.current_ms // ((60000 * 4 / self.play_notes[0].bpm) / 24)
if subdivision_in_ms > self.last_subdivision:
self.last_subdivision = subdivision_in_ms
hit_type = 'DON'
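The guarded expression above sets the autoplay drumroll tick; as a standalone sketch:

def drumroll_tick_ms(bpm: float) -> float:
    # One 4/4 measure lasts (60000 * 4 / bpm) ms; dividing by 24 gives an
    # autoplay roll rate of 24 hits per measure. Some charts carry bpm == 0
    # on a note, which would divide by zero -- hence the guard.
    if bpm == 0:
        return float('inf')  # effectively "never tick"
    return (60000.0 * 4 / bpm) / 24

# e.g. at 120 BPM: (60000 * 4 / 120) / 24 = 2000 / 24, about 83.3 ms per hit.
# (The diff itself pins the subdivision index to 0 when bpm == 0, which
# suppresses the roll with the same effect.)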
@@ -522,7 +536,6 @@ class Player:
self.check_note(game_screen, type)
if len(self.play_notes) > 0:
note = self.play_notes[0]
print(note)
else:
break
@@ -596,6 +609,8 @@ class Player:
return
for bar in reversed(self.current_bars):
if not bar.display:
continue
position = self.get_position(game_screen, bar.load_ms, bar.pixels_per_frame)
ray.draw_texture(game_screen.note_type_list[bar.type], position+60, 190, ray.WHITE)
@@ -603,9 +618,18 @@ class Player:
if len(self.current_notes_draw) <= 0:
return
eighth_in_ms = (60000 * 4 / game_screen.tja.bpm) / 8
if len(self.current_bars) > 0:
if self.current_bars[0].bpm == 0:
eighth_in_ms = 0
else:
eighth_in_ms = (60000 * 4 / self.current_bars[0].bpm) / 8
else:
if self.current_notes_draw[0].bpm == 0:
eighth_in_ms = 0
else:
eighth_in_ms = (60000 * 4 / self.current_notes_draw[0].bpm) / 8
current_eighth = 0
if self.combo >= 50:
if self.combo >= 50 and eighth_in_ms != 0:
current_eighth = int((game_screen.current_ms - game_screen.start_ms) // eighth_in_ms)
for note in reversed(self.current_notes_draw):
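This branch replaces the old chart-wide BPM with a per-object one: the combo bounce interval is an eighth note taken from the first visible bar, or from the first drawable note when no bar is on screen, with bpm == 0 treated as "no bounce" rather than a division by zero. A condensed equivalent (hypothetical helper, not in the diff):

def eighth_in_ms(current_bars, current_notes_draw) -> float:
    source = current_bars[0] if current_bars else current_notes_draw[0]
    return 0 if source.bpm == 0 else (60000 * 4 / source.bpm) / 8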
@@ -664,10 +688,13 @@ class Player:
anim.draw(game_screen)
class Judgement:
def __init__(self, type: str, big: bool):
def __init__(self, type: str, big: bool, ms_display: Optional[float]=None):
self.type = type
self.big = big
self.is_finished = False
self.curr_hit_ms = None
if ms_display is not None:
self.curr_hit_ms = str(round(ms_display, 2))
self.fade_animation_1 = Animation.create_fade(132, initial_opacity=0.5, delay=100)
self.fade_animation_2 = Animation.create_fade(316 - 233.3, delay=233.3)
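With the new optional ms_display argument, callers pass the signed timing error so it can be drawn above the judgement sprite: negative means the hit landed early, positive means late. For example (value illustrative):

error_ms = game_screen.current_ms - curr_note.hit_ms  # e.g. -12.5 ms early
judgement = Judgement('GOOD', big=False, ms_display=error_ms)
# judgement.curr_hit_ms is now the string '-12.5', rendered 20 px above the
# judgement texture by the draw code further down.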
@@ -696,6 +723,8 @@ class Judgement:
ray.draw_texture(textures_1[19], 342, 184, color)
ray.draw_texture(textures_2[index+5], 304, 143, hit_color)
ray.draw_texture(textures_2[9], 370, int(y), color)
if self.curr_hit_ms is not None:
ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.YELLOW, self.fade_animation_1.attribute))
elif self.type == 'OK':
if self.big:
ray.draw_texture(textures_1[20], 342, 184, color)
@@ -704,8 +733,12 @@ class Judgement:
ray.draw_texture(textures_1[18], 342, 184, color)
ray.draw_texture(textures_2[index], 304, 143, hit_color)
ray.draw_texture(textures_2[4], 370, int(y), color)
if self.curr_hit_ms is not None:
ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.WHITE, self.fade_animation_1.attribute))
elif self.type == 'BAD':
ray.draw_texture(textures_2[10], 370, int(y), color)
if self.curr_hit_ms is not None:
ray.draw_text(self.curr_hit_ms, 370, int(y)-20, 40, ray.fade(ray.BLUE, self.fade_animation_1.attribute))
class LaneHitEffect:
def __init__(self, type: str):
@@ -1133,21 +1166,13 @@ class SongInfo:
def __init__(self, song_name: str, genre: str):
self.song_name = song_name
self.genre = genre
self.font = self._load_font_for_text(song_name)
self.song_title = OutlinedText(
self.font, song_name, 40, ray.WHITE, ray.BLACK, outline_thickness=4
song_name, 40, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5
)
self.fade_in = Animation.create_fade(self.FADE_DURATION, initial_opacity=0.0, final_opacity=1.0)
self.fade_out = Animation.create_fade(self.FADE_DURATION, delay=self.DISPLAY_DURATION)
self.fade_fake = Animation.create_fade(0, delay=self.DISPLAY_DURATION*2 + self.FADE_DURATION)
def _load_font_for_text(self, text: str) -> ray.Font:
codepoint_count = ray.ffi.new('int *', 0)
unique_codepoints = set(text)
codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count)
return ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0)
def update(self, current_ms: float):
self.fade_in.update(current_ms)
self.fade_out.update(current_ms)

View File

@@ -29,10 +29,10 @@ class ResultScreen:
def load_sounds(self):
sounds_dir = Path("Sounds")
self.sound_don = audio.load_sound(str(sounds_dir / "inst_00_don.wav"))
self.sound_kat = audio.load_sound(str(sounds_dir / "inst_00_katsu.wav"))
self.sound_num_up = audio.load_sound(str(sounds_dir / "result" / "SE_RESULT [4].ogg"))
self.bgm = audio.load_sound(str(sounds_dir / "result" / "JINGLE_SEISEKI [1].ogg"))
self.sound_don = audio.load_sound(sounds_dir / "inst_00_don.wav")
self.sound_kat = audio.load_sound(sounds_dir / "inst_00_katsu.wav")
self.sound_num_up = audio.load_sound(sounds_dir / "result" / "SE_RESULT [4].ogg")
self.bgm = audio.load_sound(sounds_dir / "result" / "JINGLE_SEISEKI [1].ogg")
def on_screen_start(self):
if not self.screen_init:
@@ -211,12 +211,7 @@ class FadeIn:
class FontText:
def __init__(self, text, font_size):
codepoint_count = ray.ffi.new('int *', 0)
codepoints_no_dup = set()
codepoints_no_dup.update(session_data.song_title)
codepoints = ray.load_codepoints(''.join(codepoints_no_dup), codepoint_count)
self.font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0)
self.text = OutlinedText(self.font, str(text), font_size, ray.WHITE, ray.BLACK, outline_thickness=4)
self.text = OutlinedText(str(text), font_size, ray.Color(255, 255, 255, 255), ray.Color(0, 0, 0, 255), outline_thickness=5)
self.texture = self.text.texture

File diff suppressed because it is too large

View File

@@ -3,6 +3,7 @@ from pathlib import Path
import pyray as ray
from libs import song_hash
from libs.animation import Animation
from libs.audio import audio
from libs.utils import (
@@ -19,9 +20,9 @@ class TitleScreen:
self.width = width
self.height = height
video_dir = Path(get_config()["paths"]["video_path"]) / "op_videos"
self.op_video_list = [str(file) for file in video_dir.glob("**/*.mp4")]
self.op_video_list = [file for file in video_dir.glob("**/*.mp4")]
video_dir = Path(get_config()["paths"]["video_path"]) / "attract_videos"
self.attract_video_list = [str(file) for file in video_dir.glob("**/*.mp4")]
self.attract_video_list = [file for file in video_dir.glob("**/*.mp4")]
self.load_sounds()
self.screen_init = False
@@ -32,10 +33,10 @@ class TitleScreen:
sounds_dir = Path("Sounds")
title_dir = sounds_dir / "title"
self.sound_bachi_swipe = audio.load_sound(str(title_dir / "SE_ATTRACT_2.ogg"))
self.sound_bachi_hit = audio.load_sound(str(title_dir / "SE_ATTRACT_3.ogg"))
self.sound_warning_message = audio.load_sound(str(title_dir / "VO_ATTRACT_3.ogg"))
self.sound_warning_error = audio.load_sound(str(title_dir / "SE_ATTRACT_1.ogg"))
self.sound_bachi_swipe = audio.load_sound(title_dir / "SE_ATTRACT_2.ogg")
self.sound_bachi_hit = audio.load_sound(title_dir / "SE_ATTRACT_3.ogg")
self.sound_warning_message = audio.load_sound(title_dir / "VO_ATTRACT_3.ogg")
self.sound_warning_error = audio.load_sound(title_dir / "SE_ATTRACT_1.ogg")
self.sounds = [self.sound_bachi_swipe, self.sound_bachi_hit, self.sound_warning_message, self.sound_warning_error]
def load_textures(self):
@@ -47,6 +48,8 @@ class TitleScreen:
self.screen_init = True
self.load_textures()
song_hash.song_hashes = song_hash.build_song_hashes()
self.scene = 'Opening Video'
self.op_video = VideoPlayer(random.choice(self.op_video_list))
self.attract_video = VideoPlayer(random.choice(self.attract_video_list))
@@ -94,7 +97,9 @@ class TitleScreen:
self.on_screen_start()
self.scene_manager()
if ray.is_key_pressed(ray.KeyboardKey.KEY_ENTER):
keys = get_config()["keybinds"]["left_don"] + get_config()["keybinds"]["right_don"]
for key in keys:
if ray.is_key_pressed(ord(key)):
return self.on_screen_end()
def draw(self):
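The title screen now starts on any configured don key rather than only Enter; ord(key) assumes the keybinds are single upper-case characters, whose ASCII codes coincide with raylib's keyboard key codes. A condensed equivalent of the loop above:

keys = get_config()["keybinds"]["left_don"] + get_config()["keybinds"]["right_don"]
if any(ray.is_key_pressed(ord(k)) for k in keys):
    return self.on_screen_end()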