This commit is contained in:
mc08
2025-11-28 18:42:05 -08:00
7 changed files with 558 additions and 126 deletions

View File

@@ -137,6 +137,40 @@ def create_song_db():
con.commit()
logger.info("Scores database created successfully")
def update_camera_for_window_size(camera, virtual_width, virtual_height):
    """Fit the virtual canvas into the current window, then layer on the
    chart-driven camera state (zoom, per-axis scale, offset, rotation)
    from global_data.camera.

    Mutates `camera` (a raylib Camera2D) in place; returns nothing.
    """
    win_w = ray.get_screen_width()
    win_h = ray.get_screen_height()
    if win_w == 0 or win_h == 0:
        # Degenerate window (e.g. minimized): fall back to an identity camera.
        camera.zoom = 1.0
        camera.offset = ray.Vector2(0, 0)
        camera.rotation = 0.0
        return
    cam = global_data.camera
    # Uniform letterbox fit: largest scale at which the virtual canvas
    # still fits inside the window on both axes.
    fit = min(win_w / virtual_width, win_h / virtual_height)
    camera.zoom = fit * cam.zoom
    camera.rotation = cam.rotation

    def axis_offset(win_size, virt_size, axis_scale, user_offset):
        # Center the canvas, then re-center it after the chart zoom and the
        # per-axis scale grow it, then add the chart's own pan (in virtual
        # units, hence scaled by `fit`).
        letterbox = (win_size - (virt_size * fit)) * 0.5
        zoom_shift = (virt_size * fit * (cam.zoom - 1.0)) * 0.5
        scale_shift = (virt_size * fit * (axis_scale - 1.0)) * 0.5
        return letterbox - zoom_shift - scale_shift + (user_offset * fit)

    camera.offset = ray.Vector2(
        axis_offset(win_w, virtual_width, cam.h_scale, cam.offset.x),
        axis_offset(win_h, virtual_height, cam.v_scale, cam.offset.y),
    )
def main():
force_dedicated_gpu()
global_data.config = get_config()
@@ -171,6 +205,7 @@ def main():
ray.set_target_fps(global_data.config["video"]["target_fps"])
logger.info(f"Target FPS set to {global_data.config['video']['target_fps']}")
ray.set_config_flags(ray.ConfigFlags.FLAG_MSAA_4X_HINT)
ray.set_config_flags(ray.ConfigFlags.FLAG_WINDOW_RESIZABLE)
ray.set_trace_log_level(ray.TraceLogLevel.LOG_WARNING)
ray.init_window(screen_width, screen_height, "PyTaiko")
@@ -267,15 +302,20 @@ def main():
Screens.DAN_RESULT: dan_result_screen,
Screens.LOADING: load_screen
}
target = ray.load_render_texture(screen_width, screen_height)
ray.gen_texture_mipmaps(target.texture)
ray.set_texture_filter(target.texture, ray.TextureFilter.TEXTURE_FILTER_TRILINEAR)
camera = ray.Camera2D()
camera.target = ray.Vector2(0, 0)
camera.rotation = 0.0
update_camera_for_window_size(camera, screen_width, screen_height)
logger.info("Camera2D initialized")
ray.rl_set_blend_factors_separate(RL_SRC_ALPHA, RL_ONE_MINUS_SRC_ALPHA, RL_ONE, RL_ONE_MINUS_SRC_ALPHA, RL_FUNC_ADD, RL_FUNC_ADD)
ray.set_exit_key(global_data.config["keys"]["exit_key"])
ray.hide_cursor()
logger.info("Cursor hidden")
last_fps = 1
last_color = ray.BLACK
while not ray.window_should_close():
if ray.is_key_pressed(global_data.config["keys"]["fullscreen_key"]):
@@ -285,25 +325,21 @@ def main():
ray.toggle_borderless_windowed()
logger.info("Toggled borderless windowed mode")
curr_screen_width = ray.get_screen_width()
curr_screen_height = ray.get_screen_height()
update_camera_for_window_size(camera, screen_width, screen_height)
if curr_screen_width == 0 or curr_screen_height == 0:
dest_rect = ray.Rectangle(0, 0, screen_width, screen_height)
else:
scale = min(curr_screen_width / screen_width, curr_screen_height / screen_height)
dest_rect = ray.Rectangle((curr_screen_width - (screen_width * scale)) * 0.5,
(curr_screen_height - (screen_height * scale)) * 0.5,
screen_width * scale, screen_height * scale)
ray.begin_drawing()
ray.begin_texture_mode(target)
if global_data.camera.border_color != last_color:
ray.clear_background(global_data.camera.border_color)
last_color = global_data.camera.border_color
ray.begin_mode_2d(camera)
ray.begin_blend_mode(ray.BlendMode.BLEND_CUSTOM_SEPARATE)
screen = screen_mapping[current_screen]
next_screen = screen.update()
if screen.screen_init:
ray.clear_background(ray.BLACK)
screen._do_draw()
if next_screen is not None:
@@ -321,19 +357,16 @@ def main():
ray.draw_text(f'{last_fps} FPS', 20, 20, 20, ray.YELLOW)
else:
ray.draw_text(f'{last_fps} FPS', 20, 20, 20, ray.LIME)
ray.draw_rectangle(-screen_width, 0, screen_width, screen_height, last_color)
ray.draw_rectangle(screen_width, 0, screen_width, screen_height, last_color)
ray.draw_rectangle(0, -screen_height, screen_width, screen_height, last_color)
ray.draw_rectangle(0, screen_height, screen_width, screen_height, last_color)
ray.end_blend_mode()
ray.end_texture_mode()
ray.begin_drawing()
ray.clear_background(ray.BLACK)
ray.draw_texture_pro(
target.texture,
ray.Rectangle(0, 0, target.texture.width, -target.texture.height),
dest_rect,
ray.Vector2(0,0),
0,
ray.WHITE
)
ray.end_mode_2d()
ray.end_drawing()
ray.close_window()
audio.close_audio_device()
logger.info("Window closed and audio device shut down")

View File

@@ -24,6 +24,8 @@ SCENEPRESET,Supported,OpenTaiko (0auBSQ) v0.6.0
#LYRIC,Supported,TJAPlayer2 for.PC
#SUDDEN,Supported,TJAPlayer2 for.PC
#JPOSSCROLL,Supported,TJAPlayer2 for.PC
#OBJ / #CAM,Supported,TJAPlayer3-Extended
#BORDERCOLOR,Supported,TJAPlayer3-Extended
SONGVOL,Unsupported,TaikoJiro v1.66
SEVOL,Unsupported,TaikoJiro v1.66
@@ -67,8 +69,6 @@ BGOFFSET,Unsupported,Unknown
#NEXTSONG,Unsupported,TJAPlayer3 v1.5.0
#PAPAMAMA,Unsupported,TJAPlayer3-f v1.7.2.0
#ENABLEDORON / #DISABLEDORON,Unsupported,TJAPlayer3-Extended
#OBJ / #CAM,Unsupported,TJAPlayer3-Extended
#BORDERCOLOR,Unsupported,TJAPlayer3-Extended
#CHANGETEXTURE / #RESETTEXTURE,Unsupported,TJAPlayer3-Extended
#SETCONFIG,Unsupported,TJAPlayer3-Extended
#BARLINE,Unsupported,taiko-web (plugin Custom Barlines)
1 Command/Header Support Status Initial Sim Release
24 #SUDDEN Supported TJAPlayer2 for.PC
25 #JPOSSCROLL Supported TJAPlayer2 for.PC
26 #OBJ / #CAM Supported TJAPlayer3-Extended
27 #BORDERCOLOR Supported TJAPlayer3-Extended
28 SONGVOL Unsupported TaikoJiro v1.66
29 SEVOL Unsupported TaikoJiro v1.66
30 SCOREINIT Unsupported TaikoJiro v1.67
31 SCOREDIFF Unsupported TaikoJiro v1.67
69 #CHANGETEXTURE / #RESETTEXTURE Unsupported TJAPlayer3-Extended
70 #SETCONFIG Unsupported TJAPlayer3-Extended
71 #BARLINE Unsupported taiko-web (plugin Custom Barlines)
#SETCONFIG Unsupported TJAPlayer3-Extended
#BARLINE Unsupported taiko-web (plugin Custom Barlines)
72 #BARLINESCROLL Unsupported taiko-web (plugin Custom Barlines)
73 #GAMETYPE Unsupported OpenTaiko (0auBSQ) v0.6.0
74 #SPLITLANE / #MERGELANE Unsupported OpenTaiko (0auBSQ) v0.6.0

View File

@@ -120,6 +120,14 @@ class SessionData:
result_data: ResultData = field(default_factory=lambda: ResultData())
dan_result_data: DanResultData = field(default_factory=lambda: DanResultData())
@dataclass
class Camera:
    """Chart-controlled camera state (TJAPlayer3-Extended #CAM*/#BORDERCOLOR),
    applied on top of the window-fit transform each frame.

    The original version was a plain class with annotated class attributes,
    so the `ray.Vector2(0, 0)` default was ONE object shared by every
    Camera instance; making this a dataclass with a default_factory gives
    each instance its own vector.
    """
    offset: ray.Vector2 = field(default_factory=lambda: ray.Vector2(0, 0))
    zoom: float = 1.0
    h_scale: float = 1.0
    v_scale: float = 1.0
    rotation: float = 0.0
    # Factory returns the shared ray.BLACK constant on purpose: comparisons
    # against ray.BLACK elsewhere (e.g. the border-clear check in main) may
    # rely on it being the same object, and cffi structs may not be usable
    # as plain dataclass defaults.
    border_color: ray.Color = field(default_factory=lambda: ray.BLACK)
@dataclass
class GlobalData:
"""
@@ -139,6 +147,7 @@ class GlobalData:
session_data (list[SessionData]): Session data for both players.
"""
songs_played: int = 0
camera: Camera = Camera()
font: ray.Font = ray.get_font_default()
font_codepoints = set()
config: Config = field(default_factory=dict)

View File

@@ -10,6 +10,8 @@ from functools import lru_cache
from pathlib import Path
from typing import Optional
import pyray as ray
from libs.global_data import Modifiers
from libs.utils import get_pixels_per_frame, strip_comments, global_tex
@@ -44,6 +46,36 @@ class ScrollType(IntEnum):
BMSCROLL = 1
HBSCROLL = 2
@dataclass()
class TimelineObject:
    """One timed event on a chart's command timeline.

    Every field is declared ``field(init=False)`` with no default, so a
    freshly constructed TimelineObject has NO attributes set.  Producers
    assign only the fields belonging to one event (e.g. just ``hit_ms`` +
    ``bpm`` for a BPM change, ``hit_ms`` + ``border_color`` for
    #BORDERCOLOR), and consumers detect the event type with ``hasattr``.
    Do not add defaults here — that would break the hasattr dispatch.
    """
    hit_ms: float = field(init=False)        # when the event takes effect (ms)
    # NOTE(review): producers in this file only assign hit_ms; confirm
    # load_ms is ever set before __lt__ sorting is used on these objects.
    load_ms: float = field(init=False)
    judge_pos_x: float = field(init=False)   # judge-circle offset (#JPOSSCROLL)
    judge_pos_y: float = field(init=False)
    border_color: ray.Color = field(init=False)  # letterbox fill (#BORDERCOLOR)
    cam_h_offset: float = field(init=False)  # camera pan (#CAMHOFFSET / moves)
    cam_v_offset: float = field(init=False)  # camera pan (#CAMVOFFSET / moves)
    cam_h_scale: float = field(init=False)   # per-axis scale (#CAMHSCALE*)
    cam_v_scale: float = field(init=False)   # per-axis scale (#CAMVSCALE*)
    cam_zoom: float = field(init=False)      # uniform zoom (#CAMZOOM*)
    cam_rotation: float = field(init=False)  # rotation in degrees (#CAMROTATION*)
    bpm: float = field(init=False)           # BPM in effect from this point
    # Planned fields, intentionally kept as commented-out code:
    '''
    gogo_time: bool = field(init=False)
    branch_params: str = field(init=False)
    is_branch_start: bool = False
    is_section_marker: bool = False
    sudden_appear_ms: float = 0
    sudden_moving_ms: float = 0
    '''
    def __lt__(self, other):
        """Allow sorting by load_ms"""
        return self.load_ms < other.load_ms
@dataclass()
class Note:
"""A note in a TJA file.
@@ -56,7 +88,6 @@ class Note:
pixels_per_frame_y (float): The number of pixels per frame in the y direction.
display (bool): Whether the note should be displayed.
index (int): The index of the note.
bpm (float): The beats per minute of the song.
gogo_time (bool): Whether the note is a gogo time note.
moji (int): The text drawn below the note.
is_branch_start (bool): Whether the note is the start of a branch.
@@ -71,7 +102,6 @@ class Note:
pixels_per_frame_y: float = field(init=False)
display: bool = field(init=False)
index: int = field(init=False)
bpm: float = field(init=False)
gogo_time: bool = field(init=False)
moji: int = field(init=False)
is_branch_start: bool = field(init=False)
@@ -79,8 +109,6 @@ class Note:
lyric: str = field(init=False)
sudden_appear_ms: float = field(init=False)
sudden_moving_ms: float = field(init=False)
judge_pos_x: float = field(init=False)
judge_pos_y: float = field(init=False)
bpmchange: float = field(init=False)
delay: float = field(init=False)
@@ -194,18 +222,21 @@ class NoteList:
play_notes: list[Note | Drumroll | Balloon] = field(default_factory=lambda: [])
draw_notes: list[Note | Drumroll | Balloon] = field(default_factory=lambda: [])
bars: list[Note] = field(default_factory=lambda: [])
timeline: list[TimelineObject] = field(default_factory=lambda: [])
def __add__(self, other: 'NoteList') -> 'NoteList':
    """Return a new NoteList whose lists are self's followed by other's."""
    merged = NoteList()
    merged.play_notes = self.play_notes + other.play_notes
    merged.draw_notes = self.draw_notes + other.draw_notes
    merged.bars = self.bars + other.bars
    merged.timeline = self.timeline + other.timeline
    return merged
def __iadd__(self, other: 'NoteList') -> 'NoteList':
    """Extend this NoteList's lists in place with other's; return self."""
    for list_attr in ('play_notes', 'draw_notes', 'bars', 'timeline'):
        getattr(self, list_attr).extend(getattr(other, list_attr))
    return self
@dataclass
@@ -645,6 +676,45 @@ class TJAParser:
else:
play_note_list[-3].moji = 2
def apply_easing(self, t, easing_point, easing_function):
    """Map a normalized time value ``t`` (0..1) through an easing curve.

    Args:
        t: Normalized progress in [0, 1].
        easing_point: 'IN', 'OUT' or 'IN_OUT' — which end(s) of the motion
            are eased.  Anything else behaves like 'IN'.
        easing_function: 'LINEAR', 'CUBIC', 'QUARTIC', 'QUINTIC',
            'SINUSOIDAL', 'EXPONENTIAL' or 'CIRCULAR'.  Unrecognized names
            fall back to linear.

    Returns:
        The eased progress, a monotonically non-decreasing value in [0, 1]
        with f(0) == 0 and f(1) == 1.

    Bug fix: the previous implementation's IN_OUT branch compared the
    *remapped* t in its final step, which made the curve non-monotonic
    (e.g. LINEAR IN_OUT gave 0.5 at t=0.25 but 0.2 at t=0.4) and never
    rescaled the two halves; this is the standard piecewise ease-in-out.
    """
    import math

    def ease_in(u):
        # Base ease-in shape on [0, 1]; unknown names degrade to linear.
        if easing_function == 'CUBIC':
            return u ** 3
        if easing_function == 'QUARTIC':
            return u ** 4
        if easing_function == 'QUINTIC':
            return u ** 5
        if easing_function == 'SINUSOIDAL':
            return 1 - math.cos((u * math.pi) / 2)
        if easing_function == 'EXPONENTIAL':
            return 0 if u == 0 else 2 ** (10 * (u - 1))
        if easing_function == 'CIRCULAR':
            return 1 - math.sqrt(1 - u ** 2)
        return u  # 'LINEAR' and any unrecognized function

    if easing_point == 'OUT':
        # Ease-out is the mirrored ease-in.
        return 1 - ease_in(1 - t)
    if easing_point == 'IN_OUT':
        # First half eases in toward 0.5, second half eases out from 0.5.
        if t < 0.5:
            return ease_in(t * 2) / 2
        return 1 - ease_in((1 - t) * 2) / 2
    return ease_in(t)  # 'IN' (and default)
def notes_to_position(self, diff: int):
"""Parse a TJA's notes into a NoteList."""
master_notes = NoteList()
@@ -657,10 +727,54 @@ class TJAParser:
index = 0
sudden_appear = 0
sudden_moving = 0
judge_pos_x = 0 # Offset from default judgment position
judge_pos_x = 0
judge_pos_y = 0
judge_target_x = 0 # Target position for interpolation
judge_target_x = 0
judge_target_y = 0
border_color = ray.BLACK
cam_h_offset = 0
cam_v_offset = 0
cam_h_move_active = False
cam_h_move_start_ms = 0
cam_h_move_duration_ms = 0
cam_h_move_start_offset = 0
cam_h_move_end_offset = 0
cam_h_easing_point = None
cam_h_easing_function = None
cam_v_move_active = False
cam_v_move_start_ms = 0
cam_v_move_duration_ms = 0
cam_v_move_start_offset = 0
cam_v_move_end_offset = 0
cam_v_easing_point = None
cam_v_easing_function = None
cam_zoom_move_active = False
cam_zoom_move_start_ms = 0
cam_zoom_start = 1.0
cam_zoom_end = 1.0
cam_zoom_easing_point = ""
cam_zoom_easing_function = ""
cam_h_scale = 1.0
cam_h_scale_move_active = False
cam_h_scale_move_start_ms = 0
cam_h_scale_start = 1.0
cam_h_scale_end = 1.0
cam_h_scale_easing_point = ""
cam_h_scale_easing_function = ""
cam_v_scale = 1.0
cam_v_scale_move_active = False
cam_v_scale_move_start_ms = 0
cam_v_scale_start = 1.0
cam_v_scale_end = 1.0
cam_v_scale_easing_point = ""
cam_v_scale_easing_function = ""
cam_rotation = 0.0
cam_rotation_move_active = False
cam_rotation_move_start_ms = 0
cam_rotation_start = 0.0
cam_rotation_end = 0.0
cam_rotation_easing_point = ""
cam_rotation_easing_function = ""
time_signature = 4/4
bpm = self.metadata.bpm
x_scroll_modifier = 1
@@ -670,6 +784,11 @@ class TJAParser:
curr_note_list = master_notes.play_notes
curr_draw_list = master_notes.draw_notes
curr_bar_list = master_notes.bars
curr_timeline = master_notes.timeline
init_bpm = TimelineObject()
init_bpm.hit_ms = self.current_ms
init_bpm.bpm = bpm
curr_timeline.append(init_bpm)
start_branch_ms = 0
start_branch_bpm = bpm
start_branch_time_sig = time_signature
@@ -710,14 +829,269 @@ class TJAParser:
bisect.insort(curr_bar_list, delay_bar, key=lambda x: x.load_ms)
for bar in notes:
#Length of the bar is determined by number of notes excluding commands
bar_length = sum(len(part) for part in bar if '#' not in part)
barline_added = False
for part in bar:
if part.startswith('#BORDERCOLOR'):
r, g, b = part[13:].split(',')
border_color = ray.Color(int(r), int(g), int(b), 255)
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.border_color = border_color
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
if part.startswith('#CAMRESET'):
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.cam_h_offset = 0
timeline_obj.cam_v_offset = 0
timeline_obj.cam_zoom = 1
timeline_obj.cam_h_scale = 1
timeline_obj.cam_v_scale = 1
timeline_obj.cam_rotation = 0
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
# Horizontal Offset Commands
if part.startswith('#CAMHOFFSET'):
cam_h_offset = float(part[12:])
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.cam_h_offset = cam_h_offset
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
if part.startswith('#CAMHMOVESTART'):
parts = part[15:].split(',')
if len(parts) >= 4:
cam_h_move_start_offset = float(parts[0].strip())
cam_h_move_end_offset = float(parts[1].strip())
cam_h_easing_point = parts[2].strip()
cam_h_easing_function = parts[3].strip()
cam_h_move_active = True
cam_h_move_start_ms = self.current_ms
cam_h_offset = cam_h_move_start_offset
continue
if part.startswith('#CAMHMOVEEND'):
if cam_h_move_active:
cam_h_move_duration_ms = self.current_ms - cam_h_move_start_ms
interpolation_interval_ms = 8
num_steps = int(cam_h_move_duration_ms / interpolation_interval_ms)
for step in range(num_steps + 1):
t = step / max(num_steps, 1)
eased_t = self.apply_easing(t, cam_h_easing_point, cam_h_easing_function)
interpolated_ms = cam_h_move_start_ms + (step * interpolation_interval_ms)
interp_offset = cam_h_move_start_offset + (
(cam_h_move_end_offset - cam_h_move_start_offset) * eased_t
)
cam_timeline = TimelineObject()
cam_timeline.hit_ms = interpolated_ms
cam_timeline.cam_h_offset = interp_offset
curr_timeline.append(cam_timeline)
cam_h_offset = cam_h_move_end_offset
cam_h_move_active = False
continue
# Vertical Offset Commands
if part.startswith('#CAMVOFFSET'):
cam_v_offset = float(part[12:])
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.cam_v_offset = cam_v_offset
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
if part.startswith('#CAMVMOVESTART'):
parts = part[15:].split(',')
if len(parts) >= 4:
cam_v_move_start_offset = float(parts[0].strip())
cam_v_move_end_offset = float(parts[1].strip())
cam_v_easing_point = parts[2].strip()
cam_v_easing_function = parts[3].strip()
cam_v_move_active = True
cam_v_move_start_ms = self.current_ms
cam_v_offset = cam_v_move_start_offset
continue
if part.startswith('#CAMVMOVEEND'):
if cam_v_move_active:
cam_v_move_duration_ms = self.current_ms - cam_v_move_start_ms
interpolation_interval_ms = 8
num_steps = int(cam_v_move_duration_ms / interpolation_interval_ms)
for step in range(num_steps + 1):
t = step / max(num_steps, 1)
eased_t = self.apply_easing(t, cam_v_easing_point, cam_v_easing_function)
interpolated_ms = cam_v_move_start_ms + (step * interpolation_interval_ms)
interp_offset = cam_v_move_start_offset + (
(cam_v_move_end_offset - cam_v_move_start_offset) * eased_t
)
cam_timeline = TimelineObject()
cam_timeline.hit_ms = interpolated_ms
cam_timeline.cam_v_offset = interp_offset
curr_timeline.append(cam_timeline)
cam_v_offset = cam_v_move_end_offset
cam_v_move_active = False
continue
# Zoom Commands
if part.startswith('#CAMZOOMSTART'):
parts = part[14:].split(',')
if len(parts) >= 4:
cam_zoom_start = float(parts[0].strip())
cam_zoom_end = float(parts[1].strip())
cam_zoom_easing_point = parts[2].strip()
cam_zoom_easing_function = parts[3].strip()
cam_zoom_move_active = True
cam_zoom_move_start_ms = self.current_ms
cam_zoom = cam_zoom_start
continue
if part.startswith('#CAMZOOMEND'):
if cam_zoom_move_active:
cam_zoom_move_duration_ms = self.current_ms - cam_zoom_move_start_ms
interpolation_interval_ms = 8
num_steps = int(cam_zoom_move_duration_ms / interpolation_interval_ms)
for step in range(num_steps + 1):
t = step / max(num_steps, 1)
eased_t = self.apply_easing(t, cam_zoom_easing_point, cam_zoom_easing_function)
interpolated_ms = cam_zoom_move_start_ms + (step * interpolation_interval_ms)
interp_zoom = cam_zoom_start + (
(cam_zoom_end - cam_zoom_start) * eased_t
)
cam_timeline = TimelineObject()
cam_timeline.hit_ms = interpolated_ms
cam_timeline.cam_zoom = interp_zoom
curr_timeline.append(cam_timeline)
cam_zoom = cam_zoom_end
cam_zoom_move_active = False
continue
if part.startswith('#CAMZOOM'):
cam_zoom = float(part[9:])
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.cam_zoom = cam_zoom
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
# Horizontal Scale Commands
if part.startswith('#CAMHSCALESTART'):
parts = part[16:].split(',')
if len(parts) >= 4:
cam_h_scale_start = float(parts[0].strip())
cam_h_scale_end = float(parts[1].strip())
cam_h_scale_easing_point = parts[2].strip()
cam_h_scale_easing_function = parts[3].strip()
cam_h_scale_move_active = True
cam_h_scale_move_start_ms = self.current_ms
cam_h_scale = cam_h_scale_start
continue
if part.startswith('#CAMHSCALEEND'):
if cam_h_scale_move_active:
cam_h_scale_move_duration_ms = self.current_ms - cam_h_scale_move_start_ms
interpolation_interval_ms = 8
num_steps = int(cam_h_scale_move_duration_ms / interpolation_interval_ms)
for step in range(num_steps + 1):
t = step / max(num_steps, 1)
eased_t = self.apply_easing(t, cam_h_scale_easing_point, cam_h_scale_easing_function)
interpolated_ms = cam_h_scale_move_start_ms + (step * interpolation_interval_ms)
interp_scale = cam_h_scale_start + (
(cam_h_scale_end - cam_h_scale_start) * eased_t
)
cam_timeline = TimelineObject()
cam_timeline.hit_ms = interpolated_ms
cam_timeline.cam_h_scale = interp_scale
curr_timeline.append(cam_timeline)
cam_h_scale = cam_h_scale_end
cam_h_scale_move_active = False
continue
if part.startswith('#CAMHSCALE'):
cam_h_scale = float(part[11:])
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.cam_h_scale = cam_h_scale
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
# Vertical Scale Commands
if part.startswith('#CAMVSCALESTART'):
parts = part[16:].split(',')
if len(parts) >= 4:
cam_v_scale_start = float(parts[0].strip())
cam_v_scale_end = float(parts[1].strip())
cam_v_scale_easing_point = parts[2].strip()
cam_v_scale_easing_function = parts[3].strip()
cam_v_scale_move_active = True
cam_v_scale_move_start_ms = self.current_ms
cam_v_scale = cam_v_scale_start
continue
if part.startswith('#CAMVSCALEEND'):
if cam_v_scale_move_active:
cam_v_scale_move_duration_ms = self.current_ms - cam_v_scale_move_start_ms
interpolation_interval_ms = 8
num_steps = int(cam_v_scale_move_duration_ms / interpolation_interval_ms)
for step in range(num_steps + 1):
t = step / max(num_steps, 1)
eased_t = self.apply_easing(t, cam_v_scale_easing_point, cam_v_scale_easing_function)
interpolated_ms = cam_v_scale_move_start_ms + (step * interpolation_interval_ms)
interp_scale = cam_v_scale_start + (
(cam_v_scale_end - cam_v_scale_start) * eased_t
)
cam_timeline = TimelineObject()
cam_timeline.hit_ms = interpolated_ms
cam_timeline.cam_v_scale = interp_scale
curr_timeline.append(cam_timeline)
cam_v_scale = cam_v_scale_end
cam_v_scale_move_active = False
continue
if part.startswith('#CAMVSCALE'):
cam_v_scale = float(part[11:])
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.cam_v_scale = cam_v_scale
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
# Rotation Commands
if part.startswith('#CAMROTATIONSTART'):
parts = part[18:].split(',')
if len(parts) >= 4:
cam_rotation_start = float(parts[0].strip())
cam_rotation_end = float(parts[1].strip())
cam_rotation_easing_point = parts[2].strip()
cam_rotation_easing_function = parts[3].strip()
cam_rotation_move_active = True
cam_rotation_move_start_ms = self.current_ms
cam_rotation = cam_rotation_start
continue
if part.startswith('#CAMROTATIONEND'):
if cam_rotation_move_active:
cam_rotation_move_duration_ms = self.current_ms - cam_rotation_move_start_ms
interpolation_interval_ms = 8
num_steps = int(cam_rotation_move_duration_ms / interpolation_interval_ms)
for step in range(num_steps + 1):
t = step / max(num_steps, 1)
eased_t = self.apply_easing(t, cam_rotation_easing_point, cam_rotation_easing_function)
interpolated_ms = cam_rotation_move_start_ms + (step * interpolation_interval_ms)
interp_rotation = cam_rotation_start + (
(cam_rotation_end - cam_rotation_start) * eased_t
)
cam_timeline = TimelineObject()
cam_timeline.hit_ms = interpolated_ms
cam_timeline.cam_rotation = interp_rotation
curr_timeline.append(cam_timeline)
cam_rotation = cam_rotation_end
cam_rotation_move_active = False
continue
if part.startswith('#CAMROTATION'):
cam_rotation = float(part[13:])
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.cam_rotation = cam_rotation
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
if part.startswith('#SECTION'):
is_section_start = True
continue
if part.startswith('#BRANCHSTART'):
start_branch_ms = self.current_ms
start_branch_bpm = bpm
@@ -753,7 +1127,6 @@ class TJAParser:
bar_line.type = 0
bar_line.display = False
bar_line.gogo_time = gogo_time
bar_line.bpm = bpm
bar_line.branch_params = branch_params
bar_list.append(bar_line)
@@ -765,16 +1138,20 @@ class TJAParser:
if section_bar:
section_bar = None
continue
elif part.startswith('#BRANCHEND'):
curr_note_list = master_notes.play_notes
curr_draw_list = master_notes.draw_notes
curr_bar_list = master_notes.bars
curr_timeline = master_notes.timeline
continue
if part == '#M':
branch_m.append(NoteList())
curr_note_list = branch_m[-1].play_notes
curr_draw_list = branch_m[-1].draw_notes
curr_bar_list = branch_m[-1].bars
curr_timeline = branch_m[-1].timeline
self.current_ms = start_branch_ms
bpm = start_branch_bpm
time_signature = start_branch_time_sig
@@ -790,6 +1167,7 @@ class TJAParser:
curr_note_list = branch_e[-1].play_notes
curr_draw_list = branch_e[-1].draw_notes
curr_bar_list = branch_e[-1].bars
curr_timeline = branch_e[-1].timeline
self.current_ms = start_branch_ms
bpm = start_branch_bpm
time_signature = start_branch_time_sig
@@ -805,6 +1183,7 @@ class TJAParser:
curr_note_list = branch_n[-1].play_notes
curr_draw_list = branch_n[-1].draw_notes
curr_bar_list = branch_n[-1].bars
curr_timeline = branch_n[-1].timeline
self.current_ms = start_branch_ms
bpm = start_branch_bpm
time_signature = start_branch_time_sig
@@ -815,71 +1194,50 @@ class TJAParser:
count = branch_balloon_count
is_branching = True
continue
if '#LYRIC' in part:
lyric = part[6:]
continue
if '#JPOSSCROLL' in part:
parts = part.split()
if len(parts) >= 4:
duration_ms = float(parts[1]) * 1000
distance_str = parts[2]
direction_deg = float(parts[3])
direction = int(parts[3]) # 0 = normal, 1 = reverse
delta_x = 0
delta_y = 0
if 'i' in distance_str:
normalized = distance_str.replace('.i', 'j').replace('i', 'j')
normalized = normalized.replace(',', '')
c = complex(normalized)
direction_rad = math.radians(direction_deg)
cos_dir = math.cos(direction_rad)
sin_dir = math.sin(direction_rad)
delta_x = c.real * cos_dir - c.imag * sin_dir
delta_y = c.real * sin_dir + c.imag * cos_dir
delta_x = c.real
delta_y = c.imag
else:
distance = float(distance_str)
direction_rad = math.radians(direction_deg)
delta_x = distance * math.cos(direction_rad)
delta_y = distance * math.sin(direction_rad)
delta_x = distance
delta_y = 0
if direction == 0:
delta_x = -delta_x
delta_y = -delta_y
judge_target_x = judge_pos_x + delta_x
judge_target_y = judge_pos_y + delta_y
interpolation_interval_ms = 8
num_steps = int(duration_ms / interpolation_interval_ms)
for step in range(num_steps + 1):
t = step / max(num_steps, 1) # Interpolation factor (0 to 1)
t = step / max(num_steps, 1)
interpolated_ms = self.current_ms + (step * interpolation_interval_ms)
# Linear interpolation
interp_x = judge_pos_x + (delta_x * t)
interp_y = judge_pos_y + (delta_y * t)
# Create invisible bar line to store position
jpos_bar = Note()
jpos_bar.pixels_per_frame_x = get_pixels_per_frame(bpm * time_signature * x_scroll_modifier, time_signature*4, self.distance)
jpos_bar.pixels_per_frame_y = get_pixels_per_frame(bpm * time_signature * y_scroll_modifier, time_signature*4, self.distance)
pixels_per_ms = get_pixels_per_ms(jpos_bar.pixels_per_frame_x)
jpos_bar.hit_ms = interpolated_ms
if pixels_per_ms == 0:
jpos_bar.load_ms = jpos_bar.hit_ms
else:
jpos_bar.load_ms = jpos_bar.hit_ms - (self.distance / pixels_per_ms)
jpos_bar.type = 0
jpos_bar.display = False
jpos_bar.gogo_time = gogo_time
jpos_bar.bpm = bpm
jpos_bar.judge_pos_x = interp_x
jpos_bar.judge_pos_y = interp_y
bisect.insort(curr_bar_list, jpos_bar, key=lambda x: x.load_ms)
jpos_timeline = TimelineObject()
jpos_timeline.hit_ms = interpolated_ms
jpos_timeline.judge_pos_x = interp_x
jpos_timeline.judge_pos_y = interp_y
bisect.insort(curr_timeline, jpos_timeline, key=lambda x: x.hit_ms)
judge_pos_x = judge_target_x
judge_pos_y = judge_target_y
continue
elif '#NMSCROLL' in part:
scroll_type = ScrollType.NMSCROLL
@@ -934,6 +1292,10 @@ class TJAParser:
bisect.insort(curr_bar_list, bpmchange_bar, key=lambda x: x.load_ms)
else:
bpm = parsed_bpm
timeline_obj = TimelineObject()
timeline_obj.hit_ms = self.current_ms
timeline_obj.bpm = bpm
bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
continue
elif '#BARLINEOFF' in part:
barline_display = False
@@ -962,33 +1324,26 @@ class TJAParser:
self.current_ms += delay_ms
continue
elif part.startswith("#SUDDEN"):
# Parse #SUDDEN command
parts = part.split()
if len(parts) >= 3:
appear_duration = float(parts[1])
moving_duration = float(parts[2])
# Convert to milliseconds
sudden_appear = appear_duration * 1000
sudden_moving = moving_duration * 1000
# Handle special case: if value is 0, treat as infinity
if sudden_appear == 0:
sudden_appear = float('inf')
if sudden_moving == 0:
sudden_moving = float('inf')
continue
#Unrecognized commands will be skipped for now
elif len(part) > 0 and not part[0].isdigit():
logger.warning(f"Unrecognized command: {part} in TJA {self.file_path}")
continue
ms_per_measure = get_ms_per_measure(bpm, time_signature)
#Create note object
bar_line = Note()
#Determines how quickly the notes need to move across the screen to reach the judgment circle in time
bar_line.pixels_per_frame_x = get_pixels_per_frame(bpm * time_signature * x_scroll_modifier, time_signature*4, self.distance)
bar_line.pixels_per_frame_y = get_pixels_per_frame(bpm * time_signature * y_scroll_modifier, time_signature*4, self.distance)
pixels_per_ms = get_pixels_per_ms(bar_line.pixels_per_frame_x)
@@ -1001,7 +1356,6 @@ class TJAParser:
bar_line.type = 0
bar_line.display = barline_display
bar_line.gogo_time = gogo_time
bar_line.bpm = bpm
if barline_added:
bar_line.display = False
@@ -1016,7 +1370,6 @@ class TJAParser:
bisect.insort(curr_bar_list, bar_line, key=lambda x: x.load_ms)
barline_added = True
#Empty bar is still a bar, otherwise start increment
if len(part) == 0:
self.current_ms += ms_per_measure
increment = 0
@@ -1040,6 +1393,7 @@ class TJAParser:
add_delay_bar(delay_last_note_ms, delay_current)
delay_current = 0
note = Note()
delay_last_note_ms = self.current_ms
note.hit_ms = self.current_ms
@@ -1051,7 +1405,6 @@ class TJAParser:
else note.hit_ms - (self.distance / pixels_per_ms))
note.type = int(item)
note.index = index
note.bpm = bpm
note.gogo_time = gogo_time
note.moji = -1
note.lyric = lyric
@@ -1081,15 +1434,14 @@ class TJAParser:
else:
note.load_ms = note.hit_ms - (self.distance / new_pixels_per_ms)
note.pixels_per_frame_x = prev_note.pixels_per_frame_x
self.current_ms += increment
curr_note_list.append(note)
bisect.insort(curr_draw_list, note, key=lambda x: x.load_ms)
self.get_moji(curr_note_list, ms_per_measure)
index += 1
prev_note = note
# Sorting by load_ms is necessary for drawing, as some notes appear on the
# screen slower regardless of when they reach the judge circle
# Bars can be sorted like this because they don't need hit detection
return master_notes, branch_m, branch_e, branch_n
def hash_note_data(self, notes: NoteList):
@@ -1120,10 +1472,16 @@ def modifier_speed(notes: NoteList, value: float):
modded_bars = notes.bars.copy()
for note in modded_notes:
note.pixels_per_frame_x *= value
note.load_ms = note.hit_ms - (866 * global_tex.screen_scale / get_pixels_per_ms(note.pixels_per_frame_x))
pixels_per_ms = get_pixels_per_ms(note.pixels_per_frame_x)
if pixels_per_ms == 0:
continue
note.load_ms = note.hit_ms - (866 * global_tex.screen_scale / pixels_per_ms)
for bar in modded_bars:
bar.pixels_per_frame_x *= value
bar.load_ms = bar.hit_ms - (866 * global_tex.screen_scale / get_pixels_per_ms(bar.pixels_per_frame_x))
pixels_per_ms = get_pixels_per_ms(bar.pixels_per_frame_x)
if pixels_per_ms == 0:
continue
bar.load_ms = bar.hit_ms - (866 * global_tex.screen_scale / pixels_per_ms)
return modded_notes, modded_bars
def modifier_display(notes: NoteList):

View File

@@ -214,11 +214,6 @@ class OutlinedText:
global_data.font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, len(global_data.font_codepoints))
logger.info(f"Reloaded font with {len(global_data.font_codepoints)} codepoints")
return global_data.font
codepoint_count = ray.ffi.new('int *', 0)
unique_codepoints = set(text)
codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count)
font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0)
return font
def _create_text_vertical(self, text: str, font_size: int, color: ray.Color, bg_color: ray.Color, font: Optional[ray.Font]=None, padding: int=10):
rotate_chars = {'-', '', '|', '/', '\\', '', '', '~', '', '', '(', ')',

View File

@@ -1,4 +1,3 @@
from pathlib import Path
import pyray as ray
from libs.screen import Screen
@@ -8,12 +7,6 @@ from libs.texture import tex
class DevScreen(Screen):
def on_screen_start(self):
super().on_screen_start()
self.text = ".⁉ゃん座組ス5れへデ7x事音ょ野ダHズパに相村束虹神狂'Uqはたt朗♢弥ウち”作Wシら黒さドカモ金章よ方りj沙べ口ぃご歌こ制みわ険時行×ワ獣ぺ阿啓R哀肉乱終鼓ツ,0かVしでw?2⒒悟マ乙ィの女アラA疾浄u+も’グ怒[ャロ冒陽ね路想ベ#ト醜ペ!太悪χキn初あKン〜<原Qハ1s旅をガ分ビNゼ玄沢≠食@フ拝テM豚幻濤ま人腹世P愴)っピやナJ社びB一6c畑譚]gてd曲花Oくkル第◇校*⒓森・バコ談ヤ急め愛プ重ー勝DE:Zチ東二じ車>ブ刑ミX焼おyつλ♪オい憎aFe竜そ大84得渉/◆ソC番、l†レ悲暴う胸るG“ゆS転fゅとセo「風輔双zr―-vケp‼b…響3メ罪 クL自(Iイタニムき夜幽T&楽m学走ジ島h田i美心Yボサッリュひ寅9」達"
unique_codepoints = set(self.text)
codepoint_count = ray.ffi.new('int *', 0)
unique_string = ''.join(unique_codepoints)
codepoints = ray.load_codepoints(unique_string, codepoint_count)
self.font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, len(unique_codepoints))
def on_screen_end(self, next_screen: str):
    """Hand screen teardown off to the base Screen implementation."""
    result = super().on_screen_end(next_screen)
    return result
@@ -23,4 +16,3 @@ class DevScreen(Screen):
def draw(self):
    """Paint a solid green background and render the sample text at screen center."""
    ray.draw_rectangle(0, 0, tex.screen_width, tex.screen_height, ray.GREEN)
    center = ray.Vector2(tex.screen_width // 2, tex.screen_height // 2)
    ray.draw_text_ex(self.font, "幽玄ノ乱", center, 60, 20, ray.BLACK)

View File

@@ -445,6 +445,8 @@ class Player:
self.base_score = calculate_base_score(total_notes)
#Note management
self.timeline = notes.timeline
self.timeline_index = 0
self.current_bars: list[Note] = []
self.current_notes_draw: list[Note | Drumroll | Balloon] = []
self.is_drumroll = False
@@ -456,7 +458,9 @@ class Player:
self.branch_condition_count = 0
self.branch_condition = ''
self.balloon_index = 0
self.bpm = self.play_notes[0].bpm if self.play_notes else 120
self.bpm = 120
if self.timeline and hasattr(self.timeline[self.timeline_index], 'bpm'):
self.bpm = self.timeline[self.timeline_index].bpm
def merge_branch_section(self, branch_section: NoteList, current_ms: float):
"""Merges the branch notes into the current notes"""
@@ -492,35 +496,80 @@ class Player:
if self.delay_start:
current_ms = self.delay_start
time_diff = load_ms - current_ms
if pixels_per_frame_x == 0:
return int(pixels_per_frame * 0.06 * time_diff)
return int((pixels_per_frame * 0.06 * time_diff) + ((self.tja.distance * pixels_per_frame) / pixels_per_frame_x))
def handle_tjap3_extended_commands(self, current_ms: float):
    """Apply any TJAP3 extended camera/border commands whose time has come.

    Looks at the current timeline object; once its hit time has passed,
    copies whichever camera attributes it carries onto the global camera
    and advances the timeline cursor (never past the last entry).
    """
    if not self.timeline:
        return
    obj = self.timeline[self.timeline_index]

    def due(attr: str) -> bool:
        # A command fires only when the object carries the attribute AND
        # its scheduled time has been reached.
        return hasattr(obj, attr) and obj.hit_ms <= current_ms

    applied = False
    if due('border_color'):
        global_data.camera.border_color = obj.border_color
        applied = True
    # Horizontal and vertical offsets update one component each, keeping
    # the other axis of the existing offset intact.
    if due('cam_h_offset'):
        prev = global_data.camera.offset
        global_data.camera.offset = ray.Vector2(obj.cam_h_offset, prev.y)
        applied = True
    if due('cam_v_offset'):
        prev = global_data.camera.offset
        global_data.camera.offset = ray.Vector2(prev.x, obj.cam_v_offset)
        applied = True
    # The remaining commands map one-to-one onto camera fields.
    for attr, field in (('cam_zoom', 'zoom'),
                        ('cam_h_scale', 'h_scale'),
                        ('cam_v_scale', 'v_scale'),
                        ('cam_rotation', 'rotation')):
        if due(attr):
            setattr(global_data.camera, field, getattr(obj, attr))
            applied = True
    if applied and self.timeline_index < len(self.timeline) - 1:
        self.timeline_index += 1
def get_judge_position(self, current_ms: float):
    """Get the current judgment circle position based on bar data"""
    # NOTE(review): this body mixes two lookup strategies — a timeline object
    # that writes self.judge_x/self.judge_y, and a scan over current_bars
    # that fills the locals returned below. Confirm which path callers
    # actually consume.
    judge_x = 0
    judge_y = 0
    if not self.timeline:
        # NOTE(review): returns None here but a tuple at the end — a caller
        # that unpacks the result will fail when the timeline is empty;
        # verify against call sites.
        return
    timeline_object = self.timeline[self.timeline_index]
    if hasattr(timeline_object, 'judge_pos_x') and timeline_object.hit_ms <= current_ms:
        # presumably judge_pos_* are virtual-resolution coordinates scaled
        # up to the real screen — TODO confirm
        self.judge_x = timeline_object.judge_pos_x * tex.screen_scale
        self.judge_y = timeline_object.judge_pos_y * tex.screen_scale
        if self.timeline_index < len(self.timeline) - 1:
            self.timeline_index += 1
    # Find the most recent bar with judge position data
    for bar in self.current_bars:
        if hasattr(bar, 'judge_pos_x') and bar.hit_ms <= current_ms:
            judge_x = bar.judge_pos_x * tex.screen_scale
            judge_y = bar.judge_pos_y * tex.screen_scale
        elif bar.hit_ms > current_ms:
            # assumes current_bars is ordered by hit_ms, so the scan can
            # stop at the first future bar — TODO confirm
            break
    return judge_x, judge_y
def update_bpm(self, current_ms: float):
    """Adopt the BPM of the current timeline object once its hit time passes,
    then advance the timeline cursor (never past the last entry)."""
    if not self.timeline:
        return
    current = self.timeline[self.timeline_index]
    # Nothing to do until a BPM-carrying object becomes due.
    if not hasattr(current, 'bpm') or current.hit_ms > current_ms:
        return
    self.bpm = current.bpm
    if self.timeline_index + 1 < len(self.timeline):
        self.timeline_index += 1
def animation_manager(self, animation_list: list, current_time: float):
    """Advance every animation by one tick and drop finished ones,
    mutating animation_list in place so callers keep their reference."""
    if not animation_list:
        return
    survivors = []
    for anim in animation_list:
        anim.update(current_time)
        if anim.is_finished:
            continue
        survivors.append(anim)
    # Slice-assign so the caller's list object is updated in place.
    animation_list[:] = survivors
def bar_manager(self, current_ms: float):
@@ -678,6 +727,7 @@ class Player:
self.bar_manager(current_ms)
self.play_note_manager(current_ms, background)
self.draw_note_manager(current_ms)
self.handle_tjap3_extended_commands(current_ms)
def note_correct(self, note: Note, current_time: float):
"""Removes a note from the appropriate separated list"""
@@ -902,14 +952,10 @@ class Player:
# Handle drumroll and balloon hits
if self.is_drumroll or self.is_balloon:
if not self.other_notes:
return
note = self.other_notes[0]
bpm = note.bpm
if bpm == 0:
if self.bpm == 0:
subdivision_in_ms = 0
else:
subdivision_in_ms = ms_from_start // ((60000 * 4 / bpm) / 24)
subdivision_in_ms = ms_from_start // ((60000 * 4 / self.bpm) / 24)
if subdivision_in_ms > self.last_subdivision:
self.last_subdivision = subdivision_in_ms
hit_type = DrumType.DON
@@ -920,7 +966,6 @@ class Player:
else:
# Handle DON notes
while self.don_notes and ms_from_start >= self.don_notes[0].hit_ms:
note = self.don_notes[0]
hit_type = DrumType.DON
self.autoplay_hit_side = Side.RIGHT if self.autoplay_hit_side == Side.LEFT else Side.LEFT
self.spawn_hit_effects(hit_type, self.autoplay_hit_side)
@@ -929,7 +974,6 @@ class Player:
# Handle KAT notes
while self.kat_notes and ms_from_start >= self.kat_notes[0].hit_ms:
note = self.kat_notes[0]
hit_type = DrumType.KAT
self.autoplay_hit_side = Side.RIGHT if self.autoplay_hit_side == Side.LEFT else Side.LEFT
self.spawn_hit_effects(hit_type, self.autoplay_hit_side)
@@ -1029,7 +1073,9 @@ class Player:
if self.lane_hit_effect is not None:
self.lane_hit_effect.update(current_time)
self.animation_manager(self.draw_drum_hit_list, current_time)
self.judge_x, self.judge_y = self.get_judge_position(ms_from_start)
self.get_judge_position(ms_from_start)
self.handle_tjap3_extended_commands(ms_from_start)
self.update_bpm(ms_from_start)
# More efficient arc management
finished_arcs = []
@@ -1073,7 +1119,6 @@ class Player:
next_note = min(candidates, key=lambda note: note.load_ms)
if next_note:
self.bpm = next_note.bpm
if next_note.gogo_time and not self.is_gogo_time:
self.is_gogo_time = True
self.gogo_time = GogoTime(self.is_2p)