Mirror of https://github.com/Yonokid/PyTaiko.git (synced 2026-02-04 11:40:13 +01:00)

Commit: add camera and tjap3 extended commands

PyTaiko.py (83 lines changed)
@@ -137,6 +137,40 @@ def create_song_db():
     con.commit()
     logger.info("Scores database created successfully")

+def update_camera_for_window_size(camera, virtual_width, virtual_height):
+    """Update camera zoom, offset, scale, and rotation to maintain aspect ratio"""
+    screen_width = ray.get_screen_width()
+    screen_height = ray.get_screen_height()
+
+    if screen_width == 0 or screen_height == 0:
+        camera.zoom = 1.0
+        camera.offset = ray.Vector2(0, 0)
+        camera.rotation = 0.0
+        return
+
+    scale = min(screen_width / virtual_width, screen_height / virtual_height)
+
+    base_offset_x = (screen_width - (virtual_width * scale)) * 0.5
+    base_offset_y = (screen_height - (virtual_height * scale)) * 0.5
+
+    camera.zoom = scale * global_data.camera.zoom
+
+    zoom_offset_x = (virtual_width * scale * (global_data.camera.zoom - 1.0)) * 0.5
+    zoom_offset_y = (virtual_height * scale * (global_data.camera.zoom - 1.0)) * 0.5
+
+    h_scale = global_data.camera.h_scale
+    v_scale = global_data.camera.v_scale
+
+    h_scale_offset_x = (virtual_width * scale * (h_scale - 1.0)) * 0.5
+    v_scale_offset_y = (virtual_height * scale * (v_scale - 1.0)) * 0.5
+
+    camera.offset = ray.Vector2(
+        base_offset_x - zoom_offset_x - h_scale_offset_x + (global_data.camera.offset.x * scale),
+        base_offset_y - zoom_offset_y - v_scale_offset_y + (global_data.camera.offset.y * scale)
+    )
+
+    camera.rotation = global_data.camera.rotation
+
 def main():
     force_dedicated_gpu()
     global_data.config = get_config()
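
update_camera_for_window_size() is the heart of the resize handling: one uniform scale factor (the minimum of the width and height ratios) letterboxes the fixed virtual resolution into whatever window size raylib reports, the leftover space is split evenly into centering offsets, and only then are the TJA-driven zoom, scale and offset modifiers layered on top. A minimal standalone sketch of that base letterbox math, assuming a 1280x720 virtual resolution and illustrative window sizes:

# Sketch of the letterbox math used above, independent of raylib.
def letterbox(virtual_w: int, virtual_h: int, screen_w: int, screen_h: int):
    """Return (scale, offset_x, offset_y) that centers the virtual canvas."""
    if screen_w == 0 or screen_h == 0:
        return 1.0, 0.0, 0.0
    scale = min(screen_w / virtual_w, screen_h / virtual_h)
    offset_x = (screen_w - virtual_w * scale) * 0.5
    offset_y = (screen_h - virtual_h * scale) * 0.5
    return scale, offset_x, offset_y

print(letterbox(1280, 720, 1920, 1080))  # (1.5, 0.0, 0.0)   fills a 16:9 window exactly
print(letterbox(1280, 720, 1600, 1200))  # (1.25, 0.0, 150.0) letterboxes a 4:3 window (bars above and below)
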
@@ -171,6 +205,7 @@ def main():
     ray.set_target_fps(global_data.config["video"]["target_fps"])
     logger.info(f"Target FPS set to {global_data.config['video']['target_fps']}")
     ray.set_config_flags(ray.ConfigFlags.FLAG_MSAA_4X_HINT)
+    ray.set_config_flags(ray.ConfigFlags.FLAG_WINDOW_RESIZABLE)
     ray.set_trace_log_level(ray.TraceLogLevel.LOG_WARNING)

     ray.init_window(screen_width, screen_height, "PyTaiko")
@@ -267,15 +302,20 @@ def main():
         Screens.DAN_RESULT: dan_result_screen,
         Screens.LOADING: load_screen
     }
-    target = ray.load_render_texture(screen_width, screen_height)
-    ray.gen_texture_mipmaps(target.texture)
-    ray.set_texture_filter(target.texture, ray.TextureFilter.TEXTURE_FILTER_TRILINEAR)
+    camera = ray.Camera2D()
+    camera.target = ray.Vector2(0, 0)
+    camera.rotation = 0.0
+    update_camera_for_window_size(camera, screen_width, screen_height)
+    logger.info("Camera2D initialized")
+
     ray.rl_set_blend_factors_separate(RL_SRC_ALPHA, RL_ONE_MINUS_SRC_ALPHA, RL_ONE, RL_ONE_MINUS_SRC_ALPHA, RL_FUNC_ADD, RL_FUNC_ADD)
     ray.set_exit_key(global_data.config["keys"]["exit_key"])

     ray.hide_cursor()
     logger.info("Cursor hidden")
     last_fps = 1
+    last_color = ray.BLACK

     while not ray.window_should_close():
         if ray.is_key_pressed(global_data.config["keys"]["fullscreen_key"]):
@@ -285,25 +325,21 @@ def main():
             ray.toggle_borderless_windowed()
             logger.info("Toggled borderless windowed mode")

-        curr_screen_width = ray.get_screen_width()
-        curr_screen_height = ray.get_screen_height()
-
-        if curr_screen_width == 0 or curr_screen_height == 0:
-            dest_rect = ray.Rectangle(0, 0, screen_width, screen_height)
-        else:
-            scale = min(curr_screen_width / screen_width, curr_screen_height / screen_height)
-            dest_rect = ray.Rectangle((curr_screen_width - (screen_width * scale)) * 0.5,
-                                      (curr_screen_height - (screen_height * scale)) * 0.5,
-                                      screen_width * scale, screen_height * scale)
-
-        ray.begin_texture_mode(target)
+        update_camera_for_window_size(camera, screen_width, screen_height)
+
+        ray.begin_drawing()
+        if global_data.camera.border_color != last_color:
+            ray.clear_background(global_data.camera.border_color)
+            last_color = global_data.camera.border_color
+
+        ray.begin_mode_2d(camera)
         ray.begin_blend_mode(ray.BlendMode.BLEND_CUSTOM_SEPARATE)

         screen = screen_mapping[current_screen]

         next_screen = screen.update()
         if screen.screen_init:
-            ray.clear_background(ray.BLACK)
             screen._do_draw()

         if next_screen is not None:
@@ -321,19 +357,16 @@ def main():
             ray.draw_text(f'{last_fps} FPS', 20, 20, 20, ray.YELLOW)
         else:
             ray.draw_text(f'{last_fps} FPS', 20, 20, 20, ray.LIME)

+        ray.draw_rectangle(-screen_width, 0, screen_width, screen_height, last_color)
+        ray.draw_rectangle(screen_width, 0, screen_width, screen_height, last_color)
+        ray.draw_rectangle(0, -screen_height, screen_width, screen_height, last_color)
+        ray.draw_rectangle(0, screen_height, screen_width, screen_height, last_color)

         ray.end_blend_mode()
-        ray.end_texture_mode()
-        ray.begin_drawing()
-        ray.clear_background(ray.BLACK)
-        ray.draw_texture_pro(
-            target.texture,
-            ray.Rectangle(0, 0, target.texture.width, -target.texture.height),
-            dest_rect,
-            ray.Vector2(0,0),
-            0,
-            ray.WHITE
-        )
+        ray.end_mode_2d()
         ray.end_drawing()

     ray.close_window()
     audio.close_audio_device()
     logger.info("Window closed and audio device shut down")
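
With the Camera2D in place, the old off-screen render texture and its draw_texture_pro letterboxing are gone: each frame the camera's zoom and offset are recomputed from the live window size and everything is drawn directly in virtual coordinates between begin_mode_2d() and end_mode_2d(). A stripped-down sketch of that loop shape, assuming a 1280x720 virtual resolution (the real main() also configures blending, screens and input):

import pyray as ray

VIRTUAL_W, VIRTUAL_H = 1280, 720  # assumed virtual resolution for illustration

ray.set_config_flags(ray.ConfigFlags.FLAG_WINDOW_RESIZABLE)
ray.init_window(VIRTUAL_W, VIRTUAL_H, "camera loop sketch")
camera = ray.Camera2D()
camera.target = ray.Vector2(0, 0)
camera.zoom = 1.0

while not ray.window_should_close():
    # recompute zoom/offset from the current window size each frame
    screen_w, screen_h = ray.get_screen_width(), ray.get_screen_height()
    scale = min(screen_w / VIRTUAL_W, screen_h / VIRTUAL_H) if screen_w and screen_h else 1.0
    camera.zoom = scale
    camera.offset = ray.Vector2((screen_w - VIRTUAL_W * scale) * 0.5,
                                (screen_h - VIRTUAL_H * scale) * 0.5)

    ray.begin_drawing()
    ray.clear_background(ray.BLACK)
    ray.begin_mode_2d(camera)   # everything inside is in virtual coordinates
    ray.draw_rectangle(0, 0, VIRTUAL_W, VIRTUAL_H, ray.DARKGRAY)
    ray.end_mode_2d()
    ray.end_drawing()

ray.close_window()
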
@@ -24,6 +24,8 @@ SCENEPRESET,Supported,OpenTaiko (0auBSQ) v0.6.0
 #LYRIC,Supported,TJAPlayer2 for.PC
 #SUDDEN,Supported,TJAPlayer2 for.PC
 #JPOSSCROLL,Supported,TJAPlayer2 for.PC
+#OBJ / #CAM,Supported,TJAPlayer3-Extended
+#BORDERCOLOR,Supported,TJAPlayer3-Extended

 SONGVOL,Unsupported,TaikoJiro v1.66
 SEVOL,Unsupported,TaikoJiro v1.66
@@ -67,8 +69,6 @@ BGOFFSET,Unsupported,Unknown
 #NEXTSONG,Unsupported,TJAPlayer3 v1.5.0
 #PAPAMAMA,Unsupported,TJAPlayer3-f v1.7.2.0
 #ENABLEDORON / #DISABLEDORON,Unsupported,TJAPlayer3-Extended
-#OBJ / #CAM,Unsupported,TJAPlayer3-Extended
-#BORDERCOLOR,Unsupported,TJAPlayer3-Extended
 #CHANGETEXTURE / #RESETTEXTURE,Unsupported,TJAPlayer3-Extended
 #SETCONFIG,Unsupported,TJAPlayer3-Extended
 #BARLINE,Unsupported,taiko-web (plugin Custom Barlines)
@@ -120,6 +120,14 @@ class SessionData:
     result_data: ResultData = field(default_factory=lambda: ResultData())
     dan_result_data: DanResultData = field(default_factory=lambda: DanResultData())

+class Camera:
+    offset: ray.Vector2 = ray.Vector2(0, 0)
+    zoom: float = 1.0
+    h_scale: float = 1.0
+    v_scale: float = 1.0
+    rotation: float = 0.0
+    border_color: ray.Color = ray.BLACK
+
 @dataclass
 class GlobalData:
     """
@@ -139,6 +147,7 @@ class GlobalData:
     session_data (list[SessionData]): Session data for both players.
     """
     songs_played: int = 0
+    camera: Camera = Camera()
     font: ray.Font = ray.get_font_default()
     font_codepoints = set()
     config: Config = field(default_factory=dict)
libs/tja.py (484 lines changed)
@@ -10,6 +10,8 @@ from functools import lru_cache
 from pathlib import Path
 from typing import Optional

+import pyray as ray
+
 from libs.global_data import Modifiers
 from libs.utils import get_pixels_per_frame, strip_comments, global_tex

@@ -39,6 +41,36 @@ class NoteType(IntEnum):
     TAIL = 8
     KUSUDAMA = 9

+@dataclass()
+class TimelineObject:
+    hit_ms: float = field(init=False)
+    load_ms: float = field(init=False)
+
+    judge_pos_x: float = field(init=False)
+    judge_pos_y: float = field(init=False)
+    border_color: ray.Color = field(init=False)
+    cam_h_offset: float = field(init=False)
+    cam_v_offset: float = field(init=False)
+    cam_h_scale: float = field(init=False)
+    cam_v_scale: float = field(init=False)
+    cam_zoom: float = field(init=False)
+    cam_rotation: float = field(init=False)
+
+    bpm: float = field(init=False)
+    '''
+    gogo_time: bool = field(init=False)
+    branch_params: str = field(init=False)
+    is_branch_start: bool = False
+    is_section_marker: bool = False
+    sudden_appear_ms: float = 0
+    sudden_moving_ms: float = 0
+    '''
+
+    def __lt__(self, other):
+        """Allow sorting by load_ms"""
+        return self.load_ms < other.load_ms
+
+
 @dataclass()
 class Note:
     """A note in a TJA file.
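
Because every TimelineObject field is declared with field(init=False) and no default, a freshly constructed instance carries none of these attributes until a parser handler assigns them; later code can therefore probe an entry with hasattr() to see which event types it holds. A small sketch of that sparse-event pattern (the Event class and its field names are illustrative, not part of the commit):

from dataclasses import dataclass, field

@dataclass()
class Event:
    hit_ms: float = field(init=False)
    cam_zoom: float = field(init=False)
    bpm: float = field(init=False)

e = Event()
e.hit_ms = 1200.0
e.cam_zoom = 1.5

print(hasattr(e, 'cam_zoom'))  # True  -> this event carries a zoom change
print(hasattr(e, 'bpm'))       # False -> no BPM change at this point
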
@@ -51,7 +83,6 @@ class Note:
         pixels_per_frame_y (float): The number of pixels per frame in the y direction.
         display (bool): Whether the note should be displayed.
         index (int): The index of the note.
-        bpm (float): The beats per minute of the song.
         gogo_time (bool): Whether the note is a gogo time note.
         moji (int): The text drawn below the note.
         is_branch_start (bool): Whether the note is the start of a branch.
@@ -64,7 +95,6 @@ class Note:
     pixels_per_frame_y: float = field(init=False)
     display: bool = field(init=False)
     index: int = field(init=False)
-    bpm: float = field(init=False)
     gogo_time: bool = field(init=False)
     moji: int = field(init=False)
     is_branch_start: bool = field(init=False)
@@ -72,8 +102,6 @@ class Note:
     lyric: str = field(init=False)
     sudden_appear_ms: float = field(init=False)
     sudden_moving_ms: float = field(init=False)
-    judge_pos_x: float = field(init=False)
-    judge_pos_y: float = field(init=False)

     def __lt__(self, other):
         return self.hit_ms < other.hit_ms
@@ -185,18 +213,21 @@ class NoteList:
     play_notes: list[Note | Drumroll | Balloon] = field(default_factory=lambda: [])
     draw_notes: list[Note | Drumroll | Balloon] = field(default_factory=lambda: [])
     bars: list[Note] = field(default_factory=lambda: [])
+    timeline: list[TimelineObject] = field(default_factory=lambda: [])

     def __add__(self, other: 'NoteList') -> 'NoteList':
         return NoteList(
             play_notes=self.play_notes + other.play_notes,
             draw_notes=self.draw_notes + other.draw_notes,
-            bars=self.bars + other.bars
+            bars=self.bars + other.bars,
+            timeline=self.timeline + other.timeline
         )

     def __iadd__(self, other: 'NoteList') -> 'NoteList':
         self.play_notes += other.play_notes
         self.draw_notes += other.draw_notes
         self.bars += other.bars
+        self.timeline += other.timeline
         return self

 @dataclass
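
The new timeline list is kept ordered the same way as the other note lists: entries are inserted with bisect.insort and a key function rather than appended and re-sorted. A minimal sketch of that keyed insertion (tuples stand in for TimelineObject; note that bisect.insort's key= argument requires Python 3.10 or newer):

import bisect

timeline = []  # (hit_ms, payload) stand-ins for TimelineObject entries
for hit_ms in (2000, 500, 1200):
    bisect.insort(timeline, (hit_ms, {}), key=lambda x: x[0])

print([ms for ms, _ in timeline])  # [500, 1200, 2000]
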
@@ -618,6 +649,45 @@ class TJAParser:
             else:
                 play_note_list[-3].moji = 2

+    def apply_easing(self, t, easing_point, easing_function):
+        """Apply easing function to normalized time value t (0 to 1)"""
+        if easing_point == 'IN':
+            pass  # t stays as is
+        elif easing_point == 'OUT':
+            t = 1 - t
+        elif easing_point == 'IN_OUT':
+            if t < 0.5:
+                t = t * 2
+            else:
+                t = (1 - t) * 2
+
+        if easing_function == 'LINEAR':
+            result = t
+        elif easing_function == 'CUBIC':
+            result = t ** 3
+        elif easing_function == 'QUARTIC':
+            result = t ** 4
+        elif easing_function == 'QUINTIC':
+            result = t ** 5
+        elif easing_function == 'SINUSOIDAL':
+            import math
+            result = 1 - math.cos((t * math.pi) / 2)
+        elif easing_function == 'EXPONENTIAL':
+            result = 0 if t == 0 else 2 ** (10 * (t - 1))
+        elif easing_function == 'CIRCULAR':
+            import math
+            result = 1 - math.sqrt(1 - t ** 2)
+        else:
+            result = t
+
+        if easing_point == 'OUT':
+            result = 1 - result
+        elif easing_point == 'IN_OUT':
+            if t >= 0.5:
+                result = 1 - result
+
+        return result
+
     def notes_to_position(self, diff: int):
         """Parse a TJA's notes into a NoteList."""
         master_notes = NoteList()
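
apply_easing() maps a normalized time t in [0, 1] through one of several curves; 'IN' applies the curve directly, 'OUT' mirrors it, and 'IN_OUT' folds the two halves together. A few spot checks of the cubic case, restated as a standalone helper (not the parser method itself):

def ease_cubic(t: float, point: str) -> float:
    """Cubic easing with the same IN/OUT mirroring idea as apply_easing."""
    if point == 'OUT':
        t = 1 - t
    result = t ** 3
    if point == 'OUT':
        result = 1 - result
    return result

print(ease_cubic(0.5, 'IN'))   # 0.125 -> slow start
print(ease_cubic(0.5, 'OUT'))  # 0.875 -> fast start, slow finish
print(ease_cubic(1.0, 'IN'), ease_cubic(1.0, 'OUT'))  # 1.0 1.0 at the endpoint
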
@@ -630,10 +700,54 @@ class TJAParser:
         index = 0
         sudden_appear = 0
         sudden_moving = 0
-        judge_pos_x = 0 # Offset from default judgment position
+        judge_pos_x = 0
         judge_pos_y = 0
-        judge_target_x = 0 # Target position for interpolation
+        judge_target_x = 0
         judge_target_y = 0
+        border_color = ray.BLACK
+        cam_h_offset = 0
+        cam_v_offset = 0
+        cam_h_move_active = False
+        cam_h_move_start_ms = 0
+        cam_h_move_duration_ms = 0
+        cam_h_move_start_offset = 0
+        cam_h_move_end_offset = 0
+        cam_h_easing_point = None
+        cam_h_easing_function = None
+        cam_v_move_active = False
+        cam_v_move_start_ms = 0
+        cam_v_move_duration_ms = 0
+        cam_v_move_start_offset = 0
+        cam_v_move_end_offset = 0
+        cam_v_easing_point = None
+        cam_v_easing_function = None
+        cam_zoom_move_active = False
+        cam_zoom_move_start_ms = 0
+        cam_zoom_start = 1.0
+        cam_zoom_end = 1.0
+        cam_zoom_easing_point = ""
+        cam_zoom_easing_function = ""
+        cam_h_scale = 1.0
+        cam_h_scale_move_active = False
+        cam_h_scale_move_start_ms = 0
+        cam_h_scale_start = 1.0
+        cam_h_scale_end = 1.0
+        cam_h_scale_easing_point = ""
+        cam_h_scale_easing_function = ""
+        cam_v_scale = 1.0
+        cam_v_scale_move_active = False
+        cam_v_scale_move_start_ms = 0
+        cam_v_scale_start = 1.0
+        cam_v_scale_end = 1.0
+        cam_v_scale_easing_point = ""
+        cam_v_scale_easing_function = ""
+        cam_rotation = 0.0
+        cam_rotation_move_active = False
+        cam_rotation_move_start_ms = 0
+        cam_rotation_start = 0.0
+        cam_rotation_end = 0.0
+        cam_rotation_easing_point = ""
+        cam_rotation_easing_function = ""
         time_signature = 4/4
         bpm = self.metadata.bpm
         x_scroll_modifier = 1
@@ -643,6 +757,11 @@ class TJAParser:
         curr_note_list = master_notes.play_notes
         curr_draw_list = master_notes.draw_notes
         curr_bar_list = master_notes.bars
+        curr_timeline = master_notes.timeline
+        init_bpm = TimelineObject()
+        init_bpm.hit_ms = self.current_ms
+        init_bpm.bpm = bpm
+        curr_timeline.append(init_bpm)
         start_branch_ms = 0
         start_branch_bpm = bpm
         start_branch_time_sig = time_signature
@@ -656,14 +775,269 @@ class TJAParser:
         is_section_start = False
         section_bar = None
         lyric = ""

         for bar in notes:
-            #Length of the bar is determined by number of notes excluding commands
             bar_length = sum(len(part) for part in bar if '#' not in part)
             barline_added = False

             for part in bar:
+                if part.startswith('#BORDERCOLOR'):
+                    r, g, b = part[13:].split(',')
+                    border_color = ray.Color(int(r), int(g), int(b), 255)
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.border_color = border_color
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
+                if part.startswith('#CAMRESET'):
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.cam_h_offset = 0
+                    timeline_obj.cam_v_offset = 0
+                    timeline_obj.cam_zoom = 1
+                    timeline_obj.cam_h_scale = 1
+                    timeline_obj.cam_v_scale = 1
+                    timeline_obj.cam_rotation = 0
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
+
+                # Horizontal Offset Commands
+                if part.startswith('#CAMHOFFSET'):
+                    cam_h_offset = float(part[12:])
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.cam_h_offset = cam_h_offset
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
+                if part.startswith('#CAMHMOVESTART'):
+                    parts = part[15:].split(',')
+                    if len(parts) >= 4:
+                        cam_h_move_start_offset = float(parts[0].strip())
+                        cam_h_move_end_offset = float(parts[1].strip())
+                        cam_h_easing_point = parts[2].strip()
+                        cam_h_easing_function = parts[3].strip()
+                        cam_h_move_active = True
+                        cam_h_move_start_ms = self.current_ms
+                        cam_h_offset = cam_h_move_start_offset
+                    continue
+                if part.startswith('#CAMHMOVEEND'):
+                    if cam_h_move_active:
+                        cam_h_move_duration_ms = self.current_ms - cam_h_move_start_ms
+                        interpolation_interval_ms = 8
+                        num_steps = int(cam_h_move_duration_ms / interpolation_interval_ms)
+                        for step in range(num_steps + 1):
+                            t = step / max(num_steps, 1)
+                            eased_t = self.apply_easing(t, cam_h_easing_point, cam_h_easing_function)
+                            interpolated_ms = cam_h_move_start_ms + (step * interpolation_interval_ms)
+                            interp_offset = cam_h_move_start_offset + (
+                                (cam_h_move_end_offset - cam_h_move_start_offset) * eased_t
+                            )
+                            cam_timeline = TimelineObject()
+                            cam_timeline.hit_ms = interpolated_ms
+                            cam_timeline.cam_h_offset = interp_offset
+                            curr_timeline.append(cam_timeline)
+                        cam_h_offset = cam_h_move_end_offset
+                        cam_h_move_active = False
+                    continue
+
+                # Vertical Offset Commands
+                if part.startswith('#CAMVOFFSET'):
+                    cam_v_offset = float(part[12:])
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.cam_v_offset = cam_v_offset
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
+                if part.startswith('#CAMVMOVESTART'):
+                    parts = part[15:].split(',')
+                    if len(parts) >= 4:
+                        cam_v_move_start_offset = float(parts[0].strip())
+                        cam_v_move_end_offset = float(parts[1].strip())
+                        cam_v_easing_point = parts[2].strip()
+                        cam_v_easing_function = parts[3].strip()
+                        cam_v_move_active = True
+                        cam_v_move_start_ms = self.current_ms
+                        cam_v_offset = cam_v_move_start_offset
+                    continue
+                if part.startswith('#CAMVMOVEEND'):
+                    if cam_v_move_active:
+                        cam_v_move_duration_ms = self.current_ms - cam_v_move_start_ms
+                        interpolation_interval_ms = 8
+                        num_steps = int(cam_v_move_duration_ms / interpolation_interval_ms)
+                        for step in range(num_steps + 1):
+                            t = step / max(num_steps, 1)
+                            eased_t = self.apply_easing(t, cam_v_easing_point, cam_v_easing_function)
+                            interpolated_ms = cam_v_move_start_ms + (step * interpolation_interval_ms)
+                            interp_offset = cam_v_move_start_offset + (
+                                (cam_v_move_end_offset - cam_v_move_start_offset) * eased_t
+                            )
+                            cam_timeline = TimelineObject()
+                            cam_timeline.hit_ms = interpolated_ms
+                            cam_timeline.cam_v_offset = interp_offset
+                            curr_timeline.append(cam_timeline)
+                        cam_v_offset = cam_v_move_end_offset
+                        cam_v_move_active = False
+                    continue
+
+                # Zoom Commands
+                if part.startswith('#CAMZOOMSTART'):
+                    parts = part[14:].split(',')
+                    if len(parts) >= 4:
+                        cam_zoom_start = float(parts[0].strip())
+                        cam_zoom_end = float(parts[1].strip())
+                        cam_zoom_easing_point = parts[2].strip()
+                        cam_zoom_easing_function = parts[3].strip()
+                        cam_zoom_move_active = True
+                        cam_zoom_move_start_ms = self.current_ms
+                        cam_zoom = cam_zoom_start
+                    continue
+                if part.startswith('#CAMZOOMEND'):
+                    if cam_zoom_move_active:
+                        cam_zoom_move_duration_ms = self.current_ms - cam_zoom_move_start_ms
+                        interpolation_interval_ms = 8
+                        num_steps = int(cam_zoom_move_duration_ms / interpolation_interval_ms)
+                        for step in range(num_steps + 1):
+                            t = step / max(num_steps, 1)
+                            eased_t = self.apply_easing(t, cam_zoom_easing_point, cam_zoom_easing_function)
+                            interpolated_ms = cam_zoom_move_start_ms + (step * interpolation_interval_ms)
+                            interp_zoom = cam_zoom_start + (
+                                (cam_zoom_end - cam_zoom_start) * eased_t
+                            )
+                            cam_timeline = TimelineObject()
+                            cam_timeline.hit_ms = interpolated_ms
+                            cam_timeline.cam_zoom = interp_zoom
+                            curr_timeline.append(cam_timeline)
+                        cam_zoom = cam_zoom_end
+                        cam_zoom_move_active = False
+                    continue
+                if part.startswith('#CAMZOOM'):
+                    cam_zoom = float(part[9:])
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.cam_zoom = cam_zoom
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
+
+                # Horizontal Scale Commands
+                if part.startswith('#CAMHSCALESTART'):
+                    parts = part[16:].split(',')
+                    if len(parts) >= 4:
+                        cam_h_scale_start = float(parts[0].strip())
+                        cam_h_scale_end = float(parts[1].strip())
+                        cam_h_scale_easing_point = parts[2].strip()
+                        cam_h_scale_easing_function = parts[3].strip()
+                        cam_h_scale_move_active = True
+                        cam_h_scale_move_start_ms = self.current_ms
+                        cam_h_scale = cam_h_scale_start
+                    continue
+                if part.startswith('#CAMHSCALEEND'):
+                    if cam_h_scale_move_active:
+                        cam_h_scale_move_duration_ms = self.current_ms - cam_h_scale_move_start_ms
+                        interpolation_interval_ms = 8
+                        num_steps = int(cam_h_scale_move_duration_ms / interpolation_interval_ms)
+                        for step in range(num_steps + 1):
+                            t = step / max(num_steps, 1)
+                            eased_t = self.apply_easing(t, cam_h_scale_easing_point, cam_h_scale_easing_function)
+                            interpolated_ms = cam_h_scale_move_start_ms + (step * interpolation_interval_ms)
+                            interp_scale = cam_h_scale_start + (
+                                (cam_h_scale_end - cam_h_scale_start) * eased_t
+                            )
+                            cam_timeline = TimelineObject()
+                            cam_timeline.hit_ms = interpolated_ms
+                            cam_timeline.cam_h_scale = interp_scale
+                            curr_timeline.append(cam_timeline)
+                        cam_h_scale = cam_h_scale_end
+                        cam_h_scale_move_active = False
+                    continue
+                if part.startswith('#CAMHSCALE'):
+                    cam_h_scale = float(part[11:])
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.cam_h_scale = cam_h_scale
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
+
+                # Vertical Scale Commands
+                if part.startswith('#CAMVSCALESTART'):
+                    parts = part[16:].split(',')
+                    if len(parts) >= 4:
+                        cam_v_scale_start = float(parts[0].strip())
+                        cam_v_scale_end = float(parts[1].strip())
+                        cam_v_scale_easing_point = parts[2].strip()
+                        cam_v_scale_easing_function = parts[3].strip()
+                        cam_v_scale_move_active = True
+                        cam_v_scale_move_start_ms = self.current_ms
+                        cam_v_scale = cam_v_scale_start
+                    continue
+                if part.startswith('#CAMVSCALEEND'):
+                    if cam_v_scale_move_active:
+                        cam_v_scale_move_duration_ms = self.current_ms - cam_v_scale_move_start_ms
+                        interpolation_interval_ms = 8
+                        num_steps = int(cam_v_scale_move_duration_ms / interpolation_interval_ms)
+                        for step in range(num_steps + 1):
+                            t = step / max(num_steps, 1)
+                            eased_t = self.apply_easing(t, cam_v_scale_easing_point, cam_v_scale_easing_function)
+                            interpolated_ms = cam_v_scale_move_start_ms + (step * interpolation_interval_ms)
+                            interp_scale = cam_v_scale_start + (
+                                (cam_v_scale_end - cam_v_scale_start) * eased_t
+                            )
+                            cam_timeline = TimelineObject()
+                            cam_timeline.hit_ms = interpolated_ms
+                            cam_timeline.cam_v_scale = interp_scale
+                            curr_timeline.append(cam_timeline)
+                        cam_v_scale = cam_v_scale_end
+                        cam_v_scale_move_active = False
+                    continue
+                if part.startswith('#CAMVSCALE'):
+                    cam_v_scale = float(part[11:])
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.cam_v_scale = cam_v_scale
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
+
+                # Rotation Commands
+                if part.startswith('#CAMROTATIONSTART'):
+                    parts = part[18:].split(',')
+                    if len(parts) >= 4:
+                        cam_rotation_start = float(parts[0].strip())
+                        cam_rotation_end = float(parts[1].strip())
+                        cam_rotation_easing_point = parts[2].strip()
+                        cam_rotation_easing_function = parts[3].strip()
+                        cam_rotation_move_active = True
+                        cam_rotation_move_start_ms = self.current_ms
+                        cam_rotation = cam_rotation_start
+                    continue
+                if part.startswith('#CAMROTATIONEND'):
+                    if cam_rotation_move_active:
+                        cam_rotation_move_duration_ms = self.current_ms - cam_rotation_move_start_ms
+                        interpolation_interval_ms = 8
+                        num_steps = int(cam_rotation_move_duration_ms / interpolation_interval_ms)
+                        for step in range(num_steps + 1):
+                            t = step / max(num_steps, 1)
+                            eased_t = self.apply_easing(t, cam_rotation_easing_point, cam_rotation_easing_function)
+                            interpolated_ms = cam_rotation_move_start_ms + (step * interpolation_interval_ms)
+                            interp_rotation = cam_rotation_start + (
+                                (cam_rotation_end - cam_rotation_start) * eased_t
+                            )
+                            cam_timeline = TimelineObject()
+                            cam_timeline.hit_ms = interpolated_ms
+                            cam_timeline.cam_rotation = interp_rotation
+                            curr_timeline.append(cam_timeline)
+                        cam_rotation = cam_rotation_end
+                        cam_rotation_move_active = False
+                    continue
+                if part.startswith('#CAMROTATION'):
+                    cam_rotation = float(part[13:])
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.cam_rotation = cam_rotation
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
+                    continue
                 if part.startswith('#SECTION'):
                     is_section_start = True
                     continue

                 if part.startswith('#BRANCHSTART'):
                     start_branch_ms = self.current_ms
                     start_branch_bpm = bpm
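
Each paired START/END camera command is expanded at parse time: when the END command is reached, the span between the two commands is pre-baked into TimelineObject entries spaced 8 ms apart, each carrying the eased value for its instant, so playback only has to look values up instead of interpolating live. A self-contained sketch of that expansion (linear easing is assumed here and the function name is illustrative):

def bake(start_ms: float, end_ms: float, start_val: float, end_val: float,
         interval_ms: float = 8.0):
    """Expand one START/END span into (timestamp, value) entries every 8 ms."""
    duration = end_ms - start_ms
    num_steps = int(duration / interval_ms)
    entries = []
    for step in range(num_steps + 1):
        t = step / max(num_steps, 1)              # normalized 0..1
        value = start_val + (end_val - start_val) * t
        entries.append((start_ms + step * interval_ms, value))
    return entries

# e.g. a zoom sweep from 1.0 to 2.0 over 40 ms -> 6 entries, 8 ms apart
for ms, zoom in bake(0, 40, 1.0, 2.0):
    print(ms, round(zoom, 2))
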
@@ -699,7 +1073,6 @@ class TJAParser:
                     bar_line.type = 0
                     bar_line.display = False
                     bar_line.gogo_time = gogo_time
-                    bar_line.bpm = bpm
                     bar_line.branch_params = branch_params
                     bar_list.append(bar_line)

@@ -711,16 +1084,20 @@ class TJAParser:
                     if section_bar:
                         section_bar = None
                     continue

                 elif part.startswith('#BRANCHEND'):
                     curr_note_list = master_notes.play_notes
                     curr_draw_list = master_notes.draw_notes
                     curr_bar_list = master_notes.bars
+                    curr_timeline = master_notes.timeline
                     continue

                 if part == '#M':
                     branch_m.append(NoteList())
                     curr_note_list = branch_m[-1].play_notes
                     curr_draw_list = branch_m[-1].draw_notes
                     curr_bar_list = branch_m[-1].bars
+                    curr_timeline = branch_m[-1].timeline
                     self.current_ms = start_branch_ms
                     bpm = start_branch_bpm
                     time_signature = start_branch_time_sig
@@ -736,6 +1113,7 @@ class TJAParser:
                     curr_note_list = branch_e[-1].play_notes
                     curr_draw_list = branch_e[-1].draw_notes
                     curr_bar_list = branch_e[-1].bars
+                    curr_timeline = branch_e[-1].timeline
                     self.current_ms = start_branch_ms
                     bpm = start_branch_bpm
                     time_signature = start_branch_time_sig
@@ -751,6 +1129,7 @@ class TJAParser:
                     curr_note_list = branch_n[-1].play_notes
                     curr_draw_list = branch_n[-1].draw_notes
                     curr_bar_list = branch_n[-1].bars
+                    curr_timeline = branch_n[-1].timeline
                     self.current_ms = start_branch_ms
                     bpm = start_branch_bpm
                     time_signature = start_branch_time_sig
@@ -761,71 +1140,56 @@ class TJAParser:
                             count = branch_balloon_count
                     is_branching = True
                     continue

                 if '#LYRIC' in part:
                     lyric = part[6:]
                     continue

                 if '#JPOSSCROLL' in part:
                     parts = part.split()
                     if len(parts) >= 4:
                         duration_ms = float(parts[1]) * 1000
                         distance_str = parts[2]
-                        direction_deg = float(parts[3])
+                        direction = int(parts[3])  # 0 = normal, 1 = reverse

                         delta_x = 0
                         delta_y = 0

                         if 'i' in distance_str:
                             normalized = distance_str.replace('.i', 'j').replace('i', 'j')
                             normalized = normalized.replace(',', '')
                             c = complex(normalized)
-                            direction_rad = math.radians(direction_deg)
-                            cos_dir = math.cos(direction_rad)
-                            sin_dir = math.sin(direction_rad)
-                            delta_x = c.real * cos_dir - c.imag * sin_dir
-                            delta_y = c.real * sin_dir + c.imag * cos_dir
+                            delta_x = c.real
+                            delta_y = c.imag
                         else:
                             distance = float(distance_str)
-                            direction_rad = math.radians(direction_deg)
-                            delta_x = distance * math.cos(direction_rad)
-                            delta_y = distance * math.sin(direction_rad)
+                            delta_x = distance
+                            delta_y = 0
+
+                        if direction == 0:
+                            delta_x = -delta_x
+                            delta_y = -delta_y

                         judge_target_x = judge_pos_x + delta_x
                         judge_target_y = judge_pos_y + delta_y
                         interpolation_interval_ms = 8
                         num_steps = int(duration_ms / interpolation_interval_ms)

                         for step in range(num_steps + 1):
-                            t = step / max(num_steps, 1) # Interpolation factor (0 to 1)
+                            t = step / max(num_steps, 1)
                             interpolated_ms = self.current_ms + (step * interpolation_interval_ms)

-                            # Linear interpolation
                             interp_x = judge_pos_x + (delta_x * t)
                             interp_y = judge_pos_y + (delta_y * t)
-                            # Create invisible bar line to store position
-                            jpos_bar = Note()
-                            jpos_bar.pixels_per_frame_x = get_pixels_per_frame(bpm * time_signature * x_scroll_modifier, time_signature*4, self.distance)
-                            jpos_bar.pixels_per_frame_y = get_pixels_per_frame(bpm * time_signature * y_scroll_modifier, time_signature*4, self.distance)
-                            pixels_per_ms = get_pixels_per_ms(jpos_bar.pixels_per_frame_x)
-
-                            jpos_bar.hit_ms = interpolated_ms
+                            jpos_timeline = TimelineObject()
+                            pixels_per_frame_x = get_pixels_per_frame(bpm * time_signature * x_scroll_modifier, time_signature*4, self.distance)
+                            pixels_per_ms = get_pixels_per_ms(pixels_per_frame_x)
+                            jpos_timeline.hit_ms = interpolated_ms
                             if pixels_per_ms == 0:
-                                jpos_bar.load_ms = jpos_bar.hit_ms
+                                jpos_timeline.load_ms = jpos_timeline.hit_ms
                             else:
-                                jpos_bar.load_ms = jpos_bar.hit_ms - (self.distance / pixels_per_ms)
-                            jpos_bar.type = 0
-                            jpos_bar.display = False
-                            jpos_bar.gogo_time = gogo_time
-                            jpos_bar.bpm = bpm
-                            jpos_bar.judge_pos_x = interp_x
-                            jpos_bar.judge_pos_y = interp_y
-                            bisect.insort(curr_bar_list, jpos_bar, key=lambda x: x.load_ms)
+                                jpos_timeline.load_ms = jpos_timeline.hit_ms - (self.distance / pixels_per_ms)
+                            jpos_timeline.judge_pos_x = interp_x
+                            jpos_timeline.judge_pos_y = interp_y
+                            bisect.insort(curr_timeline, jpos_timeline, key=lambda x: x.load_ms)

                         judge_pos_x = judge_target_x
                         judge_pos_y = judge_target_y

                         continue
                 elif '#NMSCROLL' in part:
                     continue
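
The #JPOSSCROLL distance argument may be a complex number (for example "100+50i") encoding both axes at once; the parser normalizes the TJA-style "i" suffix to Python's "j" and lets complex() split it into x and y components. A standalone sketch of that conversion (the sample strings are illustrative):

def parse_jpos_distance(distance_str: str):
    """Return (delta_x, delta_y) from a #JPOSSCROLL distance field."""
    if 'i' in distance_str:
        normalized = distance_str.replace('.i', 'j').replace('i', 'j').replace(',', '')
        c = complex(normalized)
        return c.real, c.imag
    return float(distance_str), 0.0

print(parse_jpos_distance('100+50i'))  # (100.0, 50.0)
print(parse_jpos_distance('240'))      # (240.0, 0.0)
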
@@ -847,6 +1211,10 @@ class TJAParser:
                     continue
                 elif '#BPMCHANGE' in part:
                     bpm = float(part[11:])
+                    timeline_obj = TimelineObject()
+                    timeline_obj.hit_ms = self.current_ms
+                    timeline_obj.bpm = bpm
+                    bisect.insort(curr_timeline, timeline_obj, key=lambda x: x.hit_ms)
                     continue
                 elif '#BARLINEOFF' in part:
                     barline_display = False
@@ -864,33 +1232,26 @@ class TJAParser:
                     self.current_ms += float(part[6:]) * 1000
                     continue
                 elif part.startswith("#SUDDEN"):
-                    # Parse #SUDDEN command
                     parts = part.split()
                     if len(parts) >= 3:
                         appear_duration = float(parts[1])
                         moving_duration = float(parts[2])

-                        # Convert to milliseconds
                         sudden_appear = appear_duration * 1000
                         sudden_moving = moving_duration * 1000

-                        # Handle special case: if value is 0, treat as infinity
                         if sudden_appear == 0:
                             sudden_appear = float('inf')
                         if sudden_moving == 0:
                             sudden_moving = float('inf')
                     continue
-                #Unrecognized commands will be skipped for now
                 elif len(part) > 0 and not part[0].isdigit():
                     logger.warning(f"Unrecognized command: {part} in TJA {self.file_path}")
                     continue

                 ms_per_measure = get_ms_per_measure(bpm, time_signature)

-                #Create note object
                 bar_line = Note()

-                #Determines how quickly the notes need to move across the screen to reach the judgment circle in time
                 bar_line.pixels_per_frame_x = get_pixels_per_frame(bpm * time_signature * x_scroll_modifier, time_signature*4, self.distance)
                 bar_line.pixels_per_frame_y = get_pixels_per_frame(bpm * time_signature * y_scroll_modifier, time_signature*4, self.distance)
                 pixels_per_ms = get_pixels_per_ms(bar_line.pixels_per_frame_x)
@@ -903,7 +1264,6 @@ class TJAParser:
                 bar_line.type = 0
                 bar_line.display = barline_display
                 bar_line.gogo_time = gogo_time
-                bar_line.bpm = bpm
                 if barline_added:
                     bar_line.display = False

@@ -918,7 +1278,6 @@ class TJAParser:
                 bisect.insort(curr_bar_list, bar_line, key=lambda x: x.load_ms)
                 barline_added = True

-                #Empty bar is still a bar, otherwise start increment
                 if len(part) == 0:
                     self.current_ms += ms_per_measure
                     increment = 0
@@ -934,6 +1293,7 @@ class TJAParser:
                     if item == '9' and curr_note_list and curr_note_list[-1].type == 9:
                         self.current_ms += increment
                         continue
+
                     note = Note()
                     note.hit_ms = self.current_ms
                     note.display = True
@@ -944,7 +1304,6 @@ class TJAParser:
                                     else note.hit_ms - (self.distance / pixels_per_ms))
                     note.type = int(item)
                     note.index = index
-                    note.bpm = bpm
                     note.gogo_time = gogo_time
                     note.moji = -1
                     note.lyric = lyric
@@ -974,15 +1333,14 @@ class TJAParser:
                     else:
                         note.load_ms = note.hit_ms - (self.distance / new_pixels_per_ms)
                         note.pixels_per_frame_x = prev_note.pixels_per_frame_x

                     self.current_ms += increment
                     curr_note_list.append(note)
                     bisect.insort(curr_draw_list, note, key=lambda x: x.load_ms)
                     self.get_moji(curr_note_list, ms_per_measure)
                     index += 1
                     prev_note = note
-        # Sorting by load_ms is necessary for drawing, as some notes appear on the
-        # screen slower regardless of when they reach the judge circle
-        # Bars can be sorted like this because they don't need hit detection
         return master_notes, branch_m, branch_e, branch_n

     def hash_note_data(self, notes: NoteList):
@@ -1013,10 +1371,16 @@ def modifier_speed(notes: NoteList, value: float):
     modded_bars = notes.bars.copy()
     for note in modded_notes:
         note.pixels_per_frame_x *= value
-        note.load_ms = note.hit_ms - (866 * global_tex.screen_scale / get_pixels_per_ms(note.pixels_per_frame_x))
+        pixels_per_ms = get_pixels_per_ms(note.pixels_per_frame_x)
+        if pixels_per_ms == 0:
+            continue
+        note.load_ms = note.hit_ms - (866 * global_tex.screen_scale / pixels_per_ms)
     for bar in modded_bars:
         bar.pixels_per_frame_x *= value
-        bar.load_ms = bar.hit_ms - (866 * global_tex.screen_scale / get_pixels_per_ms(bar.pixels_per_frame_x))
+        pixels_per_ms = get_pixels_per_ms(bar.pixels_per_frame_x)
+        if pixels_per_ms == 0:
+            continue
+        bar.load_ms = bar.hit_ms - (866 * global_tex.screen_scale / pixels_per_ms)
     return modded_notes, modded_bars

 def modifier_display(notes: NoteList):
@@ -214,11 +214,6 @@ class OutlinedText:
             global_data.font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, len(global_data.font_codepoints))
             logger.info(f"Reloaded font with {len(global_data.font_codepoints)} codepoints")
             return global_data.font
-        codepoint_count = ray.ffi.new('int *', 0)
-        unique_codepoints = set(text)
-        codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count)
-        font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0)
-        return font

     def _create_text_vertical(self, text: str, font_size: int, color: ray.Color, bg_color: ray.Color, font: Optional[ray.Font]=None, padding: int=10):
         rotate_chars = {'-', '‐', '|', '/', '\\', 'ー', '~', '~', '(', ')', '(', ')',
@@ -1,4 +1,3 @@
-from pathlib import Path
 import pyray as ray

 from libs.screen import Screen
@@ -8,12 +7,6 @@ from libs.texture import tex
 class DevScreen(Screen):
     def on_screen_start(self):
         super().on_screen_start()
-        self.text = ".⁉ゃん座組ス5れへデ7?x事音ょ野ダHズパに相村束虹神狂'Uqはたt朗♢弥ウち”作Wシら黒さドカモ金章よ方りj沙べ口ぃご歌!こ制みわ険時行×ワ獣ぺ阿啓R哀肉乱終鼓ツ,0かVしでw?2⒒悟マ乙ィの女アラA疾浄u+も’グ怒[ャロ冒陽ね路想ベ#ト醜ペ!太悪χキn初あKン〜<原Qハ1s旅をガ分ビNゼ玄沢≠食@フ拝テM豚幻濤ま人腹世P愴)っピやナJ社びB一6c畑譚]gてd~曲花Oくkル第◇校*⒓森・バコ談ヤ急め愛プ重ー勝DE:Zチ東二じ車>ブ刑ミ+X:焼おyつλ♪オい憎aFe竜そ大84得渉/◆ソC番、l†レ悲暴う胸るG“ゆS転fゅとセo「風輔@双zr―-vノケp‼b…響3メ罪 クL自(Iイタニムき夜幽T&楽m学走ジ島h田i美心Yボサッリュひ寅9」達"
-        unique_codepoints = set(self.text)
-        codepoint_count = ray.ffi.new('int *', 0)
-        unique_string = ''.join(unique_codepoints)
-        codepoints = ray.load_codepoints(unique_string, codepoint_count)
-        self.font = ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, len(unique_codepoints))

     def on_screen_end(self, next_screen: str):
         return super().on_screen_end(next_screen)
@@ -23,4 +16,3 @@ class DevScreen:

     def draw(self):
         ray.draw_rectangle(0, 0, tex.screen_width, tex.screen_height, ray.GREEN)
-        ray.draw_text_ex(self.font, "幽玄ノ乱", ray.Vector2(tex.screen_width//2, tex.screen_height//2), 60, 20, ray.BLACK)
@@ -411,6 +411,8 @@ class Player:
         self.base_score = calculate_base_score(total_notes)

         #Note management
+        self.timeline = notes.timeline
+        self.timeline_index = 0
         self.current_bars: list[Note] = []
         self.current_notes_draw: list[Note | Drumroll | Balloon] = []
         self.is_drumroll = False
@@ -422,7 +424,9 @@ class Player:
         self.branch_condition_count = 0
         self.branch_condition = ''
         self.balloon_index = 0
-        self.bpm = self.play_notes[0].bpm if self.play_notes else 120
+        self.bpm = 120
+        if self.timeline and hasattr(self.timeline[self.timeline_index], 'bpm'):
+            self.bpm = self.timeline[self.timeline_index].bpm

     def merge_branch_section(self, branch_section: NoteList, current_ms: float):
         """Merges the branch notes into the current notes"""
@@ -452,35 +456,80 @@ class Player:
     def get_position_y(self, current_ms: float, load_ms: float, pixels_per_frame: float, pixels_per_frame_x) -> int:
         """Calculates the y-coordinate of a note based on its load time and current time"""
         time_diff = load_ms - current_ms
+        if pixels_per_frame_x == 0:
+            return int(pixels_per_frame * 0.06 * time_diff)
         return int((pixels_per_frame * 0.06 * time_diff) + ((self.tja.distance * pixels_per_frame) / pixels_per_frame_x))

+    def handle_tjap3_extended_commands(self, current_ms: float):
+        if not self.timeline:
+            return
+
+        timeline_object = self.timeline[self.timeline_index]
+        should_advance = False
+
+        if hasattr(timeline_object, 'border_color') and timeline_object.hit_ms <= current_ms:
+            global_data.camera.border_color = timeline_object.border_color
+            should_advance = True
+
+        if hasattr(timeline_object, 'cam_h_offset') and timeline_object.hit_ms <= current_ms:
+            orig_offset = global_data.camera.offset
+            global_data.camera.offset = ray.Vector2(timeline_object.cam_h_offset, orig_offset.y)
+            should_advance = True
+
+        if hasattr(timeline_object, 'cam_v_offset') and timeline_object.hit_ms <= current_ms:
+            orig_offset = global_data.camera.offset
+            global_data.camera.offset = ray.Vector2(orig_offset.x, timeline_object.cam_v_offset)
+            should_advance = True
+
+        if hasattr(timeline_object, 'cam_zoom') and timeline_object.hit_ms <= current_ms:
+            global_data.camera.zoom = timeline_object.cam_zoom
+            should_advance = True
+
+        if hasattr(timeline_object, 'cam_h_scale') and timeline_object.hit_ms <= current_ms:
+            global_data.camera.h_scale = timeline_object.cam_h_scale
+            should_advance = True
+
+        if hasattr(timeline_object, 'cam_v_scale') and timeline_object.hit_ms <= current_ms:
+            global_data.camera.v_scale = timeline_object.cam_v_scale
+            should_advance = True
+
+        if hasattr(timeline_object, 'cam_rotation') and timeline_object.hit_ms <= current_ms:
+            global_data.camera.rotation = timeline_object.cam_rotation
+            should_advance = True
+
+        if should_advance and self.timeline_index < len(self.timeline) - 1:
+            self.timeline_index += 1
+
     def get_judge_position(self, current_ms: float):
         """Get the current judgment circle position based on bar data"""
-        judge_x = 0
-        judge_y = 0
-
-        # Find the most recent bar with judge position data
-        for bar in self.current_bars:
-            if hasattr(bar, 'judge_pos_x') and bar.hit_ms <= current_ms:
-                judge_x = bar.judge_pos_x * tex.screen_scale
-                judge_y = bar.judge_pos_y * tex.screen_scale
-            elif bar.hit_ms > current_ms:
-                break
-
-        return judge_x, judge_y
+        if not self.timeline:
+            return
+        timeline_object = self.timeline[self.timeline_index]
+        if hasattr(timeline_object, 'judge_pos_x') and timeline_object.hit_ms <= current_ms:
+            self.judge_x = timeline_object.judge_pos_x * tex.screen_scale
+            self.judge_y = timeline_object.judge_pos_y * tex.screen_scale
+            if self.timeline_index < len(self.timeline) - 1:
+                self.timeline_index += 1
+
+    def update_bpm(self, current_ms: float):
+        if not self.timeline:
+            return
+        timeline_object = self.timeline[self.timeline_index]
+        if hasattr(timeline_object, 'bpm') and timeline_object.hit_ms <= current_ms:
+            self.bpm = timeline_object.bpm
+            if self.timeline_index < len(self.timeline) - 1:
+                self.timeline_index += 1

     def animation_manager(self, animation_list: list, current_time: float):
         if not animation_list:
             return

-        # More efficient: use list comprehension to filter out finished animations
         remaining_animations = []
         for animation in animation_list:
             animation.update(current_time)
             if not animation.is_finished:
                 remaining_animations.append(animation)

-        # Replace the original list contents
         animation_list[:] = remaining_animations

     def bar_manager(self, current_ms: float):
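
During playback the pre-sorted timeline is consumed with a single advancing cursor: only the entry at timeline_index is inspected, its attributes are applied once hit_ms has passed, and then the index moves forward. The methods above advance at most one entry per call; a catch-up variant of the same cursor idea, with illustrative (timestamp, payload) tuples standing in for TimelineObject:

def drain_due_events(timeline, index, now_ms, apply):
    """Apply every entry whose timestamp has passed; return the new index."""
    while index < len(timeline) and timeline[index][0] <= now_ms:
        apply(timeline[index][1])
        index += 1
    return index

events = [(0, 'bpm=160'), (500, 'zoom=1.5'), (1200, 'border=red')]
idx = 0
for frame_ms in (4, 600, 1300):   # simulated frame times
    idx = drain_due_events(events, idx, frame_ms, print)
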
@@ -638,6 +687,7 @@ class Player:
         self.bar_manager(current_ms)
         self.play_note_manager(current_ms, background)
         self.draw_note_manager(current_ms)
+        self.handle_tjap3_extended_commands(current_ms)

     def note_correct(self, note: Note, current_time: float):
         """Removes a note from the appropriate separated list"""
@@ -862,14 +912,10 @@ class Player:

             # Handle drumroll and balloon hits
             if self.is_drumroll or self.is_balloon:
-                if not self.other_notes:
-                    return
-                note = self.other_notes[0]
-                bpm = note.bpm
-                if bpm == 0:
+                if self.bpm == 0:
                     subdivision_in_ms = 0
                 else:
-                    subdivision_in_ms = ms_from_start // ((60000 * 4 / bpm) / 24)
+                    subdivision_in_ms = ms_from_start // ((60000 * 4 / self.bpm) / 24)
                 if subdivision_in_ms > self.last_subdivision:
                     self.last_subdivision = subdivision_in_ms
                     hit_type = DrumType.DON
@@ -880,7 +926,6 @@ class Player:
             else:
                 # Handle DON notes
                 while self.don_notes and ms_from_start >= self.don_notes[0].hit_ms:
-                    note = self.don_notes[0]
                     hit_type = DrumType.DON
                     self.autoplay_hit_side = Side.RIGHT if self.autoplay_hit_side == Side.LEFT else Side.LEFT
                     self.spawn_hit_effects(hit_type, self.autoplay_hit_side)
@@ -889,7 +934,6 @@ class Player:

                 # Handle KAT notes
                 while self.kat_notes and ms_from_start >= self.kat_notes[0].hit_ms:
-                    note = self.kat_notes[0]
                     hit_type = DrumType.KAT
                     self.autoplay_hit_side = Side.RIGHT if self.autoplay_hit_side == Side.LEFT else Side.LEFT
                     self.spawn_hit_effects(hit_type, self.autoplay_hit_side)
@@ -946,7 +990,9 @@ class Player:
         if self.lane_hit_effect is not None:
             self.lane_hit_effect.update(current_time)
         self.animation_manager(self.draw_drum_hit_list, current_time)
-        self.judge_x, self.judge_y = self.get_judge_position(ms_from_start)
+        self.get_judge_position(ms_from_start)
+        self.handle_tjap3_extended_commands(ms_from_start)
+        self.update_bpm(ms_from_start)

         # More efficient arc management
         finished_arcs = []
@@ -990,7 +1036,6 @@ class Player:
             next_note = min(candidates, key=lambda note: note.load_ms)

             if next_note:
-                self.bpm = next_note.bpm
                 if next_note.gogo_time and not self.is_gogo_time:
                     self.is_gogo_time = True
                     self.gogo_time = GogoTime(self.is_2p)