mirror of https://github.com/Yonokid/PyTaiko.git (synced 2026-02-04 03:30:13 +01:00)
refactoring
@@ -1,5 +1,5 @@
 class Animation:
-    def __init__(self, current_ms, duration, type):
+    def __init__(self, current_ms: float, duration: float, type: str):
         self.type = type
         self.start_ms = current_ms
         self.attribute = 0
@@ -7,7 +7,7 @@ class Animation:
         self.params = dict()
         self.is_finished = False

-    def update(self, current_ms):
+    def update(self, current_ms: float):
         if self.type == 'fade':
             self.fade(current_ms,
                       self.duration,
@@ -50,8 +50,8 @@ class Animation:
                       initial_size=self.params.get('final_size', 1.0),
                       delay=self.params.get('delay', 0.0) + self.duration)

-    def fade(self, current_ms, duration, initial_opacity, final_opacity, delay, ease_in, ease_out):
-        def ease_out_progress(progress, ease):
+    def fade(self, current_ms: float, duration: float, initial_opacity: float, final_opacity: float, delay: float, ease_in: str | None, ease_out: str | None) -> None:
+        def _ease_out_progress(progress: float, ease: str | None) -> float:
             if ease == 'quadratic':
                 return progress * (2 - progress)
             elif ease == 'cubic':
@@ -60,7 +60,7 @@ class Animation:
                 return 1 - pow(2, -10 * progress)
             else:
                 return progress
-        def ease_in_progress(progress, ease):
+        def _ease_in_progress(progress: float, ease: str | None) -> float:
             if ease == 'quadratic':
                 return progress * progress
             elif ease == 'cubic':
@@ -79,15 +79,15 @@ class Animation:
             self.is_finished = True

         if ease_in is not None:
-            progress = ease_in_progress(elapsed_time / duration, ease_in)
+            progress = _ease_in_progress(elapsed_time / duration, ease_in)
         elif ease_out is not None:
-            progress = ease_out_progress(elapsed_time / duration, ease_out)
+            progress = _ease_out_progress(elapsed_time / duration, ease_out)
         else:
             progress = elapsed_time / duration

         current_opacity = initial_opacity + (final_opacity - initial_opacity) * progress
         self.attribute = current_opacity
-    def move(self, current_ms, duration, total_distance, start_position, delay):
+    def move(self, current_ms: float, duration: float, total_distance: float, start_position: float, delay: float) -> None:
         elapsed_time = current_ms - self.start_ms
         if elapsed_time < delay:
             self.attribute = start_position
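The fade hunk above renames the nested easing helpers to _ease_in_progress / _ease_out_progress and types them. As a minimal standalone sketch (only the quadratic branch is reproduced here; the names mirror the diff, nothing below is new project code), this is how an eased progress value turns into an opacity:

# standalone sketch of the easing math shown above (quadratic branch only)
def _ease_in_progress(progress: float, ease: str | None) -> float:
    return progress * progress if ease == 'quadratic' else progress

def _ease_out_progress(progress: float, ease: str | None) -> float:
    return progress * (2 - progress) if ease == 'quadratic' else progress

initial_opacity, final_opacity = 0.0, 1.0
p_in = _ease_in_progress(0.5, 'quadratic')    # 0.25 -> slow start
p_out = _ease_out_progress(0.5, 'quadratic')  # 0.75 -> fast start
opacity = initial_opacity + (final_opacity - initial_opacity) * p_in  # 0.25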
@@ -99,7 +99,7 @@ class Animation:
         else:
             self.attribute = start_position + total_distance
             self.is_finished = True
-    def texture_change(self, current_ms, duration, textures):
+    def texture_change(self, current_ms: float, duration: float, textures: list[tuple[float, float, float]]) -> None:
         elapsed_time = current_ms - self.start_ms
         if elapsed_time <= duration:
             for start, end, index in textures:
@@ -107,7 +107,7 @@ class Animation:
                 self.attribute = index
         else:
             self.is_finished = True
-    def text_stretch(self, current_ms, duration):
+    def text_stretch(self, current_ms: float, duration: float):
         elapsed_time = current_ms - self.start_ms
         if elapsed_time <= duration:
             self.attribute = 2 + 5 * (elapsed_time // 25)
@@ -117,7 +117,7 @@ class Animation:
         else:
             self.attribute = 0
             self.is_finished = True
-    def texture_resize(self, current_ms, duration, initial_size, final_size, delay):
+    def texture_resize(self, current_ms: float, duration: float, initial_size: float, final_size: float, delay: float):
         elapsed_time = current_ms - self.start_ms
         if elapsed_time < delay:
             self.attribute = initial_size
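The new annotation on texture_change above, list[tuple[float, float, float]], documents that each entry unpacks as (start, end, index). Since elapsed_time is in milliseconds, start/end are presumably ms offsets into the animation, though the comparison itself sits outside the visible hunk. A hypothetical argument, using only the unpacking shown in the diff:

# illustrative values only -- the timing windows are not taken from the diff
textures: list[tuple[float, float, float]] = [
    (0.0, 100.0, 0),    # texture index 0 for the first window
    (100.0, 200.0, 1),  # texture index 1 afterwards
]
for start, end, index in textures:  # same unpacking as in texture_change above
    print(start, end, index)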
libs/tja.py
@@ -4,7 +4,7 @@ from collections import deque
 from libs.utils import get_pixels_per_frame, strip_comments


-def calculate_base_score(play_note_list: list[dict]) -> int:
+def calculate_base_score(play_note_list: deque[dict]) -> int:
     total_notes = 0
     balloon_num = 0
     balloon_count = 0
@@ -55,7 +55,7 @@ class TJAParser:
         self.barline_display = True
         self.gogo_time = False

-    def file_to_data(self):
+    def _file_to_data(self):
         with open(self.file_path, 'rt', encoding='utf-8-sig') as tja_file:
             for line in tja_file:
                 line = strip_comments(line).strip()
@@ -64,7 +64,7 @@ class TJAParser:
         return self.data

     def get_metadata(self):
-        self.file_to_data()
+        self._file_to_data()
         diff_index = 1
         highest_diff = -1
         for item in self.data:
@@ -126,7 +126,7 @@ class TJAParser:
                 self.bpm, self.wave, self.offset, self.demo_start, self.course_data]

     def data_to_notes(self, diff):
-        self.file_to_data()
+        self._file_to_data()
         #Get notes start and end
         note_start = -1
         note_end = -1
@@ -144,11 +144,23 @@ class TJAParser:
         bar = []
         #Check for measures and separate when comma exists
         for i in range(note_start, note_end):
-            item = self.data[i].strip(',')
-            bar.append(item)
-            if item != self.data[i]:
-                notes.append(bar)
-                bar = []
+            line = self.data[i]
+            if line.startswith("#"):
+                bar.append(line)
+            else:
+                item = line.strip(',')
+                if item == '':
+                    if bar == []:
+                        bar.append(item)
+                    else:
+                        notes.append(bar)
+                        bar = []
+                    continue
+                else:
+                    bar.append(item)
+                    if item != line:
+                        notes.append(bar)
+                        bar = []
         return notes, self.course_data[diff][1]

     def get_se_note(self, play_note_list, ms_per_measure, note, note_ms):
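The rewritten loop in data_to_notes keeps #commands inside the current bar and treats a line that is empty after stripping commas (a lone ',') as a measure separator. A standalone copy of just that loop, fed a made-up slice of data to show the grouping it produces:

# standalone copy of the new bar-splitting logic, run on made-up TJA lines
data = ['1010,', '#GOGOSTART', '2020', '1010,', ',']
notes: list[list[str]] = []
bar: list[str] = []
for line in data:
    if line.startswith("#"):
        bar.append(line)
    else:
        item = line.strip(',')
        if item == '':
            if bar == []:
                bar.append(item)
            else:
                notes.append(bar)
                bar = []
            continue
        else:
            bar.append(item)
            if item != line:
                notes.append(bar)
                bar = []
print(notes)  # [['1010'], ['#GOGOSTART', '2020', '1010']]
# the trailing lone ',' leaves bar == [''], i.e. an empty measure still in progress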
@@ -10,6 +10,14 @@ import tomllib

 #TJA Format creator is unknown. I did not create the format, but I did write the parser though.

+def get_zip_filenames(zip_path: str) -> list[str]:
+    result = []
+    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
+        file_list = zip_ref.namelist()
+        for file_name in file_list:
+            result.append(file_name)
+    return result
+
 def load_image_from_zip(zip_path: str, filename: str) -> ray.Image:
     with zipfile.ZipFile(zip_path, 'r') as zip_ref:
         with zip_ref.open(filename) as image_file:
@@ -30,6 +38,28 @@ def load_texture_from_zip(zip_path: str, filename: str) -> ray.Texture:
     os.remove(temp_file_path)
     return texture

+def load_all_textures_from_zip(zip_path: str) -> dict[str, list[ray.Texture]]:
+    result_dict = dict()
+    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
+        files = zip_ref.namelist()
+        for file in files:
+            with zip_ref.open(file) as image_file:
+                with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as temp_file:
+                    temp_file.write(image_file.read())
+                    temp_file_path = temp_file.name
+            texture = ray.load_texture(temp_file_path)
+            os.remove(temp_file_path)
+
+            true_filename, index = file.split('_img')
+            index = int(index.split('.')[0])
+            if true_filename not in result_dict:
+                result_dict[true_filename] = []
+            while len(result_dict[true_filename]) <= index:
+                result_dict[true_filename].append(None)
+            result_dict[true_filename][index] = texture
+    return result_dict
+
+
 def rounded(num: float) -> int:
     sign = 1 if (num >= 0) else -1
     num = abs(num)
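load_all_textures_from_zip above assumes archive entries are named <name>_img<index>.<ext>, and the None padding keeps the per-name list positional. The name-handling part in isolation, with a hypothetical entry name and the raylib calls omitted:

# name-parsing portion of load_all_textures_from_zip, raylib calls omitted
file = 'don_img2.png'                      # hypothetical zip entry
true_filename, index = file.split('_img')  # -> 'don', '2.png'
index = int(index.split('.')[0])           # -> 2
result_dict: dict[str, list] = {}
if true_filename not in result_dict:
    result_dict[true_filename] = []
while len(result_dict[true_filename]) <= index:
    result_dict[true_filename].append(None)
result_dict[true_filename][index] = 'texture for don_img2.png'
print(result_dict)  # {'don': [None, None, 'texture for don_img2.png']}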
@@ -41,7 +71,7 @@ def rounded(num: float) -> int:
 def get_current_ms() -> int:
     return rounded(time.time() * 1000)

-def strip_comments(code: str):
+def strip_comments(code: str) -> str:
     result = ''
     index = 0
     for line in code.splitlines():
@@ -53,7 +83,7 @@ def strip_comments(code: str):
         index += 1
     return result

-def get_pixels_per_frame(bpm: float, time_signature: float, distance: float):
+def get_pixels_per_frame(bpm: float, time_signature: float, distance: float) -> float:
     beat_duration = 60 / bpm
     total_time = time_signature * beat_duration
     total_frames = 60 * total_time
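Worked numbers for the visible lines of get_pixels_per_frame; the return statement falls outside this hunk, so how distance enters the result is not shown, and the hard-coded 60 in total_frames assumes a 60 FPS frame rate:

# worked example of the visible arithmetic only
bpm, time_signature = 120.0, 4.0
beat_duration = 60 / bpm                     # 0.5 s per beat
total_time = time_signature * beat_duration  # 2.0 s per measure
total_frames = 60 * total_time               # 120 frames per measure at 60 FPS
# presumably the scroll distance is then spread across these 120 frames,
# but that line is outside the hunk above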
@@ -66,6 +96,7 @@ def get_config() -> dict[str, Any]:

 @dataclass
 class GlobalData:
+    videos_cleared = False
     start_song: bool = False
     selected_song: str = ''
     selected_difficulty: int = -1
@@ -73,3 +104,66 @@ class GlobalData:
     result_ok: int = -1
     result_bad: int = -1
     result_score: int = -1
+    songs_played: int = 0
+
+global_data = GlobalData()
+
+@dataclass
+class OutlinedText:
+    font: ray.Font
+    text: str
+    font_size: int
+    text_color: ray.Color
+    outline_color: ray.Color
+    outline_thickness: int = 2
+
+    def __post_init__(self):
+        self.texture = self._create_texture()
+
+    def _create_texture(self):
+        text_size = ray.measure_text_ex(self.font, self.text, self.font_size, 1.0)
+
+        padding = self.outline_thickness * 2
+        width = int(text_size.x + padding * 2)
+        height = int(text_size.y + padding * 2)
+
+        image = ray.gen_image_color(width, height, ray.Color(0, 0, 0, 0))
+
+        for dx in range(-self.outline_thickness, self.outline_thickness + 1):
+            for dy in range(-self.outline_thickness, self.outline_thickness + 1):
+                if dx == 0 and dy == 0:
+                    continue
+
+                distance = (dx * dx + dy * dy) ** 0.5
+                if distance <= self.outline_thickness:
+                    ray.image_draw_text_ex(
+                        image,
+                        self.font,
+                        self.text,
+                        ray.Vector2(padding + dx, padding + dy),
+                        self.font_size,
+                        1.0,
+                        self.outline_color
+                    )
+
+        ray.image_draw_text_ex(
+            image,
+            self.font,
+            self.text,
+            ray.Vector2(padding, padding),
+            self.font_size,
+            1.0,
+            self.text_color
+        )
+
+        texture = ray.load_texture_from_image(image)
+
+        ray.unload_image(image)
+
+        return texture
+
+    def draw(self, x: int, y: int, color: ray.Color):
+        ray.draw_texture(self.texture, x, y, color)
+
+    def unload(self):
+        ray.unload_texture(self.texture)
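A minimal usage sketch for the new OutlinedText helper, assuming the same pyray binding imported as ray elsewhere in the project and an already-initialized window; the font, text, and coordinates here are placeholders, not taken from the diff:

# hypothetical usage; requires an open raylib window before creating textures
font = ray.get_font_default()
label = OutlinedText(font, 'FULL COMBO', 40, ray.WHITE, ray.BLACK, outline_thickness=2)

# inside the draw loop:
label.draw(100, 100, ray.WHITE)

# on teardown:
label.unload()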
@@ -21,25 +21,26 @@ class VideoPlayer:
         audio_path = path[:-4] + '.ogg'
         self.audio = audio.load_music_stream(audio_path)

-    def convert_frames_background(self, index: int):
+    def _convert_frames_background(self):
         if not self.cap.isOpened():
             raise ValueError("Error: Could not open video file.")

         total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
         if len(self.frames) == total_frames:
             return 0
-        self.cap.set(cv2.CAP_PROP_POS_FRAMES, index)
+        self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.frame_index)

         success, frame = self.cap.read()

-        timestamp = (index / self.fps * 1000)
+        timestamp = (self.frame_index / self.fps * 1000)
         frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

         new_frame = ray.Image(frame_rgb.tobytes(), frame_rgb.shape[1], frame_rgb.shape[0], 1, ray.PixelFormat.PIXELFORMAT_UNCOMPRESSED_R8G8B8)

         self.frames.append((timestamp, new_frame))
+        self.frame_index += 1

-    def convert_frames(self):
+    def _convert_frames(self):
         if not self.cap.isOpened():
             raise ValueError("Error: Could not open video file.")

@@ -61,13 +62,13 @@ class VideoPlayer:
         print(f"Extracted {len(self.frames)} frames.")
         self.start_ms = get_current_ms()

-    def check_for_start(self):
+    def _check_for_start(self):
         if self.frames == []:
-            self.convert_frames()
+            self._convert_frames()
         if not audio.is_music_stream_playing(self.audio):
             audio.play_music_stream(self.audio)

-    def audio_manager(self):
+    def _audio_manager(self):
         audio.update_music_stream(self.audio)
         time_played = audio.get_music_time_played(self.audio) / audio.get_music_time_length(self.audio)
         ending_lenience = 0.95
@@ -75,8 +76,8 @@ class VideoPlayer:
             self.is_finished[1] = True

     def update(self):
-        self.check_for_start()
-        self.audio_manager()
+        self._check_for_start()
+        self._audio_manager()

         if self.frame_index == len(self.frames)-1:
             self.is_finished[0] = True
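update() above sets is_finished[0] once the decoded frames run out, while _audio_manager sets is_finished[1] near the end of the audio stream. A hypothetical caller-side check (the real call site is not part of this diff) would wait for both flags:

# hypothetical caller-side check; is_finished appears to hold (video_done, audio_done)
if player.is_finished[0] and player.is_finished[1]:
    pass  # both streams report done -- safe to stop drawing the video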