mirror of
https://github.com/Yonokid/PyTaiko.git
synced 2026-02-04 11:40:13 +01:00
add new texture wrapper
This commit is contained in:
@@ -1,6 +1,17 @@
|
||||
from typing import Optional
|
||||
import time
|
||||
from typing import Any, Optional
|
||||
|
||||
from libs.utils import get_current_ms
|
||||
|
||||
def rounded(num: float) -> int:
|
||||
sign = 1 if (num >= 0) else -1
|
||||
num = abs(num)
|
||||
result = int(num)
|
||||
if (num - result >= 0.5):
|
||||
result += 1
|
||||
return sign * result
|
||||
|
||||
def get_current_ms() -> int:
|
||||
return rounded(time.time() * 1000)
|
||||
|
||||
|
||||
class BaseAnimation():
|
||||
@@ -15,17 +26,39 @@ class BaseAnimation():
|
||||
"""
|
||||
self.duration = duration
|
||||
self.delay = delay
|
||||
self.delay_saved = delay
|
||||
self.start_ms = get_current_ms()
|
||||
self.is_finished = False
|
||||
self.attribute = 0
|
||||
self.is_started = False
|
||||
|
||||
def __repr__(self):
|
||||
return str(self.__dict__)
|
||||
|
||||
def __str__(self):
|
||||
return str(self.__dict__)
|
||||
|
||||
def update(self, current_time_ms: float) -> None:
|
||||
"""Update the animation based on the current time."""
|
||||
pass
|
||||
|
||||
def restart(self) -> None:
|
||||
self.start_ms = get_current_ms()
|
||||
self.is_finished = False
|
||||
self.delay = self.delay_saved
|
||||
|
||||
def start(self) -> None:
|
||||
self.is_started = True
|
||||
self.restart()
|
||||
|
||||
def pause(self):
|
||||
self.is_started = False
|
||||
|
||||
def unpause(self):
|
||||
self.is_started = True
|
||||
|
||||
def reset(self):
|
||||
self.restart()
|
||||
self.pause()
|
||||
|
||||
def _ease_in(self, progress: float, ease_type: str) -> float:
|
||||
if ease_type == "quadratic":
|
||||
@@ -60,6 +93,7 @@ class FadeAnimation(BaseAnimation):
|
||||
reverse_delay: Optional[float] = None) -> None:
|
||||
super().__init__(duration, delay)
|
||||
self.initial_opacity = initial_opacity
|
||||
self.attribute = initial_opacity
|
||||
self.final_opacity = final_opacity
|
||||
self.initial_opacity_saved = initial_opacity
|
||||
self.final_opacity_saved = final_opacity
|
||||
@@ -73,8 +107,13 @@ class FadeAnimation(BaseAnimation):
|
||||
self.reverse_delay = self.reverse_delay_saved
|
||||
self.initial_opacity = self.initial_opacity_saved
|
||||
self.final_opacity = self.final_opacity_saved
|
||||
self.attribute = self.initial_opacity
|
||||
|
||||
def update(self, current_time_ms: float) -> None:
|
||||
if not self.is_started:
|
||||
return
|
||||
else:
|
||||
self.is_started = not self.is_finished
|
||||
elapsed_time = current_time_ms - self.start_ms
|
||||
|
||||
if elapsed_time <= self.delay:
|
||||
@@ -116,8 +155,13 @@ class MoveAnimation(BaseAnimation):
|
||||
self.reverse_delay = self.reverse_delay_saved
|
||||
self.total_distance = self.total_distance_saved
|
||||
self.start_position = self.start_position_saved
|
||||
self.attribute = self.start_position
|
||||
|
||||
def update(self, current_time_ms: float) -> None:
|
||||
if not self.is_started:
|
||||
return
|
||||
else:
|
||||
self.is_started = not self.is_finished
|
||||
elapsed_time = current_time_ms - self.start_ms
|
||||
if elapsed_time < self.delay:
|
||||
self.attribute = self.start_position
|
||||
@@ -142,12 +186,22 @@ class TextureChangeAnimation(BaseAnimation):
|
||||
super().__init__(duration)
|
||||
self.textures = textures
|
||||
self.delay = delay
|
||||
self.delay_saved = delay
|
||||
|
||||
def update(self, current_time_ms: float) -> None:
|
||||
elapsed_time = current_time_ms - self.start_ms - self.delay
|
||||
if elapsed_time <= self.duration:
|
||||
super().update(current_time_ms)
|
||||
if not self.is_started:
|
||||
return
|
||||
else:
|
||||
self.is_started = not self.is_finished
|
||||
elapsed_time = current_time_ms - self.start_ms
|
||||
if elapsed_time < self.delay:
|
||||
return
|
||||
|
||||
animation_time = elapsed_time - self.delay
|
||||
if animation_time <= self.duration:
|
||||
for start, end, index in self.textures:
|
||||
if start < elapsed_time <= end:
|
||||
if start < animation_time <= end:
|
||||
self.attribute = index
|
||||
else:
|
||||
self.is_finished = True
|
||||
@@ -156,6 +210,10 @@ class TextStretchAnimation(BaseAnimation):
|
||||
def __init__(self, duration: float) -> None:
|
||||
super().__init__(duration)
|
||||
def update(self, current_time_ms: float) -> None:
|
||||
if not self.is_started:
|
||||
return
|
||||
else:
|
||||
self.is_started = not self.is_finished
|
||||
elapsed_time = current_time_ms - self.start_ms
|
||||
if elapsed_time <= self.duration:
|
||||
self.attribute = 2 + 5 * (elapsed_time // 25)
|
||||
@@ -189,6 +247,10 @@ class TextureResizeAnimation(BaseAnimation):
|
||||
|
||||
|
||||
def update(self, current_time_ms: float) -> None:
|
||||
if not self.is_started:
|
||||
return
|
||||
else:
|
||||
self.is_started = not self.is_finished
|
||||
elapsed_time = current_time_ms - self.start_ms
|
||||
|
||||
if elapsed_time <= self.delay:
|
||||
@@ -286,3 +348,86 @@ class Animation:
|
||||
reverse_delay: If provided, resize will play in reverse after this delay
|
||||
"""
|
||||
return TextureResizeAnimation(duration, **kwargs)
|
||||
|
||||
ANIMATION_CLASSES = {
|
||||
"fade": FadeAnimation,
|
||||
"move": MoveAnimation,
|
||||
"texture_change": TextureChangeAnimation,
|
||||
"text_stretch": TextStretchAnimation,
|
||||
"texture_resize": TextureResizeAnimation
|
||||
}
|
||||
|
||||
def parse_animations(animation_json):
|
||||
raw_anims = {}
|
||||
for item in animation_json:
|
||||
if "id" not in item:
|
||||
raise Exception("Animation requires id")
|
||||
if "type" not in item:
|
||||
raise Exception("Animation requires type")
|
||||
|
||||
raw_anims[item["id"]] = item.copy()
|
||||
|
||||
def find_refs(anim_id: int, visited: Optional[set] = None):
|
||||
if visited is None:
|
||||
visited = set()
|
||||
|
||||
if anim_id in visited:
|
||||
raise Exception(f"Circular reference detected involving animation {anim_id}")
|
||||
|
||||
visited.add(anim_id)
|
||||
animation = raw_anims[anim_id].copy()
|
||||
|
||||
for key, value in animation.items():
|
||||
if isinstance(value, dict) and "reference_id" in value:
|
||||
animation[key] = resolve_value(value, visited.copy())
|
||||
|
||||
visited.remove(anim_id)
|
||||
return animation
|
||||
|
||||
def resolve_value(ref_obj: dict[str, Any], visited: set):
|
||||
if "property" not in ref_obj:
|
||||
raise Exception("Reference requires 'property' field")
|
||||
|
||||
ref_id = ref_obj["reference_id"]
|
||||
ref_property = ref_obj["property"]
|
||||
|
||||
if ref_id not in raw_anims:
|
||||
raise Exception(f"Referenced animation {ref_id} not found")
|
||||
|
||||
resolved_ref_animation = find_refs(ref_id, visited)
|
||||
|
||||
if ref_property not in resolved_ref_animation:
|
||||
raise Exception(f"Property '{ref_property}' not found in animation {ref_id}")
|
||||
|
||||
base_value = resolved_ref_animation[ref_property]
|
||||
|
||||
if "init_val" in ref_obj:
|
||||
init_val = ref_obj["init_val"]
|
||||
|
||||
if isinstance(init_val, dict) and "reference_id" in init_val:
|
||||
init_val = resolve_value(init_val, visited)
|
||||
|
||||
try:
|
||||
return base_value + init_val
|
||||
except TypeError:
|
||||
raise Exception(f"Cannot add init_val {init_val} to referenced value {base_value}")
|
||||
|
||||
return base_value
|
||||
|
||||
anim_dict = dict()
|
||||
|
||||
for id in raw_anims:
|
||||
absolute_anim = find_refs(id)
|
||||
type = absolute_anim.pop("type")
|
||||
id_val = absolute_anim.pop("id")
|
||||
if "comment" in absolute_anim:
|
||||
absolute_anim.pop("comment")
|
||||
if type not in ANIMATION_CLASSES:
|
||||
raise Exception(f"Unknown Animation type: {type}")
|
||||
|
||||
anim_class = ANIMATION_CLASSES[type]
|
||||
|
||||
anim_object = anim_class(**absolute_anim)
|
||||
anim_dict[id_val] = anim_object
|
||||
|
||||
return anim_dict
|
||||
|
||||
186
libs/audio.py
186
libs/audio.py
@@ -17,6 +17,7 @@ from numpy import (
|
||||
interp,
|
||||
mean,
|
||||
ndarray,
|
||||
ones,
|
||||
sqrt,
|
||||
uint8,
|
||||
zeros,
|
||||
@@ -570,93 +571,116 @@ class AudioEngine:
|
||||
return True
|
||||
|
||||
def _audio_callback(self, outdata: ndarray, frames: int, time: int, status: str) -> None:
|
||||
"""Callback function for the sounddevice stream"""
|
||||
"""callback function for the sounddevice stream"""
|
||||
if status:
|
||||
print(f"Status: {status}")
|
||||
|
||||
# Process any new sound play requests
|
||||
while not self.sound_queue.empty():
|
||||
try:
|
||||
sound_name = self.sound_queue.get_nowait()
|
||||
if sound_name in self.sounds:
|
||||
self.sounds[sound_name].play()
|
||||
except queue.Empty:
|
||||
break
|
||||
self._process_sound_queue()
|
||||
self._process_music_queue()
|
||||
|
||||
# Process any new music play requests
|
||||
while not self.music_queue.empty():
|
||||
try:
|
||||
music_name, action, *args = self.music_queue.get_nowait()
|
||||
if music_name in self.music_streams:
|
||||
music = self.music_streams[music_name]
|
||||
if action == 'play':
|
||||
music.play()
|
||||
elif action == 'stop':
|
||||
music.stop()
|
||||
elif action == 'pause':
|
||||
music.pause()
|
||||
elif action == 'resume':
|
||||
music.resume()
|
||||
elif action == 'seek' and args:
|
||||
music.seek(args[0])
|
||||
except queue.Empty:
|
||||
break
|
||||
# Pre-allocate output buffer (reuse if possible)
|
||||
if not hasattr(self, '_output_buffer') or self._output_buffer.shape != (frames, self.output_channels):
|
||||
self._output_buffer = zeros((frames, self.output_channels), dtype=float32)
|
||||
else:
|
||||
self._output_buffer.fill(0.0) # Clear previous data
|
||||
|
||||
# Mix all playing sounds and music
|
||||
output = zeros((frames, self.output_channels), dtype=float32)
|
||||
self._mix_sounds(self._output_buffer, frames)
|
||||
|
||||
# Mix sounds
|
||||
for sound_name, sound in self.sounds.items():
|
||||
if sound.is_playing:
|
||||
sound_data = sound.get_frames(frames)
|
||||
self._mix_music(self._output_buffer, frames)
|
||||
|
||||
# If mono sound but stereo output, duplicate to both channels
|
||||
if isinstance(sound_data, ndarray):
|
||||
if sound.channels == 1 and self.output_channels > 1:
|
||||
sound_data = column_stack([sound_data] * self.output_channels)
|
||||
# Apply master volume in-place
|
||||
if self.master_volume != 1.0:
|
||||
self._output_buffer *= self.master_volume
|
||||
|
||||
# Ensure sound_data matches the output format
|
||||
if sound.channels > self.output_channels:
|
||||
# Down-mix if needed
|
||||
if self.output_channels == 1:
|
||||
sound_data = mean(sound_data, axis=1)
|
||||
else:
|
||||
# Keep only the first output_channels
|
||||
sound_data = sound_data[:, :self.output_channels]
|
||||
|
||||
# Add to the mix (simple additive mixing)
|
||||
output += sound_data
|
||||
|
||||
# Mix music streams
|
||||
for music_name, music in self.music_streams.items():
|
||||
if music.is_playing:
|
||||
music_data = music.get_frames(frames)
|
||||
|
||||
# If mono music but stereo output, duplicate to both channels
|
||||
if music.channels == 1 and self.output_channels > 1:
|
||||
music_data = column_stack([music_data] * self.output_channels)
|
||||
|
||||
# Ensure music_data matches the output format
|
||||
if music.channels > self.output_channels:
|
||||
# Down-mix if needed
|
||||
if self.output_channels == 1:
|
||||
music_data = mean(music_data, axis=1)
|
||||
else:
|
||||
# Keep only the first output_channels
|
||||
music_data = music_data[:, :self.output_channels]
|
||||
|
||||
# Add to the mix
|
||||
output += music_data
|
||||
|
||||
# Apply master volume
|
||||
output *= self.master_volume
|
||||
|
||||
# Apply simple limiter to prevent clipping
|
||||
max_val = np_max(np_abs(output))
|
||||
# Apply limiter only if needed
|
||||
max_val = np_max(np_abs(self._output_buffer))
|
||||
if max_val > 1.0:
|
||||
output = output / max_val
|
||||
self._output_buffer /= max_val
|
||||
|
||||
outdata[:] = output
|
||||
outdata[:] = self._output_buffer
|
||||
|
||||
def _process_sound_queue(self) -> None:
|
||||
"""Process sound queue"""
|
||||
sounds_to_play = []
|
||||
try:
|
||||
while True:
|
||||
sounds_to_play.append(self.sound_queue.get_nowait())
|
||||
except queue.Empty:
|
||||
pass
|
||||
|
||||
for sound_name in sounds_to_play:
|
||||
if sound_name in self.sounds:
|
||||
self.sounds[sound_name].play()
|
||||
|
||||
def _process_music_queue(self) -> None:
|
||||
"""Process music queue"""
|
||||
music_commands = []
|
||||
try:
|
||||
while True:
|
||||
music_commands.append(self.music_queue.get_nowait())
|
||||
except queue.Empty:
|
||||
pass
|
||||
|
||||
for command in music_commands:
|
||||
music_name, action, *args = command
|
||||
if music_name in self.music_streams:
|
||||
music = self.music_streams[music_name]
|
||||
if action == 'play':
|
||||
music.play()
|
||||
elif action == 'stop':
|
||||
music.stop()
|
||||
elif action == 'pause':
|
||||
music.pause()
|
||||
elif action == 'resume':
|
||||
music.resume()
|
||||
elif action == 'seek' and args:
|
||||
music.seek(args[0])
|
||||
|
||||
def _mix_sounds(self, output: ndarray, frames: int) -> None:
|
||||
"""sound mixing"""
|
||||
for sound in self.sounds.values():
|
||||
if not sound.is_playing:
|
||||
continue
|
||||
|
||||
sound_data = sound.get_frames(frames)
|
||||
if sound_data is None or not isinstance(sound_data, ndarray):
|
||||
continue
|
||||
|
||||
# Handle channel mismatch
|
||||
if sound.channels != self.output_channels:
|
||||
sound_data = self._convert_channels(sound_data, sound.channels)
|
||||
|
||||
output += sound_data
|
||||
|
||||
def _mix_music(self, output: ndarray, frames: int) -> None:
|
||||
"""music mixing"""
|
||||
for music in self.music_streams.values():
|
||||
if not music.is_playing:
|
||||
continue
|
||||
|
||||
music_data = music.get_frames(frames)
|
||||
if music_data is None:
|
||||
continue
|
||||
|
||||
if music.channels != self.output_channels:
|
||||
music_data = self._convert_channels(music_data, music.channels)
|
||||
|
||||
output += music_data
|
||||
|
||||
def _convert_channels(self, data: ndarray, input_channels: int) -> ndarray:
|
||||
"""channel conversion with caching"""
|
||||
if input_channels == self.output_channels:
|
||||
return data
|
||||
|
||||
if input_channels == 1 and self.output_channels > 1:
|
||||
return data[:, None] * ones((1, self.output_channels), dtype=float32)
|
||||
elif input_channels > self.output_channels:
|
||||
if self.output_channels == 1:
|
||||
return mean(data, axis=1, keepdims=True)
|
||||
else:
|
||||
return data[:, :self.output_channels]
|
||||
|
||||
return data
|
||||
|
||||
def _start_update_thread(self) -> None:
|
||||
"""Start a thread to update music streams"""
|
||||
@@ -671,16 +695,13 @@ class AudioEngine:
|
||||
active_streams = [music for music in self.music_streams.values() if music.is_playing]
|
||||
|
||||
if not active_streams:
|
||||
# Sleep longer when no streams are active
|
||||
time.sleep(0.5)
|
||||
continue
|
||||
|
||||
for music in active_streams:
|
||||
music.update()
|
||||
|
||||
# Adjust sleep based on number of active streams
|
||||
sleep_time = max(0.05, 0.1 / len(active_streams))
|
||||
time.sleep(sleep_time)
|
||||
time.sleep(0.1)
|
||||
|
||||
def init_audio_device(self):
|
||||
if self.audio_device_ready:
|
||||
@@ -770,6 +791,9 @@ class AudioEngine:
|
||||
if sound in self.sounds:
|
||||
del self.sounds[sound]
|
||||
|
||||
def unload_all_sounds(self) -> None:
|
||||
self.sounds.clear()
|
||||
|
||||
def normalize_sound(self, sound: str, rms: float) -> None:
|
||||
if sound in self.sounds:
|
||||
self.sounds[sound].normalize_vol(rms)
|
||||
|
||||
@@ -52,12 +52,14 @@ class DonBGBase:
|
||||
self.name = 'donbg_a_' + str(index).zfill(2)
|
||||
self.textures = (load_all_textures_from_zip(Path(f'Graphics/lumendata/enso_original/{self.name}_{self.player_num}p.zip')))
|
||||
self.move = Animation.create_move(3000, start_position=0, total_distance=-self.textures[self.name + f'_{self.player_num}p'][0].width)
|
||||
self.move.start()
|
||||
self.is_clear = False
|
||||
self.clear_fade = None
|
||||
|
||||
def update(self, current_time_ms: float, is_clear: bool):
|
||||
if not self.is_clear and is_clear:
|
||||
self.clear_fade = Animation.create_fade(150, initial_opacity=0.0, final_opacity=1.0)
|
||||
self.clear_fade.start()
|
||||
self.is_clear = is_clear
|
||||
self.move.update(current_time_ms)
|
||||
if self.clear_fade is not None:
|
||||
@@ -74,6 +76,7 @@ class DonBG1(DonBGBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int, player_num: int):
|
||||
super().__init__(index, screen_width, screen_height, player_num)
|
||||
self.overlay_move = Animation.create_move(1000, start_position=0, total_distance=20, reverse_delay=0)
|
||||
self.overlay_move.start()
|
||||
def update(self, current_time_ms: float, is_clear: bool):
|
||||
super().update(current_time_ms, is_clear)
|
||||
self.overlay_move.update(current_time_ms)
|
||||
@@ -100,6 +103,7 @@ class DonBG2(DonBGBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int, player_num: int):
|
||||
super().__init__(index, screen_width, screen_height, player_num)
|
||||
self.overlay_move = Animation.create_move(1500, start_position=0, total_distance=20, reverse_delay=0)
|
||||
self.overlay_move.start()
|
||||
def update(self, current_time_ms: float, is_clear: bool):
|
||||
super().update(current_time_ms, is_clear)
|
||||
self.overlay_move.update(current_time_ms)
|
||||
@@ -124,9 +128,13 @@ class DonBG3(DonBGBase):
|
||||
duration = 266
|
||||
bounce_distance = 40
|
||||
self.bounce_up = Animation.create_move(duration, total_distance=-bounce_distance, ease_out='quadratic')
|
||||
self.bounce_up.start()
|
||||
self.bounce_down = Animation.create_move(duration, total_distance=-bounce_distance, ease_in='quadratic', delay=self.bounce_up.duration)
|
||||
self.bounce_down.start()
|
||||
self.overlay_move = Animation.create_move(duration*3, total_distance=20, reverse_delay=0, ease_in='quadratic', ease_out='quadratic', delay=self.bounce_up.duration+self.bounce_down.duration)
|
||||
self.overlay_move.start()
|
||||
self.overlay_move_2 = Animation.create_move(duration*3, total_distance=20, reverse_delay=0, ease_in='quadratic', ease_out='quadratic', delay=self.bounce_up.duration+self.bounce_down.duration+self.overlay_move.duration)
|
||||
self.overlay_move_2.start()
|
||||
|
||||
def update(self, current_time_ms: float, is_clear: bool):
|
||||
super().update(current_time_ms, is_clear)
|
||||
@@ -157,6 +165,7 @@ class DonBG4(DonBGBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int, player_num: int):
|
||||
super().__init__(index, screen_width, screen_height, player_num)
|
||||
self.overlay_move = Animation.create_move(1500, start_position=0, total_distance=20, reverse_delay=0)
|
||||
self.overlay_move.start()
|
||||
def update(self, current_time_ms: float, is_clear: bool):
|
||||
super().update(current_time_ms, is_clear)
|
||||
self.overlay_move.update(current_time_ms)
|
||||
@@ -182,8 +191,11 @@ class DonBG5(DonBGBase):
|
||||
duration = 266
|
||||
bounce_distance = 40
|
||||
self.bounce_up = Animation.create_move(duration, total_distance=-bounce_distance, ease_out='quadratic')
|
||||
self.bounce_up.start()
|
||||
self.bounce_down = Animation.create_move(duration, total_distance=-bounce_distance, ease_in='quadratic', delay=self.bounce_up.duration)
|
||||
self.bounce_down.start()
|
||||
self.adjust = Animation.create_move(1000, total_distance=10, reverse_delay=0, delay=self.bounce_up.duration+self.bounce_down.duration)
|
||||
self.adjust.start()
|
||||
|
||||
def update(self, current_time_ms: float, is_clear: bool):
|
||||
super().update(current_time_ms, is_clear)
|
||||
@@ -212,6 +224,7 @@ class DonBG6(DonBGBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int, player_num: int):
|
||||
super().__init__(index, screen_width, screen_height, player_num)
|
||||
self.overlay_move = Animation.create_move(1000, start_position=0, total_distance=20, reverse_delay=0)
|
||||
self.overlay_move.start()
|
||||
def update(self, current_time_ms: float, is_clear: bool):
|
||||
super().update(current_time_ms, is_clear)
|
||||
self.overlay_move.update(current_time_ms)
|
||||
@@ -260,6 +273,7 @@ class BGNormal1(BGNormalBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int):
|
||||
super().__init__(index, screen_width, screen_height)
|
||||
self.flicker = Animation.create_fade(16.67*4, initial_opacity=0.5, final_opacity=0.4, reverse_delay=0)
|
||||
self.flicker.start()
|
||||
def update(self, current_time_ms: float):
|
||||
self.flicker.update(current_time_ms)
|
||||
if self.flicker.is_finished:
|
||||
@@ -272,6 +286,7 @@ class BGNormal2(BGNormalBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int):
|
||||
super().__init__(index, screen_width, screen_height)
|
||||
self.flicker = Animation.create_fade(16.67*4, initial_opacity=0.5, final_opacity=0.4, reverse_delay=0)
|
||||
self.flicker.start()
|
||||
def update(self, current_time_ms: float):
|
||||
self.flicker.update(current_time_ms)
|
||||
if self.flicker.is_finished:
|
||||
@@ -284,6 +299,7 @@ class BGNormal3(BGNormalBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int):
|
||||
super().__init__(index, screen_width, screen_height)
|
||||
self.flicker = Animation.create_fade(16.67*10, initial_opacity=0.5, final_opacity=0.4, reverse_delay=0)
|
||||
self.flicker.start()
|
||||
def update(self, current_time_ms):
|
||||
self.flicker.update(current_time_ms)
|
||||
if self.flicker.is_finished:
|
||||
@@ -325,7 +341,9 @@ class BGNormal4(BGNormalBase):
|
||||
self.spawn_point = self.random_excluding_range()
|
||||
duration = random.randint(1400, 2000)
|
||||
self.move_y = Animation.create_move(duration, total_distance=360)
|
||||
self.move_y.start()
|
||||
self.move_x = Animation.create_move(duration, total_distance=random.randint(-300, 300))
|
||||
self.move_x.start()
|
||||
def random_excluding_range(self):
|
||||
while True:
|
||||
num = random.randint(0, 1280)
|
||||
@@ -339,9 +357,12 @@ class BGNormal4(BGNormalBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int):
|
||||
super().__init__(index, screen_width, screen_height)
|
||||
self.flicker = Animation.create_fade(16.67*3, initial_opacity=0.5, final_opacity=0.4, reverse_delay=0)
|
||||
self.flicker.start()
|
||||
self.turtle_move = Animation.create_move(3333*2, start_position=screen_width+112, total_distance=-(screen_width+(112*4)))
|
||||
self.turtle_move.start()
|
||||
textures = ((0, 100, 3), (100, 200, 4), (200, 300, 5), (300, 400, 6), (400, 500, 7), (500, 600, 8))
|
||||
self.turtle_change = Animation.create_texture_change(600, textures=textures)
|
||||
self.turtle_change.start()
|
||||
self.petals = {self.Petal(), self.Petal(), self.Petal(), self.Petal(), self.Petal()}
|
||||
def update(self, current_time_ms: float):
|
||||
self.flicker.update(current_time_ms)
|
||||
@@ -372,6 +393,7 @@ class BGNormal5(BGNormalBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int):
|
||||
super().__init__(index, screen_width, screen_height)
|
||||
self.flicker = Animation.create_fade(16.67*10, initial_opacity=0.75, final_opacity=0.4, reverse_delay=0)
|
||||
self.flicker.start()
|
||||
def update(self, current_time_ms: float):
|
||||
self.flicker.update(current_time_ms)
|
||||
if self.flicker.is_finished:
|
||||
@@ -434,13 +456,17 @@ class BGFever4(BGFeverBase):
|
||||
def __init__(self, index: int, screen_width: int, screen_height: int):
|
||||
super().__init__(index, screen_width, screen_height)
|
||||
self.vertical_move = Animation.create_move(1300, start_position=0, total_distance=50, reverse_delay=0)
|
||||
self.vertical_move.start()
|
||||
self.horizontal_move = Animation.create_move(5000, start_position=0, total_distance=self.textures[self.name][2].width)
|
||||
self.horizontal_move.start()
|
||||
self.bg_texture_move_down = None
|
||||
self.bg_texture_move_up = None
|
||||
|
||||
def start(self):
|
||||
self.bg_texture_move_down = Animation.create_move(516, total_distance=400, ease_in='cubic')
|
||||
self.bg_texture_move_down.start()
|
||||
self.bg_texture_move_up = Animation.create_move(200, total_distance=40, delay=self.bg_texture_move_down.duration, ease_out='quadratic')
|
||||
self.bg_texture_move_up.start()
|
||||
|
||||
def update(self, current_time_ms: float):
|
||||
if self.bg_texture_move_down is not None:
|
||||
|
||||
126
libs/texture.py
126
libs/texture.py
@@ -1,10 +1,130 @@
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
|
||||
import pyray as ray
|
||||
|
||||
from libs.animation import BaseAnimation, parse_animations
|
||||
|
||||
|
||||
class Texture:
|
||||
def __init__(self, name: str, texture: Union[ray.Texture, list[ray.Texture]], init_vals: dict[str, int]):
|
||||
self.name = name
|
||||
self.texture = texture
|
||||
self.init_vals = init_vals
|
||||
if isinstance(self.texture, list):
|
||||
self.width = self.texture[0].width
|
||||
self.height = self.texture[0].height
|
||||
else:
|
||||
self.width = self.texture.width
|
||||
self.height = self.texture.height
|
||||
self.is_frames = isinstance(self.texture, list)
|
||||
|
||||
self.x = 0
|
||||
self.y = 0
|
||||
self.x2 = self.width
|
||||
self.y2 = self.height
|
||||
|
||||
class TextureWrapper:
|
||||
def __init__(self):
|
||||
pass
|
||||
def load_texture(self, texture: Path) -> ray.Texture:
|
||||
return ray.load_texture(str(texture))
|
||||
self.textures: dict[str, dict[str, Texture]] = dict()
|
||||
self.animations: dict[int, BaseAnimation] = dict()
|
||||
self.graphics_path = Path("Graphics")
|
||||
|
||||
def unload_textures(self):
|
||||
for zip in self.textures:
|
||||
for file in self.textures[zip]:
|
||||
tex_object = self.textures[zip][file]
|
||||
if isinstance(tex_object.texture, list):
|
||||
for texture in tex_object.texture:
|
||||
ray.unload_texture(texture)
|
||||
else:
|
||||
ray.unload_texture(tex_object.texture)
|
||||
|
||||
def get_animation(self, index: int):
|
||||
return self.animations[index]
|
||||
|
||||
def update_attr(self, subset: str, texture: str, attr: str, value: float | int):
|
||||
tex_object = self.textures[subset][texture]
|
||||
if hasattr(tex_object, attr):
|
||||
setattr(tex_object, attr, tex_object.init_vals[attr] + value)
|
||||
|
||||
def _read_tex_obj_data(self, tex_mapping: dict, tex_object: Texture):
|
||||
tex_object.x = tex_mapping.get("x", 0)
|
||||
tex_object.y = tex_mapping.get("y", 0)
|
||||
tex_object.x2 = tex_mapping.get("x2", tex_object.width)
|
||||
tex_object.y2 = tex_mapping.get("y2", tex_object.height)
|
||||
|
||||
def load_screen_textures(self, screen_name: str) -> None:
|
||||
self.unload_textures()
|
||||
screen_path = self.graphics_path / screen_name
|
||||
if (screen_path / 'animation.json').exists():
|
||||
with open(screen_path / 'animation.json') as json_file:
|
||||
self.animations = parse_animations(json.loads(json_file.read()))
|
||||
for zip in screen_path.iterdir():
|
||||
if zip.is_dir() or zip.suffix != ".zip":
|
||||
continue
|
||||
with zipfile.ZipFile(zip, 'r') as zip_ref:
|
||||
if 'texture.json' not in zip_ref.namelist():
|
||||
raise Exception(f"texture.json file missing from {zip}")
|
||||
|
||||
with zip_ref.open('texture.json') as json_file:
|
||||
tex_mapping_data = json.loads(json_file.read().decode('utf-8'))
|
||||
self.textures[zip.stem] = dict()
|
||||
|
||||
for tex_name in tex_mapping_data:
|
||||
if f"{tex_name}/" in zip_ref.namelist():
|
||||
tex_mapping = tex_mapping_data[tex_name]
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
zip_ref.extractall(temp_dir, members=[name for name in zip_ref.namelist()
|
||||
if name.startswith(tex_name)])
|
||||
|
||||
extracted_path = Path(temp_dir) / tex_name
|
||||
if extracted_path.is_dir():
|
||||
frames = [ray.load_texture(str(frame)) for frame in sorted(extracted_path.iterdir(),
|
||||
key=lambda x: int(x.stem)) if frame.is_file()]
|
||||
else:
|
||||
frames = [ray.load_texture(str(extracted_path))]
|
||||
self.textures[zip.stem][tex_name] = Texture(tex_name, frames, tex_mapping)
|
||||
self._read_tex_obj_data(tex_mapping, self.textures[zip.stem][tex_name])
|
||||
elif f"{tex_name}.png" in zip_ref.namelist():
|
||||
tex_mapping = tex_mapping_data[tex_name]
|
||||
|
||||
png_filename = f"{tex_name}.png"
|
||||
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
|
||||
temp_file.write(zip_ref.read(png_filename))
|
||||
temp_path = temp_file.name
|
||||
|
||||
try:
|
||||
tex = ray.load_texture(temp_path)
|
||||
self.textures[zip.stem][tex_name] = Texture(tex_name, tex, tex_mapping)
|
||||
self._read_tex_obj_data(tex_mapping, self.textures[zip.stem][tex_name])
|
||||
finally:
|
||||
os.unlink(temp_path)
|
||||
else:
|
||||
raise Exception(f"Texture {tex_name} was not found in {zip}")
|
||||
|
||||
|
||||
def draw_texture(self, subset: str, texture: str, color: ray.Color=ray.WHITE, frame: int = 0, scale: float = 1.0, center: bool = False, mirror: str = '', x: int | float = 0, y: int | float = 0, x2: int | float = 0, y2: int | float = 0) -> None:
|
||||
mirror_x = -1 if mirror == 'horizontal' else 1
|
||||
mirror_y = -1 if mirror == 'vertical' else 1
|
||||
tex_object = self.textures[subset][texture]
|
||||
source_rect = ray.Rectangle(0, 0, tex_object.width * mirror_x, tex_object.height * mirror_y)
|
||||
if center:
|
||||
dest_rect = ray.Rectangle(tex_object.x + (tex_object.width//2) - ((tex_object.width * scale)//2) + x, tex_object.y + (tex_object.height//2) - ((tex_object.height * scale)//2) + y, tex_object.x2*scale + x2, tex_object.y2*scale + y2)
|
||||
else:
|
||||
dest_rect = ray.Rectangle(tex_object.x + x, tex_object.y + y, tex_object.x2*scale + x2, tex_object.y2*scale + y2)
|
||||
if tex_object.is_frames:
|
||||
if not isinstance(tex_object.texture, list):
|
||||
raise Exception("Texture was marked as multiframe but is only 1 texture")
|
||||
ray.draw_texture_pro(tex_object.texture[frame], source_rect, dest_rect, ray.Vector2(0, 0), 0, color)
|
||||
else:
|
||||
if isinstance(tex_object.texture, list):
|
||||
raise Exception("Texture is multiframe but was called as 1 texture")
|
||||
ray.draw_texture_pro(tex_object.texture, source_rect, dest_rect, ray.Vector2(0, 0), 0, color)
|
||||
|
||||
tex = TextureWrapper()
|
||||
|
||||
125
libs/transition.py
Normal file
125
libs/transition.py
Normal file
@@ -0,0 +1,125 @@
|
||||
import pyray as ray
|
||||
|
||||
from libs.animation import Animation
|
||||
from libs.utils import OutlinedText, global_data
|
||||
|
||||
|
||||
class Transition:
    """Rainbow scene-change wipe driven by animations preloaded on the
    shared texture wrapper.

    Set ``is_second`` to True to draw the mirrored second half of the
    transition; ``is_finished`` flips once the song-info fade completes.
    """

    def __init__(self, title: str, subtitle: str) -> None:
        self.is_finished = False
        # Animation slots 0-4 are registered ahead of time on global_data.tex.
        self.rainbow_up = global_data.tex.get_animation(0)
        self.mini_up = global_data.tex.get_animation(1)
        self.chara_down = global_data.tex.get_animation(2)
        self.song_info_fade = global_data.tex.get_animation(3)
        self.song_info_fade_out = global_data.tex.get_animation(4)
        self.title = OutlinedText(title, 40, ray.WHITE, ray.BLACK, outline_thickness=5)
        self.subtitle = OutlinedText(subtitle, 30, ray.WHITE, ray.BLACK, outline_thickness=5)
        # False while the first (rising) half of the transition is playing.
        self.is_second = False

    def start(self):
        """Kick off every sub-animation at once."""
        for anim in (self.rainbow_up, self.mini_up, self.chara_down,
                     self.song_info_fade, self.song_info_fade_out):
            anim.start()

    def update(self, current_time_ms: float):
        """Advance all sub-animations; finished once the song-info fade ends."""
        for anim in (self.rainbow_up, self.chara_down, self.mini_up,
                     self.song_info_fade, self.song_info_fade_out):
            anim.update(current_time_ms)
        self.is_finished = self.song_info_fade.is_finished

    def draw_song_info(self):
        """Draw the title/subtitle panel, fading in (or out on the second pass)."""
        fade = self.song_info_fade_out if self.is_second else self.song_info_fade
        text_color = ray.fade(ray.WHITE, fade.attribute)
        # Background panel is capped at 70% opacity.
        bg_color = ray.fade(ray.WHITE, min(0.70, fade.attribute))
        extra = (816 - self.rainbow_up.attribute) if self.is_second else 0
        global_data.tex.draw_texture('rainbow_transition', 'text_bg',
                                     y=-self.rainbow_up.attribute - extra, color=bg_color)

        title_tex = self.title.texture
        base_y = 1176 - title_tex.height//2 - int(self.rainbow_up.attribute) - extra
        title_dest = ray.Rectangle(1280//2 - title_tex.width//2, base_y - 20,
                                   title_tex.width, title_tex.height)
        self.title.draw(self.title.default_src, title_dest, ray.Vector2(0, 0), 0, text_color)

        sub_tex = self.subtitle.texture
        sub_dest = ray.Rectangle(1280//2 - sub_tex.width//2, base_y + 30,
                                 sub_tex.width, sub_tex.height)
        self.subtitle.draw(self.subtitle.default_src, sub_dest, ray.Vector2(0, 0), 0, text_color)

    def draw(self):
        """Draw the rainbow backdrop and characters, then the song info panel."""
        screen_shift = 816 if self.is_second else 0
        bg_y = -self.rainbow_up.attribute - screen_shift
        for layer in ('rainbow_bg_bottom', 'rainbow_bg_top', 'rainbow_bg'):
            global_data.tex.draw_texture('rainbow_transition', layer, y=bg_y)

        if self.is_second:
            drop = self.chara_down.attribute - self.mini_up.attribute//3
            spread = 408
        else:
            drop = self.chara_down.attribute
            spread = 0
        side_y = -self.mini_up.attribute + drop - screen_shift
        global_data.tex.draw_texture('rainbow_transition', 'chara_left',
                                     x=-self.mini_up.attribute//2 - spread, y=side_y)
        global_data.tex.draw_texture('rainbow_transition', 'chara_right',
                                     x=self.mini_up.attribute//2 + spread, y=side_y)
        global_data.tex.draw_texture('rainbow_transition', 'chara_center',
                                     y=-self.rainbow_up.attribute + drop - screen_shift)

        self.draw_song_info()
|
||||
|
||||
class Transition2:
    """Alternate rainbow transition drawn directly from raw textures
    (``scene_change_rainbow``) rather than the texture wrapper.

    The character drop animation is created lazily: it only starts once
    the rainbow has fully risen. ``is_finished`` flips when the drop ends.
    """

    def __init__(self, screen_height: int, title: str, subtitle: str) -> None:
        duration = 266
        self.is_finished = False
        travel = screen_height + global_data.textures['scene_change_rainbow'][2].height
        self.rainbow_up = Animation.create_move(duration, start_position=0,
                                                total_distance=travel, ease_in='cubic')
        self.rainbow_up.start()
        # Created in update() once rainbow_up finishes.
        self.chara_down = None
        self.title = OutlinedText(title, 40, ray.WHITE, ray.BLACK, outline_thickness=5)
        self.subtitle = OutlinedText(subtitle, 30, ray.WHITE, ray.BLACK, outline_thickness=5)
        self.song_info_fade = Animation.create_fade(duration/2)
        self.song_info_fade.start()

    def update(self, current_time_ms: float):
        """Advance animations; spawn the character drop after the rainbow rises."""
        self.rainbow_up.update(current_time_ms)
        self.song_info_fade.update(current_time_ms)
        if self.chara_down is None and self.rainbow_up.is_finished:
            self.chara_down = Animation.create_move(33, start_position=0, total_distance=30)
            self.chara_down.start()

        if self.chara_down is not None:
            self.chara_down.update(current_time_ms)
            self.is_finished = self.chara_down.is_finished

    def draw_song_info(self):
        """Draw the translucent panel with the song title and subtitle."""
        panel = global_data.textures['scene_change_rainbow'][6]
        panel_y = 720//2 - panel.height
        panel_src = ray.Rectangle(0, 0, panel.width, panel.height)
        # Panel is stretched 3x wide / 2x tall and capped at 70% opacity.
        panel_dest = ray.Rectangle(1280//2 - (panel.width*3)//2, panel_y,
                                   panel.width*3, panel.height*2)
        panel_tint = ray.fade(ray.WHITE, min(0.70, self.song_info_fade.attribute))
        ray.draw_texture_pro(panel, panel_src, panel_dest, ray.Vector2(0, 0), 0, panel_tint)

        text_tint = ray.fade(ray.WHITE, self.song_info_fade.attribute)
        title_tex = self.title.texture
        title_y = 720//2 - title_tex.height//2 - 20
        title_src = ray.Rectangle(0, 0, title_tex.width, title_tex.height)
        title_dest = ray.Rectangle(1280//2 - title_tex.width//2, title_y,
                                   title_tex.width, title_tex.height)
        self.title.draw(title_src, title_dest, ray.Vector2(0, 0), 0, text_tint)

        sub_tex = self.subtitle.texture
        sub_src = ray.Rectangle(0, 0, sub_tex.width, sub_tex.height)
        sub_dest = ray.Rectangle(1280//2 - sub_tex.width//2, title_y + 50,
                                 sub_tex.width, sub_tex.height)
        self.subtitle.draw(sub_src, sub_dest, ray.Vector2(0, 0), 0, text_tint)

    def draw(self, screen_height: int):
        """Draw the rising rainbow, then the characters, then the song info."""
        textures = global_data.textures['scene_change_rainbow']
        rise = int(self.rainbow_up.attribute)
        ray.draw_texture(textures[1], 0, screen_height - rise, ray.WHITE)

        bg = textures[0]
        bg_src = ray.Rectangle(0, 0, bg.width, bg.height)
        bg_dest = ray.Rectangle(0, -rise, bg.width, screen_height)
        ray.draw_texture_pro(bg, bg_src, bg_dest, ray.Vector2(0, 0), 0, ray.WHITE)

        drop = int(self.chara_down.attribute) if self.chara_down is not None else 0
        # Characters rise 3x faster than the rainbow backdrop.
        chara_rise = int(self.rainbow_up.attribute*3)
        ray.draw_texture(textures[4], 142, 14 - chara_rise - drop, ray.WHITE)
        ray.draw_texture(textures[5], 958, 144 - chara_rise - drop, ray.WHITE)
        ray.draw_texture(textures[3], 76, -chara_rise - drop, ray.WHITE)

        self.draw_song_info()
|
||||
@@ -1,5 +1,8 @@
|
||||
import ctypes
|
||||
import hashlib
|
||||
import math
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import zipfile
|
||||
@@ -16,7 +19,25 @@ from raylib import (
|
||||
SHADER_UNIFORM_VEC4,
|
||||
)
|
||||
|
||||
#The creator of the TJA format is unknown; I did not create the format, but I did write this parser for it.
|
||||
from libs.texture import TextureWrapper
|
||||
|
||||
|
||||
def force_dedicated_gpu():
    """Hint Windows drivers to run this process on the dedicated GPU.

    Best-effort only: each vendor-specific hint is attempted independently
    and any failure is printed rather than raised. No-op on non-Windows
    platforms. Returns None.
    """
    if sys.platform != "win32":
        return
    try:
        # NVIDIA Optimus: loading nvapi and setting SHIM_MCCOMPAT hints the driver.
        if ctypes.windll.kernel32.LoadLibraryW("nvapi64.dll"):
            ctypes.windll.kernel32.SetEnvironmentVariableW("SHIM_MCCOMPAT", "0x800000001")
    except Exception as exc:
        print(exc)

    try:
        # AMD PowerXpress / switchable graphics.
        ctypes.windll.kernel32.SetEnvironmentVariableW("AMD_VULKAN_ICD", "DISABLE")
    except Exception as exc:
        print(exc)
|
||||
|
||||
def get_zip_filenames(zip_path: Path) -> list[str]:
|
||||
result = []
|
||||
@@ -230,6 +251,7 @@ def reset_session():
|
||||
class GlobalData:
|
||||
selected_song: Path = Path()
|
||||
textures: dict[str, list[ray.Texture]] = field(default_factory=lambda: dict())
|
||||
tex: TextureWrapper = field(default_factory=lambda: TextureWrapper())
|
||||
songs_played: int = 0
|
||||
config: dict = field(default_factory=lambda: dict())
|
||||
song_hashes: dict[str, list[dict]] = field(default_factory=lambda: dict()) #Hash to path
|
||||
@@ -239,13 +261,24 @@ class GlobalData:
|
||||
|
||||
global_data = GlobalData()

# Pre-rendered text textures are cached on disk; remember which ones exist
# (by file stem == content hash) so OutlinedText can skip re-rendering.
if not Path('cache/image').exists():
    Path('cache/image').mkdir()

text_cache = {cached.stem for cached in Path('cache/image').iterdir()}
|
||||
|
||||
class OutlinedText:
|
||||
def __init__(self, text: str, font_size: int, color: ray.Color, outline_color: ray.Color, outline_thickness=5.0, vertical=False):
|
||||
self.font = self._load_font_for_text(text)
|
||||
if vertical:
|
||||
self.texture = self._create_text_vertical(text, font_size, color, ray.BLANK, self.font)
|
||||
self.hash = self._hash_text(text, font_size, color, vertical)
|
||||
if self.hash in text_cache:
|
||||
self.texture = ray.load_texture(f'cache/image/{self.hash}.png')
|
||||
else:
|
||||
self.texture = self._create_text_horizontal(text, font_size, color, ray.BLANK, self.font)
|
||||
self.font = self._load_font_for_text(text)
|
||||
if vertical:
|
||||
self.texture = self._create_text_vertical(text, font_size, color, ray.BLANK, self.font)
|
||||
else:
|
||||
self.texture = self._create_text_horizontal(text, font_size, color, ray.BLANK, self.font)
|
||||
outline_size = ray.ffi.new('float*', outline_thickness)
|
||||
if isinstance(outline_color, tuple):
|
||||
outline_color_alloc = ray.ffi.new("float[4]", [
|
||||
@@ -272,11 +305,31 @@ class OutlinedText:
|
||||
ray.set_shader_value(self.shader, outline_color_loc, outline_color_alloc, SHADER_UNIFORM_VEC4)
|
||||
ray.set_shader_value(self.shader, texture_size_loc, texture_size, SHADER_UNIFORM_VEC2)
|
||||
|
||||
self.default_src = ray.Rectangle(0, 0, self.texture.width, self.texture.height)
|
||||
|
||||
def _hash_text(self, text: str, font_size: int, color: ray.Color, vertical: bool):
    """Build a stable cache key for a rendered-text texture.

    Every input that affects the rendered image (text, size, RGBA color,
    orientation) is folded into a SHA-256 hex digest, which names the
    cached PNG on disk.
    """
    # Color may arrive as an (r, g, b, a) tuple or a ray.Color struct.
    if isinstance(color, tuple):
        channels = (color[0], color[1], color[2], color[3])
    else:
        channels = (color.r, color.g, color.b, color.a)
    digest = hashlib.sha256()
    digest.update(text.encode('utf-8'))
    digest.update(str(font_size).encode('utf-8'))
    for channel in channels:
        digest.update(str(channel).encode('utf-8'))
    digest.update(str(vertical).encode('utf-8'))
    return digest.hexdigest()
|
||||
|
||||
def _load_font_for_text(self, text: str) -> ray.Font:
    """Load the game font with only the codepoints present in *text*.

    Loading a per-text glyph subset keeps the font atlas small compared to
    rasterizing the full CJK range of the typeface.
    """
    codepoint_count = ray.ffi.new('int *', 0)
    # Deduplicate characters before building the codepoint list.
    unique_codepoints = set(text)
    codepoints = ray.load_codepoints(''.join(unique_codepoints), codepoint_count)
    # NOTE(review): removed unreachable duplicate load/return that followed
    # the first return statement.
    return ray.load_font_ex(str(Path('Graphics/Modified-DFPKanteiryu-XB.ttf')), 40, codepoints, 0)
|
||||
|
||||
def _create_text_vertical(self, text: str, font_size: int, color: ray.Color, bg_color: ray.Color, font: Optional[ray.Font]=None, padding: int=10):
|
||||
rotate_chars = {'-', '‐', '|', '/', '\\', 'ー', '~', '~', '(', ')', '(', ')',
|
||||
@@ -427,6 +480,7 @@ class OutlinedText:
|
||||
ray.WHITE)
|
||||
ray.unload_image(char_image)
|
||||
|
||||
ray.export_image(image, f'cache/image/{self.hash}.png')
|
||||
texture = ray.load_texture_from_image(image)
|
||||
ray.unload_image(image)
|
||||
return texture
|
||||
@@ -452,6 +506,7 @@ class OutlinedText:
|
||||
ray.WHITE)
|
||||
ray.unload_image(text_image)
|
||||
|
||||
ray.export_image(image, f'cache/image/{self.hash}.png')
|
||||
texture = ray.load_texture_from_image(image)
|
||||
ray.unload_image(image)
|
||||
return texture
|
||||
|
||||
Reference in New Issue
Block a user