I finally got it working — this is a bit long because it does more than the eyes and mouth: it also plays Phoenix Wright-style text blips that are synchronized to the CPS (characters-per-second) text speed.
Code: Select all
init -1 python in speakers:
from renpy.text.textsupport import TAG, TEXT
import renpy.text.textsupport as textsupport
import re
speakers = set()
def Character(name, image=None):
    """Create a Ren'Py Character whose dialogue plays Phoenix Wright style
    text "blips" synchronized to the text CPS, and which registers itself
    in the module-level `speakers` set while talking (used by MouthSwitch
    to drive talking-mouth animations).

    name  -- the character's display name.
    image -- the image tag; defaults to name.lower().
    """
    if image is None:
        image = name.lower()

    def character_callback(character):
        # Build a per-character callback that adds/removes `character`
        # from the module-level `speakers` set as dialogue is shown.
        global speakers

        def the_callback(event, interact=True, **kwargs):
            if not interact:
                return
            if event == "show":
                speakers.add(character)
            elif event == "show_done":
                #renpy.sound.stop()
                pass
            elif event == "slow_done":
                # Slow text finished revealing: the character stops
                # "talking" and any remaining queued blips are cut off.
                speakers.discard(character)
                renpy.sound.stop()
        return the_callback

    def queue_blips(who, what, cps):
        # Queue one short blip (or an equal-length silence) per displayed
        # character so the audio lines up with the slow-text reveal at
        # `cps` characters per second. Blips play on every other letter.
        global speakers
        # unicode(): this runs under Ren'Py's Python 2.
        tokens = textsupport.tokenize(unicode(what))
        odd = False
        queue = []
        cps_stack = []
        # Lead-in silence of one character's duration.
        queue.append("<silence %0.2f>" % (1.0/cps))
        for token_type, token_text in tokens:
            if token_type == TEXT:
                speed = 1.0/cps
                for letter in token_text:
                    odd = not odd
                    if letter in ', ':
                        # Commas and spaces are always silent, and reset
                        # the odd/even alternation so blips restart cleanly
                        # on the next word.
                        queue.append("<silence %0.2f>" % speed)
                        odd = False
                    else:
                        if odd:
                            queue.append("<from 0 to %0.2f>audio/sfx-blipfemale.ogg" % speed)
                        else:
                            queue.append("<silence %0.2f>" % speed)
            if token_type == TAG:
                # Honor {cps=*F}, {cps=N} and {/cps} text tags so blip
                # timing tracks mid-line speed changes.
                match_cps_multiplier = re.match(r'cps=\*([0-9\.]+)', token_text)
                match_cps = re.match(r'cps=([0-9\.]+)', token_text)
                match_close_cps = re.match(r'/cps', token_text)
                if match_cps_multiplier:
                    cps_stack.append(cps)
                    cps *= float(match_cps_multiplier.group(1))
                elif match_cps:
                    cps_stack.append(cps)
                    # BUG FIX: this previously read match_cps_multiplier,
                    # which is None in this branch, so any plain {cps=N}
                    # tag raised AttributeError.
                    cps = float(match_cps.group(1))
                elif match_close_cps:
                    cps = cps_stack.pop()
                    odd = False
        renpy.sound.queue(queue, clear_queue=True)

    def blip_show_function(who, what, **kwargs):
        # show_function wrapper: queue the blip track, then defer to the
        # default say-screen display behavior.
        cps = renpy.game.preferences.text_cps
        if cps > 0:  # cps == 0 means "show text instantly"; no blips then.
            queue_blips(who, what, cps)
        return renpy.character.show_display_say(
            who,
            what,
            **kwargs)

    return renpy.character.Character(name,
        image=image,
        callback=character_callback(image),
        show_function=blip_show_function
        )
def MouthSwitch(character, talking_displayable, quiet_displayable):
    """Return a displayable that shows `talking_displayable` while
    `character` is in the speakers set and `quiet_displayable` otherwise."""
    talking_condition = "speakers.IsSpeaking('%s')" % character
    return renpy.display.layout.ConditionSwitch(
        talking_condition, talking_displayable,
        "True", quiet_displayable
    )
def IsSpeaking(character):
    """Return True while `character` is currently registered as speaking."""
    currently_talking = character in speakers
    return currently_talking
init -1 python in animate:
def ImageSequence(path, start, end, pause=1.0/30.0, reverse=False, repeat=True, **properties):
    """Build an Animation from 4-digit zero-padded numbered .png frames.

    path    -- directory holding frames named like 0208.png.
    start   -- first frame number (inclusive).
    end     -- last frame number (inclusive).
    pause   -- delay between frames (default 1/30 s).
    reverse -- if True, play back down through the interior frames
               (ping-pong; the endpoint frames are not doubled).
    repeat  -- if False, hold on the final frame instead of looping.
    """
    fmt = "{:s}/{:04d}.png".format
    sequence = [fmt(path, n) for n in xrange(start, end + 1)]
    if reverse:
        sequence += [fmt(path, n) for n in reversed(xrange(start + 1, end))]
    # Animation takes alternating image, delay arguments.
    anim_args = []
    for frame in sequence:
        anim_args.extend((frame, pause))
    if not repeat:
        # Dropping the trailing delay makes Animation stop on the last frame.
        anim_args.pop()
    return renpy.display.anim.Animation(*anim_args, **properties)
Usage example
Code: Select all
# Compose the full sprite from body, eyes and mouth layers; the mouth layer
# is a MouthSwitch, so it shows the talking mouth only while "zoe" is in the
# speakers set.
image composite zoe casual = Composite(
    (853, 1440),  # overall canvas size (width, height)
    (0, 0), "composite zoe casual body",
    (280, 340), "composite zoe eyes",
    (280, 475), speakers.MouthSwitch("zoe", "composite zoe mouth talking","composite zoe mouth quiet")
)
# Note: I don't know how to express ATL's "choice" statement in Python, so the blink logic below stays in ATL.
# ATL: idle eyes with an occasional blink. Each pass through the loop picks
# one choice: the open-eye frame (default weight 1.0) or, with weight 0.1,
# the blink frame sequence — so a blink plays roughly 1 time in 11.
image composite zoe eyes:
    choice:
        "zoe casual/0208.png"
    choice 0.1:
        animate.ImageSequence("zoe casual", 208, 220, repeat = False)
        # NOTE(review): the ATL crop property is usually written with a
        # space, `crop (x, y, w, h)` — confirm this form parses as intended.
        crop(280, 340, 273,123)
        # Wait for the 13-frame blink at 30 fps before looping.
        pause (220 - 208)/30.0
    repeat