psychopy / psychopy

For running psychology and neuroscience experiments
http://www.psychopy.org
GNU General Public License v3.0

[Bug]: The sound stimulus does not play. #6943

Open JeongWoo7780 opened 2 days ago

JeongWoo7780 commented 2 days ago

PsychoPy Version

2024.2.1

What OS is PsychoPy running on?

Windows 10

Bug Description

My Python version: 3.10.11

I'm using a venv virtual environment. I recently upgraded Python (3.9 → 3.10.11) and found that my experiment code no longer works as it did before.

Previously, the code worked fine and the sound played correctly; I have attached a video of it.

Now, when I run the same code, no error message is output. The text and image stimuli are presented as expected, but the sound stimuli do not play.

I have checked the sampling rate of the wav files, the sound settings, and so on. In the past, a sampling-rate mismatch would at least produce an error message.
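
For reference, a quick way to check a wav file's sample rate with just the Python standard library (the path here is one of the files used in the script below):

import wave

with wave.open("audio_44100/welcome.wav", "rb") as wav_file:
    print(wav_file.getframerate())   # expected: 44100
    print(wav_file.getnchannels())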

However, I have no idea what the current problem is.

https://github.com/user-attachments/assets/4d76b760-77df-4733-a2c8-da538d851b39

I have included the code under Additional context below.

Expected Behaviour

The sound stimuli should play correctly.

Currently, only the text and image stimuli are working properly.

Steps to Reproduce

  1. I ran the experimental paradigm with PsychoPy in a Python 3.10.11 venv virtual environment.
  2. ...

Additional context

from psychopy import visual, core, event
from psychopy.sound import Sound
import time
from psychopy import prefs

prefs.hardware['audioLib'] = ['ptb']
#prefs.hardware["audioDevice"] == "default"
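# note: as far as I understand the PsychoPy docs, these prefs need to be set
# before `from psychopy.sound import Sound` above for the audioLib choice to take effect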

win = visual.Window(size=(2560, 1440), fullscr=True, color="white", winType='pyglet', allowGUI=True, title='CBT Cognitive')

def create_text_stim(text, height, wrap_width=1.7):
    return visual.TextStim(win, text=text, color="black", height=height, wrapWidth=wrap_width)

# image stim

def create_image_stim(image_path):
    return visual.ImageStim(win, image=image_path)

# sound stim
def create_sound_stim(sound_path, sample_rate=44100):
    return Sound(sound_path, sampleRate=sample_rate)

# countdown

def countdown(seconds=3):
    for i in range(seconds, 0, -1):
        count = visual.TextStim(win, text=str(i), color="black", height=0.3)
        count.draw()
        win.flip()
        core.wait(1)

# speaker image & text

def show_speaker_with_text(text, duration):
    speaker = create_image_stim("exp_images/speaker.png")
    text_stim = create_text_stim(text, height=0.1)
    speaker.pos = (0, -0.3)
    text_stim.pos = (0, 0.2)
    speaker.draw()
    text_stim.draw()
    win.flip()
    core.wait(duration)

# text stim1
def present_text_stimulus_no_countdown_with_sound(text, duration, height, sound_path):
    stimulus = create_text_stim(text, height, wrap_width=1.7)
    stimulus.draw()
    win.flip()
    sound = create_sound_stim(sound_path, sample_rate=44100)
    sound.play()
    core.wait(duration)

# text stim2

def present_text_stimulus_with_countdown_with_sound(text, duration, height, speaker_duration=10, sound_path=None):
    stimulus = create_text_stim(text, height, wrap_width=1.7)
    stimulus.draw()
    win.flip()
    if sound_path:
        sound = create_sound_stim(sound_path, sample_rate=44100)
        sound.play()
    core.wait(duration)
    countdown()
    show_speaker_with_text("Speak", speaker_duration)

# text stim
def present_text_stimulus_no_countdown(text, duration, height):
    stimulus = create_text_stim(text, height, wrap_width=1.7) 
    stimulus.draw()
    win.flip()
    core.wait(duration)

# two images stim

def present_two_images(image_path1, image_path2, duration, speaker_duration=10, sound_path=None):
    img1 = create_image_stim(image_path1)
    img2 = create_image_stim(image_path2)
    img1.pos = (-0.5, 0)
    img2.pos = (0.5, 0)
    img1.draw()
    img2.draw()
    win.flip()
    if sound_path:
        sound = create_sound_stim(sound_path, sample_rate=44100)
        sound.play()
    core.wait(duration)
    countdown()
    show_speaker_with_text("Speak", speaker_duration)

# image & text
def present_image_and_text_above(image_path, text, duration, text_height, speaker_duration=10, sound_path=None):
    img = create_image_stim(image_path)
    txt = create_text_stim(text, text_height, wrap_width=1.8)
    img.pos = (0, -0.3)
    txt.pos = (0, 0.6)
    txt.draw()
    img.draw()
    win.flip()
    if sound_path:
        sound = create_sound_stim(sound_path, sample_rate=44100)
        sound.play()
    core.wait(duration)
    countdown()
    show_speaker_with_text("Speak", speaker_duration)

# Experiment loop
def run_experiment():
    welcome_text = "Example"
    present_text_stimulus_no_countdown_with_sound(welcome_text, 13, height=0.07, sound_path="audio_44100/welcome.wav")

    instruction_text = "Example"
    present_text_stimulus_no_countdown_with_sound(instruction_text, 11, height=0.07, sound_path="audio_44100/instruction1.wav")

    sentence = "Example"
    present_text_stimulus_with_countdown_with_sound(sentence, 8, height=0.09, sound_path="audio_44100/sentence.wav", speaker_duration=9)

    instruction_text2 = "Example"
    present_text_stimulus_no_countdown_with_sound(instruction_text2, 12, height=0.07, sound_path="audio_44100/instruction2.wav")

    present_text_stimulus_with_countdown_with_sound(sentence, 8, height=0.09, sound_path="audio_44100/sentence.wav", speaker_duration=9)

    instruction_text3 = "Example"
    present_text_stimulus_no_countdown_with_sound(instruction_text3, 8, height=0.08, sound_path="audio_44100/instruction3.wav")

    questions = [
        "Example",
        "Example",
        "Example",
        "Example",
        "Example",
        "Example"
    ]

    for i, question in enumerate(questions, 1):
        present_text_stimulus_with_countdown_with_sound(question, 5, height=0.1, sound_path=f"audio_44100/question_{i}.wav", speaker_duration=7)

    instruction_text4 = "Example"
    present_text_stimulus_no_countdown_with_sound(instruction_text4, 9, height=0.07, sound_path="audio_44100/instruction4.wav")

    num_sentence_1 = "Example"
    present_text_stimulus_with_countdown_with_sound(num_sentence_1, 5, height=0.25, sound_path="audio_44100/num_sentence_1.wav", speaker_duration=7)

    num_sentence_2 = "Example"
    present_text_stimulus_with_countdown_with_sound(num_sentence_2, 6, height=0.25, sound_path="audio_44100/num_sentence_2.wav", speaker_duration=8)

    reverse_text = "Example"
    present_text_stimulus_no_countdown_with_sound(reverse_text, 8, height=0.09, sound_path="audio_44100/reverse_text.wav")

    reverse_word_1 = "Example"
    present_text_stimulus_with_countdown_with_sound(reverse_word_1, 5, height=0.25, sound_path="audio_44100/reverse_word_90.wav", speaker_duration=8)

    instruction_text6 = "Example"
    present_image_and_text_above("exp_images/cat_dog.png", instruction_text6, 8, text_height=0.09, sound_path="audio_44100/instruction6.wav", speaker_duration=14)

    figure_sequential_instruction = "Example"
    present_image_and_text_above("exp_images/sequential.png", figure_sequential_instruction, 22, text_height=0.08, sound_path="audio_44100/sequential.wav", speaker_duration=10)

    rotation_instruction = "Example"
    present_image_and_text_above("exp_images/rotation_1.png", rotation_instruction, 17, text_height=0.09, sound_path="audio_44100/rotation.wav", speaker_duration=10)

    image_instruction = "Example"
    present_image_and_text_above("exp_images/toothpaste.png", image_instruction, 9, text_height=0.08, sound_path="audio_44100/image_instruction.wav", speaker_duration=8)

    image_instruction2 = "Example"
    present_image_and_text_above("exp_images/eraser.png", image_instruction2, 9, text_height=0.08, sound_path="audio_44100/image_instruction.wav", speaker_duration=8)

    end_text = "Example"
    present_text_stimulus_no_countdown(end_text, 5, height=0.1)
run_experiment()
win.close()
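
For completeness, a minimal playback check, separate from the experiment and using one of the wav files from the script above, that I can run in the same venv to isolate the sound backend would look roughly like this:

from psychopy import prefs
prefs.hardware['audioLib'] = ['ptb']   # set before importing psychopy.sound
from psychopy.sound import Sound
from psychopy import core

snd = Sound("audio_44100/welcome.wav", sampleRate=44100)
snd.play()
core.wait(3)   # keep the script alive long enough for the sound to finish
core.quit()
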
zeyus commented 2 days ago

Does the sound work normally in the virtual environment?

Do you have the standalone psychopy installed as well, or only the venv version?
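
If it helps, a rough check like this (run inside the venv) should show which sound backend your install actually picked up:

from psychopy import prefs
prefs.hardware['audioLib'] = ['ptb']
from psychopy import sound   # import after setting prefs
print(sound.Sound)           # prints the backend class that was selected
print(prefs.hardware['audioLib'])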