aaron-randreth / modulation-mfcc


bug latest version #1

Closed: NataTCHA closed this issue 4 months ago

NataTCHA commented 4 months ago

So if you just copy and paste the read_AG50x function into your own program, you basically need five lines of code to work with ADA's EMA routine and to extract the data needed for plotting. Below is a minimal example of a simple plot of a random pos file for Natacha. The most important lines are in bold.


import numpy
import xarray
import matplotlib.pyplot as plt # for plotting

# read_AG50x is assumed to be defined above (copied and pasted from the original script)
ema_data = read_AG50x("path/to/the/pos/file/filename.pos")

#get time values from the xarray
time = ema_data.time.values     

# get the vertical tongue tip (TTIP) movement, assuming the TTIP sensor was recorded in channel 3 (so 3-1 internally)
# channel and dimensions have to be selected via the .sel() function
TTIP = ema_data.sel(channels=2).sel(dimensions="y").ema.values 

# initialize plot
fig, ax = plt.subplots(1,1)

# plot TTIP trajectory
ax.plot(time, TTIP)
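
If you are not sure which index a given sensor ended up on, a quick check (just a sketch, nothing specific to this repo) is to print the Dataset returned by read_AG50x: it shows the time / channels / dimensions layout that .sel() operates on, and that the channels are numbered from 0, which is why channel 3 becomes index 2.

print(ema_data)                  # dims: time, channels, dimensions
print(ema_data.channels.values)  # 0-based channel indices, so channel 3 -> index 2
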
NataTCHA commented 4 months ago

Here is the completed function for loading EMA curves (eva). The channels now still have to be linked to their names. There is an allocation file that could be used for this, but the formats look messy to me, so I suggest that when the user loads an EMA file, in addition to letting them choose the channels, we ask them to enter the name of each channel (see the sketch after the script below).


import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
from scipy.io import wavfile

def read_AG50x(path_to_pos_file):
    dims = ["x","z","y","phi","theta","rms","extra"]
    channel_sample_size = {
        8 : 56,
        16 : 112,
        32 : 256
        }
    #read the whole file into memory
    pos_file = open(path_to_pos_file, mode="rb")
    file_content = pos_file.read()
    #read header size: the second line of the file gives the header length in bytes
    pos_file.seek(0)
    pos_file.readline()
    header_size = int(pos_file.readline().decode("utf8"))
    pos_file.close()
    #extract and decode the header section
    header_section = file_content[0:header_size]
    header = header_section.decode("utf8").split("\n")
    #extract file information (number of channels, ema samplerate)
    num_of_channels = int(header[2].split("=")[1])
    ema_samplerate = int(header[3].split("=")[1])
    #read data
    data = file_content[header_size:]
    data = np.frombuffer(data,np.float32)
    data = np.reshape(data,newshape=(-1,channel_sample_size[num_of_channels]))
    pos = data.reshape(len(data),-1,7) # reshape to [sample, channel, values]
    time = np.linspace(0,pos.shape[0]/ema_samplerate,pos.shape[0])
    ema_data = xr.Dataset(
                            data_vars=dict(
                                            ema=(["time","channels","dimensions"],pos)
                                        ),
                            coords=dict(
                                        time=(["time"],time),
                                        channels=(["channels"],np.arange(pos.shape[1])),
                                        dimensions=(["dimensions"],dims)
                                        ),
                            attrs=dict(
                                        device="AG50x",
                                        duration=time[-1],
                                        samplerate=ema_samplerate
                                        )
                            )
    return ema_data

# Read the AG50x data from the pos file
ema_data = read_AG50x("0003.pos")

#get time values from the xarray
time = ema_data.time.values     

# get the vertical tongue tip (TTIP) movement, assuming the TTIP sensor was recorded in channel 3 (so 3-1 internally)
# channel and dimensions have to be selected via the .sel() function
TTIP = ema_data.sel(channels=2).sel(dimensions="y").ema.values 

# initialize plot
fig, ax = plt.subplots(1,1)

# plot TTIP trajectory
ax.plot(time, TTIP)

# read the matching audio file and build its time axis
audio_samplerate, audio_data = wavfile.read("0003.wav")
audio_time = np.linspace(0, len(audio_data) / audio_samplerate, num=len(audio_data))

# Initialize the combined figure: acoustic signal on top, TTIP movement below
fig, axs = plt.subplots(2, 1, figsize=(12, 8), sharex=True)

# Plot the acoustic signal
axs[0].plot(audio_time, audio_data, color='gray', label="Acoustic signal")
axs[0].set_title('Acoustic signal')
axs[0].set_ylabel('Amplitude')
axs[0].legend()

# Plot the TTIP movement
axs[1].plot(time, TTIP, label="TTIP movement")
axs[1].set_title('Vertical movement of the tongue tip (TTIP)')
axs[1].set_xlabel('Time (s)')
axs[1].set_ylabel('TTIP vertical position (mm)')
axs[1].legend()

# Show the plots
plt.tight_layout()
plt.show()
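
As for the channel-naming proposal above, here is a minimal sketch of one way it could work (the variable user_channel_names and the example sensor labels are assumptions for illustration, not something defined in this repo): the names the user types in are assigned as the values of the existing channels coordinate, so trajectories can then be selected by sensor name instead of by index.

# hypothetical names entered by the user, one per recorded channel,
# in the same order as the channels in the pos file (TTIP in channel 3 -> position 2)
user_channel_names = ["REF", "JAW", "TTIP", "TBODY", "TDORSUM", "ULIP", "LLIP", "NOSE"]

ema_data = read_AG50x("0003.pos")
assert len(user_channel_names) == ema_data.sizes["channels"], "one name per channel"

# replace the numeric channel indices with the user-supplied names
ema_data = ema_data.assign_coords(channels=user_channel_names)

# the tongue tip trajectory can now be selected by name instead of by index
TTIP = ema_data.sel(channels="TTIP", dimensions="y").ema.values

Whether the names come from a dialog box when the file is loaded or from an allocation file, only the assign_coords step is needed once the list of names is known.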