Commit f9ad4206 authored by mjboos

init

*.yml
*.jpg
*.png
*.svg
*~
# coding: utf-8
import joblib
import numpy as np
import matplotlib.pyplot as plt
import yaml
import seaborn as sns
import glob
import os
import pandas as pd


def get_yaml_content(fn):
    """Load one participant's ratings and group them by the stimulus sets in the module-level clf."""
    with open(fn, 'r') as f:
        # yaml.Loader is needed because the experiment script dumps Python tuples
        smpl_dict = yaml.load(f, Loader=yaml.Loader)
    # map wav index -> rating (first element of the (rating, RT, trial index) tuple)
    smpl_dict = {int(key.split('.')[0]): val[0] for key, val in smpl_dict.items()}
    rating_dict = {key: np.array([smpl_dict[idx] for idx in clf[key]]) for key in clf.keys()}
    return rating_dict


def ratings_to_df(ratings):
    """Stack the per-condition ratings into a long-format DataFrame."""
    df_dict = {'noise level': np.concatenate([ratings[key] for key in sorted(ratings.keys())]),
               'signal to noise ratio': np.concatenate([np.repeat([key.split('_')[1]], ratings[key].shape[0])
                                                        for key in sorted(ratings.keys())])}
    return pd.DataFrame.from_dict(df_dict)


# for each condition keep the indices of the 50 samples with the highest predicted values
clf = joblib.load('classification_FG_ridge_logBSC_H200_predictions.pkl')
clf = {lbl: np.argsort(data)[-50:] for lbl, data in clf.items()}

files = glob.glob('[0-9][0-9].yml')
vp_dict = {fn.split('.')[0]: ratings_to_df(get_yaml_content(fn)) for fn in files}

# per-participant boxplots
for vp, ratings in vp_dict.items():
    if os.path.exists('vp_{}.svg'.format(vp)):
        continue
    # plt.boxplot([ratings['speech_{}db_snr'.format(n)] for n in [0, 5, 10, 15]], labels=['{} db'.format(n) for n in [0, 5, 10, 15]], showmeans=True)
    # sns.swarmplot(data=ratings, x='signal to noise ratio', y='noise level', order=['{}db'.format(n) for n in [15, 10, 5, 0]])
    sns.boxplot(data=ratings, x='signal to noise ratio', y='noise level', order=['{}db'.format(n) for n in [15, 10, 5, 0]])
    plt.savefig('vp_{}.svg'.format(vp))
    plt.close()

# aggregate results over all participants
all_vp_dict = pd.concat([vp_dict[vp] for vp in vp_dict.keys()])
# sns.swarmplot(data=all_vp_dict, x='signal to noise ratio', y='noise level', order=['{}db'.format(n) for n in [15, 10, 5, 0]])
sns.boxplot(data=all_vp_dict, x='signal to noise ratio', y='noise level', order=['{}db'.format(n) for n in [15, 10, 5, 0]])
plt.savefig('all_participants.svg')
plt.close()
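# A minimal sketch, assuming the per-participant files come from the rating experiment
# below (which dumps {wav name: (rating, RT, trial index)} via yaml.dump): a file such
# as 01.yml would then look roughly like this -- the values here are illustrative only:
#
#   100.wav: !!python/tuple [3, 2.41, 0]
#   205.wav: !!python/tuple [1, 1.87, 1]
#
# get_yaml_content() converts this into {100: 3, 205: 1, ...} and then groups the
# ratings by the top-50 sample indices stored per condition in clf.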
from __future__ import division
from psychopy import visual, event, core, logging, sound
import os
import joblib
import numpy as np
import yaml
import sys


def get_indices(file_name, n_lim=50):
    """Return the union of the n_lim highest-scoring sample indices per condition."""
    probabilities = joblib.load(file_name)
    indices_to_use = np.unique(np.concatenate(
        [np.argsort(probabilities[key])[-n_lim:] for key in probabilities.keys()]))
    return indices_to_use


# participant identifier, also used as the name of the output file
vp = sys.argv[1]

file_name = 'classification_FG_ridge_logBSC_H200_predictions.pkl'
indices_to_use = get_indices(file_name, n_lim=50)
sample_list = [str(idx) + '.wav' for idx in indices_to_use]
np.random.shuffle(sample_list)

# create a window before creating your rating scale, whatever units you like:
win = visual.Window(fullscr=False, size=[1100, 800], units='pix', monitor='testMonitor')

instr = visual.TextStim(win, text="""Hi! Your task is to rate a set of stimuli on their noisiness -- how clearly you can hear speech versus background noise.
First, you will listen to a stimulus where very little noise is present.
Press any button to continue.
""")
event.clearEvents()
instr.draw()
win.flip()
if 'escape' in event.waitKeys():
    core.quit()
win.flip()
core.wait(0.35)

# low-noise example stimulus
myItem = sound.Sound('sn15_trim.wav')
myItem.play()
core.wait(6)

instr = visual.TextStim(win, text="""Now you will hear the same audio sample corrupted by a large amount of noise.
Afterwards, you will see a rating scale for the noise level. Please choose your perceived level of noisiness.
Press any button to continue.""")
event.clearEvents()
instr.draw()
win.flip()
if 'escape' in event.waitKeys():
    core.quit()
win.flip()
core.wait(0.35)

# high-noise example stimulus
myItem = sound.Sound('sn0_trim.wav')
myItem.play()
core.wait(6)

myRatingScale = visual.RatingScale(win, low=0, high=6, precision=1, scale='Noise level',
                                   labels=['No noise', 'noisy', 'very noisy'], name='Noisiness')
while myRatingScale.noResponse:
    myRatingScale.draw()
    win.flip()
    if event.getKeys(['escape']):
        core.quit()

instr = visual.TextStim(win, text="""The first example had very little noise present, the second example a large amount.
In the following experiment you will hear different audio samples. Some change their level of noisiness throughout the audio sample. Please indicate the average noisiness over the sample in your answer. Additionally, the type of noise differs between samples. Try to separate the perceived loudness from the noise level: a sample that contains quiet speech but no noise might be harder to understand than a loud, noisy sample, but it still contains less noise.
The experiment will take around 25 minutes.
Press any button to continue.
""")
event.clearEvents()
instr.draw()
win.flip()
if 'escape' in event.waitKeys():
    core.quit()
myRatingScale.reset()  # reset between repeated uses of the same scale
event.clearEvents()
win.flip()
core.wait(0.35)
data = {}
for n, speech in enumerate(sample_list):
    # trial counter, anchored near the top-right corner of the window
    x, y = win.size
    counter = visual.TextStim(win, text='{}/{} samples'.format(n + 1, len(sample_list)),
                              pos=(x / 2 - 20, y / 2 - 20), alignHoriz='right', alignVert='top')
    counter.draw()
    win.flip()  # show the counter while the sample is playing
    myItem = sound.Sound('/home/mboos/Work/sample_wavs/' + speech)
    # rate each speech sample on its noise level
    myRatingScale.reset()  # reset between repeated uses of the same scale
    event.clearEvents()
    # core.wait(0.5)  # brief pause, slightly smoother for the subject
    myItem.play()
    core.wait(6)
    while myRatingScale.noResponse:
        counter.draw()  # keep the counter visible next to the rating scale
        myRatingScale.draw()
        win.flip()
        if event.getKeys(['escape']):
            core.quit()
    data[speech] = (myRatingScale.getRating(), myRatingScale.getRT(), n)  # save for later
    # clear the screen & pause between ratings
    win.flip()
    core.wait(0.35)  # brief pause, slightly smoother for the subject

# write all ratings to <participant>.yml
with open('{}.yml'.format(vp), 'w+') as f:
    yaml.dump(data, f)
win.close()
core.quit()
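# Usage sketch (the script name is hypothetical; only the participant argument is
# prescribed by sys.argv[1] above):
#
#   python rating_experiment.py 01
#
# This would write the ratings to 01.yml, which matches the '[0-9][0-9].yml' glob
# used by the analysis script above.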