# Requires `import numpy as np` and `from music21 import interval`.
# min_midi_pitches / max_midi_pitches are per-voice reference MIDI pitch bounds
# (numpy arrays, one entry per voice) defined outside this excerpt.
if transpose:
    # Current pitch range of each voice in this chorale
    midi_pitches = [[n.pitch.midi for n in chorale.parts[voice_id].flat.notes]
                    for voice_id in voice_ids]
    min_midi_pitches_current = np.array([min(l) for l in midi_pitches])
    max_midi_pitches_current = np.array([max(l) for l in midi_pitches])
    # Range of transpositions (in semitones) that keeps every voice within its reference bounds
    min_transposition = max(min_midi_pitches - min_midi_pitches_current)
    max_transposition = min(max_midi_pitches - max_midi_pitches_current)
    for semi_tone in range(min_transposition, max_transposition + 1):
        try:
            # Convert the semitone count to a diatonic specifier and generic number,
            # then build the corresponding music21 Interval and transpose the chorale
            interval_type, interval_nature = interval.convertSemitoneToSpecifierGeneric(semi_tone)
            transposition_interval = interval.Interval(str(interval_nature) + interval_type)
            chorale_transposed = chorale.transpose(transposition_interval)
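# A minimal, self-contained sketch (not part of the original script) of the
# semitone-to-interval conversion used in the loop above, assuming only that
# music21 is installed. It mirrors the number-then-specifier string built there
# (e.g. '3m'), which music21's Interval constructor parses like the usual 'm3'.
from music21 import interval, note

for semi_tone in (0, 3, 7):
    specifier, generic = interval.convertSemitoneToSpecifierGeneric(semi_tone)
    # e.g. 0 -> ('P', 1), 3 -> ('m', 3), 7 -> ('P', 5)
    itv = interval.Interval(str(generic) + specifier)
    # Transpose a single note to show the effect of each interval
    print(semi_tone, itv.name, note.Note('C4').transpose(itv).nameWithOctave)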