Skip to content

Commit 42d45ac

Browse files
author
Ömer Sezer
committed
version 0.6
1 parent ee49eaf commit 42d45ac

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+3410
-2
lines changed

MusicGeneration

-1
This file was deleted.
File renamed without changes.

MusicGenerationProject/.idea/modules.xml

+8
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

MusicGenerationProject/.idea/workspace.xml

+330
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

MusicGenerationProject/Results.txt

+310
Large diffs are not rendered by default.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
90.4 KB
Binary file not shown.

MusicGenerationProject/data_utils.py

+137
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,137 @@
from music_utils import *
from preprocess import *
from keras.utils import to_categorical

# Load the raw training data once at import time so the module-level values
# below are available as defaults for generate_music() and predict_and_sample().
chords, abstract_grammars = get_musical_data('data/original_metheny.mid')
corpus, tones, tones_indices, indices_tones = get_corpus_data(abstract_grammars)
N_tones = len(set(corpus))  # number of unique tones in the corpus

# Seed values for the inference model.
n_a = 64                              # LSTM hidden-state size
x_initializer = np.zeros((1, 1, 78))  # one-hot seed input
a_initializer = np.zeros((1, n_a))    # initial hidden state
c_initializer = np.zeros((1, n_a))    # initial cell state
def load_music_utils():
    """Load and preprocess the training corpus for the music-generation model.

    Returns:
        X -- training inputs produced by data_processing
        Y -- training targets produced by data_processing
        N_tones -- number of unique tones, as reported by data_processing
        indices_tones -- dict mapping indices (0-77) back to tone strings
    """
    chords, abstract_grammars = get_musical_data('data/original_metheny.mid')
    corpus, tones, tones_indices, indices_tones = get_corpus_data(abstract_grammars)
    # NOTE: the original computed N_tones = len(set(corpus)) here and then
    # immediately overwrote it with data_processing's return value; that dead
    # store has been removed.
    X, Y, N_tones = data_processing(corpus, tones_indices, 60, 30)
    return (X, Y, N_tones, indices_tones)
def generate_music(inference_model, corpus = corpus, abstract_grammars = abstract_grammars, tones = tones, tones_indices = tones_indices, indices_tones = indices_tones, T_y = 10, max_tries = 1000, diversity = 0.5):
    """
    Generates music using a model trained to learn musical patterns of a jazz
    soloist, saves it to output/my_music.midi, and returns the audio stream.

    Arguments:
    inference_model -- Keras model instance, passed to predict_and_sample()
        to produce tone indices
    corpus -- musical corpus, list of tones as strings (ex: 'C,0.333,<P1,d-5>')
    abstract_grammars -- list of grammars, one element can be:
        'S,0.250,<m2,P-4> C,0.250,<P4,m-2> A,0.250,<P4,m-2>'
    tones -- set of unique tones, ex: 'A,0.250,<M2,d-4>' is one element of the set.
    tones_indices -- a python dictionary mapping a unique tone
        (ex: A,0.250,< m2,P-4 >) into its corresponding index (0-77)
    indices_tones -- a python dictionary mapping indices (0-77) into their
        corresponding unique tone (ex: A,0.250,< m2,P-4 >)
    T_y -- integer, intended length of the generated sequence
    max_tries -- integer, intended cap on generation attempts
    diversity -- scalar, intended sampling temperature

    NOTE(review): corpus, abstract_grammars, tones, tones_indices, T_y,
    max_tries and diversity are accepted but never read in this body; the
    chord data comes from the module-level `chords` global rather than a
    parameter. Confirm whether that is intentional before relying on them.

    Returns:
    out_stream -- music21 stream.Stream containing the generated sounds
    """

    # Set up the music21 stream that collects all generated sounds.
    out_stream = stream.Stream()

    # Initialize chord variables
    curr_offset = 0.0  # write position (in beats) within out_stream
    num_chords = int(len(chords) / 3)  # number of different sets of chords

    print("Predicting new values for different set of chords.")
    # For each set of chords, generate a sequence of tones and use the current
    # chords to convert it into actual sounds.
    # NOTE(review): the range starts at 1, so chord set 0 is never used —
    # presumably intentional, but worth confirming.
    for i in range(1, num_chords):

        # Retrieve current chord set as a music21 Voice
        curr_chords = stream.Voice()

        # Loop over the chords of the current set of chords
        for j in chords[i]:
            # Place each chord at its offset within a 4-beat measure.
            curr_chords.insert((j.offset % 4), j)

        # Generate a sequence of tone indices using the model, then map each
        # index back to its tone string.
        _, indices = predict_and_sample(inference_model)
        indices = list(indices.squeeze())
        pred = [indices_tones[p] for p in indices]

        # Build the predicted grammar string, seeded with a fixed 'C,0.25 ' tone.
        predicted_tones = 'C,0.25 '
        for k in range(len(pred) - 1):
            predicted_tones += pred[k] + ' '

        predicted_tones += pred[-1]

        #### POST PROCESSING OF THE PREDICTED TONES ####
        # Treat "A" and "X" tones as "C" tones. It is a common choice.
        predicted_tones = predicted_tones.replace(' A',' C').replace(' X',' C')

        # Pruning #1: smoothing measure
        predicted_tones = prune_grammar(predicted_tones)

        # Use predicted tones and current chords to generate sounds
        sounds = unparse_grammar(predicted_tones, curr_chords)

        # Pruning #2: removing repeated and too-close-together sounds
        sounds = prune_notes(sounds)

        # Quality assurance: clean up sounds
        sounds = clean_up_notes(sounds)

        # Print the number of actual notes (excluding rests/chords) generated
        print('Generated %s sounds using the predicted values for the set of chords ("%s") and after pruning' % (len([k for k in sounds if isinstance(k, note.Note)]), i))

        # Insert the generated sounds and the backing chords into the output
        # stream, shifted by the current write offset.
        for m in sounds:
            out_stream.insert(curr_offset + m.offset, m)
        for mc in curr_chords:
            out_stream.insert(curr_offset + mc.offset, mc)

        curr_offset += 4.0  # advance one 4-beat measure

    # Set the tempo of the output stream to 130 beats per minute.
    out_stream.insert(0.0, tempo.MetronomeMark(number=130))

    # Save the audio stream to a MIDI file.
    mf = midi.translate.streamToMidiFile(out_stream)
    mf.open("output/my_music.midi", 'wb')
    mf.write()
    print("Your generated music is saved in output/my_music.midi")
    mf.close()

    # Play the final stream through output (see 'play' lambda function above)
    # play = lambda x: midi.realtime.StreamPlayer(x).play()
    # play(out_stream)

    return out_stream
def predict_and_sample(inference_model, x_initializer = x_initializer, a_initializer = a_initializer,
                       c_initializer = c_initializer):
    """
    Run the inference model once and greedily sample the most likely value at
    each generated time step.

    Arguments:
    inference_model -- Keras model instance for inference time
    x_initializer -- numpy array of shape (1, 1, 78), one-hot vector seeding generation
    a_initializer -- numpy array of shape (1, n_a), initial hidden state of the LSTM cell
    c_initializer -- numpy array of shape (1, n_a), initial cell state of the LSTM cell

    Returns:
    results -- numpy array of shape (Ty, 78): one-hot encodings of the sampled values
    indices -- numpy array of shape (Ty, 1): indices of the sampled values
    """
    # Forward pass: the model consumes the seed input and the initial states.
    probabilities = inference_model.predict([x_initializer, a_initializer, c_initializer])

    # Greedy sampling: take the highest-probability index at every step, then
    # re-encode the chosen indices as one-hot vectors over the 78 tone classes.
    sampled_indices = np.argmax(probabilities, axis = -1)
    one_hot = to_categorical(sampled_indices, num_classes=78)

    return one_hot, sampled_indices

0 commit comments

Comments
 (0)