_This is my code_
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 03 10:07:42 2017

@author: Shreyans
"""
import os
import pickle
import numpy as np
from keras.models import Sequential
import gensim
from keras.layers.recurrent import LSTM, SimpleRNN
from sklearn.model_selection import train_test_split
import theano
theano.config.optimizer = "None"
import sys
sys.setdefaultencoding("ISO-8859-1")
conversation.pickle.encode('utf-8').strip()
with open('chatbot.pickle', 'rb') as f:
    (vec_x, vec_y) = pickle.load(f)
print("results")
print(vec_x)
vec_x = np.array(vec_x, dtype=np.object)
vec_y = np.array(vec_y, dtype=np.object)
x_train, x_test, y_train, y_test = train_test_split(vec_x, vec_y, test_size=0.2, random_state=1)
model = Sequential()
model.add(LSTM(output_dim=500, input_shape=x_train.shape[1:], return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))
model.add(LSTM(output_dim=300, input_shape=x_train.shape[1:], return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))
model.add(LSTM(output_dim=300, input_shape=x_train.shape[1:], return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))
model.add(LSTM(output_dim=300, input_shape=x_train.shape[1:], return_sequences=True, init='glorot_normal', inner_init='glorot_normal', activation='sigmoid'))
model.compile(loss='cosine_proximity', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
model.save('LSTM5000.h5')
predictions = model.predict(x_test)
mod = gensim.models.Word2Vec.load('word2vec.bin')
[mod.most_similar([predictions[10][i]])[0] for i in range(15)]
_This is the error I'm getting:_
/usr/local/lib/python3.5/dist-packages/keras/models.py:939: UserWarning: The nb_epoch argument in fit has been renamed epochs.
warnings.warn('The nb_epoch argument in fit '
Train on 0 samples, validate on 1 samples
Epoch 1/500
Traceback (most recent call last):
File "chatbotlstmtrain.py", line 54, in
model.fit(x_train, y_train, nb_epoch=500, validation_data=(x_test, y_test))
File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 960, in fit
validation_steps=validation_steps)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1657, in fit
validation_steps=validation_steps)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1233, in _fit_loop
callbacks.on_epoch_end(epoch, epoch_logs)
File "/usr/local/lib/python3.5/dist-packages/keras/callbacks.py", line 73, in on_epoch_end
callback.on_epoch_end(epoch, logs)
File "/usr/local/lib/python3.5/dist-packages/keras/callbacks.py", line 307, in on_epoch_end
self.progbar.update(self.seen, self.log_values, force=True)
AttributeError: 'ProgbarLogger' object has no attribute 'log_values'
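The "Train on 0 samples, validate on 1 samples" line above suggests the training split came back empty. Here is a minimal sketch (the array shapes are made up, assuming only a single sample pair came out of the pickle) of how train_test_split can leave the training set with zero samples:
import numpy as np
from sklearn.model_selection import train_test_split
# Hypothetical case: the pickle held only one (x, y) pair.
vec_x = np.zeros((1, 15, 300))
vec_y = np.zeros((1, 15, 300))
# With a single sample, test_size=0.2 rounds up to one test sample and zero training samples.
# (Newer scikit-learn versions raise a ValueError here instead of silently returning an empty split.)
x_train, x_test, y_train, y_test = train_test_split(vec_x, vec_y, test_size=0.2, random_state=1)
print(len(x_train), len(x_test))  # 0 1 -> model.fit() then has nothing to train on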
I also have this problem. Sometimes it works and sometimes it doesn't; for me it only happens on Windows, not on Linux. There is no obvious cause and I can't reliably reproduce it: the bug appears and then disappears after seemingly unrelated code changes.
Here is one way to reproduce this error using degenerate input data:
import numpy as np
import keras.layers
import keras.models
import keras.optimizers
inputs = keras.layers.Input((1, 1))
model = keras.models.Model(inputs=inputs, outputs=inputs)
model.compile(optimizer=keras.optimizers.Adam(), loss='binary_crossentropy')
model.fit(np.empty((0, 1, 1)), np.empty((0, 1, 1)))
Resulting in:
Using TensorFlow backend.
Epoch 1/1
Traceback (most recent call last):
File "bug.py", line 10, in <module>
model.fit(np.empty((0, 1, 1)), np.empty((0, 1, 1)))
File "C:\Program Files\Python36\lib\site-packages\keras\engine\training.py", line 1705, in fit
validation_steps=validation_steps)
File "C:\Program Files\Python36\lib\site-packages\keras\engine\training.py", line 1255, in _fit_loop
callbacks.on_epoch_end(epoch, epoch_logs)
File "C:\Program Files\Python36\lib\site-packages\keras\callbacks.py", line 77, in on_epoch_end
callback.on_epoch_end(epoch, logs)
File "C:\Program Files\Python36\lib\site-packages\keras\callbacks.py", line 339, in on_epoch_end
self.progbar.update(self.seen, self.log_values)
AttributeError: 'ProgbarLogger' object has no attribute 'log_values'
I am not saying that everyone who sees this error has degenerate input data, but if the input data were checked for degeneracy, it could help separate those cases from the remaining ones.
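A minimal sketch of such a check (the helper name and its arguments are placeholders, not part of Keras):
def check_fit_inputs(x_train, y_train, batch_size):
    # Reject the degenerate cases reported in this thread before calling model.fit().
    if len(x_train) == 0 or len(y_train) == 0:
        raise ValueError("Training data is empty; fit() would run zero batches.")
    if len(x_train) != len(y_train):
        raise ValueError("x_train and y_train have different numbers of samples.")
    if batch_size > len(x_train):
        raise ValueError("batch_size (%d) exceeds the number of samples (%d)" % (batch_size, len(x_train)))
# Example: call check_fit_inputs(x_train, y_train, batch_size=32) right before model.fit(...).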
Set batch_size to a smaller number. This error occurs when batch_size is set to a value larger than the number of samples in your dataset.
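A minimal sketch of this workaround with a toy model (the layer, shapes, and numbers are made up for illustration, not taken from the original code):
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
x_train = np.random.rand(3, 10)   # only 3 samples
y_train = np.random.rand(3, 1)
model = Sequential()
model.add(Dense(1, input_shape=(10,)))
model.compile(optimizer='adam', loss='mse')
# Clamp batch_size so it can never exceed the number of samples.
batch_size = min(32, len(x_train))
model.fit(x_train, y_train, epochs=2, batch_size=batch_size)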
I also ran into this problem and fixed it: I found my data list was empty.
I decided to post a link to a comment I made in another issue report. That one was closed, so I don't want it to be missed. I propose a change to Keras that I believe will improve usability: https://github.com/keras-team/keras/issues/3657#issuecomment-455375399
Set batch_size to a smaller number. This error occurs when batch_size is set to a value larger than the number of samples in your dataset.
That was indeed the problem. Thank you very much!
Please see the PR with the fix above.
It fixes this and some related bugs, and contains regression tests covering how these bugs manifest. The reasoning behind the change set is described there as well.
This type of error occurs when the dataset is small and the batch size is large, so I changed the batch size to 4 and it works.
Happy coding.
I also ran into this problem and fixed it: I found my data list was empty.
Yes, that's exactly what it was for me.