```python
# image data shape: (7438, 1, 28, 28)  ->  7438 samples, 1 channel, 28x28
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt

n = train_x[0][0].astype(np.float_)           # one 28x28 image as float
n = np.log(n + 1.) * (255. / math.log(256))   # log scaling into [0, 255]
n = n.astype(np.uint8)                        # float to uint8
plt.gray()
plt.imshow(n, cmap='gray')
plt.show()
```
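Note that `cv2` is imported above but not actually used. If a plain min-max stretch is enough instead of log scaling, `cv2.normalize` can produce the uint8 image directly. A minimal sketch, assuming the same float array `n` from the snippet above:

```python
import cv2
import numpy as np
import matplotlib.pyplot as plt

# `n` is the float image from the snippet above.
# Min-max stretch to [0, 255], then convert to uint8 (alternative to log scaling).
m = cv2.normalize(n.astype(np.float32), None, alpha=0, beta=255,
                  norm_type=cv2.NORM_MINMAX).astype(np.uint8)
plt.imshow(m, cmap='gray')
plt.show()
```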
(MNIST) How to read ‘mnist.pkl.gz’?
```python
import gzip
import _pickle as cPickle  # Python 3; see the cPickle note below

f = gzip.open('../data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = cPickle.load(f, encoding='latin1')
f.close()
```
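Each of the three splits unpacks into an image array and a label array. A minimal sketch of inspecting and displaying one digit, assuming the standard layout of `mnist.pkl.gz` (images flattened to length-784 float vectors):

```python
import matplotlib.pyplot as plt

# Assuming each split is an (images, labels) pair with flattened 784-float images.
train_x, train_y = training_data
print(train_x.shape, train_y.shape)   # e.g. (50000, 784) (50000,)

plt.imshow(train_x[0].reshape(28, 28), cmap='gray')
plt.title('label: %d' % train_y[0])
plt.show()
```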
How to install cPickle on Python 3?
# NameError: name ‘cPickle’ is not defined
‘cPickle’ exists only on Python 2; it is not available on Python 3. Do it like this instead.
```python
# import cPickle            # Python 2 only
import _pickle as cPickle   # Python 3 replacement
```
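There is nothing to install: on Python 3 the same C-accelerated pickler ships as `_pickle`, and the plain `pickle` module already uses it under the hood. A version-agnostic import, as an alternative to the alias above:

```python
try:
    import cPickle as pickle   # Python 2
except ImportError:
    import pickle              # Python 3 (C-accelerated _pickle is used automatically)
```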
UserWarning: Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after?
Solution: Define the layers once, before building the models, and set `trainable` on each layer every time you build a model, then call `model.compile` afterwards.
```python
from keras.layers import Dense, Activation, LSTM, Input
from keras.models import Model
from keras.optimizers import Adam

# emb_dim, hid_dim, vocab_size are defined elsewhere (model hyperparameters).

# F: FeatureExtractor (= G: Generator)
f1_dense = Dense(emb_dim, input_shape=(128, vocab_size,), name='F_1')
f2_activation = Activation('relu')
f3_lstm = LSTM(hid_dim, return_sequences=False, name='F_3')
f4_activation = Activation('relu')

# D: Discriminator
d1_dense = Dense(1, input_shape=(hid_dim,))
d2_activation = Activation('sigmoid')


def design_model_GAN_for_train_F():
    # Train F (generator); freeze D (discriminator).
    f1_dense.trainable = True
    f3_lstm.trainable = True
    d1_dense.trainable = False
    inputs = Input(shape=(128, vocab_size,))
    y = f1_dense(inputs)
    y = f2_activation(y)
    y = f3_lstm(y)
    y = f4_activation(y)
    y = d1_dense(y)
    outputs = d2_activation(y)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(lr=1e-3, beta_1=0.1),
                  loss='binary_crossentropy')
    return model


def design_model_GAN_for_train_D():
    # Train D (discriminator); freeze F (generator).
    f1_dense.trainable = False
    f3_lstm.trainable = False
    d1_dense.trainable = True
    inputs = Input(shape=(128, vocab_size,))
    y = f1_dense(inputs)
    y = f2_activation(y)
    y = f3_lstm(y)
    y = f4_activation(y)
    y = d1_dense(y)
    outputs = d2_activation(y)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(lr=1e-3, beta_1=0.1),
                  loss='binary_crossentropy')
    return model
```
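Because both functions share the same layer objects, the alternating training loop just builds each model once and then calls them in turn. A rough usage sketch; `n_epochs`, `real_batch`, and `fake_batch` are hypothetical placeholders, not defined above:

```python
import numpy as np

model_F = design_model_GAN_for_train_F()   # updates F, D frozen
model_D = design_model_GAN_for_train_D()   # updates D, F frozen

for epoch in range(n_epochs):
    # 1) update D: real samples labelled 1, fake samples labelled 0
    model_D.train_on_batch(real_batch, np.ones((len(real_batch), 1)))
    model_D.train_on_batch(fake_batch, np.zeros((len(fake_batch), 1)))

    # 2) update F: try to make D output 1 for the fake batch
    model_F.train_on_batch(fake_batch, np.ones((len(fake_batch), 1)))
```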