I created a model in Keras (I am a newbie) and somehow managed to train it nicely. It takes 300x300 images and tries to classify them into two groups.
# image size in pixels
img_rows, img_cols = 300, 300
# number of classes (two groups)
nb_classes = 2
# number of convolutional filters to use
nb_filters = 16
# size of pooling area for max pooling
nb_pool = 20
# convolution kernel size
nb_conv = 20
X = np.vstack([X_train, X_test]).reshape(-1, 1, img_rows, img_cols)
y = np_utils.to_categorical(np.concatenate([y_train, y_test]), nb_classes)
# build model
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# compile model
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
Now I would like to visualize the second convolutional layer and, if possible, also the first dense layer. "Inspiration" was taken from the Keras blog. Using model.summary()
I found out the names of the layers. Then I created the following Frankenstein code:
from __future__ import print_function
from scipy.misc import imsave
import numpy as np
import time
#from keras.applications import vgg16
import keras
from keras import backend as K
# dimensions of the generated pictures for each filter.
img_width = 300
img_height = 300
# the name of the layer we want to visualize
# (leftover comment from the blog example, which used keras/applications/vgg16.py)
layer_name = 'convolution2d_2'
#layer_name = 'dense_1'
# util function to convert a tensor into a valid image
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)
    # convert to RGB array
    x *= 255
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
# load model
loc_json = 'my_model_short_architecture.json'
loc_h5 = 'my_model_short_weights.h5'
with open(loc_json, 'r') as json_file:
    loaded_model_json = json_file.read()
model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
model.load_weights(loc_h5)
print('Model loaded.')
model.summary()
# this is the placeholder for the input images
input_img = model.input
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
kept_filters = []
for filter_index in range(0, 200):
    # we only scan through the first 200 filter indices
    # (leftover from the VGG16 example; this layer actually has only 16 filters)
    print('Processing filter %d' % filter_index)
    start_time = time.time()
    # we build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    layer_output = layer_dict[layer_name].output
    if K.image_dim_ordering() == 'th':
        loss = K.mean(layer_output[:, filter_index, :, :])
    else:
        loss = K.mean(layer_output[:, :, :, filter_index])
    # we compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_img)[0]
    # normalization trick: we normalize the gradient
    grads = normalize(grads)
    # this function returns the loss and grads given the input picture
    iterate = K.function([input_img], [loss, grads])
    # step size for gradient ascent
    step = 1.
    # we start from a gray image with some random noise
    if K.image_dim_ordering() == 'th':
        input_img_data = np.random.random((1, 3, img_width, img_height))
    else:
        input_img_data = np.random.random((1, img_width, img_height, 3))
    input_img_data = (input_img_data - 0.5) * 20 + 128
    # we run gradient ascent for 20 steps
    for i in range(20):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
        print('Current loss value:', loss_value)
        if loss_value <= 0.:
            # some filters get stuck to 0, we can skip them
            break
    # decode the resulting input image
    if loss_value > 0:
        img = deprocess_image(input_img_data[0])
        kept_filters.append((img, loss_value))
    end_time = time.time()
    print('Filter %d processed in %ds' % (filter_index, end_time - start_time))
# we will stitch the best 64 filters on an 8 x 8 grid.
n = 8
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top 64 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]
# build a black picture with enough space for
# our 8 x 8 filters of size 300 x 300, with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# fill the picture with our saved filters
for i in range(n):
    for j in range(n):
        img, loss = kept_filters[i * n + j]
        stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                         (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
# save the result to disk
imsave('stitched_filters_%dx%d.png' % (n, n), stitched_filters)
After executing it I get:
ValueError Traceback (most recent call last)
/home/user/conv_filter_visualization.py in <module>()
97 # we run gradient ascent for 20 steps
/home/user/.local/lib/python3.4/site-packages/theano/compile/function_module.py in __call__(self, *args, **kwargs)
857 t0_fn = time.time()
858 try:
--> 859 outputs = self.fn()
860 except Exception:
861 if hasattr(self.fn, 'position_of_error'):
ValueError: CorrMM images and kernel must have the same stack size
Apply node that caused the error: CorrMM{valid, (1, 1)}(convolution2d_input_1, Subtensor{::, ::, ::int64, ::int64}.0)
Toposort index: 8
Inputs types: [TensorType(float32, 4D), TensorType(float32, 4D)]
Inputs shapes: [(1, 3, 300, 300), (16, 1, 20, 20)]
Inputs strides: [(1080000, 360000, 1200, 4), (1600, 1600, -80, -4)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[Elemwise{add,no_inplace}(CorrMM{valid, (1, 1)}.0, Reshape{4}.0), Elemwise{Composite{(i0 * (Abs(i1) + i2 + i3))}}[(0, 1)](TensorConstant{(1, 1, 1, 1) of 0.5}, Elemwise{add,no_inplace}.0, CorrMM{valid, (1, 1)}.0, Reshape{4}.0)]]
Backtrace when the node is created(use Theano flag traceback.limit=N to make it longer):
File "/home/user/.local/lib/python3.4/site-packages/keras/models.py", line 787, in from_config
model.add(layer)
File "/home/user/.local/lib/python3.4/site-packages/keras/models.py", line 114, in add
layer.create_input_layer(batch_input_shape, input_dtype)
File "/home/user/.local/lib/python3.4/site-packages/keras/engine/topology.py", line 341, in create_input_layer
self(x)
File "/home/user/.local/lib/python3.4/site-packages/keras/engine/topology.py", line 485, in __call__
self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
File "/home/user/.local/lib/python3.4/site-packages/keras/engine/topology.py", line 543, in add_inbound_node
Node.create_node(self, inbound_layers, node_indices, tensor_indices)
File "/home/user/.local/lib/python3.4/site-packages/keras/engine/topology.py", line 148, in create_node
output_tensors = to_list(outbound_layer.call(input_tensors[0], mask=input_masks[0]))
File "/home/user/.local/lib/python3.4/site-packages/keras/layers/convolutional.py", line 356, in call
filter_shape=self.W_shape)
File "/home/user/.local/lib/python3.4/site-packages/keras/backend/theano_backend.py", line 862, in conv2d
filter_shape=filter_shape)
I guess I have some bad dimensions somewhere, but I don't even know where to start. Any help would be appreciated. Thanks.
Keras makes it quite easy to get a layer's weights and outputs. Have a look at https://keras.io/layers/about-keras-layers/ or https://keras.io/getting-started/functional-api-guide/#the-concept-of-layer-node. You can basically get them through the weights and output properties of each layer, for example:
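A minimal sketch (assuming the trained model and the X array from the question are in scope; 'convolution2d_2' is the layer name reported by model.summary()):
from keras import backend as K
# map layer names (as printed by model.summary()) to layer objects
layer_dict = dict([(layer.name, layer) for layer in model.layers])
layer = layer_dict['convolution2d_2']
# layer.weights holds the symbolic variables; get_weights() returns
# their numpy values, here [W, b]; with Theano dim ordering,
# W has shape (16, 16, 20, 20)
W, b = layer.get_weights()
print(W.shape, b.shape)
# layer.output is a symbolic tensor; wrap it in a backend function to
# evaluate it. The learning phase is passed explicitly because the
# model contains Dropout layers.
get_output = K.function([model.input, K.learning_phase()], [layer.output])
activations = get_output([X[:1].astype('float32'), 0])[0]  # 0 = test phase
print(activations.shape)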
Have a look at this project:
https://github.com/philipperemy/keras-visualize-activations
You can extract the activation map of every layer, and it works for all Keras models. The core idea looks roughly like the sketch below.
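A minimal sketch of the idea (my own hypothetical helper, not the project's actual API; it assumes a trained model and one input batch shaped like the question's data):
from keras import backend as K

def get_activations(model, x):
    # evaluate every layer's output on one input batch
    # (learning phase 0 = test mode, so Dropout is disabled)
    funcs = [K.function([model.input, K.learning_phase()], [layer.output])
             for layer in model.layers]
    return [func([x, 0])[0] for func in funcs]

# usage: one (1, 1, 300, 300) image from the question's data
# for layer, act in zip(model.layers, get_activations(model, X[:1])):
#     print(layer.name, act.shape)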