 

How to have a variable number of hidden layers in Tensorflow?

Suppose we want to experiment with different numbers of hidden layers and different layer sizes. How can we do this in TensorFlow?

Consider the following example to make it clear:

# Create a neural network layer
def fc_layer(input, size_in, size_out):
    w = tf.Variable(tf.truncated_normal([None, size_in, size_out]), name="W")
    b = tf.Variable(tf.constant(0.1, shape=[size_out]))
    act = tf.matmul(input, w) + b
    return act

n_hiddenlayers = 3  # number of hidden layers
hidden_layer = tf.placeholder(tf.float32, [n_hiddenlayers, None, None])
# considering 4 as the size of inputs and outputs of all layers
sizeInpOut = 4
for i in range(n_hiddenlayers):
    hidden_layer(i,:,:) = tf.nn.sigmoid(fc_layer(X, sizeInpOut, sizeInpOut))

This results in an error on the line hidden_layer(i,:,:) = .... In other words, I need a tensor of tensors.

asked Nov 22 '25 by H.Radmard

1 Answer

I did this by using a Python list to hold the different layers, as follows; it seemed to work fine.

    import tensorflow as tf

    # inputs
    x_size = 2          # first layer (input) nodes
    y_size = 1          # final layer (output) nodes
    h_size = [3, 4, 3]  # variable-length list of hidden layer sizes
    type_in = "classification"  # task type; replaces self.type_in used below

    # set up input and output
    X = tf.placeholder(tf.float32, [None,x_size])
    y_true = tf.placeholder(tf.float32, [None,y_size])

    # set up parameters
    W = []
    b = []
    layer = []

    # first layer
    W.append(tf.Variable(tf.random_normal([x_size, h_size[0]], stddev=0.1)))
    b.append(tf.Variable(tf.zeros([h_size[0]])))

    # add hidden layers (variable number)
    for i in range(1,len(h_size)):
        W.append(tf.Variable(tf.random_normal([h_size[i-1], h_size[i]], stddev=0.1)))
        b.append(tf.Variable(tf.zeros([h_size[i]])))

    # add final layer
    W.append(tf.Variable(tf.random_normal([h_size[-1], y_size], stddev=0.1)))
    b.append(tf.Variable(tf.zeros([y_size])))

    # define model
    layer.append(tf.nn.relu(tf.matmul(X, W[0]) + b[0]))

    for i in range(1,len(h_size)):
        layer.append(tf.nn.relu(tf.matmul(layer[i-1], W[i]) + b[i]))

    if type_in == "classification":
        y_pred = tf.nn.sigmoid(tf.matmul(layer[-1], W[-1]) + b[-1])
        loss = tf.reduce_mean(-1. * ((y_true * tf.log(y_pred)) + ((1.-y_true) * tf.log(1.-y_pred))))
        correct_prediction = tf.equal(tf.round(y_pred), tf.round(y_true))
        metric = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        metric_name = "accuracy"
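
For completeness, here is a rough sketch of how the graph above could be trained and evaluated in a session. The optimizer choice, the toy XOR-style data, and the step count are assumptions added purely for illustration; they are not part of the original answer.

    import numpy as np

    # plain gradient descent on the loss defined above (an assumption, any optimizer works)
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # toy data: 2 input features, 1 binary target (XOR pattern)
    X_data = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]], dtype=np.float32)
    y_data = np.array([[0.], [1.], [1.], [0.]], dtype=np.float32)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(2000):
            sess.run(train_step, feed_dict={X: X_data, y_true: y_data})
        acc = sess.run(metric, feed_dict={X: X_data, y_true: y_data})
        print(metric_name, acc)

Because the layer sizes only enter through the h_size list, you can rebuild the same graph with any number of hidden layers just by changing that list, e.g. h_size=[10] or h_size=[5, 5, 5, 5].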
answered Nov 25 '25 by tea_pea