Hello,
As a start, thanks for this nice book.
I have defined the following layer which adds 2:
class add_2(Model):
    """Layer that adds the constant 2 to its input, element-wise."""

    def __init__(self):
        # No configuration needed; just initialize the base Model.
        super().__init__()

    def call(self, X):
        return X + 2
I then defined this layer, which takes a parameter K
as input and adds it:
class add_K(Model):
    """Layer that adds a user-supplied constant K to its input."""

    def __init__(self, K):
        super().__init__()  # initialize the base Model
        self.K = K          # constant used in call()

    def call(self, X):
        return X + self.K
Now, assume I realise I always want to add 2 to layers, I can do that:
class add_2_to_layer(Model):
    """Wrapper layer: apply an inner layer, then add 2 to its output."""

    def __init__(self, layer):
        super().__init__()
        self.layer = layer  # the wrapped inner layer

    def call(self, X):
        inner_out = self.layer(X)
        return inner_out + 2
If I do add_2_to_layer(add_K(K=3))
it works fine.
Now, assume I always want to add 2 on add_K
, implicitly, without calling add_2_to_layer
, then I think I need some kind of “decorator” (though I am not familiar with them). I have tried:
class add_2_to_layer_class(Model):
    """Wrapper layer used by the decorator attempt: inner layer's output plus 2."""

    def __init__(self, layer):
        super().__init__()
        self.layer = layer  # object to be wrapped

    def call(self, X):
        wrapped = self.layer(X)
        return wrapped + 2
def add_2_to_layer_func(layer):
    """Wrap *layer* in add_2_to_layer_class so 2 is added to its output.

    NOTE(review): when used as a class decorator, *layer* receives the
    class object itself, not an instance — presumably the reason the
    decorator attempt below does not behave as intended; verify.
    """
    return add_2_to_layer_class(layer)
@add_2_to_layer_func
class add_K(Model):
    """Layer adding constant K, decorated so 2 is (intended to be) added on top."""

    def __init__(self, K):
        super().__init__()  # initialize the base Model
        self.K = K

    def call(self, X):
        return X + self.K
and
class add_2_to_layer(Model):
    """Wrapper layer: run the inner layer, then add 2 to the result."""

    def __init__(self, layer):
        super().__init__()
        self.layer = layer  # wrapped object (a layer instance is expected)

    def call(self, X):
        y = self.layer(X)
        return y + 2
@add_2_to_layer
class add_K(Model):
    """Layer adding constant K, decorated directly with the wrapper Model class.

    NOTE(review): the decorator here hands the add_K *class* to
    add_2_to_layer.__init__, so the name add_K ends up bound to a
    wrapper instance rather than a class — likely why this attempt fails.
    """

    def __init__(self, K):
        super().__init__()
        self.K = K

    def call(self, X):
        return X + self.K
but none of them work.
I could define a new class:
class add_K_with_2(Model):
    """Composite layer: add_K(K) followed by adding 2, built via add_2_to_layer."""

    def __init__(self, K):
        super().__init__()
        self.K = K
        # Assemble the wrapped pipeline once, at construction time.
        self.layer = add_2_to_layer(add_K(K=self.K))

    def call(self, X):
        return self.layer(X)
but it’s not very elegant nor versatile (if I want to add 2 to another layer type, I have to write another piece of code). Any idea?
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np


class ReducedDense(keras.Model):
    """Layer holding `k` stacked weight matrices applied to a tensor reduction.

    In call(), the input X is first reduced to a square matrix
    transpose(X) @ X, then each of the `k` weight matrices is applied
    to that reduction, yielding a list of `k` outputs.
    """

    def __init__(self, units, k):
        super().__init__()
        self.units = units
        self.no_of_weights = k  # number of stacked weight matrices

    def build(self, input_shape):
        # One (units, features) weight matrix per head, stacked along axis 0.
        # Initialized uniformly in [0, 1); cast to float32 for tf.matmul.
        self.weight = tf.Variable(
            np.random.uniform(
                0, 1, size=(self.no_of_weights, self.units, input_shape[-1])
            ),
            dtype=tf.float32,
        )
        print(self.weight.shape)

    def call(self, X):
        # (features, features) reduction of the batch: transpose(X) @ X.
        out = tf.matmul(tf.transpose(X), X)
        # Apply every weight matrix to the reduced matrix.
        return [
            tf.matmul(self.weight[i], out) for i in range(self.no_of_weights)
        ]


dense = ReducedDense(3, 4)
dense(tf.random.uniform((2, 5)))
A layer that takes an input and computes a tensor reduction.
I think there is a mistake in the code of 6.5.1
Y = net(tf.random.uniform((4, 8)))
tf.reduce_mean(Y)
this code computes reduce_mean over the entire tensor Y, whereas we need to print the mean of each row of Y separately
instead it can be
for row in Y:
    print(tf.reduce_mean(row))