I am working on a multi-output model, and I need to weight the individual output losses before computing the overall loss. To achieve this, I use a custom model.fit() training loop (https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit).
Since I need to compute the per-sample losses of all four outputs and combine them after applying the weights, I customized the standard code. The loss is now computed per sample, but when I compute the gradients, every gradient value comes back as None. I also tried tape.watch(loss), but it did not help. Please help me resolve this.
class CustomModel(keras.Model):
    def train_step(self, data):
        print(tf.executing_eagerly())
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data
        alpha = 0.1
        loss = 0
        y_pred_all = []
        with tf.GradientTape() as tape:
            bce = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
            for spl in range(1 if np.shape(x)[0] == None else np.shape(x)[0]):
                tape.watch(loss)
                tape.watch(loss_mean)
                tape.watch(loss_element)
                x_spl = np.reshape(x[spl], (1, np.shape(x)[1], np.shape(x)[2], np.shape(x)[3]))
                y_pred = self(x_spl, training=True)  # Forward pass
                y_pred_all.append(y_pred)
                loss_element = bce(y[spl], y_pred)
                loss_mean = [np.mean(loss_element[0]), np.mean(loss_element[1]),
                             np.mean(loss_element[2]), np.mean(loss_element[3])]
                id = np.argmin(loss_mean)
                for i, ele in enumerate(loss_mean):
                    if i == id:
                        loss_mean[i] *= 1
                    else:
                        loss_mean[i] *= alpha
                loss = loss + np.sum(loss_mean)
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, y_pred_all)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}
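As a minimal repro of the failure mode (my own toy illustration, not part of the model above): NumPy calls such as np.reshape, np.mean, and np.sum return plain arrays, so every value they touch falls off the tape, and tape.watch cannot reconnect a value the tape never recorded.

import numpy as np
import tensorflow as tf

v = tf.Variable([1.0, 2.0, 3.0])
with tf.GradientTape(persistent=True) as tape:
    kept = tf.reduce_mean(v)                # TF op: recorded on the tape
    lost = tf.constant(np.mean(v.numpy()))  # NumPy op: the tape never sees it

print(tape.gradient(kept, v))  # tf.Tensor([0.333... 0.333... 0.333...])
print(tape.gradient(lost, v))  # None -- no recorded path from v to lost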
UPDATE: Following the suggestions from @rvinas, I made some changes. It now computes the gradients without any errors, but I am not sure whether the changes I made are correct:
class CustomModel(keras.Model):
    def train_step(self, data):
        # print(tf.executing_eagerly())
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data
        alpha = 0.1
        loss = tf.Variable(0, dtype='float32')
        y_pred_all = []
        with tf.GradientTape() as tape:
            bce = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
            for spl in tf.range(1 if tf.shape(x)[0] == None else tf.shape(x)[0]):
                loss_mean = tf.convert_to_tensor([])
                x_spl = tf.reshape(x[spl], (1, tf.shape(x)[1], tf.shape(x)[2], tf.shape(x)[3]))
                y_pred = self(x_spl, training=True)  # Forward pass
                y_pred_all.append(y_pred)
                loss_element = bce(y[spl], y_pred)
                loss_mean = [tf.reduce_mean(loss_element[0]), tf.reduce_mean(loss_element[1]),
                             tf.reduce_mean(loss_element[2]), tf.reduce_mean(loss_element[3])]
                id = tf.argmin(loss_mean)
                for i, ele in enumerate(loss_mean):
                    if i == id:
                        loss_mean[i] = tf.multiply(loss_mean[i], 1)
                    else:
                        loss_mean[i] = tf.multiply(loss_mean[i], alpha)
                loss = tf.add(loss, tf.add(tf.add(tf.add(loss_mean[0], loss_mean[1]), loss_mean[2]), loss_mean[3]))
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, y_pred_all)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}
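For reference, the same weighting scheme can also be written with vectorized TF ops, which removes the Python loop over samples and the chained tf.add calls. This is only a sketch under my assumptions: the model returns a list of four outputs, y is indexable per output, and each output's unreduced BCE loss can be averaged down to one value per sample (the reshape/axis handling may need adjusting to your output shapes):

class CustomModel(keras.Model):
    def train_step(self, data):
        x, y = data  # assumed: y holds the labels of all four outputs
        alpha = 0.1
        bce = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # assumed: list of four output tensors
            # Per-sample loss of each output, stacked to shape (batch, 4).
            losses = tf.stack(
                [tf.reduce_mean(tf.reshape(bce(y[i], y_pred[i]),
                                           (tf.shape(x)[0], -1)), axis=1)
                 for i in range(4)],
                axis=1)
            # Keep the smallest per-sample loss at full weight, scale the rest by alpha.
            min_idx = tf.argmin(losses, axis=1)
            weights = tf.one_hot(min_idx, depth=4, on_value=1.0, off_value=alpha)
            loss = tf.reduce_sum(losses * weights)  # summed over samples, as in the loop
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        self.compiled_metrics.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}

tf.argmin is not differentiable, but that matches the original logic: the one-hot weights act as per-sample constants, and the gradient flows through losses alone.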