# Tensorflow.js 神经网络中的反向传播

2023-01-06 16:18:38

// Training inputs: 4 samples of 3 features each (shape [4, 3]).
var X = tf.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]);

// Target outputs (shape [3, 3]) — NOTE(review): row count differs from X; confirm intended.
var Y = tf.tensor([[0, 0, 0], [0, 0, 0], [1, 1, 1]]);

// Number of training samples.
var m = X.shape[0];

// Initial hidden state and prediction placeholder, both shape [1, 3].
var a0 = tf.zeros([1, 3]);
var y_hat = tf.zeros([1, 3]);

// RNN weights: Wax/Waa/ba produce the hidden state, Wya/by the output.
var parameters = {
  "Wax": tf.randomUniform([1, 3]),
  "Waa": tf.randomUniform([3, 3]),
  "ba": tf.zeros([1, 3]),
  "Wya": tf.randomUniform([3, 3]),
  "by": tf.zeros([1, 3]),
};

/**
 * One forward step of a simple RNN cell.
 *
 * Computes a_next = tanh(a_prev · Waa + xt · Wax + ba) using the
 * row-vector convention implied by the parameter shapes declared above
 * (Wax: [1,3], Waa: [3,3], ba: [1,3]).
 *
 * @param {tf.Tensor} xt      input at the current time step — assumed shape [k, 1]
 *                            so that xt.matMul(Wax) yields [k, 3]; TODO confirm with caller
 * @param {tf.Tensor} a_prev  previous hidden state, shape [1, 3]
 * @param {Object} parameters weight map with keys "Wax", "Waa", "ba"
 * @returns {tf.Tensor} a_next — the next hidden state
 */
function RNN_cell_Foward(xt, a_prev, parameters){
  var Wax = parameters["Wax"];
  var Waa = parameters["Waa"];
  var ba = parameters["ba"];

  // BUG FIX: the original returned `a_next` without ever defining it,
  // which throws a ReferenceError at call time (Waa and ba were read
  // but never used). Compute the standard RNN hidden-state update.
  var a_next = tf.tanh(a_prev.matMul(Waa).add(xt.matMul(Wax)).add(ba));

  return a_next;
}

## 1 回答

TA贡献1590条经验 获得超3个赞

optimizer.minimize 期望接收一个函数作为参数，而不是一个张量。由于这段代码试图最小化 meanSquaredError，传给 minimize 的参数可以是一个计算预测值和预期值之间 meanSquaredError 的函数。

// Mean-squared-error loss: average of the squared element-wise differences.
const loss = (pred, label) => pred.sub(label).square().mean();

// Run 10 optimization steps. minimize() must receive a function that
// returns the scalar loss — not a precomputed loss tensor.
for (let step = 0; step < 10; step++) {
  optimizer.minimize(() => tf.losses.meanSquaredError(Y, model));
}

若出现错误 "variableGrads() expects at least one of the input variables to be trainable"（variableGrads() 要求至少有一个输入变量是可训练的），可以这样处理：

// Wrap the labels in a tf.variable so that variableGrads() sees at
// least one trainable variable to differentiate against.
var Y = tf.tensor([[0, 0, 0], [0, 0, 0], [1, 1, 1]]).variable(); // a variable instead

// var loss = tf.losses.meanSquaredError(Y, model)
// computed below in the minimize function

const learningRate = 0.01;
var optimizer = tf.train.sgd(learningRate);

// Forward pass through the RNN once, outside the training loop.
var model = RNN_FowardProp(X, a0, parameters);

// Element-wise squared error, averaged over all entries.
const loss = (pred, label) => pred.sub(label).square().mean();

// minimize() takes a closure that computes and returns the scalar loss.
for (let step = 0; step < 10; step++) {
  optimizer.minimize(() => tf.losses.meanSquaredError(Y, model));
}

• 1 回答
• 0 关注
• 15 浏览

0/150