
"TypeError: unhashable type: 'Dimension'"

饮歌长啸 2023-07-27 10:07:51
I have the following ResNet 3D architecture obtained from GitHub. It is a Keras implementation of R3D, intended for training video classification models. When I train the network on videos, I get the following error:

TypeError: unhashable type: 'Dimension'
  File "<ipython-input-29-788d091a6763>", line 1961, in main
    trained_model_name)
  File "<ipython-input-29-788d091a6763>", line 1805, in train
    model = train_load_model(model_type, training_condition, sample_input.shape, nb_classes)
  File "<ipython-input-29-788d091a6763>", line 1684, in train_load_model
    model = Resnet3DBuilder.build_resnet_50((96, 96, 96, 1), 20)
  File "<ipython-input-29-788d091a6763>", line 1543, in build_resnet_50
    [3, 4, 6, 3], reg_factor=reg_factor)
  File "<ipython-input-29-788d091a6763>", line 1501, in build
    )(block)
  File "<ipython-input-29-788d091a6763>", line 1372, in f
    )(input)
  File "<ipython-input-29-788d091a6763>", line 1419, in f
    )(input)
  File "<ipython-input-29-788d091a6763>", line 1334, in f
    activation = _bn_relu(input)
  File "<ipython-input-29-788d091a6763>", line 1300, in _bn_relu
    norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py", line 440, in __call__
    self.assert_input_compatibility(inputs)
  File "/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py", line 345, in assert_input_compatibility
    x_shape[int(axis)] not in {value, None}):

I am using the following Tensorflow, Keras, and Python versions:

Tensorflow: 1.15.0
Keras: 2.2.4
Python: 3.6

Can you tell me how to fix this error? I have seen that converting a Dimension to int solved this problem elsewhere, but I don't know what should be converted here.

1 Answer

慕运维8079593


To fix this problem, every access to a shape element needs to be converted to int.


Example: residual.shape[CHANNEL_AXIS] needs to be rewritten as int(residual.shape[CHANNEL_AXIS]).
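The underlying reason is that in Tensorflow 1.x the entries of a Keras tensor's shape are tf.Dimension objects rather than plain Python ints, and the check in assert_input_compatibility (the last frame of the traceback above) puts such an entry into a set, which requires a hashable value. Below is a minimal sketch of the failure and of the fix, assuming Tensorflow 1.15 as in the question; the value 64 is only an illustrative channel count:

import tensorflow as tf

dim = tf.Dimension(64)                 # what a shape entry looks like in TF 1.x
try:
    _ = dim not in {64, None}          # the kind of set-membership check Keras 2.2.4 performs
except TypeError as err:
    print(err)                         # unhashable type: 'Dimension' -- the error from the question
print(int(dim) not in {64, None})      # False: after int() the same check works normally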


The updated code is below:


## Resnet 3D architecture

# Taken from https://github.com/JihongJu/keras-resnet3d/blob/master/resnet3d/resnet3d.py

# Imports needed to run this module stand-alone (Keras 2.2.4 / TF 1.15 API, as in the question)
import math
import six

from keras import backend as K
from keras.layers import (Input, Activation, Dense, Flatten, Conv3D,
                          MaxPooling3D, AveragePooling3D, BatchNormalization,
                          add)
from keras.models import Model
from keras.regularizers import l2


def _bn_relu(input):

    """Helper to build a BN -> relu block (by @raghakot)."""

    norm = BatchNormalization(axis=CHANNEL_AXIS)(input)

    return Activation("relu")(norm)


def _conv_bn_relu3D(**conv_params):

    filters = conv_params["filters"]

    kernel_size = conv_params["kernel_size"]

    strides = conv_params.setdefault("strides", (1, 1, 1))

    kernel_initializer = conv_params.setdefault(

        "kernel_initializer", "he_normal")

    padding = conv_params.setdefault("padding", "same")

    kernel_regularizer = conv_params.setdefault("kernel_regularizer",

                                                l2(1e-4))


    def f(input):

        conv = Conv3D(filters=filters, kernel_size=kernel_size,

                      strides=strides, kernel_initializer=kernel_initializer,

                      padding=padding,

                      kernel_regularizer=kernel_regularizer)(input)

        return _bn_relu(conv)


    return f


def _bn_relu_conv3d(**conv_params):

    """Helper to build a  BN -> relu -> conv3d block."""

    filters = conv_params["filters"]

    kernel_size = conv_params["kernel_size"]

    strides = conv_params.setdefault("strides", (1, 1, 1))

    kernel_initializer = conv_params.setdefault("kernel_initializer",

                                                "he_normal")

    padding = conv_params.setdefault("padding", "same")

    kernel_regularizer = conv_params.setdefault("kernel_regularizer",

                                                l2(1e-4))


    def f(input):

        activation = _bn_relu(input)

        return Conv3D(filters=filters, kernel_size=kernel_size,

                      strides=strides, kernel_initializer=kernel_initializer,

                      padding=padding,

                      kernel_regularizer=kernel_regularizer)(activation)

    return f



def _shortcut3d(input, residual):

    """3D shortcut to match input and residual and merges them with "sum"."""

    stride_dim1 = math.ceil(int(input.shape[DIM1_AXIS]) \

        / int(residual.shape[DIM1_AXIS]))

    stride_dim2 = math.ceil(int(input.shape[DIM2_AXIS]) \

        / int(residual.shape[DIM2_AXIS]))

    stride_dim3 = math.ceil(int(input.shape[DIM3_AXIS]) \

        / int(residual.shape[DIM3_AXIS]))

    equal_channels = int(residual.shape[CHANNEL_AXIS]) \

        == int(input.shape[CHANNEL_AXIS])


    shortcut = input

    if stride_dim1 > 1 or stride_dim2 > 1 or stride_dim3 > 1 \

            or not equal_channels:

        shortcut = Conv3D(

            filters=int(residual.shape[CHANNEL_AXIS]),

            kernel_size=(1, 1, 1),

            strides=(stride_dim1, stride_dim2, stride_dim3),

            kernel_initializer="he_normal", padding="valid",

            kernel_regularizer=l2(1e-4)

            )(input)

    return add([shortcut, residual])



def _residual_block3d(block_function, filters, kernel_regularizer, repetitions,

                      is_first_layer=False):

    def f(input):

        for i in range(repetitions):

            strides = (1, 1, 1)

            if i == 0 and not is_first_layer:

                strides = (2, 2, 2)

            input = block_function(filters=filters, strides=strides,

                                   kernel_regularizer=kernel_regularizer,

                                   is_first_block_of_first_layer=(

                                       is_first_layer and i == 0)

                                   )(input)

        return input


    return f



def basic_block(filters, strides=(1, 1, 1), kernel_regularizer=l2(1e-4),

                is_first_block_of_first_layer=False):

    """Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""

    def f(input):

        if is_first_block_of_first_layer:

            # don't repeat bn->relu since we just did bn->relu->maxpool

            conv1 = Conv3D(filters=filters, kernel_size=(3, 3, 3),

                           strides=strides, padding="same",

                           kernel_initializer="he_normal",

                           kernel_regularizer=kernel_regularizer

                           )(input)

        else:

            conv1 = _bn_relu_conv3d(filters=filters,

                                    kernel_size=(3, 3, 3),

                                    strides=strides,

                                    kernel_regularizer=kernel_regularizer

                                    )(input)


        residual = _bn_relu_conv3d(filters=filters, kernel_size=(3, 3, 3),

                                   kernel_regularizer=kernel_regularizer

                                   )(conv1)

        return _shortcut3d(input, residual)


    return f



def bottleneck(filters, strides=(1, 1, 1), kernel_regularizer=l2(1e-4),

               is_first_block_of_first_layer=False):

    """Basic 3 X 3 X 3 convolution blocks. Extended from raghakot's 2D impl."""

    def f(input):

        if is_first_block_of_first_layer:

            # don't repeat bn->relu since we just did bn->relu->maxpool

            conv_1_1 = Conv3D(filters=filters, kernel_size=(1, 1, 1),

                              strides=strides, padding="same",

                              kernel_initializer="he_normal",

                              kernel_regularizer=kernel_regularizer

                              )(input)

        else:

            conv_1_1 = _bn_relu_conv3d(filters=filters, kernel_size=(1, 1, 1),

                                       strides=strides,

                                       kernel_regularizer=kernel_regularizer

                                       )(input)


        conv_3_3 = _bn_relu_conv3d(filters=filters, kernel_size=(3, 3, 3),

                                   kernel_regularizer=kernel_regularizer

                                   )(conv_1_1)

        residual = _bn_relu_conv3d(filters=filters * 4, kernel_size=(1, 1, 1),

                                   kernel_regularizer=kernel_regularizer

                                   )(conv_3_3)


        return _shortcut3d(input, residual)


    return f



def _handle_data_format():

    global DIM1_AXIS

    global DIM2_AXIS

    global DIM3_AXIS

    global CHANNEL_AXIS

    if K.image_data_format() == 'channels_last':

        print("here CHANNELS last")

        DIM1_AXIS = 1

        DIM2_AXIS = 2

        DIM3_AXIS = 3

        CHANNEL_AXIS = 4

    else:

        CHANNEL_AXIS = 1

        DIM1_AXIS = 2

        DIM2_AXIS = 3

        DIM3_AXIS = 4



def _get_block(identifier):

    if isinstance(identifier, six.string_types):

        res = globals().get(identifier)

        if not res:

            raise ValueError('Invalid {}'.format(identifier))

        return res

    return identifier



class Resnet3DBuilder(object):

    """ResNet3D."""


    @staticmethod

    def build(input_shape, num_outputs, block_fn, repetitions, reg_factor):

        """Instantiate a vanilla ResNet3D keras model.

        # Arguments

            input_shape: Tuple of input shape in the format

            (conv_dim1, conv_dim2, conv_dim3, channels) if dim_ordering='tf'

            (channels, conv_dim1, conv_dim2, conv_dim3) if dim_ordering='th'

            num_outputs: The number of outputs at the final softmax layer

            block_fn: Unit block to use {'basic_block', 'bottleneck'}

            repetitions: Repetitions of unit blocks

        # Returns

            model: a 3D ResNet model that takes a 5D tensor (volumetric images

            in batch) as input and returns a 1D vector (prediction) as output.

        """

        _handle_data_format()

        if len(input_shape) != 4:

            raise ValueError("Input shape should be a tuple "

                             "(conv_dim1, conv_dim2, conv_dim3, channels) "

                             "for tensorflow as backend or "

                             "(channels, conv_dim1, conv_dim2, conv_dim3) "

                             "for theano as backend")


        block_fn = _get_block(block_fn)

        input = Input(shape=input_shape)

        # first conv

        conv1 = _conv_bn_relu3D(filters=64, kernel_size=(7, 7, 7),

                                strides=(2, 2, 2),

                                kernel_regularizer=l2(reg_factor)

                                )(input)

        pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2),

                             padding="same")(conv1)


        # repeat blocks

        block = pool1

        filters = 64

        for i, r in enumerate(repetitions):

            block = _residual_block3d(block_fn, filters=filters,

                                      kernel_regularizer=l2(reg_factor),

                                      repetitions=r, is_first_layer=(i == 0)

                                      )(block)

            filters *= 2


        # last activation

        block_output = _bn_relu(block)


        # average pool and classification

        pool2 = AveragePooling3D(pool_size=(int(block.shape[DIM1_AXIS]),

                                            int(block.shape[DIM2_AXIS]),

                                            int(block.shape[DIM3_AXIS])),

                                 strides=(1, 1, 1))(block_output)

        flatten1 = Flatten()(pool2)

        if num_outputs > 1:

            dense = Dense(units=num_outputs,

                          kernel_initializer="he_normal",

                          activation="softmax",

                          kernel_regularizer=l2(reg_factor))(flatten1)

        else:

            dense = Dense(units=num_outputs,

                          kernel_initializer="he_normal",

                          activation="sigmoid",

                          kernel_regularizer=l2(reg_factor))(flatten1)


        model = Model(inputs=input, outputs=dense)

        return model


    @staticmethod

    def build_resnet_18(input_shape, num_outputs, reg_factor=1e-4):

        """Build resnet 18."""

        return Resnet3DBuilder.build(input_shape, num_outputs, basic_block,

                                     [2, 2, 2, 2], reg_factor=reg_factor)


    @staticmethod

    def build_resnet_34(input_shape, num_outputs, reg_factor=1e-4):

        """Build resnet 34."""

        return Resnet3DBuilder.build(input_shape, num_outputs, basic_block,

                                     [3, 4, 6, 3], reg_factor=reg_factor)


    @staticmethod

    def build_resnet_50(input_shape, num_outputs, reg_factor=1e-4):

        """Build resnet 50."""

        return Resnet3DBuilder.build(input_shape, num_outputs, bottleneck,

                                     [3, 4, 6, 3], reg_factor=reg_factor)


    @staticmethod

    def build_resnet_101(input_shape, num_outputs, reg_factor=1e-4):

        """Build resnet 101."""

        return Resnet3DBuilder.build(input_shape, num_outputs, bottleneck,

                                     [3, 4, 23, 3], reg_factor=reg_factor)


    @staticmethod

    def build_resnet_152(input_shape, num_outputs, reg_factor=1e-4):

        """Build resnet 152."""

        return Resnet3DBuilder.build(input_shape, num_outputs, bottleneck,

                                     [3, 8, 36, 3], reg_factor=reg_factor)
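For reference, here is a minimal call sketch that matches the build_resnet_50((96, 96, 96, 1), 20) call from the question's traceback; the compile settings are illustrative assumptions, not part of the original training script:

model = Resnet3DBuilder.build_resnet_50((96, 96, 96, 1), 20)   # same call as in the traceback
model.compile(optimizer="sgd",                                 # illustrative optimizer choice
              loss="categorical_crossentropy",                 # assumes one-hot labels for the 20 classes
              metrics=["accuracy"])
model.summary()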

The latest versions of Keras and Tensorflow do not have this problem, but I needed to keep the older versions of both libraries because my other scripts do not run on the latest Tensorflow/Keras.


