import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, LeakyReLU, concatenate
def tiny_yolo_v2(input_shape, num_classes):
    inputs = Input(shape=input_shape)
    # Tiny YOLOv2 architecture
    x = Conv2D(16, (3, 3), padding='same')(inputs)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(64, (3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(128, (3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(256, (3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    # Final output layer
    x = Conv2D(num_classes, (1, 1), padding='same')(x)
    model = Model(inputs, x)
    return model
def load_darknet_weights(model, weights_file):
    with open(weights_file, "rb") as f:
        header = np.fromfile(f, dtype=np.int32, count=5)  # Read header
        weights = np.fromfile(f, dtype=np.float32)  # Read weights
    layer_index = 0
    for layer in model.layers:
        if isinstance(layer, Conv2D):
            filters = layer.filters
            kernel_size = layer.kernel_size[0]
            input_dim = layer.input_shape[-1]
            
            # Load weights
            if layer.use_bias:
                start = layer_index * (filters * (kernel_size ** 2) * input_dim + filters)
            else:
                start = layer_index * (filters * (kernel_size ** 2) * input_dim)
            end = start + (filters * (kernel_size ** 2) * input_dim)
            kernel = weights[start:end].reshape((kernel_size, kernel_size, input_dim, filters))
            layer.set_weights([kernel])
            
            
            if layer.use_bias:
                start = end
                end = start + filters
                bias = weights[start:end]
                layer.set_weights([kernel, bias])
            layer_index += 1
# Specify parameters
input_shape = (416, 416, 3)
num_classes = 20  # Adjust based on your dataset
# Create the Tiny YOLOv2 model
model = tiny_yolo_v2(input_shape, num_classes)
# Load weights from Darknet
weights_file = 'yolov2-tiny.weights'  # Path to your weights file
load_darknet_weights(model, weights_file)
# Save the model in H5 format
model.save('tiny_yolo_v2.h5')
print("Model saved to 'tiny_yolo_v2.h5'")
I am getting this error when I run the script:
> %Run 'file reading.py'
2024-10-10 14:42:55.306715: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-10-10 14:43:07.741864: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
C:\Users\manojroy\AppData\Roaming\Python\Python310\site-packages\keras\src\layers\activations\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.
 warnings.warn(
Traceback (most recent call last):
 File "C:\Users\manojroy\Desktop\CircuitDigest\AssistiveTechForBlind\Yolo2Tiny\file reading.py", line 76, in <module>
 load_darknet_weights(model, weights_file)
 File "C:\Users\manojroy\Desktop\CircuitDigest\AssistiveTechForBlind\Yolo2Tiny\file reading.py", line 46, in load_darknet_weights
 input_dim = layer.input_shape[-1]
AttributeError: 'Conv2D' object has no attribute 'input_shape'
The code is meant to convert YOLOv2-tiny Darknet weights to a TensorFlow *.h5 model. If someone has a better solution, please let me know.
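
In case it helps to show what I was trying to express with `input_dim = layer.input_shape[-1]`, here is a minimal sketch of how the input channel count might be read instead, assuming the Conv2D kernels are already built because the model starts from an Input layer. The helper name `conv_input_channels` is just mine, and I have not verified that this is the right replacement in Keras 3:

from tensorflow.keras.layers import Conv2D

def conv_input_channels(layer):
    # Once the layer is built, the kernel shape is
    # (kernel_h, kernel_w, in_channels, filters), so index 2 is the input depth.
    # layer.input.shape[-1] might also work, but I am not sure about Keras 3.
    return layer.get_weights()[0].shape[2]

# Reusing tiny_yolo_v2 from the script above.
model = tiny_yolo_v2((416, 416, 3), 20)
for layer in model.layers:
    if isinstance(layer, Conv2D):
        print(layer.name, "input channels:", conv_input_channels(layer))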