Converting txt to tfrecords - PullRequest
0 votes
/ 11 July 2020

I am trying to convert my annotations (YOLO format, in .txt files) to tfrecords. In my dataset folder (top-view people detection) I have about 4000 .jpg files plus a .txt file for each photo. Every .txt file contains one annotation per person in that photo.
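For reference, each line in my YOLO .txt files has the form class x_center y_center width height, with all coordinates normalized to the image size. A minimal sketch of the conversion to the corner coordinates a tfrecord needs (the sample line is the first line of one of my files, quoted again at the end of this post):

def yolo_to_corners(line):
    # YOLO stores a normalized box center plus width/height;
    # object-detection tfrecords want normalized corners instead.
    class_id, xc, yc, w, h = line.split()
    xc, yc, w, h = float(xc), float(yc), float(w), float(h)
    return int(class_id), xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

# prints: class, xmin, ymin, xmax, ymax
print(yolo_to_corners("0 0.689844 0.172222 0.070312 0.163889"))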

This is the code I started from:

#!/usr/bin/python3
import tensorflow as tf
import numpy
import cv2
import os
import hashlib

import config
import dataset_util

def parse_test_example(f, images_path):
    height = None # Image height
    width = None # Image width
    filename = None # Filename of the image. Empty if image is not from file
    encoded_image_data = None # Encoded image bytes
    image_format = b'jpeg' # b'jpeg' or b'png'

    filename = f.readline().rstrip()
    if not filename:
        raise IOError()
    filepath = os.path.join(images_path, filename)

    image_raw = cv2.imread(filepath)

    encoded_image_data = open(filepath, "rb").read()
    key = hashlib.sha256(encoded_image_data).hexdigest()

    height, width, channel = image_raw.shape

    is_good_ratio = 1.2 < width/height < 1.25
    if not is_good_ratio:
        return None
    
    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(int(height)),
        'image/width': dataset_util.int64_feature(int(width)),
        'image/filename': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/source_id': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_image_data),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        }))


    return tf_example


def parse_example(f, images_path):
    height = None # Image height
    width = None # Image width
    filename = None # Filename of the image. Empty if image is not from file
    encoded_image_data = None # Encoded image bytes
    image_format = b'jpeg' # b'jpeg' or b'png'

    xmins = [] # List of normalized left x coordinates in bounding box (1 per box)
    xmaxs = [] # List of normalized right x coordinates in bounding box (1 per box)
    ymins = [] # List of normalized top y coordinates in bounding box (1 per box)
    ymaxs = [] # List of normalized bottom y coordinates in bounding box (1 per box)
    classes_text = [] # List of string class name of bounding box (1 per box)
    classes = [] # List of integer class id of bounding box (1 per box)
    poses = []
    truncated = []
    difficult_obj = []

    filename = f.readline().rstrip()
    if not filename:
        print("FN:"+filename)
        raise IOError()

    print(filename)
    
    filepath = os.path.join(images_path, filename)

    image_raw = cv2.imread(filepath)

    encoded_image_data = open(filepath, "rb").read()
    key = hashlib.sha256(encoded_image_data).hexdigest()

    height, width, channel = image_raw.shape

    face_num = f.readline().rstrip()
    print(face_num)
    face_num = int(face_num)
    if not face_num:
        # a zero-face entry still carries one placeholder box line; skip it
        x = f.readline().rstrip()
        #raise Exception()
    
    is_there_a_face_large_enough = False
    min_face_width_px = 15
    min_face_width = min_face_width_px/640

    for i in range(face_num):
        annot = f.readline().rstrip().split()
        if not annot:
            raise Exception()

        # WIDER FACE DATASET CONTAINS SOME ANNOTATIONS THAT EXCEED THE IMAGE BOUNDARY
        if(float(annot[2]) > 25.0):
            if(float(annot[3]) > 30.0):
                w_face = float(annot[2])/width
                if w_face >= min_face_width and int(annot[8]) < 2: 
                    is_there_a_face_large_enough=True
                    xmins.append( max(0.025, (float(annot[0]) / width) ) )
                    ymins.append( max(0.025, (float(annot[1]) / height) ) )
                    xmaxs.append( min(0.975, ((float(annot[0]) + float(annot[2])) / width) ) )
                    ymaxs.append( min(0.975, ((float(annot[1]) + float(annot[3])) / height) ) )
                    classes_text.append(b'face')
                    classes.append(2)
                    poses.append("front".encode('utf8'))
                    truncated.append(int(0))


    is_good_ratio = 1.2 < width/height < 1.85
    if not is_good_ratio or not is_there_a_face_large_enough:
        return None

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(int(height)),
        'image/width': dataset_util.int64_feature(int(width)),
        'image/filename': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/source_id': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_image_data),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature([0] * len(classes)),  # list feature needs one entry per box
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
        }))


    return tf_example

def parse_mafa_example(f, images_path):
    height = None # Image height
    width = None # Image width
    filename = None # Filename of the image. Empty if image is not from file
    encoded_image_data = None # Encoded image bytes

    xmins = [] # List of normalized left x coordinates in bounding box (1 per box)
    xmaxs = [] # List of normalized right x coordinates in bounding box (1 per box)
    ymins = [] # List of normalized top y coordinates in bounding box (1 per box)
    ymaxs = [] # List of normalized bottom y coordinates in bounding box (1 per box)
    classes_text = [] # List of string class name of bounding box (1 per box)
    classes = [] # List of integer class id of bounding box (1 per box)
    poses = []
    truncated = []

    filename = f.readline().rstrip()
    if not filename:
        print("FN:"+filename)
        raise IOError()

    print(filename)
    
    filepath = os.path.join(images_path, filename)

    image_raw = cv2.imread(filepath)

    encoded_image_data = open(filepath, "rb").read()
    key = hashlib.sha256(encoded_image_data).hexdigest()

    height, width, channel = image_raw.shape

    face_num = f.readline().rstrip()
    print(face_num)
    face_num=int(face_num)
    if not face_num:
        x = f.readline().rstrip()
        #raise Exception()
    
    is_there_a_face_large_enough = False
    min_face_width_px = 15
    min_face_width = min_face_width_px/640

    for i in range(face_num):
        annot = f.readline().rstrip().split()
        if not annot:
            raise Exception()

        # WIDER FACE DATASET CONTAINS SOME ANNOTATIONS THAT EXCEED THE IMAGE BOUNDARY
        if(float(annot[2]) > 25.0):
            if(float(annot[3]) > 30.0):
                w_face = float(annot[2])/width
                if w_face >= min_face_width: 
                    is_there_a_face_large_enough=True
                    xmins.append( max(0.025, (float(annot[0]) / width) ) )
                    ymins.append( max(0.025, (float(annot[1]) / height) ) )
                    xmaxs.append( min(0.975, ((float(annot[0]) + float(annot[2])) / width) ) )
                    ymaxs.append( min(0.975, ((float(annot[1]) + float(annot[3])) / height) ) )
                    if(int(annot[8]) < 0):
                        classes_text.append(b'face')
                        classes.append(2)
                    else:
                        classes_text.append(b'masked')
                        classes.append(1)
                    poses.append("front".encode('utf8'))
                    truncated.append(int(0))


    is_good_ratio = 1.2 < width/height < 1.85
    if not is_good_ratio or not is_there_a_face_large_enough:
        return None

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(int(height)),
        'image/width': dataset_util.int64_feature(int(width)),
        'image/filename': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/source_id': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_image_data),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature([0] * len(classes)),  # list feature needs one entry per box
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
        }))


    return tf_example


def run(images_path, description_file, mafa_images_path, mafa_description_file, output_path, no_bbox=False):
    writer = tf.python_io.TFRecordWriter(output_path)
    
    i = 0

    f1 = open(mafa_description_file)

    print("Processing {}".format(mafa_images_path))
    while True:
        try:
            tf_example = parse_mafa_example(f1, mafa_images_path)

            if tf_example is not None:
                writer.write(tf_example.SerializeToString())
                i += 1

        except IOError:
            print('io')
            break
        except Exception:
            print('e')
            raise

    f1.close()

    f2 = open(description_file)        
    print("Processing {}".format(images_path))
    while True:
        try:
            if no_bbox:
                tf_example = parse_test_example(f2, images_path)
            else:
                tf_example = parse_example(f2, images_path)

            if tf_example is not None:
                writer.write(tf_example.SerializeToString())
                i += 1

        except IOError:
            print('io')
            break
        except Exception:
            print('e')
            raise

    f2.close()

    writer.close()

    print("Correctly created record for {} images\n".format(i))


def main(unused_argv):
    # Training
    if config.TRAIN_WIDER_PATH is not None and config.TRAIN_MAFA_PATH is not None:
        images_path = os.path.join(config.TRAIN_WIDER_PATH, "images")
        description_file = os.path.join(config.GROUND_TRUTH_PATH, "wider_face_train_bbx_gt.txt")
        mafa_images_path = os.path.join(config.TRAIN_MAFA_PATH, "images")
        mafa_description_file = os.path.join(config.GROUND_TRUTH_MAFA_PATH, "mafa_train_bbx_gt.txt")
        output_path = os.path.join(config.OUTPUT_PATH, "train.landscape.15pxat640_wider_mafa.tfrecord")
        run(images_path, description_file, mafa_images_path, mafa_description_file, output_path)
    
    # Validation
    if config.VAL_WIDER_PATH is not None:
        images_path = os.path.join(config.VAL_WIDER_PATH, "images")
        description_file = os.path.join(config.GROUND_TRUTH_PATH, "wider_face_val_bbx_gt.txt")
        mafa_images_path = os.path.join(config.VAL_MAFA_PATH, "images")
        mafa_description_file = os.path.join(config.GROUND_TRUTH_MAFA_PATH, "mafa_test_bbx_gt.txt")
        output_path = os.path.join(config.OUTPUT_PATH, "val.landscape.15pxat640_wider_mafa.tfrecord")
        run(images_path, description_file, mafa_images_path, mafa_description_file, output_path)
    return
    # Testing. This set does not contain bounding boxes, so the tfrecord will contain images only.
    # Note: the early return above makes this branch unreachable, and the call below
    # no longer matches run()'s five-positional-argument signature.
    if config.TEST_WIDER_PATH is not None:
        images_path = os.path.join(config.TEST_WIDER_PATH, "images")
        description_file = os.path.join(config.GROUND_TRUTH_PATH, "wider_face_test_filelist.txt")
        output_path = os.path.join(config.OUTPUT_PATH, "test.landscape.35pxat640.tfrecord")
        run(images_path, description_file, output_path, no_bbox=True)


if __name__ == '__main__':
    tf.app.run()
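For context, this script does not read per-image annotation files at all: it expects one combined WIDER-FACE-style description file, where (as far as I can tell) each record is a filename line, a face-count line, and then one line per face giving x y w h in pixels followed by attribute flags (the code reads the ninth field, annot[8], as the occlusion flag). An illustrative record:

0--Parade/0_Parade_marchingband_1_849.jpg
1
449 330 122 149 0 0 0 0 0 0

My YOLO files look nothing like this, which is why the script cannot consume them directly.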

How should I change this code? I assume I need to read all the annotations from every .txt file in my folder, right? How can I do that? Please help, thanks.
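(To make the question concrete, I assume the outer loop should look roughly like this, where dataset_path is a hypothetical path to my folder of paired .jpg/.txt files; a fuller sketch is at the end of this post:)

import glob

for txt_path in sorted(glob.glob(os.path.join(dataset_path, "*.txt"))):
    # one annotation file per image; derive the image name from the txt name
    image_path = os.path.splitext(txt_path)[0] + ".jpg"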

Here is what I have done so far. I don't know whether it is correct or not:

#!/usr/bin/python3
#import tensorflow as tf
import tensorflow.compat.v1 as tf

import numpy
import cv2
import os
import hashlib

import config
from object_detection.utils import dataset_util
#import dataset_util

def parse_test_example(f, images_path):
    height = 1280 # Image height
    width = 960 # Image width
    filename = 'cam_1_00000000' # Filename of the image. Empty if image is not from file
    encoded_image_data = None # Encoded image bytes
    image_format = b'jpeg' # b'jpeg' or b'png'

    filename = f.readline().rstrip()
    if not filename:
        raise IOError()
    filepath = os.path.join(images_path, filename)

    image_raw = cv2.imread(filepath)

    encoded_image_data = open(filepath, "rb").read()
    key = hashlib.sha256(encoded_image_data).hexdigest()

    height, width, channel = image_raw.shape

    is_good_ratio = 1.2 < width/height < 1.25
    if not is_good_ratio:
        return None
    
    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(int(height)),
        'image/width': dataset_util.int64_feature(int(width)),
        'image/filename': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/source_id': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_image_data),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        }))


    return tf_example


def parse_example(f, images_path):
    height = 1280 # Image height
    width = 960 # Image width
    filename = 'cam_1_00000000' # Filename of the image. Empty if image is not from file
    encoded_image_data = None # Encoded image bytes
    image_format = b'jpeg' # b'jpeg' or b'png'

    xmins = [] # List of normalized left x coordinates in bounding box (1 per box)
    xmaxs = [] # List of normalized right x coordinates in bounding box (1 per box)
    ymins = [] # List of normalized top y coordinates in bounding box (1 per box)
    ymaxs = [] # List of normalized bottom y coordinates in bounding box (1 per box)
    classes_text = [] # List of string class names (1 per box); my only class would be b'Person'
    classes = [] # List of integer class ids (1 per box); 'Person' would be id 1
    poses = []
    truncated = []
    difficult_obj = []

    filename = f.readline().rstrip()
    if not filename:
        print("FN:"+filename)
        raise IOError()

    print(filename)
    
    filepath = os.path.join(images_path, filename)

    image_raw = cv2.imread(filepath)

    encoded_image_data = open(filepath, "rb").read()
    key = hashlib.sha256(encoded_image_data).hexdigest()

    height, width, channel = image_raw.shape

    face_num = f.readline().rstrip()
    print(face_num)
    face_num=int(face_num)
    if not face_num:
        x = f.readline().rstrip()
        #raise Exception()
    
    is_there_a_face_large_enough = False
    min_face_width_px = 15
    min_face_width = min_face_width_px/640

    for i in range(face_num):
        annot = f.readline().rstrip().split()
        if not annot:
            raise Exception()

        # WIDER FACE DATASET CONTAINS SOME ANNOTATIONS THAT EXCEED THE IMAGE BOUNDARY
        # (note: this still parses WIDER-style "x y w h flags" lines, not my YOLO lines)
        if(float(annot[2]) > 25.0):
            if(float(annot[3]) > 30.0):
                w_face = float(annot[2])/width
                if w_face >= min_face_width and int(annot[8]) < 2: 
                    is_there_a_face_large_enough=True
                    xmins.append( max(0.025, (float(annot[0]) / width) ) )
                    ymins.append( max(0.025, (float(annot[1]) / height) ) )
                    xmaxs.append( min(0.975, ((float(annot[0]) + float(annot[2])) / width) ) )
                    ymaxs.append( min(0.975, ((float(annot[1]) + float(annot[3])) / height) ) )
                    classes_text.append(b'face')
                    classes.append(2)
                    poses.append("front".encode('utf8'))
                    truncated.append(int(0))


    is_good_ratio = 1.2 < width/height < 1.85
    if not is_good_ratio or not is_there_a_face_large_enough:
        return None

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(int(height)),
        'image/width': dataset_util.int64_feature(int(width)),
        'image/filename': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/source_id': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_image_data),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature([0] * len(classes)),  # list feature needs one entry per box
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
        }))


    return tf_example

def parse_mafa_example(f, images_path):
    height = 1280 # Image height
    width = 960 # Image width
    filename = 'cam_1_00000000' # Filename of the image. Empty if image is not from file
    encoded_image_data = None # Encoded image bytes

    xmins = [] # List of normalized left x coordinates in bounding box (1 per box)
    xmaxs = [] # List of normalized right x coordinates in bounding box (1 per box)
    ymins = [] # List of normalized top y coordinates in bounding box (1 per box)
    ymaxs = [] # List of normalized bottom y coordinates in bounding box (1 per box)
    classes_text = [] # List of string class names (1 per box)
    classes = [] # List of integer class ids (1 per box)
    poses = []
    truncated = []

    filename = f.readline().rstrip()
    if not filename:
        print("FN:"+filename)
        raise IOError()

    print(filename)
    
    filepath = os.path.join(images_path, filename)

    image_raw = cv2.imread(filepath)

    encoded_image_data = open(filepath, "rb").read()
    key = hashlib.sha256(encoded_image_data).hexdigest()

    height, width, channel = image_raw.shape

    face_num = f.readline().rstrip()
    print(face_num)
    face_num=int(face_num)
    if not face_num:
        x = f.readline().rstrip()
        #raise Exception()
    
    is_there_a_face_large_enough = False
    min_face_width_px = 15
    min_face_width = min_face_width_px/640

    for i in range(face_num):
        annot = f.readline().rstrip().split()
        if not annot:
            raise Exception()

        # WIDER FACE DATASET CONTAINS SOME ANNOTATIONS THAT EXCEED THE IMAGE BOUNDARY
        if(float(annot[2]) > 25.0):
            if(float(annot[3]) > 30.0):
                w_face = float(annot[2])/width
                if w_face >= min_face_width: 
                    is_there_a_face_large_enough=True
                    xmins.append( max(0.025, (float(annot[0]) / width) ) )
                    ymins.append( max(0.025, (float(annot[1]) / height) ) )
                    xmaxs.append( min(0.975, ((float(annot[0]) + float(annot[2])) / width) ) )
                    ymaxs.append( min(0.975, ((float(annot[1]) + float(annot[3])) / height) ) )
                    if(int(annot[8]) < 0):
                        classes_text.append(b'face')
                        classes.append(2)
                    else:
                        classes_text.append(b'masked')
                        classes.append(1)
                    poses.append("front".encode('utf8'))
                    truncated.append(int(0))


    is_good_ratio = 1.2 < width/height < 1.85
    if not is_good_ratio or not is_there_a_face_large_enough:
        return None

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(int(height)),
        'image/width': dataset_util.int64_feature(int(width)),
        'image/filename': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/source_id': dataset_util.bytes_feature(filename.encode('utf-8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_image_data),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature([0] * len(classes)),  # list feature needs one entry per box
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
        }))


    return tf_example


def run(images_path, description_file, output_path, no_bbox=False):
    f = open(description_file)
    writer = tf.python_io.TFRecordWriter(output_path)

    i = 0

    print("Processing {}".format(images_path))
    while True:
        try:
            if no_bbox:
                tf_example = parse_test_example(f, images_path)
            else:
                tf_example = parse_example(f, images_path)

            if tf_example is not None:
                writer.write(tf_example.SerializeToString())
                i += 1

        except IOError:
            break
        except Exception:
            raise

    writer.close()

    print("Correctly created record for {} images\n".format(i))


def main(unused_argv):
    # Training
    if config.TRAIN_WIDER_PATH is not None and config.TRAIN_MAFA_PATH is not None:
        images_path = os.path.join(config.TRAIN_WIDER_PATH, "train")
        #images_path = config.TRAIN_WIDER_PATH
        description_file = os.path.join(config.GROUND_TRUTH_PATH, "cam_1_00000000.txt")  # only one of the ~4000 .txt files ever gets read
        #mafa_images_path = os.path.join(config.TRAIN_MAFA_PATH, "images")
        #mafa_description_file = os.path.join(config.GROUND_TRUTH_MAFA_PATH, "mafa_train_bbx_gt.txt")
        output_path = os.path.join(config.OUTPUT_PATH, "train/output")
        #output_path = config.OUTPUT_PATH
        #run(images_path, description_file, mafa_images_path, mafa_description_file, output_path)
        run(images_path, description_file, output_path)
    
    # Validation
    if config.VAL_WIDER_PATH is not None:
        images_path = os.path.join(config.VAL_WIDER_PATH, "images")
        description_file = os.path.join(config.GROUND_TRUTH_PATH, "wider_face_val_bbx_gt.txt")
        mafa_images_path = os.path.join(config.VAL_MAFA_PATH, "images")
        mafa_description_file = os.path.join(config.GROUND_TRUTH_MAFA_PATH, "mafa_test_bbx_gt.txt")
        output_path = os.path.join(config.OUTPUT_PATH, "val.landscape.15pxat640_wider_mafa.tfrecord")
        run(images_path, description_file, output_path)  # adjusted to run()'s new three-argument signature; the mafa paths above are now unused
    return
    # Testing. This set does not contain bounding boxes, so the tfrecord will contain images only.
    # Note: this branch is unreachable because of the early return above.
    if config.TEST_WIDER_PATH is not None:
        images_path = os.path.join(config.TEST_WIDER_PATH, "images")
        description_file = os.path.join(config.GROUND_TRUTH_PATH, "wider_face_test_filelist.txt")
        output_path = os.path.join(config.OUTPUT_PATH, "test.landscape.35pxat640.tfrecord")
        run(images_path, description_file, output_path, no_bbox=True)


if __name__ == '__main__':
    tf.app.run()

And this is my current output:

2020-07-11 12:13:03.636857: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudart64_101.dll'; dlerror: cudart64_101.dll not found
2020-07-11 12:13:03.644643: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
Processing C:/envs/TFRecords/train
0 0.689844 0.172222 0.070312 0.163889
Correctly created record for 0 images

where those numbers are just the first line of a .txt file. Apparently the script reads that YOLO annotation line where it expects an image filename (the description file it was written for starts every record with a filename), so no image is ever loaded and zero records are written.
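Putting it together, here is a minimal sketch of what I believe the conversion should look like for per-image YOLO files. It assumes one .txt per .jpg with the same basename, a single 'Person' class with id 1, and the same imports and dataset_util helpers as above; parse_yolo_file and run_yolo are names I made up, not anything from the original script:

import glob

def parse_yolo_file(txt_path, images_path):
    # The image is assumed to share the annotation file's basename.
    base = os.path.splitext(os.path.basename(txt_path))[0]
    filepath = os.path.join(images_path, base + ".jpg")

    encoded_image_data = open(filepath, "rb").read()
    key = hashlib.sha256(encoded_image_data).hexdigest()
    height, width, _ = cv2.imread(filepath).shape

    xmins, xmaxs, ymins, ymaxs = [], [], [], []
    classes_text, classes = [], []
    for line in open(txt_path):
        parts = line.split()
        if len(parts) != 5:
            continue
        # YOLO line: class x_center y_center width height (all normalized)
        xc, yc, w, h = (float(v) for v in parts[1:])
        xmins.append(xc - w / 2)
        xmaxs.append(xc + w / 2)
        ymins.append(yc - h / 2)
        ymaxs.append(yc + h / 2)
        classes_text.append(b'Person')
        classes.append(1)

    return tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature((base + ".jpg").encode('utf-8')),
        'image/source_id': dataset_util.bytes_feature((base + ".jpg").encode('utf-8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_image_data),
        'image/format': dataset_util.bytes_feature(b'jpeg'),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        }))

def run_yolo(dataset_path, output_path):
    writer = tf.python_io.TFRecordWriter(output_path)
    count = 0
    for txt_path in sorted(glob.glob(os.path.join(dataset_path, "*.txt"))):
        tf_example = parse_yolo_file(txt_path, dataset_path)
        writer.write(tf_example.SerializeToString())
        count += 1
    writer.close()
    print("Correctly created record for {} images".format(count))

Is this the right direction?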
