JSON files contain labelled data which is useful for extracting the relevant frames from the conversion of video to frames. That said, Hamideh recommends having a look at https://gitlab.com/vital-ultrasound/lung-ultrasound/-/blob/master/Bline-Classification-Localization/jsondivide.py which might help with the implementation.
Additional context
# Module authorship metadata.
__author__ = "Hamideh Kerdegari"
__copyright__ = "Copyright 2020"
__credits__ = ["Hamideh Kerdegari"]
__license__ = "Hamideh Kerdegari"
__version__ = "0.0.1"
__maintainer__ = "Hamideh Kerdegari"
__email__ = "hamideh.kerdegari@gmail.com"
__status__ = "R&D"
# This script is used to divide the whole json file separately for each 4-second video file.
import json
import os
import shutil
def read_video_annotation(json_data, fid):
    """Collect the annotation segments and events belonging to one recording.

    Parameters
    ----------
    json_data : dict
        Parsed VIA-style annotation JSON. Each entry under ``'metadata'``
        carries a recording id (``'vid'``), a time axis (``'z'``: two values
        for a segment, one for a point event), coordinates (``'xy'``, events
        only) and an attribute map (``'av'``).
    fid : str
        Recording id to filter on (matched against each entry's ``'vid'``).

    Returns
    -------
    dict
        ``{"segments": [...]}`` where each segment dict has ``start_time``,
        ``end_time``, an integer ``label`` and a time-sorted ``events`` list
        of the point events falling inside its time span. Segments are sorted
        by ``start_time``.
    """
    def _label(entry):
        # The attribute value looks like "_DEFAULT,<n>"; strip the prefix
        # to recover the integer class label.
        return int(entry["av"]["1"].replace("_DEFAULT,", ""))

    # Hoist the per-recording filter so metadata is walked only once per pass.
    entries = [e for e in json_data['metadata'].values() if e['vid'] == fid]

    # Two z-values mark a temporal segment [start_time, end_time].
    segments = [dict(start_time=e["z"][0],
                     end_time=e["z"][1],
                     events=[],
                     label=_label(e))
                for e in entries if len(e['z']) == 2]
    segments.sort(key=lambda seg: seg['start_time'])

    # A single z-value marks a point event; attach it to every segment whose
    # time span contains it (an event on a boundary may land in two segments).
    for e in entries:
        if len(e['z']) == 1:
            event_time = e["z"][0]
            for seg in segments:
                if seg["start_time"] <= event_time <= seg["end_time"]:
                    seg["events"].append(dict(time=event_time,
                                              xy=e["xy"],
                                              label=_label(e)))
    for seg in segments:
        seg["events"].sort(key=lambda ev: ev['time'])
    return {"segments": segments}
# Absolute paths of the whole-recording VIA annotation JSON files to split.
# NOTE(review): machine-specific hard-coded paths — presumably edited per
# setup; consider taking these as command-line arguments instead.
json_file_paths = ['/home/nhatpth/PhD/DATA/test/28EI_003_003/28EI_003_003_0/pt003-0.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_003/28EI_003_003_1/pt003-1.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_203/28EI_003_203_1/pt203-1.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_203/28EI_003_203_2/pt203-2.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_205/28EI_003_205_0/pt205-0.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_207/28EI_003_207_3/pt207-3.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_209/28EI_003_209_0/pt209-0.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_216/28EI_003_216_0/pt216-0.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_216/28EI_003_216_2/pt216-2.json',
                   '/home/nhatpth/PhD/DATA/test/28EI_003_216/28EI_003_216_3/pt216-3.json']
# Split each whole-recording annotation file: for every recording listed under
# 'file', move the video into its own folder as "recording.mp4" and write the
# recording's annotation subset next to it as "annotation.json".
for json_file_path in json_file_paths:
    root = os.path.dirname(json_file_path)
    with open(json_file_path, "r") as json_file:
        json_data = json.load(json_file)
    for key in json_data['file']:
        fid = json_data['file'][key]['fid']
        annotation = read_video_annotation(json_data, fid)
        recording_name = json_data['file'][key]['fname']
        recording_path = os.path.join(root, recording_name)
        out_folder_path = os.path.join(root, recording_name.replace(".mp4", ""))
        # os.makedirs/shutil.move instead of os.system("mkdir -p"/"mv"):
        # portable, raises on failure instead of silently ignoring it, and
        # safe for paths containing spaces or shell metacharacters.
        os.makedirs(out_folder_path, exist_ok=True)
        shutil.move(recording_path, os.path.join(out_folder_path, "recording.mp4"))
        with open(os.path.join(out_folder_path, "annotation.json"), "w") as out_file:
            json.dump(annotation, out_file, indent=4)
Feature
Motivation
JSON files contain labelled data which is useful for extracting the relevant frames from the conversion of video to frames. That said, Hamideh recommends having a look at
https://gitlab.com/vital-ultrasound/lung-ultrasound/-/blob/master/Bline-Classification-Localization/jsondivide.py
which might help with the implementation.
Additional context