Added command to extract single reels

Jamie Hardt
2020-05-17 11:27:06 -07:00
parent 9bb2ae136a
commit 93a014bdc0
3 changed files with 64 additions and 13 deletions
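
In effect, the new -R flag pulls a single reel out of a session export and rebases event times to that reel's start. A hypothetical invocation, modeled on the usage string in the diff below (the reel number and output redirect are made up):

    ptulsconv -R 2 TEXT_EXPORT.txt > reel2.xml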

View File

@@ -11,14 +11,20 @@ def main():
     parser = OptionParser()
     parser.usage = "ptulsconv TEXT_EXPORT.txt"
-    parser.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode, and offset"
-                      " all events relative to this timecode.", metavar='TC')
+    parser.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode.", metavar='TC')
     parser.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.", metavar='TC')
     # parser.add_option('-P', '--progress', default=False, action='store_true', dest='show_progress',
     #                   help='Show progress bar.')
     parser.add_option('-m', '--include-muted', default=False, action='store_true', dest='include_muted',
                       help='Include muted clips.')
+    parser.add_option('-R', '--reel', dest='select_reel', help="Output only events in reel N, and recalculate start "
+                      "times relative to that reel's start time.",
+                      default=None, metavar='N')
+    parser.add_option('--json', default=False, action='store_true', dest='write_json',
+                      help='Output a JSON document instead of XML.')
     parser.add_option('--xform', dest='xslt', help="Convert with built-in XSLT transform.",
                       default=None, metavar='NAME')
@@ -68,8 +74,12 @@ def main():
         print_status_style("Muted regions are ignored.")
     try:
-        convert(input_file=args[1], start=options.in_time, end=options.out_time,
-                include_muted=options.include_muted, xsl=options.xslt,
+        output_format = 'fmpxml'
+        if options.write_json:
+            output_format = 'json'
+        convert(input_file=args[1], output_format=output_format, start=options.in_time, end=options.out_time,
+                include_muted=options.include_muted, xsl=options.xslt, select_reel=options.select_reel,
                 progress=False, output=sys.stdout, log_output=sys.stderr)
     except FileNotFoundError as e:
         print_fatal_error("Error trying to read input file")

View File

@@ -139,11 +139,11 @@ def fmp_transformed_dump(data, input_file, xsl_name, output):
     xsl_path = os.path.join(pathlib.Path(__file__).parent.absolute(), 'xslt', xsl_name + ".xsl")
     print_status_style("Using xsl: %s" % (xsl_path))
-    result = subprocess.run(['xsltproc', xsl_path, '-'], input=strdata, text=True,
+    subprocess.run(['xsltproc', xsl_path, '-'], input=strdata, text=True,
                    stdout=output, shell=False, check=True)

-def convert(input_file, output_format='fmpxml', start=None, end=None,
+def convert(input_file, output_format='fmpxml', start=None, end=None, select_reel=None,
             progress=False, include_muted=False, xsl=None,
             output=sys.stdout, log_output=sys.stderr):
     with open(input_file, 'r') as file:
@@ -176,6 +176,11 @@ def convert(input_file, output_format='fmpxml', start=None, end=None,
         subclipxform = ptulsconv.transformations.SubclipOfSequence(start=start_fs, end=end_fs)
         parsed = subclipxform.transform(parsed)

+    if select_reel is not None:
+        reel_xform = ptulsconv.transformations.SelectReel(reel_num=select_reel)
+        parsed = reel_xform.transform(parsed)
+
     if output_format == 'json':
         json.dump(parsed, output)
     elif output_format == 'fmpxml':
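
With the plumbing above, reel selection and JSON output now flow end to end through convert(). A hedged sketch of a programmatic call; the import path and filename are assumptions, while the keyword names come from the new signature:

    import sys
    # Assumption: convert() is importable from the module this hunk edits;
    # the diff does not show that module's name.
    from ptulsconv.commands import convert

    convert(input_file='MY_SESSION.txt',  # hypothetical Pro Tools text export
            output_format='json',         # selected in main() via the new --json flag
            select_reel='2',              # forwarded to transformations.SelectReel
            output=sys.stdout)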

View File

@@ -7,6 +7,7 @@ from .reporting import print_advisory_tagging_error, print_section_header_style,
+from tqdm import tqdm

 class Transformation:
     def transform(self, input_dict) -> dict:
         return input_dict
@@ -163,7 +164,8 @@ class TagInterpreter(Transformation):
             if clip['state'] == 'Muted' and self.ignore_muted:
                 continue
-            clip_tags = self.parse_tags(clip['clip_name'], parent_track_name=track['name'], clip_time=clip['start_time'])
+            clip_tags = self.parse_tags(clip['clip_name'], parent_track_name=track['name'],
+                                        clip_time=clip['start_time'])
             clip_start = clip['start_time_decoded']['frame_count']
             if clip_tags['mode'] == 'Normal':
                 event = dict()
@@ -176,6 +178,7 @@ class TagInterpreter(Transformation):
                 event['PT.Track.Name'] = track_tags['line']
                 event['PT.Session.Name'] = title_tags['line']
                 event['PT.Session.TimecodeFormat'] = input_dict['header']['timecode_format']
+                event['PT.Clip.Number'] = clip['event']
                 event['PT.Clip.Name'] = clip_tags['line']
                 event['PT.Clip.Start'] = clip['start_time']
@@ -194,11 +197,14 @@ class TagInterpreter(Transformation):
                 transformed[-1]['PT.Clip.Name'] = transformed[-1]['PT.Clip.Name'] + " " + clip_tags['line']
                 transformed[-1]['PT.Clip.Finish_Frames'] = clip['end_time_decoded']['frame_count']
                 transformed[-1]['PT.Clip.Finish'] = clip['end_time']
-                transformed[-1]['PT.Clip.Finish_Seconds'] = clip['end_time_decoded']['frame_count'] / input_dict['header'][
-                    'timecode_format']
+                transformed[-1]['PT.Clip.Finish_Seconds'] = clip['end_time_decoded']['frame_count'] / \
+                    input_dict['header']['timecode_format']
             elif clip_tags['mode'] == 'Timespan':
-                rule = dict(start_time=clip_start,
+                rule = dict(start_time_literal=clip['start_time'],
+                            start_time=clip_start,
+                            start_time_seconds=clip_start / input_dict['header']['timecode_format'],
                             end_time=clip['end_time_decoded']['frame_count'],
                             tags=clip_tags['tags'])
                 timespan_rules.append(rule)
@@ -211,7 +217,17 @@ class TagInterpreter(Transformation):
         for rule in rules:
             if rule['start_time'] <= time <= rule['end_time']:
+                tag_keys = list(rule['tags'].keys())
+                tag_times = dict()
+                for key in tag_keys:
+                    key: str
+                    time_value = rule['start_time']
+                    tag_times["Timespan." + key + ".Start_Frames"] = time_value
+                    tag_times["Timespan." + key + ".Start"] = rule['start_time_literal']
+                    tag_times["Timespan." + key + ".Start_Seconds"] = rule['start_time_seconds']
                 retval.update(rule['tags'])
+                retval.update(tag_times)
         return retval
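
The tag_times block above derives three keys per Timespan tag, recording where that span starts; these are the values SelectReel consumes below. A self-contained illustration with invented numbers (86400 frames is one hour at 24 fps):

    # Illustrative only: a Timespan rule shaped like the dicts appended to
    # timespan_rules above, carrying a single 'Reel' tag.
    rule = dict(start_time_literal='01:00:00:00', start_time=86400,
                start_time_seconds=3600.0, end_time=172800, tags={'Reel': '1'})

    tag_times = dict()
    for key in rule['tags']:
        tag_times["Timespan." + key + ".Start_Frames"] = rule['start_time']
        tag_times["Timespan." + key + ".Start"] = rule['start_time_literal']
        tag_times["Timespan." + key + ".Start_Seconds"] = rule['start_time_seconds']

    # tag_times == {'Timespan.Reel.Start_Frames': 86400,
    #               'Timespan.Reel.Start': '01:00:00:00',
    #               'Timespan.Reel.Start_Seconds': 3600.0}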
@@ -244,6 +260,26 @@ class TagInterpreter(Transformation):
         return self.visitor.visit(parse_tree)

+class SelectReel(Transformation):
+    def __init__(self, reel_num):
+        self.reel_num = reel_num
+
+    def transform(self, input_dict) -> dict:
+        out_events = []
+        for event in input_dict['events']:
+            if event['Reel'] == str(self.reel_num):
+                offset = event.get('Timespan.Reel.Start_Frames', 0)
+                offset_sec = event.get('Timespan.Reel.Start_Seconds', 0.)
+                event['PT.Clip.Start_Frames'] -= offset
+                event['PT.Clip.Finish_Frames'] -= offset
+                event['PT.Clip.Start_Seconds'] -= offset_sec
+                event['PT.Clip.Finish_Seconds'] -= offset_sec
+                out_events.append(event)
+        return dict(header=input_dict['header'], events=out_events)
+
 class SubclipOfSequence(Transformation):
     def __init__(self, start, end):
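
Since SelectReel is new here, a self-contained sketch of its effect may help. The event dict is invented, but its keys follow what the class reads; the Timespan.Reel.* values are the ones TagInterpreter injects above (one hour at 24 fps):

    from ptulsconv.transformations import SelectReel

    doc = dict(header=dict(timecode_format=24),
               events=[{'Reel': '2',
                        'Timespan.Reel.Start_Frames': 86400,
                        'Timespan.Reel.Start_Seconds': 3600.0,
                        'PT.Clip.Start_Frames': 86500,
                        'PT.Clip.Start_Seconds': 3600.0 + 100 / 24,
                        'PT.Clip.Finish_Frames': 86600,
                        'PT.Clip.Finish_Seconds': 3600.0 + 200 / 24}])

    out = SelectReel(reel_num=2).transform(doc)
    assert out['events'][0]['PT.Clip.Start_Frames'] == 100   # rebased to reel start
    assert out['header']['timecode_format'] == 24            # header carried through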
@@ -252,8 +288,8 @@ class SubclipOfSequence(Transformation):
     def transform(self, input_dict: dict) -> dict:
         out_events = []
-        offset = self.start
-        offset_sec = self.start / input_dict['header']['timecode_format']
+        offset = 0 #self.start
+        offset_sec = 0. #self.start / input_dict['header']['timecode_format']
         for event in input_dict['events']:
             if self.start <= event['PT.Clip.Start_Frames'] <= self.end:
                 e = event
@@ -263,4 +299,4 @@ class SubclipOfSequence(Transformation):
                 e['PT.Clip.Finish_Seconds'] = event['PT.Clip.Finish_Seconds'] - offset_sec
                 out_events.append(e)
-        return dict(events=out_events)
+        return dict(header=input_dict['header'], events=out_events)
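
A through-line worth noting: SubclipOfSequence no longer rebases event times (its offsets are pinned to zero, matching the removal of "and offset all events" from the -i help text) and now preserves the header, so a downstream SelectReel still sees absolute frame counts and the timecode format. A minimal sketch of the resulting behavior, with made-up numbers, assuming the elided loop body mirrors the Finish_Seconds line shown above:

    from ptulsconv.transformations import SubclipOfSequence

    doc = dict(header=dict(timecode_format=24),
               events=[{'PT.Clip.Start_Frames': 120, 'PT.Clip.Start_Seconds': 5.0,
                        'PT.Clip.Finish_Frames': 240, 'PT.Clip.Finish_Seconds': 10.0}])

    out = SubclipOfSequence(start=100, end=300).transform(doc)
    assert out['header']['timecode_format'] == 24           # header now preserved
    assert out['events'][0]['PT.Clip.Start_Frames'] == 120  # times no longer rebased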