Removed some old options that aren't needed anymore
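The -i/-o timecode window and the -r/--reel selector are commented out of the option parser, along with the matching start/end/select_reel arguments to convert(). The call from main() now reduces to roughly the following (a sketch assembled from the diff below, using only the options that remain):

    convert(input_file=args[1], output_format=output_format,
            include_muted=options.include_muted,
            xsl=options.xslt,
            progress=False, output=sys.stdout, log_output=sys.stderr,
            warnings=options.warnings)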

Jamie Hardt
2021-05-25 22:39:08 -07:00
parent 9633bcdefb
commit 1294d5e208
3 changed files with 117 additions and 97 deletions

View File

@@ -15,16 +15,16 @@ def main():
filter_opts = OptionGroup(title='Filtering Options', parser=parser)
filter_opts.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode.",
metavar='TC')
filter_opts.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.",
metavar='TC')
# filter_opts.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode.",
# metavar='TC')
# filter_opts.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.",
# metavar='TC')
filter_opts.add_option('-m', '--include-muted', default=False, action='store_true', dest='include_muted',
help='Include muted clips.')
filter_opts.add_option('-r', '--reel', dest='select_reel', help="Output only events in reel N, and recalculate "
" start times relative to that reel's start time.",
default=None, metavar='N')
# filter_opts.add_option('-r', '--reel', dest='select_reel', help="Output only events in reel N, and recalculate "
# " start times relative to that reel's start time.",
# default=None, metavar='N')
parser.add_option_group(filter_opts)
@@ -78,15 +78,16 @@ def main():
sys.exit(22)
print_status_style("Input file is %s" % (args[1]))
if options.in_time:
print_status_style("Start at time %s" % (options.in_time))
else:
print_status_style("No start time given.")
if options.out_time:
print_status_style("End at time %s." % (options.out_time))
else:
print_status_style("No end time given.")
# if options.in_time:
# print_status_style("Start at time %s" % (options.in_time))
# else:
# print_status_style("No start time given.")
#
# if options.out_time:
# print_status_style("End at time %s." % (options.out_time))
# else:
# print_status_style("No end time given.")
if options.include_muted:
print_status_style("Muted regions are included.")
@@ -99,10 +100,11 @@ def main():
output_format = 'fmpxml'
convert(input_file=args[1], output_format=output_format,
start=options.in_time,
end=options.out_time,
#start=options.in_time,
#end=options.out_time,
include_muted=options.include_muted,
xsl=options.xslt, select_reel=options.select_reel,
xsl=options.xslt,
#select_reel=options.select_reel,
progress=False, output=sys.stdout, log_output=sys.stderr,
warnings=options.warnings)
except FileNotFoundError as e:

View File

@@ -110,42 +110,94 @@ def normalize_record_keys_for_adr(records):
return records
def convert(input_file, output_format='fmpxml', start=None, end=None, select_reel=None,
def output_adr_csv(lines):
adr_keys = ('Title', 'Cue Number', 'Character Name', 'Reel', 'Version', 'Line',
'Start', 'Finish', 'Reason', 'Note', 'TV', 'Version')
reels = set([ln['Reel'] for ln in lines])
reels.add(None)
for n in [n['Character Number'] for n in lines]:
for reel in reels:
these_lines = [ln for ln in lines
if ln['Character Number'] == n and
ln.get('Reel', None) == reel]
if len(these_lines) == 0:
continue
outfile_name = "%s_%s_%s_%s.csv" % (these_lines[0]['Title'],
n, these_lines[0]['Character Name'], reel,)
with open(outfile_name, mode='w', newline='') as outfile:
dump_keyed_csv(these_lines, adr_keys, outfile)
def create_adr_reports(parsed):
lines = [e for e in parsed['events'] if 'ADR' in e.keys()]
print_section_header_style("Creating PDF Reports")
print_status_style("Creating ADR Report")
output_summary(lines)
print_status_style("Creating Line Count")
output_line_count(lines)
print_status_style("Creating Supervisor Logs directory and reports")
os.makedirs("Supervisor Logs", exist_ok=True)
os.chdir("Supervisor Logs")
output_supervisor_1pg(lines)
os.chdir("..")
print_status_style("Creating Director's Logs director and reports")
os.makedirs("Director Logs", exist_ok=True)
os.chdir("Director Logs")
output_summary(lines, by_character=True)
os.chdir("..")
print_status_style("Creating CSV outputs")
os.makedirs("CSV", exist_ok=True)
os.chdir("CSV")
output_adr_csv(lines)
os.chdir("..")
print_status_style("Creating Scripts directory and reports")
os.makedirs("Talent Scripts", exist_ok=True)
os.chdir("Talent Scripts")
output_talent_sides(lines)
def convert(input_file, output_format='fmpxml',
progress=False, include_muted=False, xsl=None,
output=sys.stdout, log_output=sys.stderr, warnings=True):
with open(input_file, 'r') as file:
print_section_header_style('Parsing')
ast = ptulsconv.protools_text_export_grammar.parse(file.read())
dict_parser = ptulsconv.DictionaryParserVisitor()
parsed = dict_parser.visit(ast)
print_status_style('Session title: %s' % parsed['header']['session_name'])
print_status_style('Session timecode format: %f' % parsed['header']['timecode_format'])
print_status_style('Found %i tracks' % len(parsed['tracks']))
print_status_style('Found %i markers' % len(parsed['markers']))
parsed = parse_text_export(file)
tcxform = ptulsconv.transformations.TimecodeInterpreter()
tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress, ignore_muted=(not include_muted),
tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress,
ignore_muted=(not include_muted),
log_output=log_output)
parsed = tcxform.transform(parsed)
parsed = tagxform.transform(parsed)
if start is not None and end is not None:
start_fs = tcxform.convert_time(start,
frame_rate=parsed['header']['timecode_format'],
drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
end_fs = tcxform.convert_time(end,
frame_rate=parsed['header']['timecode_format'],
drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
subclipxform = ptulsconv.transformations.SubclipOfSequence(start=start_fs, end=end_fs)
parsed = subclipxform.transform(parsed)
if select_reel is not None:
reel_xform = ptulsconv.transformations.SelectReel(reel_num=select_reel)
parsed = reel_xform.transform(parsed)
# start=None, end=None, select_reel=None
#
# if start is not None and end is not None:
# start_fs = tcxform.convert_time(start,
# frame_rate=parsed['header']['timecode_format'],
# drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
#
# end_fs = tcxform.convert_time(end,
# frame_rate=parsed['header']['timecode_format'],
# drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
#
# subclipxform = ptulsconv.transformations.SubclipOfSequence(start=start_fs, end=end_fs)
# parsed = subclipxform.transform(parsed)
#
# if select_reel is not None:
# reel_xform = ptulsconv.transformations.SelectReel(reel_num=select_reel)
# parsed = reel_xform.transform(parsed)
parsed = normalize_record_keys_for_adr(parsed)
@@ -170,38 +222,7 @@ def convert(input_file, output_format='fmpxml', start=None, end=None, select_ree
dump_csv(parsed['events'])
elif output_format == 'adr':
lines = [e for e in parsed['events'] if 'ADR' in e.keys()]
print_section_header_style("Creating PDF Reports")
print_status_style("Creating ADR Report")
output_summary(lines)
print_status_style("Creating Line Count")
output_line_count(lines)
print_status_style("Creating Supervisor Logs directory and reports")
os.makedirs("Supervisor Logs", exist_ok=True)
os.chdir("Supervisor Logs")
output_supervisor_1pg(lines)
os.chdir("..")
print_status_style("Creating Director's Logs director and reports")
os.makedirs("Director Logs", exist_ok=True)
os.chdir("Director Logs")
output_summary(lines, by_character=True)
os.chdir("..")
print_status_style("Creating CSV outputs")
os.makedirs("CSV", exist_ok=True)
os.chdir("CSV")
output_adr_csv(lines)
os.chdir("..")
print_status_style("Creating Scripts directory and reports")
os.makedirs("Talent Scripts", exist_ok=True)
os.chdir("Talent Scripts")
output_talent_sides(lines)
create_adr_reports(parsed)
elif output_format == 'fmpxml':
if xsl is None:
@@ -212,22 +233,14 @@ def convert(input_file, output_format='fmpxml', start=None, end=None, select_ree
fmp_transformed_dump(parsed, input_file, xsl, output)
def output_adr_csv(lines):
adr_keys = ('Title', 'Cue Number', 'Character Name', 'Reel', 'Version', 'Line',
'Start', 'Finish', 'Reason', 'Note', 'TV', 'Version')
reels = set([ln['Reel'] for ln in lines])
reels.add(None)
for n in [n['Character Number'] for n in lines]:
for reel in reels:
these_lines = [ln for ln in lines
if ln['Character Number'] == n and
ln.get('Reel', None) == reel]
def parse_text_export(file):
ast = ptulsconv.protools_text_export_grammar.parse(file.read())
dict_parser = ptulsconv.DictionaryParserVisitor()
parsed = dict_parser.visit(ast)
print_status_style('Session title: %s' % parsed['header']['session_name'])
print_status_style('Session timecode format: %f' % parsed['header']['timecode_format'])
print_status_style('Found %i tracks' % len(parsed['tracks']))
print_status_style('Found %i markers' % len(parsed['markers']))
return parsed
if len(these_lines) == 0:
continue
outfile_name = "%s_%s_%s_%s.csv" % (these_lines[0]['Title'],
n, these_lines[0]['Character Name'], reel,)
with open(outfile_name, mode='w', newline='') as outfile:
dump_keyed_csv(these_lines, adr_keys, outfile)
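
With the parsing moved into parse_text_export() and the report generation into create_adr_reports(), the reorganized convert() works out to roughly the sketch below. This is assembled from the hunks above, not a verbatim copy of the resulting file; indentation and the non-ADR output branches are approximated.

    def convert(input_file, output_format='fmpxml',
                progress=False, include_muted=False, xsl=None,
                output=sys.stdout, log_output=sys.stderr, warnings=True):
        with open(input_file, 'r') as file:
            print_section_header_style('Parsing')
            parsed = parse_text_export(file)   # read and parse the Pro Tools text export

            # Interpret timecodes and tags; muted clips are dropped unless
            # --include-muted was given on the command line.
            tcxform = ptulsconv.transformations.TimecodeInterpreter()
            tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress,
                                                                ignore_muted=(not include_muted),
                                                                log_output=log_output)
            parsed = tcxform.transform(parsed)
            parsed = tagxform.transform(parsed)

            parsed = normalize_record_keys_for_adr(parsed)

            if output_format == 'adr':
                create_adr_reports(parsed)     # writes the PDF, log and CSV report tree
            # ... the 'csv' and 'fmpxml' outputs are handled as before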

View File

@@ -153,10 +153,12 @@ class TagInterpreter(Transformation):
print_section_header_style('Parsing Tags')
self.title_tags = self.parse_tags(input_dict['header']['session_name'])
self.markers = sorted(input_dict['markers'], key=lambda m: m['location_decoded']['frame_count'])
self.markers = sorted(input_dict['markers'],
key=lambda m: m['location_decoded']['frame_count'])
if self.show_progress:
track_iter = tqdm(input_dict['tracks'], desc="Reading tracks...", unit='Track')
track_iter = tqdm(input_dict['tracks'],
desc="Reading tracks...", unit='Track')
else:
track_iter = input_dict['tracks']
@@ -164,8 +166,10 @@ class TagInterpreter(Transformation):
if 'Muted' in track['state'] and self.ignore_muted:
continue
track_tags = self.parse_tags(track['name'], parent_track_name=track['name'])
comment_tags = self.parse_tags(track['comments'], parent_track_name=track['name'])
track_tags = self.parse_tags(track['name'],
parent_track_name=track['name'])
comment_tags = self.parse_tags(track['comments'],
parent_track_name=track['name'])
track_context_tags = track_tags['tags']
track_context_tags.update(comment_tags['tags'])
@@ -178,7 +182,8 @@ class TagInterpreter(Transformation):
clip_time=clip['start_time'])
if clip_tags['mode'] == 'Normal':
event = self.decorate_event(clip, clip_tags, input_dict['header'], track_context_tags, track_tags)
event = self.decorate_event(clip, clip_tags, input_dict['header'],
track_context_tags, track_tags)
self.transformed.append(event)
elif clip_tags['mode'] == 'Append':