Mirror of https://github.com/iluvcapra/ptulsconv.git, synced 2026-01-02 09:50:48 +00:00
Removed some old options that aren't needed anymore
@@ -15,16 +15,16 @@ def main():
 
     filter_opts = OptionGroup(title='Filtering Options', parser=parser)
 
-    filter_opts.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode.",
-                           metavar='TC')
-    filter_opts.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.",
-                           metavar='TC')
+    # filter_opts.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode.",
+    #                        metavar='TC')
+    # filter_opts.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.",
+    #                        metavar='TC')
     filter_opts.add_option('-m', '--include-muted', default=False, action='store_true', dest='include_muted',
                            help='Include muted clips.')
 
-    filter_opts.add_option('-r', '--reel', dest='select_reel', help="Output only events in reel N, and recalculate "
-                                                                    " start times relative to that reel's start time.",
-                           default=None, metavar='N')
+    # filter_opts.add_option('-r', '--reel', dest='select_reel', help="Output only events in reel N, and recalculate "
+    #                                                                 " start times relative to that reel's start time.",
+    #                        default=None, metavar='N')
 
     parser.add_option_group(filter_opts)
 
@@ -78,15 +78,16 @@ def main():
         sys.exit(22)
 
     print_status_style("Input file is %s" % (args[1]))
-    if options.in_time:
-        print_status_style("Start at time %s" % (options.in_time))
-    else:
-        print_status_style("No start time given.")
 
-    if options.out_time:
-        print_status_style("End at time %s." % (options.out_time))
-    else:
-        print_status_style("No end time given.")
+    # if options.in_time:
+    #     print_status_style("Start at time %s" % (options.in_time))
+    # else:
+    #     print_status_style("No start time given.")
+    #
+    # if options.out_time:
+    #     print_status_style("End at time %s." % (options.out_time))
+    # else:
+    #     print_status_style("No end time given.")
 
     if options.include_muted:
         print_status_style("Muted regions are included.")
@@ -99,10 +100,11 @@ def main():
         output_format = 'fmpxml'
 
         convert(input_file=args[1], output_format=output_format,
-                start=options.in_time,
-                end=options.out_time,
+                #start=options.in_time,
+                #end=options.out_time,
                 include_muted=options.include_muted,
-                xsl=options.xslt, select_reel=options.select_reel,
+                xsl=options.xslt,
+                #select_reel=options.select_reel,
                 progress=False, output=sys.stdout, log_output=sys.stderr,
                 warnings=options.warnings)
     except FileNotFoundError as e:
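For reference, with the start, end, and reel arguments now commented out, the call in main() effectively reduces to the following (assembled from the new side of the hunk above; nothing beyond the diff is assumed):

    convert(input_file=args[1], output_format=output_format,
            include_muted=options.include_muted,
            xsl=options.xslt,
            progress=False, output=sys.stdout, log_output=sys.stderr,
            warnings=options.warnings)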
@@ -110,42 +110,94 @@ def normalize_record_keys_for_adr(records):
     return records
 
 
-def convert(input_file, output_format='fmpxml', start=None, end=None, select_reel=None,
+def output_adr_csv(lines):
+    adr_keys = ('Title', 'Cue Number', 'Character Name', 'Reel', 'Version', 'Line',
+                'Start', 'Finish', 'Reason', 'Note', 'TV', 'Version')
+    reels = set([ln['Reel'] for ln in lines])
+    reels.add(None)
+    for n in [n['Character Number'] for n in lines]:
+        for reel in reels:
+            these_lines = [ln for ln in lines
+                           if ln['Character Number'] == n and
+                           ln.get('Reel', None) == reel]
+
+            if len(these_lines) == 0:
+                continue
+
+            outfile_name = "%s_%s_%s_%s.csv" % (these_lines[0]['Title'],
+                                                n, these_lines[0]['Character Name'], reel,)
+
+            with open(outfile_name, mode='w', newline='') as outfile:
+                dump_keyed_csv(these_lines, adr_keys, outfile)
+
+
+def create_adr_reports(parsed):
+    lines = [e for e in parsed['events'] if 'ADR' in e.keys()]
+
+    print_section_header_style("Creating PDF Reports")
+    print_status_style("Creating ADR Report")
+    output_summary(lines)
+
+    print_status_style("Creating Line Count")
+    output_line_count(lines)
+
+    print_status_style("Creating Supervisor Logs directory and reports")
+    os.makedirs("Supervisor Logs", exist_ok=True)
+    os.chdir("Supervisor Logs")
+    output_supervisor_1pg(lines)
+    os.chdir("..")
+
+    print_status_style("Creating Director's Logs director and reports")
+    os.makedirs("Director Logs", exist_ok=True)
+    os.chdir("Director Logs")
+    output_summary(lines, by_character=True)
+    os.chdir("..")
+
+    print_status_style("Creating CSV outputs")
+    os.makedirs("CSV", exist_ok=True)
+    os.chdir("CSV")
+    output_adr_csv(lines)
+    os.chdir("..")
+
+    print_status_style("Creating Scripts directory and reports")
+    os.makedirs("Talent Scripts", exist_ok=True)
+    os.chdir("Talent Scripts")
+    output_talent_sides(lines)
+
+
+def convert(input_file, output_format='fmpxml',
            progress=False, include_muted=False, xsl=None,
            output=sys.stdout, log_output=sys.stderr, warnings=True):
 
     with open(input_file, 'r') as file:
         print_section_header_style('Parsing')
-        ast = ptulsconv.protools_text_export_grammar.parse(file.read())
-        dict_parser = ptulsconv.DictionaryParserVisitor()
-        parsed = dict_parser.visit(ast)
-
-        print_status_style('Session title: %s' % parsed['header']['session_name'])
-        print_status_style('Session timecode format: %f' % parsed['header']['timecode_format'])
-        print_status_style('Fount %i tracks' % len(parsed['tracks']))
-        print_status_style('Found %i markers' % len(parsed['markers']))
+        parsed = parse_text_export(file)
 
         tcxform = ptulsconv.transformations.TimecodeInterpreter()
-        tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress, ignore_muted=(not include_muted),
+        tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress,
+                                                            ignore_muted=(not include_muted),
                                                             log_output=log_output)
 
         parsed = tcxform.transform(parsed)
         parsed = tagxform.transform(parsed)
 
-        if start is not None and end is not None:
-            start_fs = tcxform.convert_time(start,
-                                            frame_rate=parsed['header']['timecode_format'],
-                                            drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
-
-            end_fs = tcxform.convert_time(end,
-                                          frame_rate=parsed['header']['timecode_format'],
-                                          drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
-
-            subclipxform = ptulsconv.transformations.SubclipOfSequence(start=start_fs, end=end_fs)
-            parsed = subclipxform.transform(parsed)
-
-        if select_reel is not None:
-            reel_xform = ptulsconv.transformations.SelectReel(reel_num=select_reel)
-            parsed = reel_xform.transform(parsed)
+        # start=None, end=None, select_reel=None
+        #
+        # if start is not None and end is not None:
+        #     start_fs = tcxform.convert_time(start,
+        #                                     frame_rate=parsed['header']['timecode_format'],
+        #                                     drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
+        #
+        #     end_fs = tcxform.convert_time(end,
+        #                                   frame_rate=parsed['header']['timecode_format'],
+        #                                   drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
+        #
+        #     subclipxform = ptulsconv.transformations.SubclipOfSequence(start=start_fs, end=end_fs)
+        #     parsed = subclipxform.transform(parsed)
+        #
+        # if select_reel is not None:
+        #     reel_xform = ptulsconv.transformations.SelectReel(reel_num=select_reel)
+        #     parsed = reel_xform.transform(parsed)
 
         parsed = normalize_record_keys_for_adr(parsed)
 
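As a usage sketch only, the slimmed-down convert() introduced in this hunk can be driven without the option parser. The file name below is hypothetical and the import location is not shown in the diff; output_format='adr' routes through the new create_adr_reports() helper, which writes its report folders into the current working directory:

    import sys

    # Assumes convert() has been imported from the command-line module this diff edits;
    # the input file name is a placeholder for a Pro Tools text export.
    convert(input_file="MyShow_ADR_Export.txt",
            output_format="adr",          # handled by create_adr_reports()
            include_muted=False,
            progress=True,
            output=sys.stdout, log_output=sys.stderr,
            warnings=True)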
@@ -170,38 +222,7 @@ def convert(input_file, output_format='fmpxml', start=None, end=None, select_ree
             dump_csv(parsed['events'])
 
         elif output_format == 'adr':
-            lines = [e for e in parsed['events'] if 'ADR' in e.keys()]
-
-            print_section_header_style("Creating PDF Reports")
-
-            print_status_style("Creating ADR Report")
-            output_summary(lines)
-
-            print_status_style("Creating Line Count")
-            output_line_count(lines)
-
-            print_status_style("Creating Supervisor Logs directory and reports")
-            os.makedirs("Supervisor Logs", exist_ok=True)
-            os.chdir("Supervisor Logs")
-            output_supervisor_1pg(lines)
-
-            os.chdir("..")
-            print_status_style("Creating Director's Logs director and reports")
-            os.makedirs("Director Logs", exist_ok=True)
-            os.chdir("Director Logs")
-            output_summary(lines, by_character=True)
-
-            os.chdir("..")
-            print_status_style("Creating CSV outputs")
-            os.makedirs("CSV", exist_ok=True)
-            os.chdir("CSV")
-            output_adr_csv(lines)
-
-            os.chdir("..")
-            print_status_style("Creating Scripts directory and reports")
-            os.makedirs("Talent Scripts", exist_ok=True)
-            os.chdir("Talent Scripts")
-            output_talent_sides(lines)
+            create_adr_reports(parsed)
 
         elif output_format == 'fmpxml':
             if xsl is None:
@@ -212,22 +233,14 @@ def convert(input_file, output_format='fmpxml', start=None, end=None, select_ree
             fmp_transformed_dump(parsed, input_file, xsl, output)
 
 
-def output_adr_csv(lines):
-    adr_keys = ('Title', 'Cue Number', 'Character Name', 'Reel', 'Version', 'Line',
-                'Start', 'Finish', 'Reason', 'Note', 'TV', 'Version')
-    reels = set([ln['Reel'] for ln in lines])
-    reels.add(None)
-    for n in [n['Character Number'] for n in lines]:
-        for reel in reels:
-            these_lines = [ln for ln in lines
-                           if ln['Character Number'] == n and
-                           ln.get('Reel', None) == reel]
-
-            if len(these_lines) == 0:
-                continue
-
-            outfile_name = "%s_%s_%s_%s.csv" % (these_lines[0]['Title'],
-                                                n, these_lines[0]['Character Name'], reel,)
-
-            with open(outfile_name, mode='w', newline='') as outfile:
-                dump_keyed_csv(these_lines, adr_keys, outfile)
+def parse_text_export(file):
+    ast = ptulsconv.protools_text_export_grammar.parse(file.read())
+    dict_parser = ptulsconv.DictionaryParserVisitor()
+    parsed = dict_parser.visit(ast)
+    print_status_style('Session title: %s' % parsed['header']['session_name'])
+    print_status_style('Session timecode format: %f' % parsed['header']['timecode_format'])
+    print_status_style('Fount %i tracks' % len(parsed['tracks']))
+    print_status_style('Found %i markers' % len(parsed['markers']))
+    return parsed
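A minimal sketch of the newly extracted parse_text_export() used on its own (file name hypothetical; the dictionary keys are the ones this helper itself reads):

    # Assumes parse_text_export() is imported from the same module as convert().
    with open("MyShow_Session.txt", "r") as f:
        parsed = parse_text_export(f)

    # The returned dict exposes at least 'header', 'tracks' and 'markers',
    # as used by the status lines printed above.
    print("%i tracks, %i markers" % (len(parsed['tracks']), len(parsed['markers'])))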
@@ -153,10 +153,12 @@ class TagInterpreter(Transformation):
         print_section_header_style('Parsing Tags')
 
         self.title_tags = self.parse_tags(input_dict['header']['session_name'])
-        self.markers = sorted(input_dict['markers'], key=lambda m: m['location_decoded']['frame_count'])
+        self.markers = sorted(input_dict['markers'],
+                              key=lambda m: m['location_decoded']['frame_count'])
 
         if self.show_progress:
-            track_iter = tqdm(input_dict['tracks'], desc="Reading tracks...", unit='Track')
+            track_iter = tqdm(input_dict['tracks'],
+                              desc="Reading tracks...", unit='Track')
         else:
             track_iter = input_dict['tracks']
 
@@ -164,8 +166,10 @@ class TagInterpreter(Transformation):
             if 'Muted' in track['state'] and self.ignore_muted:
                 continue
 
-            track_tags = self.parse_tags(track['name'], parent_track_name=track['name'])
-            comment_tags = self.parse_tags(track['comments'], parent_track_name=track['name'])
+            track_tags = self.parse_tags(track['name'],
+                                         parent_track_name=track['name'])
+            comment_tags = self.parse_tags(track['comments'],
+                                           parent_track_name=track['name'])
             track_context_tags = track_tags['tags']
             track_context_tags.update(comment_tags['tags'])
 
@@ -178,7 +182,8 @@ class TagInterpreter(Transformation):
                                             clip_time=clip['start_time'])
 
                 if clip_tags['mode'] == 'Normal':
-                    event = self.decorate_event(clip, clip_tags, input_dict['header'], track_context_tags, track_tags)
+                    event = self.decorate_event(clip, clip_tags, input_dict['header'],
+                                                track_context_tags, track_tags)
                     self.transformed.append(event)
 
                 elif clip_tags['mode'] == 'Append':