Mirror of https://github.com/iluvcapra/ptulsconv.git, synced 2025-12-31 17:00:46 +00:00
Compare commits
48 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 3226e63f1d | |
| | 3a597b5046 | |
| | b5d9b5acc2 | |
| | 9f2a080f6b | |
| | 1903e2a1f9 | |
| | 69491d98d7 | |
| | 7816f08912 | |
| | 44388c6b7d | |
| | 9daedca4de | |
| | 93a014bdc0 | |
| | 9bb2ae136a | |
| | 3718541e09 | |
| | a58451d225 | |
| | 319ef3800d | |
| | 1d63234447 | |
| | edb641b7ec | |
| | eaf24ad6a8 | |
| | 6d5cd04c50 | |
| | 013081ef96 | |
| | d57ee88bc2 | |
| | d29a08dadf | |
| | e806de0c1f | |
| | 22edecdbbf | |
| | 7c8a74aed9 | |
| | 96a4cdb612 | |
| | 8720087bb2 | |
| | f734aae227 | |
| | 17e9c77ed7 | |
| | fc7dde8fd6 | |
| | 3021721299 | |
| | cf9be9abf1 | |
| | 73936510cd | |
| | d118554443 | |
| | 22c205d638 | |
| | 36ac320b44 | |
| | 6fe0ff4314 | |
| | a23119eb8c | |
| | af29318a0c | |
| | 80f1114f05 | |
| | 9e8518a321 | |
| | 5ff1df7273 | |
| | e05e56bcb5 | |
| | 4eba5b6b17 | |
| | 3e6e2b5776 | |
| | b9a2db2037 | |
| | 4943277bed | |
| | 83ca77b305 | |
| | 2995840f9f | |
.github/workflows/pythonpublish.yml (new file, vendored, 29 lines)
@@ -0,0 +1,29 @@
name: Upload Python Package

on:
  release:
    types: [created]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v1
    - name: Set up Python
      uses: actions/setup-python@v1
      with:
        python-version: '3.x'
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install setuptools wheel twine
    - name: Install parsimonious
      run: |
        pip install parsimonious
    - name: Build and publish
      env:
        TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
        TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
      run: |
        python setup.py sdist bdist_wheel
        twine upload dist/*
.gitignore (vendored, 1 line changed)
@@ -102,3 +102,4 @@ venv.bak/

# mypy
.mypy_cache/
.DS_Store
.travis.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
language: python
python:
- "3.7"
- "3.8"
script:
- "python -m unittest discover tests"
install:
- "pip install setuptools"
- "pip install parsimonious tqdm"
README.md
@@ -1,3 +1,8 @@
[](https://travis-ci.com/iluvcapra/ptulsconv)
  [](https://pypi.org/project/ptulsconv/) 


# ptulsconv
Read Pro Tools text exports and generate XML, JSON, reports

@@ -109,7 +114,3 @@ cues will be applied (later clips having precedence). The clips need not be touc
into a single row of the output. The start time of the first clip will become the start time of the row, and the finish
time of the last clip will become the finish time of the row.

## Other Projects

This project is under construction. Look at [Pro Tools Text](https://github.com/iluvcapra/ProToolsText)
for a working solution at this time.
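The clip-combining rule in the README hunk above can be reduced to a few lines of Python. This is an illustrative sketch only, with simplified placeholder dicts rather than ptulsconv's real event structures; the sample values come from the "Tag Tests" fixture later in this diff:

```python
# Minimal sketch of the "&"-append rule described in the README: consecutive
# clips whose names start with "&" fold into the previous row; the row keeps
# the first clip's start, takes the last clip's finish, concatenates the
# names, and later clips' tags take precedence.
def merge_appended_clips(clips):
    rows = []
    for clip in clips:
        name = clip["name"]
        if name.startswith("&") and rows:
            prev = rows[-1]
            prev["name"] += " " + name.lstrip("& ")
            prev["finish"] = clip["finish"]            # finish of the last clip
            prev["tags"].update(clip.get("tags", {}))  # later clips win
        else:
            rows.append({"name": name,
                         "start": clip["start"],       # start of the first clip
                         "finish": clip["finish"],
                         "tags": dict(clip.get("tags", {}))})
    return rows

rows = merge_appended_clips([
    {"name": "Dolor sic amet", "start": "01:00:10:00", "finish": "01:00:20:00", "tags": {"X": "302"}},
    {"name": "& the rain in spain", "start": "01:00:20:00", "finish": "01:00:25:00", "tags": {"ABC": "ABC"}},
])
# rows[0]["name"]   -> "Dolor sic amet the rain in spain"
# rows[0]["finish"] -> "01:00:25:00"
```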
man/ptulsconv.1 (new file, 38 lines)
@@ -0,0 +1,38 @@
.\" Manpage for ptulsconv
.\" Contact https://github.com/iluvcapra/ptulsconv
.TH ptulsconv 1 "15 May 2020" "0.4.0" "ptulsconv man page"
.SH NAME
.BR "ptulsconv" " \- convert
.IR "Avid Pro Tools" " text exports"
.SH SYNOPSIS
ptulsconv [OPTIONS] Export.txt
.SH DESCRIPTION
Convert a Pro Tools text export into a flat list of clip names with timecodes. A tagging
language is interpreted to add columns and type the data. The default output format is
an XML file for import into Filemaker Pro.
.SH OPTIONS
.IP "-h, --help"
show a help message and exit.
.TP
.RI "-i " "TC"
Drop events before this timecode.
.TP
.RI "-o " "TC"
Drop events after this timecode.
.TP
.RI "-m "
Include muted clips.
.TP
.RI "--json "
Output a JSON document instead of XML. (--xform will have no effect.)
.TP
.RI "--xform " "NAME"
Convert the output with a built-in output transform.
.TP
.RI "--show-available-tags"
Print a list of tags that are interpreted and exit.
.TP
.RI "--show-available-transforms"
Print a list of built-in output transforms and exit.
.SH AUTHOR
Jamie Hardt (contact at https://github.com/iluvcapra/ptulsconv)
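For orientation, the options documented above map onto the `convert()` call that `ptulsconv.__main__` makes; its signature appears in the `ptulsconv/commands.py` hunks later in this diff. A rough library-level sketch, assuming an export named `Export.txt` and the bundled `SRT` transform (both chosen only for illustration):

```python
# Sketch only: drives ptulsconv.commands.convert() the way the CLI does.
# "Export.txt" and the "SRT" transform are assumptions for illustration;
# any text export and any built-in transform name will do.
import sys
from ptulsconv.commands import convert

convert(input_file="Export.txt",
        output_format="fmpxml",     # the default: FMPXMLRESULT for FileMaker Pro
        start="01:00:00:00",        # like -i: drop events before this timecode
        end="02:00:00:00",          # like -o: drop events after this timecode
        include_muted=False,        # -m would set this True
        xsl="SRT",                  # like --xform SRT (ptulsconv/xslt/SRT.xsl)
        output=sys.stdout,
        log_output=sys.stderr)
```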
ptulsconv/__init__.py
@@ -2,6 +2,6 @@ from .ptuls_grammar import protools_text_export_grammar
from .ptuls_parser_visitor import DictionaryParserVisitor
from .transformations import TimecodeInterpreter

__version__ = '0.0.1'
__version__ = '0.5.1'
__author__ = 'Jamie Hardt'
__license__ = 'MIT'
ptulsconv/__main__.py
@@ -1,36 +1,103 @@
from ptulsconv.commands import convert, dump_field_map
from optparse import OptionParser
from ptulsconv.commands import convert, dump_field_map, dump_xform_options
from ptulsconv import __name__, __version__, __author__
from optparse import OptionParser, OptionGroup
from .reporting import print_status_style, print_banner_style, print_section_header_style, print_fatal_error
import datetime
import sys

import traceback


def main():
    parser = OptionParser()
    parser.usage = "ptulsconv TEXT_EXPORT.txt"
    parser.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode, and offset"
                                                 " all events relative to this timecode.", metavar='TC')
    parser.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.", metavar='TC')
    parser.add_option('-P', '--progress', default=False, action='store_true', dest='show_progress',
                      help='Show progress bar.')
    parser.add_option('-m', '--include-muted', default=False, action='store_true', dest='include_muted',
                      help='Read muted clips.')

    parser.add_option('--show-tags', dest='show_tags',
                      action='store_true',
                      default=False, help='Display tag mappings for the FMP XML output style and exit.')
    filter_opts = OptionGroup(title='Filtering Options', parser=parser)

    filter_opts.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode.",
                           metavar='TC')
    filter_opts.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.",
                           metavar='TC')
    # parser.add_option('-P', '--progress', default=False, action='store_true', dest='show_progress',
    #                   help='Show progress bar.')
    filter_opts.add_option('-m', '--include-muted', default=False, action='store_true', dest='include_muted',
                           help='Include muted clips.')

    filter_opts.add_option('-R', '--reel', dest='select_reel', help="Output only events in reel N, and recalculate "
                                                                    " start times relative to that reel's start time.",
                           default=None, metavar='N')

    parser.add_option_group(filter_opts)

    output_opts = OptionGroup(title="Output Options", parser=parser)
    output_opts.add_option('--json', default=False, action='store_true', dest='write_json',
                           help='Output a JSON document instead of XML. If this option is enabled, --xform will have '
                                'no effect.')

    output_opts.add_option('--xform', dest='xslt', help="Convert with built-in XSLT transform.",
                           default=None, metavar='NAME')

    output_opts.add_option('--show-available-tags', dest='show_tags',
                           action='store_true',
                           default=False, help='Display tag mappings for the FMP XML output style and exit.')

    output_opts.add_option('--show-available-transforms', dest='show_transforms',
                           action='store_true',
                           default=False, help='Display available built-in XSLT transforms.')

    parser.add_option_group(output_opts)

    (options, args) = parser.parse_args(sys.argv)

    print_banner_style("%s %s (c) 2020 %s. All rights reserved." % (__name__, __version__, __author__))

    print_section_header_style("Startup")
    print_status_style("This run started %s" % (datetime.datetime.now().isoformat()))

    if options.show_tags:
        dump_field_map('ADR')
        sys.exit(0)

    if options.show_transforms:
        dump_xform_options()
        sys.exit(0)

    if len(args) < 2:
        print("Error: No input file", file=sys.stderr)
        print_fatal_error("Error: No input file")
        parser.print_help(sys.stderr)
        sys.exit(22)

    convert(input_file=args[1], start=options.in_time, end=options.out_time, include_muted=options.include_muted,
            progress=options.show_progress, output=sys.stdout)
    print_status_style("Input file is %s" % (args[1]))
    if options.in_time:
        print_status_style("Start at time %s" % (options.in_time))
    else:
        print_status_style("No start time given.")

    if options.out_time:
        print_status_style("End at time %s." % (options.out_time))
    else:
        print_status_style("No end time given.")

    if options.include_muted:
        print_status_style("Muted regions are included.")
    else:
        print_status_style("Muted regions are ignored.")

    try:
        output_format = 'fmpxml'
        if options.write_json:
            output_format = 'json'

        convert(input_file=args[1], output_format=output_format, start=options.in_time, end=options.out_time,
                include_muted=options.include_muted, xsl=options.xslt, select_reel=options.select_reel,
                progress=False, output=sys.stdout, log_output=sys.stderr)
    except FileNotFoundError as e:
        print_fatal_error("Error trying to read input file")
        raise e
    except Exception as e:
        print_fatal_error("Error trying to convert file")
        print("\033[31m" + e.__repr__() + "\033[0m", file=sys.stderr)
        print(traceback.format_exc())


if __name__ == "__main__":
ptulsconv/commands.py
@@ -1,10 +1,15 @@
import io
import json
import os
import os.path
import sys
from xml.etree.ElementTree import TreeBuilder, tostring
import subprocess
import pathlib
import ptulsconv

from .reporting import print_section_header_style, print_status_style

# field_map maps tags in the text export to fields in FMPXMLRESULT
# - tuple field 0 is a list of tags, the first tag with contents will be used as source
# - tuple field 1 is the field in FMPXMLRESULT
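The comment above describes the shape of each field_map entry, and the dump_field_map code further down also reads a third element (a type or callable). A hypothetical entry and lookup, with made-up tag and column names purely for illustration (the real maps, such as the 'ADR' map, ship with ptulsconv):

```python
# Hypothetical sketch of the field_map shape the comments above describe.
# Tag names and column names here are invented; they are not ptulsconv's.
field_map_example = [
    (['Sc', 'Scene'], 'Scene', str),          # first tag with contents wins
    (['QN', 'CueNumber'], 'Cue Number', str),
]

def column_value(event_tags: dict, entry):
    tags, column, convert_type = entry
    for tag in tags:                          # earlier tags take precedence
        if event_tags.get(tag):
            return column, convert_type(event_tags[tag])
    return column, None

print(column_value({'Scene': '42A'}, field_map_example[0]))  # ('Scene', '42A')
```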
@@ -89,6 +94,21 @@ def fmp_dump(data, input_file_name, output):
    output.write(xmlstr)


import glob

xslt_path = os.path.join(pathlib.Path(__file__).parent.absolute(), 'xslt')

def xform_options():
    return glob.glob(os.path.join(xslt_path, "*.xsl"))

def dump_xform_options(output=sys.stdout):
    print("# Available transforms:", file=output)
    print("# Transform dir: %s" % (xslt_path), file=output)
    for f in xform_options():
        base = os.path.basename(f)
        name, _ = os.path.splitext(base)
        print("# " + name, file=output)

def dump_field_map(field_map_name, output=sys.stdout):
    output.write("# Map of Tag fields to XML output columns\n")
    output.write("# (in order of precedence)\n")
@@ -104,20 +124,45 @@ def dump_field_map(field_map_name, output=sys.stdout):

    for n, field in enumerate(field_map):
        for tag in field[0]:
            output.write("# %-24s-> %-20s | %-8s| %-7i\n" % (tag[:24], field[1][:20], field[2].__name__, n+1 ))
            output.write("# %-24s-> %-20s | %-8s| %-7i\n" % (tag[:24], field[1][:20], field[2].__name__, n + 1))


def convert(input_file, output_format='fmpxml', start=None, end=None, progress=False, include_muted=False,
            output=sys.stdout):
def fmp_transformed_dump(data, input_file, xsl_name, output):
    pipe = io.StringIO()
    print_status_style("Generating base XML")
    fmp_dump(data, input_file, pipe)

    strdata = pipe.getvalue()
    print_status_style("Base XML size %i" % (len(strdata)))

    print_status_style("Running xsltproc")

    xsl_path = os.path.join(pathlib.Path(__file__).parent.absolute(), 'xslt', xsl_name + ".xsl")
    print_status_style("Using xsl: %s" % (xsl_path))
    subprocess.run(['xsltproc', xsl_path, '-'], input=strdata, text=True,
                   stdout=output, shell=False, check=True)


def convert(input_file, output_format='fmpxml', start=None, end=None, select_reel=None,
            progress=False, include_muted=False, xsl=None,
            output=sys.stdout, log_output=sys.stderr):
    with open(input_file, 'r') as file:
        print_section_header_style('Parsing')
        ast = ptulsconv.protools_text_export_grammar.parse(file.read())
        dict_parser = ptulsconv.DictionaryParserVisitor()
        parsed = dict_parser.visit(ast)

        tcxform = ptulsconv.transformations.TimecodeInterpreter()
        tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress, ignore_muted=(not include_muted))
        print_status_style('Session title: %s' % parsed['header']['session_name'])
        print_status_style('Session timecode format: %f' % parsed['header']['timecode_format'])
        print_status_style('Found %i tracks' % len(parsed['tracks']))
        print_status_style('Found %i markers' % len(parsed['markers']))

        parsed = tagxform.transform(tcxform.transform(parsed))
        tcxform = ptulsconv.transformations.TimecodeInterpreter()
        tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress, ignore_muted=(not include_muted),
                                                            log_output=log_output)

        parsed = tcxform.transform(parsed)
        parsed = tagxform.transform(parsed)

        if start is not None and end is not None:
            start_fs = tcxform.convert_time(start,
@@ -131,7 +176,17 @@ def convert(input_file, output_format='fmpxml', start=None, end=None, progress=F
            subclipxform = ptulsconv.transformations.SubclipOfSequence(start=start_fs, end=end_fs)
            parsed = subclipxform.transform(parsed)

        if select_reel is not None:
            reel_xform = ptulsconv.transformations.SelectReel(reel_num=select_reel)
            parsed = reel_xform.transform(parsed)


        if output_format == 'json':
            json.dump(parsed, output)
        elif output_format == 'fmpxml':
            fmp_dump(parsed, input_file, output)
            if xsl is None:
                fmp_dump(parsed, input_file, output)
            else:
                print_section_header_style("Performing XSL Translation")
                print_status_style("Using builtin translation: %s" % (xsl))
                fmp_transformed_dump(parsed, input_file, xsl, output)
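dump_xform_options above derives each transform name from the basename of an .xsl file in the package's xslt directory. A minimal usage sketch (the directory path and the exact set of names depend on the installed package; SRT.xsl appears later in this diff):

```python
# Minimal usage sketch: list the built-in transforms the way
# `ptulsconv --show-available-transforms` does.
from ptulsconv.commands import dump_xform_options

dump_xform_options()
# Prints "# Available transforms:", the transform directory, and one
# "# <name>" line per bundled stylesheet, e.g. "# SRT" for ptulsconv/xslt/SRT.xsl.
```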
ptulsconv/ptuls_grammar.py
@@ -45,7 +45,7 @@ protools_text_export_grammar = Grammar(
    track_state_list = (track_state " ")*

    track_state = "Solo" / "Muted" / "Inactive"
    track_state = "Solo" / "Muted" / "Inactive" / "Hidden"

    track_clip_entry = integer_value isp fs
                       integer_value isp fs
ptulsconv/reporting.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import sys

def print_banner_style(str):
    if sys.stderr.isatty():
        sys.stderr.write("\n\033[1m%s\033[0m\n\n" % str)
    else:
        sys.stderr.write("\n%s\n\n" % str)

def print_section_header_style(str):
    if sys.stderr.isatty():
        sys.stderr.write("\n\033[4m%s\033[0m\n\n" % str)
    else:
        sys.stderr.write("%s\n\n" % str)

def print_status_style(str):
    if sys.stderr.isatty():
        sys.stderr.write("\033[3m - %s\033[0m\n" % str)
    else:
        sys.stderr.write(" - %s\n" % str)

def print_advisory_tagging_error(failed_string, position, parent_track_name=None, clip_time=None):
    if sys.stderr.isatty():
        sys.stderr.write("\n")
        sys.stderr.write(" ! \033[33;1mTagging error: \033[0m")
        ok_string = failed_string[:position]
        not_ok_string = failed_string[position:]
        sys.stderr.write("\033[32m\"%s\033[31;1m%s\"\033[0m\n" % (ok_string, not_ok_string))

        if parent_track_name is not None:
            sys.stderr.write(" ! > On track \"%s\"\n" % (parent_track_name))

        if clip_time is not None:
            sys.stderr.write(" ! > In clip name at %s\n" % (clip_time))
    else:
        sys.stderr.write("\n")
        sys.stderr.write(" ! Tagging error: \"%s\"\n" % failed_string)
        sys.stderr.write(" ! %s _______________⬆\n" % (" " * position))

        if parent_track_name is not None:
            sys.stderr.write(" ! > On track \"%s\"\n" % (parent_track_name))

        if clip_time is not None:
            sys.stderr.write(" ! > In clip name at %s\n" % (clip_time))

    sys.stderr.write("\n")

def print_fatal_error(str):
    if sys.stderr.isatty():
        sys.stderr.write("\n\033[5;31;1m*** %s ***\033[0m\n" % str)
    else:
        sys.stderr.write("\n%s\n" % str)
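A quick usage sketch of these helpers, using nothing beyond the functions defined above; when stderr is not attached to a terminal the ANSI escape codes are omitted and the messages come out as plain text:

```python
# Usage sketch for the reporting helpers defined above.
from ptulsconv.reporting import print_banner_style, print_section_header_style, print_status_style

print_banner_style("ptulsconv 0.5.1")   # bold banner (plain text when not a TTY)
print_section_header_style("Parsing")   # underlined section header
print_status_style("Found 8 tracks")    # " - Found 8 tracks"
```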
ptulsconv/transformations.py
@@ -3,9 +3,11 @@ from parsimonious import Grammar, NodeVisitor
from parsimonious.exceptions import IncompleteParseError
import math
import sys
from .reporting import print_advisory_tagging_error, print_section_header_style, print_status_style

from tqdm import tqdm


class Transformation:
    def transform(self, input_dict) -> dict:
        return input_dict
@@ -17,19 +19,27 @@ class TimecodeInterpreter(Transformation):
        self.apply_session_start = False

    def transform(self, input_dict: dict) -> dict:
        print_section_header_style('Converting Timecodes')

        retval = super().transform(input_dict)
        rate = input_dict['header']['timecode_format']
        start_tc = self.convert_time(input_dict['header']['start_timecode'], rate,
                                     drop_frame=input_dict['header']['timecode_drop_frame'])

        retval['header']['start_timecode_decoded'] = start_tc
        print_status_style('Converted start timecode.')

        retval['tracks'] = self.convert_tracks(input_dict['tracks'], timecode_rate=rate,
                                               drop_frame=retval['header']['timecode_drop_frame'])

        print_status_style('Converted clip timecodes for %i tracks.' % len(retval['tracks']))

        for marker in retval['markers']:
            marker['location_decoded'] = self.convert_time(marker['location'], rate,
                                                           drop_frame=retval['header']['timecode_drop_frame'])

        print_status_style('Converted %i markers.' % len(retval['markers']))

        return retval

    def convert_tracks(self, tracks, timecode_rate, drop_frame):
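convert_time itself is not shown in this diff, but the surrounding code implies it turns a timecode string into a dict carrying a frame_count. As a rough, non-drop-frame sketch only (an assumption for orientation, not ptulsconv's actual conversion, which also handles drop-frame timecode):

```python
# Hedged sketch of a non-drop-frame timecode-to-frame-count conversion;
# illustrative only, not ptulsconv's convert_time implementation.
def frame_count(timecode: str, timecode_format: float) -> int:
    hh, mm, ss, ff = (int(x) for x in timecode.split(":"))
    fps = round(timecode_format)   # e.g. 23.976 counts 24 frames per second
    return ((hh * 3600 + mm * 60 + ss) * fps) + ff

print(frame_count("01:00:10:00", 23.976))   # 86640
```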
@@ -121,16 +131,19 @@ class TagInterpreter(Transformation):
        def generic_visit(self, node, visited_children):
            return visited_children or node

    def __init__(self, ignore_muted=True, show_progress=False):
    def __init__(self, ignore_muted=True, show_progress=False, log_output=sys.stderr):
        self.visitor = TagInterpreter.TagListVisitor()
        self.ignore_muted = ignore_muted
        self.show_progress = show_progress
        self.log_output = log_output

    def transform(self, input_dict: dict) -> dict:
        transformed = list()
        timespan_rules = list()

        title_tags = self.parse_tags(input_dict['header']['session_name'], "<Session Name>")
        print_section_header_style('Parsing Tags')

        title_tags = self.parse_tags(input_dict['header']['session_name'])
        markers = sorted(input_dict['markers'], key=lambda m: m['location_decoded']['frame_count'])

        if self.show_progress:
@@ -142,8 +155,8 @@ class TagInterpreter(Transformation):
            if 'Muted' in track['state'] and self.ignore_muted:
                continue

            track_tags = self.parse_tags(track['name'], "<Track %s>" % (track['name']))
            comment_tags = self.parse_tags(track['comments'], "<Track %s>" % (track['name']))
            track_tags = self.parse_tags(track['name'], parent_track_name=track['name'])
            comment_tags = self.parse_tags(track['comments'], parent_track_name=track['name'])
            track_context_tags = track_tags['tags']
            track_context_tags.update(comment_tags['tags'])

@@ -151,8 +164,8 @@ class TagInterpreter(Transformation):
                if clip['state'] == 'Muted' and self.ignore_muted:
                    continue

                clip_tags = self.parse_tags(clip['clip_name'],
                                            "<Track %s/Clip event number %i at %s>" % (track['name'], clip['event'], clip['start_time']))
                clip_tags = self.parse_tags(clip['clip_name'], parent_track_name=track['name'],
                                            clip_time=clip['start_time'])
                clip_start = clip['start_time_decoded']['frame_count']
                if clip_tags['mode'] == 'Normal':
                    event = dict()
@@ -165,6 +178,7 @@ class TagInterpreter(Transformation):

                    event['PT.Track.Name'] = track_tags['line']
                    event['PT.Session.Name'] = title_tags['line']
                    event['PT.Session.TimecodeFormat'] = input_dict['header']['timecode_format']
                    event['PT.Clip.Number'] = clip['event']
                    event['PT.Clip.Name'] = clip_tags['line']
                    event['PT.Clip.Start'] = clip['start_time']
@@ -180,15 +194,22 @@ class TagInterpreter(Transformation):
                    assert len(transformed) > 0, "First clip is in '&'-Append mode, fatal error."

                    transformed[-1].update(clip_tags['tags'])
                    transformed[-1]['event_name'] = transformed[-1]['event_name'] + " " + clip_tags['line']
                    transformed[-1]['PT.Clip.End_Frames'] = clip['end_time_decoded']['frame_count']
                    transformed[-1]['PT.Clip.Name'] = transformed[-1]['PT.Clip.Name'] + " " + clip_tags['line']
                    transformed[-1]['PT.Clip.Finish_Frames'] = clip['end_time_decoded']['frame_count']
                    transformed[-1]['PT.Clip.Finish'] = clip['end_time']
                    transformed[-1]['PT.Clip.Finish_Seconds'] = clip['end_time_decoded']['frame_count'] / \
                        input_dict['header']['timecode_format']


                elif clip_tags['mode'] == 'Timespan':
                    rule = dict(start_time=clip_start,
                    rule = dict(start_time_literal=clip['start_time'],
                                start_time=clip_start,
                                start_time_seconds=clip_start / input_dict['header']['timecode_format'],
                                end_time=clip['end_time_decoded']['frame_count'],
                                tags=clip_tags['tags'])
                    timespan_rules.append(rule)

        print_status_style('Processed %i clips' % len(transformed))
        return dict(header=input_dict['header'], events=transformed)

    def effective_timespan_tags_at_time(_, rules, time) -> dict:
@@ -196,7 +217,17 @@ class TagInterpreter(Transformation):

        for rule in rules:
            if rule['start_time'] <= time <= rule['end_time']:
                tag_keys = list(rule['tags'].keys())
                tag_times = dict()
                for key in tag_keys:
                    key: str
                    time_value = rule['start_time']
                    tag_times["Timespan." + key + ".Start_Frames"] = time_value
                    tag_times["Timespan." + key + ".Start"] = rule['start_time_literal']
                    tag_times["Timespan." + key + ".Start_Seconds"] = rule['start_time_seconds']

                retval.update(rule['tags'])
                retval.update(tag_times)

        return retval
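As the hunks above show, a clip parsed in Timespan mode becomes a rule dict, and any event falling inside the rule's range inherits the rule's tags plus derived Timespan.<Tag>.Start* fields. A small illustration, mirroring the rule shape built in transform() above (the values are made up for the example):

```python
# Illustration only: one Timespan rule and the keys an in-range event inherits.
rule = dict(start_time_literal='01:00:10:00',
            start_time=240,            # frames
            start_time_seconds=10.0,
            end_time=480,
            tags={'Reel': '1'})

# An event starting at frame 300 falls inside [240, 480], so it would pick up:
#   'Reel'                         -> '1'
#   'Timespan.Reel.Start_Frames'   -> 240
#   'Timespan.Reel.Start'          -> '01:00:10:00'
#   'Timespan.Reel.Start_Seconds'  -> 10.0
```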
@@ -204,8 +235,8 @@ class TagInterpreter(Transformation):
        retval = dict()

        for marker in markers:
            marker_name_tags = self.parse_tags(marker['name'], "Marker %i" % (marker['number']))
            marker_comment_tags = self.parse_tags(marker['comments'], "Marker %i" % (marker['number']))
            marker_name_tags = self.parse_tags(marker['name'], marker_index=marker['number'])
            marker_comment_tags = self.parse_tags(marker['comments'], marker_index=marker['number'])
            effective_tags = marker_name_tags['tags']
            effective_tags.update(marker_comment_tags['tags'])

@@ -215,24 +246,38 @@ class TagInterpreter(Transformation):
                break
        return retval

    def report(self, mesg, *args):
        print(mesg % ( args) , file=sys.stderr)
        sys.stderr.write("\033[F")
        sys.stderr.write("\033[K")

    def parse_tags(self, source, context_str=None):
    def parse_tags(self, source, parent_track_name=None, clip_time=None, marker_index=None):
        try:
            parse_tree = self.tag_grammar.parse(source)
            return self.visitor.visit(parse_tree)
        except IncompleteParseError as e:
            if context_str is not None:
                self.report("Error reading tags in: ")
            print_advisory_tagging_error(failed_string=source,
                                         parent_track_name=parent_track_name,
                                         clip_time=clip_time, position=e.pos)

            trimmed_source = source[:e.pos]
            parse_tree = self.tag_grammar.parse(trimmed_source)
            return self.visitor.visit(parse_tree)


class SelectReel(Transformation):

    def __init__(self, reel_num):
        self.reel_num = reel_num

    def transform(self, input_dict) -> dict:
        out_events = []
        for event in input_dict['events']:
            if event['Reel'] == str(self.reel_num):
                offset = event.get('Timespan.Reel.Start_Frames', 0)
                offset_sec = event.get('Timespan.Reel.Start_Seconds', 0.)
                event['PT.Clip.Start_Frames'] -= offset
                event['PT.Clip.Finish_Frames'] -= offset
                event['PT.Clip.Start_Seconds'] -= offset_sec
                event['PT.Clip.Finish_Seconds'] -= offset_sec
                out_events.append(event)

        return dict(header=input_dict['header'], events=out_events)


class SubclipOfSequence(Transformation):
@@ -243,8 +288,8 @@ class SubclipOfSequence(Transformation):

    def transform(self, input_dict: dict) -> dict:
        out_events = []
        offset = self.start
        offset_sec = self.start / input_dict['header']['timecode_format']
        offset = 0 #self.start
        offset_sec = 0. #self.start / input_dict['header']['timecode_format']
        for event in input_dict['events']:
            if self.start <= event['PT.Clip.Start_Frames'] <= self.end:
                e = event
@@ -254,4 +299,4 @@ class SubclipOfSequence(Transformation):
                e['PT.Clip.Finish_Seconds'] = event['PT.Clip.Finish_Seconds'] - offset_sec
                out_events.append(e)

        return dict(events=out_events)
        return dict(header=input_dict['header'], events=out_events)
@@ -37,8 +37,16 @@
<AvProp id="ATTR" name="OMFI:ATTB:Kind" type="int32">2</AvProp>
<AvProp id="ATTR" name="OMFI:ATTB:Name" type="string">_ATN_CRM_COM</AvProp>
<AvProp id="ATTR" name="OMFI:ATTB:StringAttribute" type="string">
<xsl:value-of select="concat(fmp:COL[15]/fmp:DATA, ': ', fmp:COL[21]/fmp:DATA)"/>
[Reason: <xsl:value-of select="fmp:COL[18]/fmp:DATA" />]</AvProp>
<xsl:value-of select="concat('(',fmp:COL[14]/fmp:DATA,') ',fmp:COL[15]/fmp:DATA, ': ', fmp:COL[21]/fmp:DATA, ' ')"/>
<xsl:choose>
  <xsl:when test="fmp:COL[18]/fmp:DATA != ''">[Reason: <xsl:value-of select="fmp:COL[18]/fmp:DATA" />]
  </xsl:when>
  <xsl:otherwise> </xsl:otherwise>
</xsl:choose>
<xsl:choose>
  <xsl:when test="fmp:COL[23]/fmp:DATA != ''">[Note: <xsl:value-of select="fmp:COL[23]/fmp:DATA" />]</xsl:when>
</xsl:choose>
</AvProp>
</ListElem>
<ListElem>
<AvProp id="ATTR" name="OMFI:ATTB:Kind" type="int32">2</AvProp>
ptulsconv/xslt/SRT.xsl (new file, 30 lines)
@@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>
<xsl:transform version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
    xmlns:fmp="http://www.filemaker.com/fmpxmlresult">

    <xsl:output method="text" encoding="windows-1252"/>
    <xsl:template match="/">

        <xsl:for-each select="/fmp:FMPXMLRESULT/fmp:RESULTSET/fmp:ROW">
            <xsl:sort data-type="number" select="number(fmp:COL[9]/fmp:DATA)" />
            <xsl:value-of select="concat(position() ,'
')" />
            <xsl:value-of select="concat(format-number(floor(number(fmp:COL[9]/fmp:DATA) div 3600),'00'), ':')" />
            <xsl:value-of select="concat(format-number(floor(number(fmp:COL[9]/fmp:DATA) div 60),'00'), ':')" />
            <xsl:value-of select="concat(format-number(floor(number(fmp:COL[9]/fmp:DATA) mod 60),'00'), ',')" />
            <xsl:value-of select="format-number((number(fmp:COL[9]/fmp:DATA) - floor(number(fmp:COL[9]/fmp:DATA))) * 1000,'000')" />
            <xsl:text> --> </xsl:text>

            <xsl:value-of select="concat(format-number(floor(number(fmp:COL[10]/fmp:DATA) div 3600),'00'), ':')" />
            <xsl:value-of select="concat(format-number(floor(number(fmp:COL[10]/fmp:DATA) div 60),'00'), ':')" />
            <xsl:value-of select="concat(format-number(floor(number(fmp:COL[10]/fmp:DATA) mod 60),'00'), ',')" />
            <xsl:value-of select="format-number((number(fmp:COL[10]/fmp:DATA) - floor(number(fmp:COL[10]/fmp:DATA))) * 1000,'000')" />

            <xsl:value-of select="concat('
',fmp:COL[15]/fmp:DATA, ': ', fmp:COL[21]/fmp:DATA)"/>
            <xsl:value-of select="'

'" />

        </xsl:for-each>
    </xsl:template>


</xsl:transform>
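The stylesheet above builds SubRip-style HH:MM:SS,mmm stamps out of a seconds value in the FMPXMLRESULT columns. For orientation only, the standard arithmetic for that timestamp format written as a small Python helper (an illustration, not part of ptulsconv):

```python
# Standard SubRip (SRT) timestamp arithmetic, shown for orientation; the
# stylesheet above performs the same kind of splitting in XPath expressions.
def srt_timestamp(seconds: float) -> str:
    whole = int(seconds)
    millis = round((seconds - whole) * 1000)
    hh, rem = divmod(whole, 3600)
    mm, ss = divmod(rem, 60)
    return "%02d:%02d:%02d,%03d" % (hh, mm, ss, millis)

print(srt_timestamp(75.25))   # 00:01:15,250
```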
@@ -1,4 +0,0 @@
#!/bin/zsh
python3 setup.py build
python3 setup.py sdist bdist_wheel
python3 -m twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
setup.py (6 lines changed)
@@ -24,12 +24,16 @@ setup(name='ptulsconv',
          'Topic :: Multimedia',
          'Topic :: Multimedia :: Sound/Audio',
          "Programming Language :: Python :: 3.7",
          "Programming Language :: Python :: 3.8",
          "Development Status :: 4 - Beta",
          "Topic :: Text Processing :: Filters",
          "Topic :: Text Processing :: Markup :: XML"],
      packages=['ptulsconv'],
      keywords='text-processing parsers film tv editing editorial',
      install_requires=['parsimonious'],
      install_requires=['parsimonious', 'tqdm'],
      package_data={
          "ptulsconv": ["*.xsl"]
      },
      entry_points={
          'console_scripts': [
              'ptulsconv = ptulsconv.__main__:main'
tests/export_cases/Tag Tests/Tag Tests.ptx (new binary file)
Binary file not shown.
tests/export_cases/Tag Tests/Tag Tests.txt (new file, 100 lines)
@@ -0,0 +1,100 @@
SESSION NAME: Tag Tests
SAMPLE RATE: 48000.000000
BIT DEPTH: 24-bit
SESSION START TIMECODE: 01:00:00:00
TIMECODE FORMAT: 23.976 Frame
# OF AUDIO TRACKS: 8
# OF AUDIO CLIPS: 0
# OF AUDIO FILES: 0


P L U G - I N S L I S T I N G
MANUFACTURER PLUG-IN NAME VERSION FORMAT STEMS NUMBER OF INSTANCES


T R A C K L I S T I N G
TRACK NAME: Audio 1
COMMENTS:
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 Clip Name {X=300} 01:00:00:00 01:00:05:03 00:00:05:03 Unmuted


TRACK NAME: Audio 2 $A=1
COMMENTS:
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 Lorem ipsum {X=301} 01:00:00:00 01:00:05:03 00:00:05:03 Unmuted
1 2 Dolor sic amet {X=302} 01:00:10:00 01:00:20:00 00:00:10:00 Unmuted
1 3 & the rain in spain [ABC] 01:00:20:00 01:00:25:00 00:00:05:00 Unmuted


TRACK NAME: Audio 3 $A=2
COMMENTS: {B=100}
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 A 01:00:15:00 01:00:25:00 00:00:10:00 Unmuted
1 2 & B 01:00:25:00 01:00:35:00 00:00:10:00 Unmuted
1 3 & C 01:00:35:00 01:00:45:00 00:00:10:00 Unmuted


TRACK NAME: Audio 4 $A=3
COMMENTS: $A=4
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 Silver Bridge 01:00:00:00 01:00:05:00 00:00:05:00 Unmuted


TRACK NAME: Audio 5
COMMENTS:
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 @ {D=100} 01:00:00:00 01:00:10:00 00:00:10:00 Unmuted
1 2 @ {D=101} 01:00:10:00 01:00:20:00 00:00:10:00 Unmuted
1 3 @ {D=102} 01:00:20:00 01:00:30:00 00:00:10:00 Unmuted


TRACK NAME: Audio 6
COMMENTS:
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 Region 02 01:00:02:00 01:00:03:00 00:00:01:00 Unmuted
1 2 Region 12 01:00:12:00 01:00:13:00 00:00:01:00 Unmuted
1 3 Region 22 01:00:22:00 01:00:23:00 00:00:01:00 Unmuted


TRACK NAME: Audio 7
COMMENTS:
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 @ {D=200} {E=101} 01:00:00:00 01:00:10:00 00:00:10:00 Unmuted


TRACK NAME: Audio 8
COMMENTS:
USER DELAY: 0 Samples
STATE:
PLUG-INS:
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
1 1 Region 04 01:00:04:00 01:00:05:00 00:00:01:00 Unmuted


M A R K E R S L I S T I N G
# LOCATION TIME REFERENCE UNITS NAME COMMENTS
3 01:00:05:00 240240 Samples Marker $M=0
1 01:00:10:00 480480 Samples $M=1
2 01:00:22:00 1057056 Samples $M=2
tests/test_tagging.py (new file, 92 lines)
@@ -0,0 +1,92 @@
import unittest
import ptulsconv
import os.path

class TaggingIntegratedTests(unittest.TestCase):

    path = os.path.dirname(__file__) + '/export_cases/Tag Tests/Tag Tests.txt'

    def test_event_list(self):
        with open(self.path, 'r') as f:
            visitor = ptulsconv.DictionaryParserVisitor()
            result = ptulsconv.protools_text_export_grammar.parse(f.read())
            parsed: dict = visitor.visit(result)

            tcxform = ptulsconv.transformations.TimecodeInterpreter()
            tagxform = ptulsconv.transformations.TagInterpreter(show_progress=False,
                                                                ignore_muted=True,
                                                                log_output=False)

            parsed = tcxform.transform(parsed)
            parsed = tagxform.transform(parsed)

            self.assertEqual(9, len(parsed['events']))
            self.assertEqual("Clip Name", parsed['events'][0]['PT.Clip.Name'])
            self.assertEqual("Lorem ipsum" , parsed['events'][1]['PT.Clip.Name'])
            self.assertEqual("Dolor sic amet the rain in spain" , parsed['events'][2]['PT.Clip.Name'])
            self.assertEqual("A B C" , parsed['events'][3]['PT.Clip.Name'])
            self.assertEqual("Silver Bridge" , parsed['events'][4]['PT.Clip.Name'])
            self.assertEqual("Region 02" , parsed['events'][5]['PT.Clip.Name'])
            self.assertEqual("Region 12" , parsed['events'][6]['PT.Clip.Name'])
            self.assertEqual("Region 22" , parsed['events'][7]['PT.Clip.Name'])
            self.assertEqual("Region 04" , parsed['events'][8]['PT.Clip.Name'])

    def test_append(self):
        with open(self.path, 'r') as f:
            visitor = ptulsconv.DictionaryParserVisitor()
            result = ptulsconv.protools_text_export_grammar.parse(f.read())
            parsed: dict = visitor.visit(result)

            tcxform = ptulsconv.transformations.TimecodeInterpreter()
            tagxform = ptulsconv.transformations.TagInterpreter(show_progress=False,
                                                                ignore_muted=True,
                                                                log_output=False)

            parsed = tcxform.transform(parsed)
            parsed = tagxform.transform(parsed)

            self.assertTrue(len(parsed['events']) > 2)

            self.assertEqual("Dolor sic amet the rain in spain",
                             parsed['events'][2]['PT.Clip.Name'])

            self.assertTrue("01:00:10:00", parsed['events'][2]['PT.Clip.Start'])
            self.assertTrue("01:00:25:00", parsed['events'][2]['PT.Clip.Finish'])
            self.assertTrue(240, parsed['events'][2]['PT.Clip.Start_Frames'])
            self.assertTrue(600, parsed['events'][2]['PT.Clip.Finish_Frames'])

            self.assertIn('X', parsed['events'][2].keys())
            self.assertIn('ABC', parsed['events'][2].keys())
            self.assertIn('A', parsed['events'][2].keys())
            self.assertEqual('302', parsed['events'][2]['X'])
            self.assertEqual('ABC', parsed['events'][2]['ABC'])
            self.assertEqual('1', parsed['events'][2]['A'])

    def test_successive_appends(self):
        with open(self.path, 'r') as f:
            visitor = ptulsconv.DictionaryParserVisitor()
            result = ptulsconv.protools_text_export_grammar.parse(f.read())
            parsed: dict = visitor.visit(result)

            tcxform = ptulsconv.transformations.TimecodeInterpreter()
            tagxform = ptulsconv.transformations.TagInterpreter(show_progress=False,
                                                                ignore_muted=True,
                                                                log_output=False)

            parsed = tcxform.transform(parsed)
            parsed = tagxform.transform(parsed)

            self.assertTrue(len(parsed['events']) > 3)

            self.assertEqual("A B C",
                             parsed['events'][3]['PT.Clip.Name'])

            self.assertTrue("01:00:15:00", parsed['events'][3]['PT.Clip.Start'])
            self.assertTrue("01:00:45:00", parsed['events'][3]['PT.Clip.Finish'])
            self.assertTrue(80, parsed['events'][3]['PT.Clip.Start_Frames'])
            self.assertTrue(1080, parsed['events'][3]['PT.Clip.Finish_Frames'])


if __name__ == '__main__':
    unittest.main()