mirror of
https://github.com/iluvcapra/ptulsconv.git
synced 2025-12-31 17:00:46 +00:00
Compare commits
129 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
617f34a515 | ||
|
|
5427b4cfb1 | ||
|
|
408829e820 | ||
|
|
b65401d25f | ||
|
|
50fe3e2c0a | ||
|
|
1c8feec8fe | ||
|
|
f510f98ede | ||
|
|
ddf1948f3c | ||
|
|
1c9d373b40 | ||
|
|
51b2517db1 | ||
|
|
27dd8bc94d | ||
|
|
dd394a8fec | ||
|
|
b5571891cf | ||
|
|
73058e9423 | ||
|
|
a11cda40e5 | ||
|
|
7381a37185 | ||
|
|
065bd26f4c | ||
|
|
7ec983f63f | ||
|
|
944e66728b | ||
|
|
6473c83785 | ||
|
|
8947d409b4 | ||
|
|
0494e771be | ||
|
|
f00bea8702 | ||
|
|
6e82a14e4f | ||
|
|
07669e4eca | ||
|
|
ddc406b1eb | ||
|
|
e07b3bb604 | ||
|
|
c02453d10f | ||
|
|
cdc8a838ac | ||
|
|
e2c7408413 | ||
|
|
a18154edb0 | ||
|
|
f15ee40d37 | ||
|
|
cd26be0c20 | ||
|
|
d50e45882b | ||
|
|
adb80eb174 | ||
|
|
2b91f128b9 | ||
|
|
9f24d45f25 | ||
|
|
3a58fdba75 | ||
|
|
800a4dfb12 | ||
|
|
6bc98063db | ||
|
|
b1bf49ca82 | ||
|
|
61250aaf63 | ||
|
|
43df2c1558 | ||
|
|
17dc868756 | ||
|
|
2e36a789b4 | ||
|
|
1345113a85 | ||
|
|
76c2e24084 | ||
|
|
a5ed16849c | ||
|
|
4c3e103e77 | ||
|
|
dd767b2d41 | ||
|
|
aaf751c1a2 | ||
|
|
91e0da278f | ||
|
|
a7d01779bd | ||
|
|
cb6c0c8895 | ||
|
|
a2a6782214 | ||
|
|
2c78d4a09d | ||
|
|
28cf7b5d09 | ||
|
|
b419814f82 | ||
|
|
967ef5c63a | ||
|
|
fe1a1eebd5 | ||
|
|
dadeab49fe | ||
|
|
900dd5d582 | ||
|
|
5882e01b31 | ||
|
|
e2e86faf54 | ||
|
|
bfdefc8da0 | ||
|
|
2af9317e7e | ||
|
|
9194e5ba54 | ||
|
|
528bd949ca | ||
|
|
5633eb89f0 | ||
|
|
29e1753b18 | ||
|
|
1df0b79ab6 | ||
|
|
68db6c9b09 | ||
|
|
2c664db0dd | ||
|
|
e46ac14118 | ||
|
|
bf3a5c37a8 | ||
|
|
d3b08e9238 | ||
|
|
c0d192e651 | ||
|
|
d3cc9074c4 | ||
|
|
87108c7865 | ||
|
|
04422360f0 | ||
|
|
cd4122ce50 | ||
|
|
a176d3b1f5 | ||
|
|
8a6f5e755b | ||
|
|
b4fef4b13f | ||
|
|
6fc7f26e9c | ||
|
|
09b3f9349b | ||
|
|
f6ee807ede | ||
|
|
f114012d4a | ||
|
|
c03b3dfb8d | ||
|
|
d2da8f1cb0 | ||
|
|
10c0e4f038 | ||
|
|
6703184f8f | ||
|
|
1c11e4d570 | ||
|
|
94317c288f | ||
|
|
9e374df367 | ||
|
|
fc2e823116 | ||
|
|
fbc7531374 | ||
|
|
1fb17b13ea | ||
|
|
21c32e282c | ||
|
|
8407d31333 | ||
|
|
97d6eeda02 | ||
|
|
3bee7a8391 | ||
|
|
68d38f8ed5 | ||
|
|
8e043b7175 | ||
|
|
a7b5adfffb | ||
|
|
da5b743191 | ||
|
|
caa5381306 | ||
|
|
9e2b932cad | ||
|
|
05ea48078f | ||
|
|
c26fa8dd75 | ||
|
|
9f8e3cf824 | ||
|
|
3b438b1399 | ||
|
|
41b1a3185f | ||
|
|
8877982a47 | ||
|
|
bb6fbcfd37 | ||
|
|
434b8816ee | ||
|
|
5ebaf6b473 | ||
|
|
d0f415b38f | ||
|
|
c5d6d82831 | ||
|
|
66a71283d5 | ||
|
|
15ad328edc | ||
|
|
a48eccb0d0 | ||
|
|
fa2cef35b2 | ||
|
|
c8053f65ae | ||
|
|
d9da7317a7 | ||
|
|
ab614cbc32 | ||
|
|
5a75a77f77 | ||
|
|
4daa5f0496 | ||
|
|
de48bcfe24 |
10
.github/workflows/python-package.yml
vendored
10
.github/workflows/python-package.yml
vendored
@@ -16,19 +16,19 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: [3.7, 3.8, 3.9, "3.10"]
|
||||
python-version: [3.7, 3.8, 3.9, "3.10", "3.11"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v2.5.0
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4.3.0
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install flake8 pytest
|
||||
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
||||
pip install -e .
|
||||
- name: Lint with flake8
|
||||
run: |
|
||||
# stop the build if there are Python syntax errors or undefined names
|
||||
@@ -37,4 +37,4 @@ jobs:
|
||||
flake8 ptulsconv tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
|
||||
- name: Test with pytest
|
||||
run: |
|
||||
PYTHONPATH=. pytest
|
||||
pytest
|
||||
|
||||
38
.github/workflows/pythonpublish.yml
vendored
38
.github/workflows/pythonpublish.yml
vendored
@@ -2,28 +2,38 @@ name: Upload Python Package
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [created]
|
||||
types: [published]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: release
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v3.5.2
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v1
|
||||
uses: actions/setup-python@v4.6.0
|
||||
with:
|
||||
python-version: '3.x'
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install setuptools wheel twine
|
||||
- name: Install parsimonious
|
||||
run: |
|
||||
pip install parsimonious
|
||||
- name: Build and publish
|
||||
env:
|
||||
TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
|
||||
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
|
||||
run: |
|
||||
python setup.py sdist bdist_wheel
|
||||
twine upload dist/*
|
||||
pip install build
|
||||
- name: Build package
|
||||
run: python -m build
|
||||
- name: pypi-publish
|
||||
uses: pypa/gh-action-pypi-publish@v1.8.6
|
||||
# - name: Report to Mastodon
|
||||
# uses: cbrgm/mastodon-github-action@v1.0.1
|
||||
# with:
|
||||
# message: |
|
||||
# I just released a new version of ptulsconv, my ADR cue sheet generator!
|
||||
# #python #protools #pdf #filmmaking
|
||||
# ${{ github.server_url }}/${{ github.repository }}
|
||||
# env:
|
||||
# MASTODON_URL: ${{ secrets.MASTODON_URL }}
|
||||
# MASTODON_ACCESS_TOKEN: ${{ secrets.MASTODON_ACCESS_TOKEN }}
|
||||
|
||||
22
.github/workflows/toot.yml
vendored
Normal file
22
.github/workflows/toot.yml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
name: Test Toot
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
|
||||
jobs:
|
||||
print-tag:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Report to Mastodon
|
||||
uses: cbrgm/mastodon-github-action@v1.0.1
|
||||
env:
|
||||
MASTODON_URL: ${{ secrets.MASTODON_URL }}
|
||||
MASTODON_ACCESS_TOKEN: ${{ secrets.MASTODON_ACCESS_TOKEN }}
|
||||
with:
|
||||
message: |
|
||||
This is a test toot, automatically posted by a github action.
|
||||
|
||||
${{ github.server_url }}/${{ github.repository }}
|
||||
|
||||
${{ github.ref }}
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -89,6 +89,7 @@ venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
venv_docs/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
@@ -104,3 +105,7 @@ venv.bak/
|
||||
.mypy_cache/
|
||||
.DS_Store
|
||||
/example/Charade/Session File Backups/
|
||||
lcov.info
|
||||
|
||||
.vim
|
||||
.vscode
|
||||
|
||||
32
.readthedocs.yaml
Normal file
32
.readthedocs.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
# .readthedocs.yaml
|
||||
# Read the Docs configuration file
|
||||
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||
|
||||
# Required
|
||||
version: 2
|
||||
|
||||
# Set the version of Python and other tools you might need
|
||||
build:
|
||||
os: ubuntu-20.04
|
||||
tools:
|
||||
python: "3.10"
|
||||
# You can also specify other tool versions:
|
||||
# nodejs: "16"
|
||||
# rust: "1.55"
|
||||
# golang: "1.17"
|
||||
|
||||
# Build documentation in the docs/ directory with Sphinx
|
||||
sphinx:
|
||||
configuration: docs/source/conf.py
|
||||
|
||||
#If using Sphinx, optionally build your docs in additional formats such as PDF
|
||||
formats:
|
||||
- pdf
|
||||
|
||||
#Optionally declare the Python requirements required to build your docs
|
||||
python:
|
||||
install:
|
||||
- method: pip
|
||||
path: .
|
||||
extra_requirements:
|
||||
- doc
|
||||
9
CONTRIBUTING.md
Normal file
9
CONTRIBUTING.md
Normal file
@@ -0,0 +1,9 @@
|
||||
# Contributing to ptulsconv
|
||||
|
||||
## Testing
|
||||
|
||||
Before submitting PRs or patches, please make sure your branch passes all of the unit tests by running Pytest.
|
||||
|
||||
```sh
|
||||
~/ptulsconv$ pytest
|
||||
```
|
||||
2
LICENSE
2
LICENSE
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Jamie Hardt
|
||||
Copyright (c) 2022 Jamie Hardt
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
41
README.md
41
README.md
@@ -1,7 +1,9 @@
|
||||
[](https://ptulsconv.readthedocs.io/en/latest/?badge=latest)
|
||||

|
||||

|
||||
[][pypi]
|
||||

|
||||

|
||||
[](https://github.com/iluvcapra/ptulsconv/actions/workflows/python-package.yml)
|
||||
|
||||
[pypi]: https://pypi.org/project/ptulsconv/
|
||||
|
||||
@@ -9,39 +11,10 @@
|
||||
# ptulsconv
|
||||
|
||||
Read Pro Tools text exports and generate PDF reports, JSON output.
|
||||
|
||||
|
||||
## Theory of Operation
|
||||
|
||||
[Avid Pro Tools][avp] can be used to make spotting notes for ADR recording
|
||||
sessions by creating spotting regions with descriptive text and exporting the
|
||||
session as text. This file can then be dropped into Excel or any CSV-reading
|
||||
app like Filemaker Pro.
|
||||
|
||||
**ptulsconv** accepts a text export from Pro Tools and automatically creates
|
||||
PDF and CSV documents for use in ADR spotting, recording, editing and
|
||||
reporting, and supplemental JSON documents can be output for use with other
|
||||
workflows.
|
||||
|
||||
### Reports Generated by ptulsconv by Default
|
||||
|
||||
1. "ADR Report" lists every line in an export with most useful fields, sorted
|
||||
by time.
|
||||
2. "Continuity" lists every scene sorted by time.
|
||||
3. "Line Count" lists a count of every line, collated by reel number and by
|
||||
effort/TV/optional line designation.
|
||||
4. "CSV" is a folder of files of all lines collated by character and reel
|
||||
as CSV files, for use by studio cueing workflows.
|
||||
5. "Director Logs" is a folder of PDFs formatted like the "ADR Report" except
|
||||
collated by character.
|
||||
6. "Supervisor Logs" creates a PDF report for every character, with one line
|
||||
per page, optimized for note-taking.
|
||||
7. "Talent Scripts" is a minimal PDF layout of just timecode and line prompt,
|
||||
collated by character.
|
||||
|
||||
|
||||
[avp]: http://www.avid.com/pro-tools
|
||||
## Quick Start
|
||||
|
||||
For a quick overview of how to cue ADR with `ptulsconv`, check out the [Quickstart][quickstart].
|
||||
|
||||
|
||||
## Installation
|
||||
@@ -52,4 +25,6 @@ The easiest way to install on your site is to use `pip`:
|
||||
|
||||
This will install the necessary libraries on your host and gives you
|
||||
command-line access to the tool through an entry-point `ptulsconv`. In a
|
||||
terminal window type `ptulsconv -h` for a list of available options.
|
||||
terminal window type `ptulsconv -h` for a list of available options.
|
||||
|
||||
[quickstart]: https://ptulsconv.readthedocs.io/en/latest/user/quickstart.html
|
||||
|
||||
20
docs/Makefile
Normal file
20
docs/Makefile
Normal file
@@ -0,0 +1,20 @@
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?=
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = source
|
||||
BUILDDIR = build
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: help Makefile
|
||||
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
77
docs/source/conf.py
Normal file
77
docs/source/conf.py
Normal file
@@ -0,0 +1,77 @@
|
||||
# Configuration file for the Sphinx documentation builder.
|
||||
#
|
||||
# For the full list of built-in configuration values, see the documentation:
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.abspath("../.."))
|
||||
print(sys.path)
|
||||
|
||||
import ptulsconv
|
||||
|
||||
# -- Project information -----------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
||||
|
||||
project = 'ptulsconv'
|
||||
# copyright = ptulsconv.__copyright__
|
||||
# author = ptulsconv.__author__
|
||||
release = ptulsconv.__version__
|
||||
|
||||
# -- General configuration ---------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
|
||||
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.todo',
|
||||
'sphinx.ext.coverage',
|
||||
'sphinx.ext.viewcode',
|
||||
'sphinx.ext.githubpages',
|
||||
]
|
||||
|
||||
templates_path = ['_templates']
|
||||
exclude_patterns = []
|
||||
|
||||
|
||||
master_doc = 'index'
|
||||
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
|
||||
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_static_path = ['_static']
|
||||
|
||||
|
||||
|
||||
latex_documents = [
|
||||
(master_doc, 'ptulsconv.tex', u'ptulsconv Documentation',
|
||||
u'Jamie Hardt', 'manual'),
|
||||
]
|
||||
|
||||
|
||||
# -- Options for Epub output -------------------------------------------------
|
||||
|
||||
# Bibliographic Dublin Core info.
|
||||
epub_title = project
|
||||
|
||||
# The unique identifier of the text. This can be a ISBN number
|
||||
# or the project homepage.
|
||||
#
|
||||
# epub_identifier = ''
|
||||
|
||||
# A unique identification for the text.
|
||||
#
|
||||
# epub_uid = ''
|
||||
|
||||
# A list of files that should not be packed into the epub file.
|
||||
epub_exclude_files = ['search.html']
|
||||
|
||||
|
||||
# -- Extension configuration -------------------------------------------------
|
||||
|
||||
# -- Options for todo extension ----------------------------------------------
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = True
|
||||
7
docs/source/dev/contributing.rst
Normal file
7
docs/source/dev/contributing.rst
Normal file
@@ -0,0 +1,7 @@
|
||||
Contributing
|
||||
============
|
||||
|
||||
Testing
|
||||
-------
|
||||
|
||||
Before submitting PRs or patches, please make sure your branch passes all of the unit tests by running Pytest.
|
||||
39
docs/source/dev/modules.rst
Normal file
39
docs/source/dev/modules.rst
Normal file
@@ -0,0 +1,39 @@
|
||||
Auxiliary and Helper Modules
|
||||
============================
|
||||
|
||||
Commands Module
|
||||
---------------
|
||||
|
||||
.. automodule:: ptulsconv.commands
|
||||
:members:
|
||||
|
||||
|
||||
Broadcast Timecode Module
|
||||
-------------------------
|
||||
|
||||
.. automodule:: ptulsconv.broadcast_timecode
|
||||
:members:
|
||||
|
||||
|
||||
Footage Module
|
||||
--------------
|
||||
|
||||
.. automodule:: ptulsconv.footage
|
||||
:members:
|
||||
|
||||
|
||||
Reporting Module
|
||||
----------------
|
||||
|
||||
.. automodule:: ptulsconv.reporting
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
|
||||
Validations Module
|
||||
------------------
|
||||
|
||||
.. automodule:: ptulsconv.validations
|
||||
:members:
|
||||
:undoc-members:
|
||||
|
||||
9
docs/source/dev/parsing.rst
Normal file
9
docs/source/dev/parsing.rst
Normal file
@@ -0,0 +1,9 @@
|
||||
Parsing
|
||||
=======
|
||||
|
||||
Docparser Classes
|
||||
-----------------
|
||||
|
||||
.. autoclass:: ptulsconv.docparser.adr_entity.ADRLine
|
||||
:members:
|
||||
:undoc-members:
|
||||
23
docs/source/dev/theory.rst
Normal file
23
docs/source/dev/theory.rst
Normal file
@@ -0,0 +1,23 @@
|
||||
Theory of Operation
|
||||
===================
|
||||
|
||||
Execution Flow When Producing "doc" Output
|
||||
------------------------------------------
|
||||
|
||||
#. The command line argv is read in :py:func:`ptulsconv.__main__.main()`,
|
||||
which calls :py:func:`ptulsconv.commands.convert()`
|
||||
#. :func:`ptulsconv.commands.convert()` reads the input with
|
||||
:func:`ptuslconv.docparser.doc_parser_visitor()`,
|
||||
which uses the ``parsimonious`` library to parse the input into an abstract
|
||||
syntax tree, which the parser visitor uses to convert into a
|
||||
:class:`ptulsconv.docparser.doc_entity.SessionDescriptor`,
|
||||
which structures all of the data in the session output.
|
||||
#. The next action based on the output format. In the
|
||||
case of the "doc" output format, it runs some validations
|
||||
on the input, and calls :func:`ptulsconv.commands.generate_documents()`.
|
||||
#. :func:`ptulsconv.commands.generate_documents()` creates the output folder, creates the
|
||||
Continuity report with :func:`ptulsconv.pdf.continuity.output_continuity()` (this document
|
||||
requires some special-casing), and at the tail calls...
|
||||
#. :func:`ptulsconv.commands.create_adr_reports()`, which creates folders for
|
||||
|
||||
(FIXME finish this)
|
||||
39
docs/source/index.rst
Normal file
39
docs/source/index.rst
Normal file
@@ -0,0 +1,39 @@
|
||||
.. ptulsconv documentation master file, created by
|
||||
sphinx-quickstart on Fri Nov 18 10:40:33 2022.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to ptulsconv's documentation!
|
||||
=====================================
|
||||
|
||||
`ptulsconv` is a tool for converting Pro Tools text exports into PDF
|
||||
reports for ADR spotting. It can also be used for converting text
|
||||
exports into JSON documents for processing by other applications.
|
||||
|
||||
.. toctree::
|
||||
:numbered:
|
||||
:maxdepth: 2
|
||||
:caption: User Documentation
|
||||
|
||||
user/quickstart
|
||||
user/tagging
|
||||
user/for_adr
|
||||
user/cli_reference
|
||||
|
||||
.. toctree::
|
||||
:numbered:
|
||||
:maxdepth: 1
|
||||
:caption: Developer Documentation
|
||||
|
||||
dev/contributing
|
||||
dev/theory
|
||||
dev/parsing
|
||||
dev/modules
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`modindex`
|
||||
* :ref:`genindex`
|
||||
* :ref:`search`
|
||||
74
docs/source/user/cli_reference.rst
Normal file
74
docs/source/user/cli_reference.rst
Normal file
@@ -0,0 +1,74 @@
|
||||
Command-Line Reference
|
||||
======================
|
||||
|
||||
Usage Form
|
||||
-----------
|
||||
|
||||
Invocations of ptulsconv take the following form:
|
||||
|
||||
ptulsconv [options] IN_FILE
|
||||
|
||||
|
||||
Flags
|
||||
-----
|
||||
|
||||
`-h`, `--help`
|
||||
Show the help message.
|
||||
|
||||
`f FMT`, `--format=FMT`
|
||||
Select the output format. By default this is `doc`, which will
|
||||
generate :ref:`ADR reports<adr-reports>`.
|
||||
|
||||
The :ref:`other available options<alt-output-options>`
|
||||
are `raw` and `tagged`.
|
||||
|
||||
|
||||
Informational Options
|
||||
"""""""""""""""""""""
|
||||
|
||||
These options display information and exit without processing any
|
||||
input documents.
|
||||
|
||||
`--show-formats`
|
||||
Display information about available output formats.
|
||||
|
||||
`--show-available-tags`
|
||||
Display information about tags that are used by the
|
||||
report generator.
|
||||
|
||||
|
||||
.. _alt-output-options:
|
||||
|
||||
Alternate Output Formats
|
||||
------------------------
|
||||
|
||||
.. _raw-output:
|
||||
|
||||
`raw` Output
|
||||
""""""""""""
|
||||
|
||||
The "raw" output format is a JSON document of the parsed input data.
|
||||
|
||||
The document is a top-level dictionary with keys for the main sections of the text export: `header`,
|
||||
`files`, `clips`, `plugins`, `tracks` and `markers`, and the values for these are a list of section
|
||||
entries, or a dictionary of values, in the case of `header`.
|
||||
|
||||
The text values of each record and field in the text export is read and output verbatim, no further
|
||||
processing is done.
|
||||
|
||||
.. _tagged-output:
|
||||
|
||||
`tagged` Output
|
||||
"""""""""""""""
|
||||
|
||||
The "tagged" output format is also a JSON document based on the parsed input data, after the additional
|
||||
step of processing all of the :ref:`tags<tags>` in the document.
|
||||
|
||||
The document is a top-level array of dictionaries, one for each recognized ADR spotting clip in the
|
||||
session. Each dictionary has a `clip_name`, `track_name` and `session_name` key, a `tags` key that
|
||||
contains a dictionary of every parsed tag (after applying tags from all tracks and markers), and a
|
||||
`start` and `end` key. The `start` and `end` key contain the parsed timecode representations of these
|
||||
values in rational number form, as a dictionary with `numerator` and `denominator` keys.
|
||||
|
||||
|
||||
|
||||
129
docs/source/user/for_adr.rst
Normal file
129
docs/source/user/for_adr.rst
Normal file
@@ -0,0 +1,129 @@
|
||||
.. _adr-reports:
|
||||
|
||||
`ptulsconv` For ADR Report Generation
|
||||
=====================================
|
||||
|
||||
Reports Created by the ADR Report Generator
|
||||
-------------------------------------------
|
||||
|
||||
(FIXME: write this)
|
||||
|
||||
|
||||
Tags Used by the ADR Report Generator
|
||||
-------------------------------------
|
||||
|
||||
|
||||
Project-Level Tags
|
||||
""""""""""""""""""
|
||||
|
||||
It usually makes sense to place these either in the session name,
|
||||
or on a :ref:`marker <tag-marker>` at the beginning of the session, so it will apply to
|
||||
all of the clips in the session.
|
||||
|
||||
`Title`
|
||||
The title of the project. This will appear at the top
|
||||
of every report.
|
||||
|
||||
.. warning::
|
||||
`ptulsconv` at this time only supports one title per export. If you attempt to
|
||||
use multiple titles in one export it will fail.
|
||||
|
||||
`Supv`
|
||||
The supervisor of the project. This appears at the bottom
|
||||
of every report.
|
||||
|
||||
`Client`
|
||||
The client of the project. This will often appear under the
|
||||
title on every report.
|
||||
|
||||
`Spot`
|
||||
The date or version number of the spotting report.
|
||||
|
||||
|
||||
Time Range Tags
|
||||
"""""""""""""""
|
||||
|
||||
All of these tags can be set to different values on each clip, but
|
||||
it often makes sense to use these tags in a :ref:`time range<tag-range>`.
|
||||
|
||||
`Sc`
|
||||
The scene description. This appears on the continuity report
|
||||
and is used in the Director's logs.
|
||||
|
||||
`Ver`
|
||||
The picture version. This appears beside the spot timecodes
|
||||
on most reports.
|
||||
|
||||
`Reel`
|
||||
The reel. This appears beside the spot timecodes
|
||||
on most reports and is used to summarize line totals on the
|
||||
line count report.
|
||||
|
||||
|
||||
Line tags
|
||||
"""""""""
|
||||
|
||||
`P`
|
||||
Priority.
|
||||
|
||||
`QN`
|
||||
Cue number. This appears on all reports.
|
||||
|
||||
.. warning::
|
||||
`ptulsconv` will verify that all cue numbers in a given title are unique.
|
||||
|
||||
All lines must have a cue number in order to generate reports, if any lines
|
||||
do not have a cue number set, `ptulsconv` will fail.
|
||||
|
||||
|
||||
`CN`
|
||||
Character number. This is used to collate character records
|
||||
and will appear on the line count and in character-collated
|
||||
reports.
|
||||
|
||||
`Char`
|
||||
Character name. By default, a clip will set this to the
|
||||
name of the track it appears on, but the track name can be
|
||||
overridden here.
|
||||
|
||||
`Actor`
|
||||
Actor name.
|
||||
|
||||
`Line`
|
||||
The prompt to appear for this ADR line. By default, this
|
||||
will be whatever text appears in a clip name prior to the first
|
||||
tag.
|
||||
|
||||
`R`
|
||||
Reason.
|
||||
|
||||
`Mins`
|
||||
Time budget for this line, in minutes. This is used in the
|
||||
line count report to give estimated times for each character. This
|
||||
can be set for the entire project (with a :ref:`marker <tag-marker>`), or for individual
|
||||
actors (with a tag in the :ref:`track comments<tag-track>`), or can be set for
|
||||
individual lines to override these.
|
||||
|
||||
`Shot`
|
||||
Shot. A Date or other description indicating the line has been
|
||||
recorded.
|
||||
|
||||
|
||||
Boolean-valued ADR Tag Fields
|
||||
"""""""""""""""""""""""""""""
|
||||
|
||||
`EFF`
|
||||
Effort. Lines with this tag are subtotaled in the line count report.
|
||||
|
||||
`TV`
|
||||
TV line. Lines with this tag are subtotaled in the line count report.
|
||||
|
||||
`TBW`
|
||||
To be written.
|
||||
|
||||
`ADLIB`
|
||||
Ad-lib.
|
||||
|
||||
`OPT`
|
||||
Optional. Lines with this tag are subtotaled in the line count report.
|
||||
|
||||
91
docs/source/user/quickstart.rst
Normal file
91
docs/source/user/quickstart.rst
Normal file
@@ -0,0 +1,91 @@
|
||||
Quick Start
|
||||
===========
|
||||
|
||||
The workflow for creating ADR reports in `ptulsconv` is similar to other ADR
|
||||
spotting programs: spot ADR lines in Pro Tools with clips using a special
|
||||
code to take notes, export the tracks as text and then run the program.
|
||||
|
||||
|
||||
Step 1: Use Pro Tools to Spot ADR Lines
|
||||
---------------------------------------
|
||||
|
||||
`ptulsconv` can be used to spot ADR lines similarly to other programs.
|
||||
|
||||
#. Create a new Pro Tools session, name this session after your project.
|
||||
#. Create new tracks, one for each character. Name each track after a
|
||||
character.
|
||||
#. On each track, create a clip group (or edit in some audio) at the time you
|
||||
would like an ADR line to appear in the report. Name the clip after the
|
||||
dialogue you are replacing at that time.
|
||||
|
||||
|
||||
Step 2: Add More Information to Your Spots
|
||||
------------------------------------------
|
||||
|
||||
Clips, tracks and markers in your session can contain additional information
|
||||
to make your ADR reports more complete and useful. You add this information
|
||||
with *tagging*.
|
||||
|
||||
* Every ADR clip must have a unique cue number. After the name of each clip,
|
||||
add the letters "$QN=" and then a unique number (any combination of letters
|
||||
or numbers that don't contain a space). You can type these yourself or add
|
||||
them with batch-renaming when you're done spotting.
|
||||
* ADR spots should usually have a reason indicated, so you can remember exactly
|
||||
why you're replacing a particular line. Do this by adding the the text "{R="
|
||||
to your clip names after the prompt and then some short text describing the
|
||||
reason, and then a closing "}". You can type anything, including spaces.
|
||||
* If a line is a TV cover line, you can add the text "[TV]" to the end.
|
||||
|
||||
So for example, some ADR spot's clip name might look like:
|
||||
|
||||
Get to the ladder! {R=Noise} $QN=J1001
|
||||
"Forget your feelings! {R=TV Cover} $QN=J1002 [TV]
|
||||
|
||||
These tags can appear in any order.
|
||||
|
||||
* You can add the name of an actor to a character's track, so this information
|
||||
will appear on your reports. In the track name, or in the track comments,
|
||||
type "{Actor=xxx}" replacing the xxx with the actor's name.
|
||||
* Characters need to have a number (perhaps from the cast list) to express how
|
||||
they should be collated. Add "$CN=xxx" with a unique number to each track (or
|
||||
the track's comments.)
|
||||
* Set the scene for each line with markers. Create a marker at the beginning of
|
||||
a scene and make it's name "{Sc=xxx}", replacing the xxx with the scene
|
||||
number and name.
|
||||
|
||||
|
||||
Step 3: Export Tracks from Pro Tools as a Text File
|
||||
---------------------------------------------------
|
||||
|
||||
Export the file as a UTF-8 and be sure to include clips and markers. Export
|
||||
using the Timecode time format.
|
||||
|
||||
Do not export crossfades.
|
||||
|
||||
|
||||
Step 4: Run `ptulsconv` on the Text Export
|
||||
------------------------------------------
|
||||
|
||||
In your Terminal, run the following command:
|
||||
|
||||
ptulsconv path/to/your/TEXT_EXPORT.txt
|
||||
|
||||
`ptulsconv` will create a folder named "Title_CURRENT_DATE", and within that
|
||||
folder it will create several PDFs and folders:
|
||||
|
||||
- "TITLE ADR Report" 📄 a PDF tabular report of every ADR line you've spotted.
|
||||
- "TITLE Continuity" 📄 a PDF listing every scene you have indicated and its
|
||||
timecode.
|
||||
- "TITLE Line Count" 📄 a PDF tabular report giving line counts by reel, and the
|
||||
time budget per character and reel (if provided in the tagging).
|
||||
- "CSV/" a folder containing CSV documents of all spotted ADR, groupd by
|
||||
character and reel.
|
||||
- "Director Logs/" 📁 a folder containing PDF tabular reports, like the overall
|
||||
report except groupd by character.
|
||||
- "Supervisor Logs/" 📁 a folder containing PDF reports, one page per line,
|
||||
designed for note taking during a session, particularly on an iPad.
|
||||
- "Talent Scripts/" 📁 a folder containing PDF scripts or sides, with the timecode
|
||||
and prompts for each line, grouped by character but with most other
|
||||
information suppressed.
|
||||
|
||||
|
||||
130
docs/source/user/tagging.rst
Normal file
130
docs/source/user/tagging.rst
Normal file
@@ -0,0 +1,130 @@
|
||||
.. _tags:
|
||||
|
||||
Tagging
|
||||
=======
|
||||
|
||||
Tags are used to add additional data to a clip in an organized way. The
|
||||
tagging system in `ptulsconv` allows is flexible and can be used to add
|
||||
any kind of extra data to a clip.
|
||||
|
||||
Fields in Clip Names
|
||||
--------------------
|
||||
|
||||
Track names, track comments, and clip names can also contain meta-tags, or
|
||||
"fields," to add additional columns to the output. Thus, if a clip has the
|
||||
name:::
|
||||
|
||||
`Fireworks explosion {note=Replace for final} $V=1 [FX] [DESIGN]`
|
||||
|
||||
The row output for this clip will contain columns for the values:
|
||||
|
||||
|
||||
+---------------------+-------------------+---+----+--------+
|
||||
| Clip Name | note | V | FX | DESIGN |
|
||||
+=====================+===================+===+====+========+
|
||||
| Fireworks explosion | Replace for final | 1 | FX | DESIGN |
|
||||
+---------------------+-------------------+---+----+--------+
|
||||
|
||||
|
||||
These fields can be defined in the clip name in three ways:
|
||||
* `$NAME=VALUE` creates a field named `NAME` with a one-word value `VALUE`.
|
||||
* `{NAME=VALUE}` creates a field named `NAME` with the value `VALUE`. `VALUE`
|
||||
in this case may contain spaces or any chartacter up to the closing bracket.
|
||||
* `[NAME]` creates a field named `NAME` with a value `NAME`. This can be used
|
||||
to create a boolean-valued field; in the output, clips with the field
|
||||
will have it, and clips without will have the column with an empty value.
|
||||
|
||||
For example, if three clips are named:::
|
||||
|
||||
`"Squad fifty-one, what is your status?" [FUTZ] {Ch=Dispatcher} [ADR]`
|
||||
|
||||
`"We are ten-eight at Rampart Hospital." {Ch=Gage} [ADR]`
|
||||
|
||||
`(1M) FC callouts rescuing trapped survivors. {Ch=Group} $QN=1001 [GROUP]`
|
||||
|
||||
The output will contain the range:
|
||||
|
||||
|
||||
+----------------------------------------------+------------+------+-----+------+-------+
|
||||
| Clip Name | Ch | FUTZ | ADR | QN | GROUP |
|
||||
+==============================================+============+======+=====+======+=======+
|
||||
| "Squad fifty-one, what is your status?" | Dispatcher | FUTZ | ADR | | |
|
||||
+----------------------------------------------+------------+------+-----+------+-------+
|
||||
| "We are ten-eight at Rampart Hospital." | Gage | | ADR | | |
|
||||
+----------------------------------------------+------------+------+-----+------+-------+
|
||||
| (1M) FC callouts rescuing trapped survivors. | Group | | | 1001 | GROUP |
|
||||
+----------------------------------------------+------------+------+-----+------+-------+
|
||||
|
||||
|
||||
.. _tag-track:
|
||||
.. _tag-marker:
|
||||
|
||||
Fields in Track Names and Markers
|
||||
---------------------------------
|
||||
|
||||
Fields set in track names, and in track comments, will be applied to *each*
|
||||
clip on that track. If a track comment contains the text `{Dept=Foley}` for
|
||||
example, every clip on that track will have a "Foley" value in a "Dept" column.
|
||||
|
||||
Likewise, fields set on the session name will apply to all clips in the session.
|
||||
|
||||
Fields set in markers, and in marker comments, will be applied to all clips
|
||||
whose finish is *after* that marker. Fields in markers are applied cumulatively
|
||||
from the beginning to the end of the session. The latest marker applying to a clip has
|
||||
precedence, so if one marker comes after the other, but both define a field, the
|
||||
value in the later marker will prevail.
|
||||
|
||||
An important note here is that, always, fields set on the clip name have the
|
||||
highest precedence. If a field is set in a clip name and the same field is set on the
|
||||
track, the value set on the clip will prevail.
|
||||
|
||||
|
||||
.. _tag-range:
|
||||
|
||||
Apply Fields to a Time Range of Clips
|
||||
-------------------------------------
|
||||
|
||||
A clip name beginning with "@" will not be included in the output, but its
|
||||
fields will be applied to clips within its time range on lower tracks.
|
||||
|
||||
If track 1 has a clip named `@ {Sc=1- The House}`, any clips beginning within
|
||||
that range on lower tracks will have a field `Sc` with that value.
|
||||
|
||||
|
||||
Combining Clips with Long Names or Many Tags
|
||||
--------------------------------------------
|
||||
|
||||
A clip name beginning with `&` will have its parsed clip name appended to the
|
||||
preceding cue, and the fields of following cues will be applied, earlier clips
|
||||
having precedence. The clips need not be touching, and the clips will be
|
||||
combined into a single row of the output. The start time of the first clip will
|
||||
become the start time of the row, and the finish time of the last clip will
|
||||
become the finish time of the row.
|
||||
|
||||
|
||||
Setting Document Options
|
||||
------------------------
|
||||
|
||||
.. note::
|
||||
Document options are not yet implemented.
|
||||
|
||||
A clip beginning with `!` sends a command to `ptulsconv`. These commands can
|
||||
appear anywhere in the document and apply to the entire document. Commands are
|
||||
a list of words
|
||||
|
||||
The following commands are available:
|
||||
|
||||
page $SIZE=`(letter|legal|a4)`
|
||||
Sets the PDF page size for the output.
|
||||
|
||||
font {NAME=`name`} {PATH=`path`}
|
||||
Sets the primary font for the output.
|
||||
|
||||
sub `replacement text` {FOR=`text_to_replace`} {IN=`tag`}
|
||||
Declares a substitution. Wherever text_to_replace is encountered in the
|
||||
document it will be replaced with "replacement text".
|
||||
|
||||
If `tag` is set, this substitution will only be applied to the values of
|
||||
that tag.
|
||||
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
.\" Manpage for ptulsconv
|
||||
.\" Contact https://github.com/iluvcapra/ptulsconv
|
||||
.TH ptulsconv 1 "15 May 2020" "0.8.2" "ptulsconv man page"
|
||||
.SH NAME
|
||||
.BR "ptulsconv" " \- convert
|
||||
.IR "Avid Pro Tools" " text exports"
|
||||
.SH SYNOPSIS
|
||||
ptulsconv [OPTIONS] Export.txt
|
||||
.SH DESCRIPTION
|
||||
Convert a Pro Tools text export into ADR reports.
|
||||
.SH OPTIONS
|
||||
.IP "-h, --help"
|
||||
show a help message and exit.
|
||||
.TP
|
||||
.RI "--show-available-tags"
|
||||
Print a list of tags that are interpreted and exit.
|
||||
.SH AUTHOR
|
||||
Jamie Hardt (contact at https://github.com/iluvcapra/ptulsconv)
|
||||
@@ -1,5 +1,8 @@
|
||||
from ptulsconv.docparser.ptuls_grammar import protools_text_export_grammar
|
||||
"""
|
||||
Parse and convert Pro Tools text exports
|
||||
"""
|
||||
|
||||
__version__ = '0.8.3'
|
||||
__version__ = '1.0.7'
|
||||
__author__ = 'Jamie Hardt'
|
||||
__license__ = 'MIT'
|
||||
__copyright__ = "%s %s (c) 2023 %s. All rights reserved." % (__name__, __version__, __author__)
|
||||
|
||||
@@ -2,26 +2,31 @@ from optparse import OptionParser, OptionGroup
|
||||
import datetime
|
||||
import sys
|
||||
|
||||
from ptulsconv import __name__, __version__, __author__
|
||||
from ptulsconv import __name__, __version__, __author__, __copyright__
|
||||
from ptulsconv.commands import convert
|
||||
from ptulsconv.reporting import print_status_style, print_banner_style, print_section_header_style, print_fatal_error
|
||||
|
||||
|
||||
# TODO: Support Top-level modes
|
||||
|
||||
# Modes we want:
|
||||
# - "raw" : Output the parsed text export document with no further processing, as json
|
||||
# - "tagged"? : Output the parsed result of the TagCompiler
|
||||
# - "doc" : Generate a full panoply of PDF reports contextually based on tagging
|
||||
|
||||
|
||||
def dump_field_map(output=sys.stdout):
|
||||
from ptulsconv.docparser.tag_mapping import TagMapping
|
||||
from ptulsconv.docparser.adr_entity import ADRLine
|
||||
|
||||
from ptulsconv.docparser.adr_entity import ADRLine, GenericEvent
|
||||
|
||||
TagMapping.print_rules(GenericEvent, output=output)
|
||||
TagMapping.print_rules(ADRLine, output=output)
|
||||
|
||||
|
||||
def dump_formats():
|
||||
print_section_header_style("`raw` format:")
|
||||
sys.stderr.write("A JSON document of the parsed Pro Tools export.\n")
|
||||
print_section_header_style("`tagged` Format:")
|
||||
sys.stderr.write("A JSON document containing one record for each clip, with\n"
|
||||
"all tags parsed and all tagging rules applied. \n")
|
||||
print_section_header_style("`doc` format:")
|
||||
sys.stderr.write("Creates a directory with folders for different types\n"
|
||||
"of ADR reports.\n\n")
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
"""Entry point for the command-line invocation"""
|
||||
parser = OptionParser()
|
||||
@@ -49,6 +54,13 @@ def main():
|
||||
description='Print useful information and exit without processing '
|
||||
'input files.')
|
||||
|
||||
informational_options.add_option('--show-formats',
|
||||
dest='show_formats',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Display helpful information about the '
|
||||
'available output formats.')
|
||||
|
||||
informational_options.add_option('--show-available-tags',
|
||||
dest='show_tags',
|
||||
action='store_true',
|
||||
@@ -58,9 +70,10 @@ def main():
|
||||
|
||||
parser.add_option_group(informational_options)
|
||||
|
||||
print_banner_style(__copyright__)
|
||||
|
||||
(options, args) = parser.parse_args(sys.argv)
|
||||
|
||||
print_banner_style("%s %s (c) 2021 %s. All rights reserved." % (__name__, __version__, __author__))
|
||||
|
||||
print_section_header_style("Startup")
|
||||
print_status_style("This run started %s" % (datetime.datetime.now().isoformat()))
|
||||
@@ -69,6 +82,10 @@ def main():
|
||||
dump_field_map()
|
||||
sys.exit(0)
|
||||
|
||||
elif options.show_formats:
|
||||
dump_formats()
|
||||
sys.exit(0)
|
||||
|
||||
if len(args) < 2:
|
||||
print_fatal_error("Error: No input file")
|
||||
parser.print_help(sys.stderr)
|
||||
|
||||
@@ -1,21 +1,32 @@
|
||||
from fractions import Fraction
|
||||
import re
|
||||
"""
|
||||
Useful functions for parsing and working with timecode.
|
||||
"""
|
||||
|
||||
import math
|
||||
import re
|
||||
from collections import namedtuple
|
||||
from fractions import Fraction
|
||||
from typing import Optional, SupportsFloat
|
||||
|
||||
|
||||
class TimecodeFormat(namedtuple("_TimecodeFormat", "frame_duration logical_fps drop_frame")):
|
||||
|
||||
def smpte_to_seconds(self, smpte: str) -> Fraction:
|
||||
"""
|
||||
A struct representing a timecode datum.
|
||||
"""
|
||||
|
||||
def smpte_to_seconds(self, smpte: str) -> Optional[Fraction]:
|
||||
frame_count = smpte_to_frame_count(smpte, self.logical_fps, drop_frame_hint=self.drop_frame)
|
||||
return frame_count * self.frame_duration
|
||||
if frame_count is None:
|
||||
return None
|
||||
else:
|
||||
return frame_count * self.frame_duration
|
||||
|
||||
def seconds_to_smpte(self, seconds: Fraction) -> str:
|
||||
def seconds_to_smpte(self, seconds: SupportsFloat) -> str:
|
||||
frame_count = int(seconds / self.frame_duration)
|
||||
return frame_count_to_smpte(frame_count, self.logical_fps, self.drop_frame)
|
||||
|
||||
|
||||
def smpte_to_frame_count(smpte_rep_string: str, frames_per_logical_second: int, drop_frame_hint=False) -> int:
|
||||
def smpte_to_frame_count(smpte_rep_string: str, frames_per_logical_second: int, drop_frame_hint=False) -> Optional[int]:
|
||||
"""
|
||||
Convert a string with a SMPTE timecode representation into a frame count.
|
||||
|
||||
@@ -28,7 +39,11 @@ def smpte_to_frame_count(smpte_rep_string: str, frames_per_logical_second: int,
|
||||
"""
|
||||
assert frames_per_logical_second in [24, 25, 30, 48, 50, 60]
|
||||
|
||||
m = re.search("(\d?\d)[:;](\d\d)[:;](\d\d)([:;])(\d\d)(\.\d+)?", smpte_rep_string)
|
||||
m = re.search(r'(\d?\d)[:;](\d\d)[:;](\d\d)([:;])(\d\d)(\.\d+)?', smpte_rep_string)
|
||||
|
||||
if m is None:
|
||||
return None
|
||||
|
||||
hh, mm, ss, sep, ff, frac = m.groups()
|
||||
hh, mm, ss, ff, frac = int(hh), int(mm), int(ss), int(ff), float(frac or 0.0)
|
||||
|
||||
@@ -47,14 +62,14 @@ def smpte_to_frame_count(smpte_rep_string: str, frames_per_logical_second: int,
|
||||
frames_dropped_per_inst = (frames_per_logical_second / 15)
|
||||
mins = hh * 60 + mm
|
||||
inst_count = mins - math.floor(mins / 10)
|
||||
dropped_frames = frames_dropped_per_inst * inst_count
|
||||
dropped_frames = int(frames_dropped_per_inst) * inst_count
|
||||
frames = raw_frames - dropped_frames
|
||||
|
||||
return frames
|
||||
|
||||
|
||||
def frame_count_to_smpte(frame_count: int, frames_per_logical_second: int, drop_frame: bool = False,
|
||||
fractional_frame: float = None) -> str:
|
||||
fractional_frame: Optional[float] = None) -> str:
|
||||
assert frames_per_logical_second in [24, 25, 30, 48, 50, 60]
|
||||
assert fractional_frame is None or fractional_frame < 1.0
|
||||
|
||||
@@ -80,8 +95,10 @@ def frame_count_to_smpte(frame_count: int, frames_per_logical_second: int, drop_
|
||||
return "%02i:%02i:%02i%s%02i" % (hh, mm, ss, separator, ff)
|
||||
|
||||
|
||||
def footage_to_frame_count(footage_string):
|
||||
m = re.search("(\d+)\+(\d+)(\.\d+)?", footage_string)
|
||||
def footage_to_frame_count(footage_string) -> Optional[int]:
|
||||
m = re.search(r'(\d+)\+(\d+)(\.\d+)?', footage_string)
|
||||
if m is None:
|
||||
return None
|
||||
feet, frm, frac = m.groups()
|
||||
feet, frm, frac = int(feet), int(frm), float(frac or 0.0)
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
"""
|
||||
This module provides the main input document parsing and transform
|
||||
implementation.
|
||||
"""
|
||||
import datetime
|
||||
import os
|
||||
|
||||
@@ -5,6 +9,7 @@ import sys
|
||||
from itertools import chain
|
||||
import csv
|
||||
from typing import List
|
||||
from fractions import Fraction
|
||||
|
||||
from .docparser.adr_entity import make_entities
|
||||
from .reporting import print_section_header_style, print_status_style, print_warning
|
||||
@@ -25,9 +30,16 @@ from json import JSONEncoder
|
||||
|
||||
|
||||
class MyEncoder(JSONEncoder):
|
||||
"""
|
||||
A subclass of :class:`JSONEncoder` which encodes :class:`Fraction` objects
|
||||
as a dict.
|
||||
"""
|
||||
force_denominator: Optional[int]
|
||||
|
||||
def default(self, o):
|
||||
"""
|
||||
|
||||
"""
|
||||
if isinstance(o, Fraction):
|
||||
return dict(numerator=o.numerator, denominator=o.denominator)
|
||||
else:
|
||||
@@ -35,6 +47,11 @@ class MyEncoder(JSONEncoder):
|
||||
|
||||
|
||||
def output_adr_csv(lines: List[ADRLine], time_format: TimecodeFormat):
|
||||
"""
|
||||
Writes ADR lines as CSV to the current working directory. Creates directories
|
||||
for each character number and name pair, and within that directory, creates
|
||||
a CSV file for each reel.
|
||||
"""
|
||||
reels = set([ln.reel for ln in lines])
|
||||
|
||||
for n, name in [(n.character_id, n.character_name) for n in lines]:
|
||||
@@ -59,26 +76,54 @@ def output_adr_csv(lines: List[ADRLine], time_format: TimecodeFormat):
|
||||
'Reason', 'Note', 'TV'])
|
||||
|
||||
for event in these_lines:
|
||||
this_start = event.start or 0
|
||||
this_finish = event.finish or 0
|
||||
this_row = [event.title, event.character_name, event.cue_number,
|
||||
event.reel, event.version,
|
||||
time_format.seconds_to_smpte(event.start), time_format.seconds_to_smpte(event.finish),
|
||||
float(event.start), float(event.finish),
|
||||
time_format.seconds_to_smpte(this_start), time_format.seconds_to_smpte(this_finish),
|
||||
float(this_start), float(this_finish),
|
||||
event.prompt,
|
||||
event.reason, event.note, "TV" if event.tv else ""]
|
||||
|
||||
writer.writerow(this_row)
|
||||
os.chdir("..")
|
||||
|
||||
#
|
||||
# def output_avid_markers(lines):
|
||||
# reels = set([ln['Reel'] for ln in lines if 'Reel' in ln.keys()])
|
||||
#
|
||||
# for reel in reels:
|
||||
# pass
|
||||
|
||||
def generate_documents(session_tc_format, scenes, adr_lines: Iterator[ADRLine], title):
|
||||
"""
|
||||
Create PDF output.
|
||||
"""
|
||||
print_section_header_style("Creating PDF Reports")
|
||||
report_date = datetime.datetime.now()
|
||||
reports_dir = "%s_%s" % (title, report_date.strftime("%Y-%m-%d_%H%M%S"))
|
||||
os.makedirs(reports_dir, exist_ok=False)
|
||||
os.chdir(reports_dir)
|
||||
|
||||
client = next((x.client for x in adr_lines), "")
|
||||
supervisor = next((x.supervisor for x in adr_lines), "")
|
||||
|
||||
output_continuity(scenes=scenes, tc_display_format=session_tc_format,
|
||||
title=title, client=client, supervisor=supervisor)
|
||||
|
||||
# reels = sorted([r for r in compiler.compile_all_time_spans() if r[0] == 'Reel'],
|
||||
# key=lambda x: x[2])
|
||||
reels = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
|
||||
|
||||
if len(adr_lines) == 0:
|
||||
print_status_style("No ADR lines were found in the "
|
||||
"input document. ADR reports will not be generated.")
|
||||
|
||||
else:
|
||||
create_adr_reports(adr_lines, tc_display_format=session_tc_format,
|
||||
reel_list=sorted(reels))
|
||||
|
||||
|
||||
def create_adr_reports(lines: List[ADRLine], tc_display_format: TimecodeFormat, reel_list):
|
||||
|
||||
def create_adr_reports(lines: List[ADRLine], tc_display_format: TimecodeFormat, reel_list: List[str]):
|
||||
"""
|
||||
Creates a directory hierarchy and a respective set of ADR reports,
|
||||
given a list of lines.
|
||||
"""
|
||||
|
||||
print_status_style("Creating ADR Report")
|
||||
output_summary(lines, tc_display_format=tc_display_format)
|
||||
|
||||
@@ -103,31 +148,20 @@ def create_adr_reports(lines: List[ADRLine], tc_display_format: TimecodeFormat,
|
||||
output_adr_csv(lines, time_format=tc_display_format)
|
||||
os.chdir("..")
|
||||
|
||||
# print_status_style("Creating Avid Marker XML files")
|
||||
# os.makedirs("Avid Markers", exist_ok=True)
|
||||
# os.chdir("Avid Markers")
|
||||
# output_avid_markers(lines)
|
||||
# os.chdir("..")
|
||||
|
||||
print_status_style("Creating Scripts directory and reports")
|
||||
os.makedirs("Talent Scripts", exist_ok=True)
|
||||
os.chdir("Talent Scripts")
|
||||
output_talent_sides(lines, tc_display_format=tc_display_format)
|
||||
|
||||
|
||||
# def parse_text_export(file):
|
||||
# ast = ptulsconv.protools_text_export_grammar.parse(file.read())
|
||||
# dict_parser = ptulsconv.DictionaryParserVisitor()
|
||||
# parsed = dict_parser.visit(ast)
|
||||
# print_status_style('Session title: %s' % parsed['header']['session_name'])
|
||||
# print_status_style('Session timecode format: %f' % parsed['header']['timecode_format'])
|
||||
# print_status_style('Fount %i tracks' % len(parsed['tracks']))
|
||||
# print_status_style('Found %i markers' % len(parsed['markers']))
|
||||
# return parsed
|
||||
|
||||
|
||||
def convert(input_file, major_mode='fmpxml', output=sys.stdout, warnings=True):
|
||||
def convert(input_file, major_mode, output=sys.stdout, warnings=True):
|
||||
"""
|
||||
Primary worker function, accepts the input file and decides
|
||||
what to do with it based on the `major_mode`.
|
||||
|
||||
:param input_file: a path to the input file.
|
||||
:param major_mode: the selected output mode, 'raw', 'tagged' or 'doc'.
|
||||
"""
|
||||
session = parse_document(input_file)
|
||||
session_tc_format = session.header.timecode_format
|
||||
|
||||
@@ -142,41 +176,34 @@ def convert(input_file, major_mode='fmpxml', output=sys.stdout, warnings=True):
|
||||
if major_mode == 'tagged':
|
||||
output.write(MyEncoder().encode(compiled_events))
|
||||
|
||||
else:
|
||||
elif major_mode == 'doc':
|
||||
generic_events, adr_lines = make_entities(compiled_events)
|
||||
|
||||
scenes = sorted([s for s in compiler.compile_all_time_spans() if s[0] == 'Sc'],
|
||||
key=lambda x: x[2])
|
||||
|
||||
# TODO: Breakdown by titles
|
||||
titles = set([x.title for x in (generic_events + adr_lines)])
|
||||
assert len(titles) == 1, "Multiple titles per export is not supported"
|
||||
if len(titles) != 1:
|
||||
print_warning("Multiple titles per export is not supported, "
|
||||
"found multiple titles: %s Exiting." % titles)
|
||||
exit(-1)
|
||||
|
||||
print(titles)
|
||||
title = list(titles)[0]
|
||||
|
||||
print_status_style("%i generic events found." % len(generic_events))
|
||||
print_status_style("%i ADR events found." % len(adr_lines))
|
||||
|
||||
if warnings:
|
||||
perform_adr_validations(adr_lines)
|
||||
|
||||
if major_mode == 'doc':
|
||||
print_section_header_style("Creating PDF Reports")
|
||||
report_date = datetime.datetime.now()
|
||||
reports_dir = "%s_%s" % (list(titles)[0], report_date.strftime("%Y-%m-%d_%H%M%S"))
|
||||
os.makedirs(reports_dir, exist_ok=False)
|
||||
os.chdir(reports_dir)
|
||||
generate_documents(session_tc_format, scenes, adr_lines, title)
|
||||
|
||||
|
||||
scenes = sorted([s for s in compiler.compile_all_time_spans() if s[0] == 'Sc'],
|
||||
key=lambda x: x[2])
|
||||
|
||||
output_continuity(scenes=scenes, tc_display_format=session_tc_format,
|
||||
title=list(titles)[0], client="", supervisor="")
|
||||
|
||||
# reels = sorted([r for r in compiler.compile_all_time_spans() if r[0] == 'Reel'],
|
||||
# key=lambda x: x[2])
|
||||
reels = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
|
||||
|
||||
create_adr_reports(adr_lines,
|
||||
tc_display_format=session_tc_format,
|
||||
reel_list=sorted(reels))
|
||||
|
||||
|
||||
def perform_adr_validations(lines):
|
||||
def perform_adr_validations(lines : Iterator[ADRLine]):
|
||||
"""
|
||||
Performs validations on the input.
|
||||
"""
|
||||
for warning in chain(validate_unique_field(lines,
|
||||
field='cue_number',
|
||||
scope='title'),
|
||||
@@ -193,4 +220,3 @@ def perform_adr_validations(lines):
|
||||
key_field='character_id',
|
||||
dependent_field='actor_name')):
|
||||
print_warning(warning.report_message())
|
||||
|
||||
|
||||
@@ -1 +1,5 @@
|
||||
from .doc_parser_visitor import parse_document
|
||||
"""
|
||||
Docparser module
|
||||
"""
|
||||
|
||||
from .pt_doc_parser import parse_document
|
||||
@@ -1,5 +1,10 @@
|
||||
"""
|
||||
This module defines classes and methods for converting :class:`Event` objects into
|
||||
:class:`ADRLine` objects.
|
||||
"""
|
||||
|
||||
from ptulsconv.docparser.tag_compiler import Event
|
||||
from typing import Optional, List, Tuple, Any
|
||||
from typing import Optional, List, Tuple
|
||||
from dataclasses import dataclass
|
||||
from fractions import Fraction
|
||||
|
||||
@@ -7,22 +12,37 @@ from ptulsconv.docparser.tag_mapping import TagMapping
|
||||
|
||||
|
||||
def make_entities(from_events: List[Event]) -> Tuple[List['GenericEvent'], List['ADRLine']]:
|
||||
"""
|
||||
Accepts a list of Events and converts them into either ADRLine events or
|
||||
GenricEvents by calling :func:`make_entity` on each member.
|
||||
|
||||
:param from_events: A list of `Event` objects.
|
||||
|
||||
:returns: A tuple of two lists, the first containing :class:`GenericEvent` and the
|
||||
second containing :class:`ADRLine`.
|
||||
"""
|
||||
generic_events = list()
|
||||
adr_lines = list()
|
||||
|
||||
for event in from_events:
|
||||
result: Any = make_entity(event)
|
||||
result = make_entity(event)
|
||||
if type(result) is ADRLine:
|
||||
result: ADRLine
|
||||
adr_lines.append(result)
|
||||
elif type(result) is GenericEvent:
|
||||
result: GenericEvent
|
||||
generic_events.append(result)
|
||||
|
||||
return generic_events, adr_lines
|
||||
|
||||
|
||||
def make_entity(from_event: Event) -> Optional[object]:
|
||||
"""
|
||||
Accepts an event and creates either an :class:`ADRLine` or a
|
||||
:class:`GenericEvent`. An event is an "ADRLine" if it has a cue number/"QN"
|
||||
tag field.
|
||||
|
||||
:param from_event: An :class:`Event`.
|
||||
|
||||
"""
|
||||
instance = GenericEvent
|
||||
tag_map = GenericEvent.tag_mapping
|
||||
if 'QN' in from_event.tags.keys():
|
||||
@@ -41,17 +61,17 @@ def make_entity(from_event: Event) -> Optional[object]:
|
||||
|
||||
@dataclass
|
||||
class GenericEvent:
|
||||
title: Optional[str]
|
||||
supervisor: Optional[str]
|
||||
client: Optional[str]
|
||||
scene: Optional[str]
|
||||
version: Optional[str]
|
||||
reel: Optional[str]
|
||||
start: Optional[Fraction]
|
||||
finish: Optional[Fraction]
|
||||
omitted: bool
|
||||
note: Optional[str]
|
||||
requested_by: Optional[str]
|
||||
title: str = ""
|
||||
supervisor: Optional[str] = None
|
||||
client: Optional[str] = None
|
||||
scene: Optional[str] = None
|
||||
version: Optional[str] = None
|
||||
reel: Optional[str] = None
|
||||
start: Fraction = Fraction(0,1)
|
||||
finish: Fraction = Fraction(0,1)
|
||||
omitted: bool = False
|
||||
note: Optional[str] = None
|
||||
requested_by: Optional[str] = None
|
||||
|
||||
tag_mapping = [
|
||||
TagMapping(source='Title', target="title", alt=TagMapping.ContentSource.Session),
|
||||
@@ -69,21 +89,22 @@ class GenericEvent:
|
||||
|
||||
@dataclass
|
||||
class ADRLine(GenericEvent):
|
||||
priority: Optional[int]
|
||||
cue_number: Optional[str]
|
||||
character_id: Optional[str]
|
||||
character_name: Optional[str]
|
||||
actor_name: Optional[str]
|
||||
prompt: Optional[str]
|
||||
reason: Optional[str]
|
||||
time_budget_mins: Optional[float]
|
||||
spot: Optional[str]
|
||||
shot: Optional[str]
|
||||
effort: bool
|
||||
tv: bool
|
||||
tbw: bool
|
||||
adlib: bool
|
||||
optional: bool
|
||||
|
||||
priority: Optional[int] = None
|
||||
cue_number: Optional[str] = None
|
||||
character_id: Optional[str] = None
|
||||
character_name: Optional[str] = None
|
||||
actor_name: Optional[str] = None
|
||||
prompt: Optional[str] = None
|
||||
reason: Optional[str] = None
|
||||
time_budget_mins: Optional[float] = None
|
||||
spot: Optional[str] = None
|
||||
shot: Optional[str] = None
|
||||
effort: bool = False
|
||||
tv: bool = False
|
||||
tbw: bool = False
|
||||
adlib: bool = False
|
||||
optional: bool = False
|
||||
|
||||
tag_mapping = [
|
||||
|
||||
@@ -111,30 +132,4 @@ class ADRLine(GenericEvent):
|
||||
formatter=(lambda x: len(x) > 0))
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
self.title = None
|
||||
self.supervisor = None
|
||||
self.client = None
|
||||
self.scene = None
|
||||
self.version = None
|
||||
self.reel = None
|
||||
self.start = None
|
||||
self.finish = None
|
||||
self.priority = None
|
||||
self.cue_number = None
|
||||
self.character_id = None
|
||||
self.character_name = None
|
||||
self.actor_name = None
|
||||
self.prompt = None
|
||||
self.reason = None
|
||||
self.requested_by = None
|
||||
self.time_budget_mins = None
|
||||
self.note = None
|
||||
self.spot = None
|
||||
self.shot = None
|
||||
self.effort = False
|
||||
self.tv = False
|
||||
self.tbw = False
|
||||
self.omitted = False
|
||||
self.adlib = False
|
||||
self.optional = False
|
||||
|
||||
|
||||
@@ -21,7 +21,8 @@ class SessionDescriptor:
|
||||
|
||||
def markers_timed(self) -> Iterator[Tuple['MarkerDescriptor', Fraction]]:
|
||||
for marker in self.markers:
|
||||
marker_time = self.header.convert_timecode(marker.location)
|
||||
marker_time = Fraction(marker.time_reference, int(self.header.sample_rate))
|
||||
#marker_time = self.header.convert_timecode(marker.location)
|
||||
yield marker, marker_time
|
||||
|
||||
def tracks_clips(self) -> Iterator[Tuple['TrackDescriptor', 'TrackClipDescriptor']]:
|
||||
|
||||
@@ -1,16 +1,90 @@
|
||||
from parsimonious.nodes import NodeVisitor
|
||||
from parsimonious.grammar import Grammar
|
||||
|
||||
from .doc_entity import SessionDescriptor, HeaderDescriptor, TrackDescriptor, FileDescriptor, \
|
||||
TrackClipDescriptor, ClipDescriptor, PluginDescriptor, MarkerDescriptor
|
||||
|
||||
|
||||
protools_text_export_grammar = Grammar(
|
||||
r"""
|
||||
document = header files_section? clips_section? plugin_listing? track_listing? markers_listing?
|
||||
header = "SESSION NAME:" fs string_value rs
|
||||
"SAMPLE RATE:" fs float_value rs
|
||||
"BIT DEPTH:" fs integer_value "-bit" rs
|
||||
"SESSION START TIMECODE:" fs string_value rs
|
||||
"TIMECODE FORMAT:" fs frame_rate " Drop"? " Frame" rs
|
||||
"# OF AUDIO TRACKS:" fs integer_value rs
|
||||
"# OF AUDIO CLIPS:" fs integer_value rs
|
||||
"# OF AUDIO FILES:" fs integer_value rs block_ending
|
||||
|
||||
frame_rate = ("60" / "59.94" / "30" / "29.97" / "25" / "24" / "23.976")
|
||||
files_section = files_header files_column_header file_record* block_ending
|
||||
files_header = "F I L E S I N S E S S I O N" rs
|
||||
files_column_header = "Filename" isp fs "Location" rs
|
||||
file_record = string_value fs string_value rs
|
||||
|
||||
clips_section = clips_header clips_column_header clip_record* block_ending
|
||||
clips_header = "O N L I N E C L I P S I N S E S S I O N" rs
|
||||
clips_column_header = string_value fs string_value rs
|
||||
clip_record = string_value fs string_value (fs "[" integer_value "]")? rs
|
||||
|
||||
plugin_listing = plugin_header plugin_column_header plugin_record* block_ending
|
||||
plugin_header = "P L U G - I N S L I S T I N G" rs
|
||||
plugin_column_header = "MANUFACTURER " fs "PLUG-IN NAME " fs
|
||||
"VERSION " fs "FORMAT " fs "STEMS " fs
|
||||
"NUMBER OF INSTANCES" rs
|
||||
plugin_record = string_value fs string_value fs string_value fs
|
||||
string_value fs string_value fs string_value rs
|
||||
|
||||
track_listing = track_listing_header track_block*
|
||||
track_block = track_list_top ( track_clip_entry / block_ending )*
|
||||
|
||||
track_listing_header = "T R A C K L I S T I N G" rs
|
||||
track_list_top = "TRACK NAME:" fs string_value rs
|
||||
"COMMENTS:" fs string_value rs
|
||||
"USER DELAY:" fs integer_value " Samples" rs
|
||||
"STATE: " track_state_list rs
|
||||
("PLUG-INS: " ( fs string_value )* rs)?
|
||||
"CHANNEL " fs "EVENT " fs "CLIP NAME " fs
|
||||
"START TIME " fs "END TIME " fs "DURATION " fs
|
||||
("TIMESTAMP " fs)? "STATE" rs
|
||||
|
||||
track_state_list = (track_state " ")*
|
||||
|
||||
track_state = "Solo" / "Muted" / "Inactive" / "Hidden"
|
||||
|
||||
track_clip_entry = integer_value isp fs
|
||||
integer_value isp fs
|
||||
string_value fs
|
||||
string_value fs string_value fs string_value fs (string_value fs)?
|
||||
track_clip_state rs
|
||||
|
||||
track_clip_state = ("Muted" / "Unmuted")
|
||||
|
||||
markers_listing = markers_listing_header markers_column_header marker_record*
|
||||
markers_listing_header = "M A R K E R S L I S T I N G" rs
|
||||
markers_column_header = "# " fs "LOCATION " fs "TIME REFERENCE " fs
|
||||
"UNITS " fs "NAME " fs "COMMENTS" rs
|
||||
|
||||
marker_record = integer_value isp fs string_value fs integer_value isp fs
|
||||
string_value fs string_value fs string_value rs
|
||||
|
||||
fs = "\t"
|
||||
rs = "\n"
|
||||
block_ending = rs rs
|
||||
string_value = ~r"[^\t\n]*"
|
||||
integer_value = ~r"\d+"
|
||||
float_value = ~r"\d+(\.\d+)?"
|
||||
isp = ~r"[^\d\t\n]*"
|
||||
""")
|
||||
|
||||
|
||||
def parse_document(path: str) -> SessionDescriptor:
|
||||
"""
|
||||
Parse a Pro Tools text export.
|
||||
:param path: path to a file
|
||||
:return: the session descriptor
|
||||
"""
|
||||
from .ptuls_grammar import protools_text_export_grammar
|
||||
with open(path, 'r') as f:
|
||||
ast = protools_text_export_grammar.parse(f.read())
|
||||
return DocParserVisitor().visit(ast)
|
||||
@@ -1,74 +0,0 @@
|
||||
from parsimonious.grammar import Grammar
|
||||
|
||||
protools_text_export_grammar = Grammar(
|
||||
r"""
|
||||
document = header files_section? clips_section? plugin_listing? track_listing? markers_listing?
|
||||
header = "SESSION NAME:" fs string_value rs
|
||||
"SAMPLE RATE:" fs float_value rs
|
||||
"BIT DEPTH:" fs integer_value "-bit" rs
|
||||
"SESSION START TIMECODE:" fs string_value rs
|
||||
"TIMECODE FORMAT:" fs frame_rate " Drop"? " Frame" rs
|
||||
"# OF AUDIO TRACKS:" fs integer_value rs
|
||||
"# OF AUDIO CLIPS:" fs integer_value rs
|
||||
"# OF AUDIO FILES:" fs integer_value rs block_ending
|
||||
|
||||
frame_rate = ("60" / "59.94" / "30" / "29.97" / "25" / "24" / "23.976")
|
||||
files_section = files_header files_column_header file_record* block_ending
|
||||
files_header = "F I L E S I N S E S S I O N" rs
|
||||
files_column_header = "Filename" isp fs "Location" rs
|
||||
file_record = string_value fs string_value rs
|
||||
|
||||
clips_section = clips_header clips_column_header clip_record* block_ending
|
||||
clips_header = "O N L I N E C L I P S I N S E S S I O N" rs
|
||||
clips_column_header = string_value fs string_value rs
|
||||
clip_record = string_value fs string_value (fs "[" integer_value "]")? rs
|
||||
|
||||
plugin_listing = plugin_header plugin_column_header plugin_record* block_ending
|
||||
plugin_header = "P L U G - I N S L I S T I N G" rs
|
||||
plugin_column_header = "MANUFACTURER " fs "PLUG-IN NAME " fs
|
||||
"VERSION " fs "FORMAT " fs "STEMS " fs
|
||||
"NUMBER OF INSTANCES" rs
|
||||
plugin_record = string_value fs string_value fs string_value fs
|
||||
string_value fs string_value fs string_value rs
|
||||
|
||||
track_listing = track_listing_header track_block*
|
||||
track_block = track_list_top ( track_clip_entry / block_ending )*
|
||||
|
||||
track_listing_header = "T R A C K L I S T I N G" rs
|
||||
track_list_top = "TRACK NAME:" fs string_value rs
|
||||
"COMMENTS:" fs string_value rs
|
||||
"USER DELAY:" fs integer_value " Samples" rs
|
||||
"STATE: " track_state_list rs
|
||||
("PLUG-INS: " ( fs string_value )* rs)?
|
||||
"CHANNEL " fs "EVENT " fs "CLIP NAME " fs
|
||||
"START TIME " fs "END TIME " fs "DURATION " fs
|
||||
("TIMESTAMP " fs)? "STATE" rs
|
||||
|
||||
track_state_list = (track_state " ")*
|
||||
|
||||
track_state = "Solo" / "Muted" / "Inactive" / "Hidden"
|
||||
|
||||
track_clip_entry = integer_value isp fs
|
||||
integer_value isp fs
|
||||
string_value fs
|
||||
string_value fs string_value fs string_value fs (string_value fs)?
|
||||
track_clip_state rs
|
||||
|
||||
track_clip_state = ("Muted" / "Unmuted")
|
||||
|
||||
markers_listing = markers_listing_header markers_column_header marker_record*
|
||||
markers_listing_header = "M A R K E R S L I S T I N G" rs
|
||||
markers_column_header = "# " fs "LOCATION " fs "TIME REFERENCE " fs
|
||||
"UNITS " fs "NAME " fs "COMMENTS" rs
|
||||
|
||||
marker_record = integer_value isp fs string_value fs integer_value isp fs
|
||||
string_value fs string_value fs string_value rs
|
||||
|
||||
fs = "\t"
|
||||
rs = "\n"
|
||||
block_ending = rs rs
|
||||
string_value = ~"[^\t\n]*"
|
||||
integer_value = ~"\d+"
|
||||
float_value = ~"\d+(\.\d+)?"
|
||||
isp = ~"[^\d\t\n]*"
|
||||
""")
|
||||
@@ -1,4 +1,3 @@
|
||||
import sys
|
||||
from collections import namedtuple
|
||||
from fractions import Fraction
|
||||
from typing import Iterator, Tuple, Callable, Generator, Dict, List
|
||||
@@ -20,6 +19,10 @@ class Event:
|
||||
|
||||
|
||||
class TagCompiler:
|
||||
"""
|
||||
Uses a `SessionDescriptor` as a data source to produce `Intermediate`
|
||||
items.
|
||||
"""
|
||||
|
||||
Intermediate = namedtuple('Intermediate', 'track_content track_tags track_comment_tags '
|
||||
'clip_content clip_tags clip_tag_mode start finish')
|
||||
@@ -27,6 +30,9 @@ class TagCompiler:
|
||||
session: doc_entity.SessionDescriptor
|
||||
|
||||
def compile_all_time_spans(self) -> List[Tuple[str, str, Fraction, Fraction]]:
|
||||
"""
|
||||
:returns: A `List` of (key: str, value: str, start: Fraction, finish: Fraction)
|
||||
"""
|
||||
ret_list = list()
|
||||
for element in self.parse_data():
|
||||
if element.clip_tag_mode == TagPreModes.TIMESPAN:
|
||||
@@ -62,22 +68,31 @@ class TagCompiler:
|
||||
|
||||
def compile_events(self) -> Iterator[Event]:
|
||||
step0 = self.parse_data()
|
||||
step1 = self.apply_appends(step0)
|
||||
step2 = self.collect_time_spans(step1)
|
||||
step3 = self.apply_tags(step2)
|
||||
for datum in step3:
|
||||
step1 = self.filter_out_directives(step0)
|
||||
step2 = self.apply_appends(step1)
|
||||
step3 = self.collect_time_spans(step2)
|
||||
step4 = self.apply_tags(step3)
|
||||
for datum in step4:
|
||||
yield Event(clip_name=datum[0], track_name=datum[1], session_name=datum[2],
|
||||
tags=datum[3], start=datum[4], finish=datum[5])
|
||||
|
||||
def _marker_tags(self, at):
|
||||
retval = dict()
|
||||
applicable = [(m, t) for (m, t) in self.session.markers_timed() if t <= at]
|
||||
for marker, time in sorted(applicable, key=lambda x: x[1]):
|
||||
retval.update(parse_tags(marker.comments).tag_dict)
|
||||
retval.update(parse_tags(marker.name).tag_dict)
|
||||
for marker, _ in sorted(applicable, key=lambda x: x[1]):
|
||||
retval.update(parse_tags(marker.comments or "").tag_dict)
|
||||
retval.update(parse_tags(marker.name or "").tag_dict)
|
||||
|
||||
return retval
|
||||
|
||||
def filter_out_directives(self, clips : Iterator[Intermediate]) -> Iterator[Intermediate]:
|
||||
for clip in clips:
|
||||
if clip.clip_tag_mode == 'Directive':
|
||||
continue
|
||||
else:
|
||||
yield clip
|
||||
|
||||
|
||||
@staticmethod
|
||||
def _coalesce_tags(clip_tags: dict, track_tags: dict,
|
||||
track_comment_tags: dict,
|
||||
@@ -186,4 +201,4 @@ def apply_appends(source: Iterator,
|
||||
yield this_element
|
||||
this_element = element
|
||||
|
||||
yield this_element
|
||||
yield this_element
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from parsimonious import NodeVisitor, Grammar
|
||||
from typing import Dict, Optional
|
||||
from typing import Dict, Union
|
||||
from enum import Enum
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ class TagPreModes(Enum):
|
||||
NORMAL = 'Normal'
|
||||
APPEND = 'Append'
|
||||
TIMESPAN = 'Timespan'
|
||||
DIRECTIVE = 'Directive'
|
||||
|
||||
|
||||
tag_grammar = Grammar(
|
||||
@@ -18,24 +19,24 @@ tag_grammar = Grammar(
|
||||
key_tag = "[" key "]" word_sep?
|
||||
short_tag = "$" key "=" word word_sep?
|
||||
full_text_tag = "{" key "=" value "}" word_sep?
|
||||
key = ~"[A-Za-z][A-Za-z0-9_]*"
|
||||
value = ~"[^}]+"
|
||||
key = ~r"[A-Za-z][A-Za-z0-9_]*"
|
||||
value = ~r"[^}]+"
|
||||
tag_junk = word word_sep?
|
||||
word = ~"[^ \[\{\$][^ ]*"
|
||||
word_sep = ~" +"
|
||||
modifier = ("@" / "&") word_sep?
|
||||
word = ~r"[^ \[\{\$][^ ]*"
|
||||
word_sep = ~r" +"
|
||||
modifier = ("@" / "&" /"!") word_sep?
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def parse_tags(prompt) -> "TaggedStringResult":
|
||||
def parse_tags(prompt: str) -> "TaggedStringResult":
|
||||
ast = tag_grammar.parse(prompt)
|
||||
return TagListVisitor().visit(ast)
|
||||
|
||||
|
||||
class TaggedStringResult:
|
||||
content: Optional[str]
|
||||
tag_dict: Optional[Dict[str, str]]
|
||||
content: str
|
||||
tag_dict: Dict[str, str]
|
||||
mode: TagPreModes
|
||||
|
||||
def __init__(self, content, tag_dict, mode):
|
||||
@@ -51,7 +52,7 @@ class TagListVisitor(NodeVisitor):
|
||||
modifier_opt, line_opt, _, tag_list_opt = visited_children
|
||||
|
||||
return TaggedStringResult(content=next(iter(line_opt), None),
|
||||
tag_dict=next(iter(tag_list_opt), None),
|
||||
tag_dict=next(iter(tag_list_opt), dict()),
|
||||
mode=TagPreModes(next(iter(modifier_opt), 'Normal'))
|
||||
)
|
||||
|
||||
@@ -65,6 +66,8 @@ class TagListVisitor(NodeVisitor):
|
||||
return TagPreModes.TIMESPAN
|
||||
elif node.text.startswith('&'):
|
||||
return TagPreModes.APPEND
|
||||
elif node.text.startswith('!'):
|
||||
return TagPreModes.DIRECTIVE
|
||||
else:
|
||||
return TagPreModes.NORMAL
|
||||
|
||||
|
||||
30
ptulsconv/footage.py
Normal file
30
ptulsconv/footage.py
Normal file
@@ -0,0 +1,30 @@
|
||||
"""
|
||||
Methods for converting string reprentations of film footage.
|
||||
"""
|
||||
from fractions import Fraction
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
|
||||
def footage_to_seconds(footage: str) -> Optional[Fraction]:
|
||||
"""
|
||||
Converts a string representation of a footage (35mm, 24fps)
|
||||
into a :class:`Fraction`, this fraction being a some number of
|
||||
seconds.
|
||||
|
||||
:param footage: A string reprenentation of a footage of the form
|
||||
resembling "90+01".
|
||||
"""
|
||||
m = re.match(r'(\d+)\+(\d+)(\.\d+)?', footage)
|
||||
if m is None:
|
||||
return None
|
||||
|
||||
feet, frames, _ = m.groups()
|
||||
feet, frames = int(feet), int(frames)
|
||||
|
||||
fps = 24
|
||||
frames_per_foot = 16
|
||||
|
||||
total_frames = feet * frames_per_foot + frames
|
||||
|
||||
return Fraction(total_frames, fps)
|
||||
@@ -1,4 +1,4 @@
|
||||
import ffmpeg # ffmpeg-python
|
||||
#import ffmpeg # ffmpeg-python
|
||||
|
||||
# TODO: Implement movie export
|
||||
|
||||
|
||||
@@ -9,9 +9,11 @@ from reportlab.platypus.frames import Frame
|
||||
from reportlab.pdfbase import pdfmetrics
|
||||
from reportlab.pdfbase.ttfonts import TTFont
|
||||
|
||||
from typing import List
|
||||
|
||||
# TODO: A Generic report useful for spotting
|
||||
# TODO: A report useful for M&E mixer's notes
|
||||
|
||||
# TODO: Use a default font that doesn't need to be installed
|
||||
|
||||
# This is from https://code.activestate.com/recipes/576832/ for
|
||||
# generating page count messages
|
||||
@@ -36,7 +38,7 @@ class ReportCanvas(canvas.Canvas):
|
||||
|
||||
def draw_page_number(self, page_count):
|
||||
self.saveState()
|
||||
self.setFont("Futura", 10)
|
||||
self.setFont('Helvetica', 10) #FIXME make this customizable
|
||||
self.drawString(0.5 * inch, 0.5 * inch, "Page %d of %d" % (self._pageNumber, page_count))
|
||||
right_edge = self._pagesize[0] - 0.5 * inch
|
||||
self.drawRightString(right_edge, 0.5 * inch, self._report_date.strftime("%m/%d/%Y %H:%M"))
|
||||
@@ -60,7 +62,8 @@ def make_doc_template(page_size, filename, document_title,
|
||||
document_header: str,
|
||||
client: str,
|
||||
document_subheader: str,
|
||||
left_margin=0.5 * inch) -> ADRDocTemplate:
|
||||
left_margin=0.5 * inch,
|
||||
fonts: List[TTFont] = []) -> ADRDocTemplate:
|
||||
right_margin = top_margin = bottom_margin = 0.5 * inch
|
||||
page_box = GRect(0., 0., page_size[0], page_size[1])
|
||||
_, page_box = page_box.split_x(left_margin, direction='l')
|
||||
@@ -85,7 +88,9 @@ def make_doc_template(page_size, filename, document_title,
|
||||
frames=frames,
|
||||
onPage=on_page_lambda)
|
||||
|
||||
pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))
|
||||
for font in fonts:
|
||||
pdfmetrics.registerFont(font)
|
||||
|
||||
doc = ADRDocTemplate(filename,
|
||||
title=document_title,
|
||||
author=supervisor,
|
||||
@@ -99,6 +104,11 @@ def make_doc_template(page_size, filename, document_title,
|
||||
|
||||
|
||||
def time_format(mins, zero_str="-"):
|
||||
"""
|
||||
Formats a duration `mins` into a string
|
||||
"""
|
||||
if mins is None:
|
||||
return zero_str
|
||||
if mins == 0. and zero_str is not None:
|
||||
return zero_str
|
||||
elif mins < 60.:
|
||||
@@ -110,11 +120,11 @@ def time_format(mins, zero_str="-"):
|
||||
|
||||
|
||||
def draw_header_footer(a_canvas: ReportCanvas, left_box, right_box, footer_box, title: str, supervisor: str,
|
||||
document_subheader: str, client: str, doc_title=""):
|
||||
document_subheader: str, client: str, doc_title="", font_name='Helvetica'):
|
||||
|
||||
(_supervisor_box, client_box,), title_box = right_box.divide_y([16., 16., ])
|
||||
title_box.draw_text_cell(a_canvas, title, "Futura", 18, inset_y=2., inset_x=5.)
|
||||
client_box.draw_text_cell(a_canvas, client, "Futura", 11, inset_y=2., inset_x=5.)
|
||||
title_box.draw_text_cell(a_canvas, title, font_name, 18, inset_y=2., inset_x=5.)
|
||||
client_box.draw_text_cell(a_canvas, client, font_name, 11, inset_y=2., inset_x=5.)
|
||||
|
||||
a_canvas.saveState()
|
||||
a_canvas.setLineWidth(0.5)
|
||||
@@ -131,13 +141,13 @@ def draw_header_footer(a_canvas: ReportCanvas, left_box, right_box, footer_box,
|
||||
|
||||
(doc_title_cell, spotting_version_cell,), _ = left_box.divide_y([18., 14], direction='d')
|
||||
|
||||
doc_title_cell.draw_text_cell(a_canvas, doc_title, 'Futura', 14., inset_y=2.)
|
||||
doc_title_cell.draw_text_cell(a_canvas, doc_title, font_name, 14., inset_y=2.)
|
||||
|
||||
if document_subheader is not None:
|
||||
spotting_version_cell.draw_text_cell(a_canvas, document_subheader, 'Futura', 12., inset_y=2.)
|
||||
spotting_version_cell.draw_text_cell(a_canvas, document_subheader, font_name, 12., inset_y=2.)
|
||||
|
||||
if supervisor is not None:
|
||||
a_canvas.setFont('Futura', 11.)
|
||||
a_canvas.setFont(font_name, 11.)
|
||||
a_canvas.drawCentredString(footer_box.min_x + footer_box.width / 2., footer_box.min_y, supervisor)
|
||||
|
||||
|
||||
|
||||
@@ -12,9 +12,9 @@ from ptulsconv.pdf import make_doc_template
|
||||
|
||||
# TODO: A Continuity
|
||||
|
||||
def table_for_scene(scene, tc_format):
|
||||
def table_for_scene(scene, tc_format, font_name = 'Helvetica'):
|
||||
scene_style = getSampleStyleSheet()['Normal']
|
||||
scene_style.fontName = 'Futura'
|
||||
scene_style.fontName = font_name
|
||||
scene_style.leftIndent = 0.
|
||||
scene_style.leftPadding = 0.
|
||||
scene_style.spaceAfter = 18.
|
||||
@@ -29,18 +29,18 @@ def table_for_scene(scene, tc_format):
|
||||
style = [('VALIGN', (0, 0), (-1, -1), 'TOP'),
|
||||
('LEFTPADDING', (0, 0), (0, 0), 0.0),
|
||||
('BOTTOMPADDING', (0, 0), (-1, -1), 12.),
|
||||
('FONTNAME', (0, 0), (-1, -1), 'Futura')]
|
||||
('FONTNAME', (0, 0), (-1, -1), font_name)]
|
||||
|
||||
return Table(data=[row], style=style, colWidths=[1.0 * inch, 6.5 * inch])
|
||||
|
||||
|
||||
def output_report(scenes: List[Tuple[str, str, Fraction, Fraction]],
|
||||
tc_display_format: TimecodeFormat,
|
||||
title: str, client: str, supervisor):
|
||||
title: str, client: str, supervisor, paper_size = letter):
|
||||
filename = "%s Continuity.pdf" % title
|
||||
document_header = "Continuity"
|
||||
|
||||
doc = make_doc_template(page_size=portrait(letter),
|
||||
doc = make_doc_template(page_size=portrait(paper_size),
|
||||
filename=filename,
|
||||
document_title="Continuity",
|
||||
title=title,
|
||||
|
||||
@@ -148,7 +148,7 @@ def populate_columns(lines: List[ADRLine], columns, include_omitted, _page_size)
|
||||
styles = list()
|
||||
columns_widths = list()
|
||||
|
||||
sorted_character_numbers = sorted(set([x.character_id for x in lines]),
|
||||
sorted_character_numbers: List[str] = sorted(set([x.character_id for x in lines]),
|
||||
key=lambda x: str(x))
|
||||
|
||||
# construct column styles
|
||||
@@ -164,18 +164,21 @@ def populate_columns(lines: List[ADRLine], columns, include_omitted, _page_size)
|
||||
|
||||
for n in sorted_character_numbers:
|
||||
char_records = [x for x in lines if x.character_id == n]
|
||||
row_data = list()
|
||||
row_data2 = list()
|
||||
for col in columns:
|
||||
row1_index = len(data)
|
||||
row2_index = row1_index + 1
|
||||
row_data.append(col['value_getter'](list(char_records)))
|
||||
row_data2.append(col['value_getter2'](list(char_records)))
|
||||
styles.extend([('TEXTCOLOR', (0, row2_index), (-1, row2_index), colors.red),
|
||||
('LINEBELOW', (0, row2_index), (-1, row2_index), 0.5, colors.black)])
|
||||
if len(char_records) > 0:
|
||||
row_data = list()
|
||||
row_data2 = list()
|
||||
|
||||
data.append(row_data)
|
||||
data.append(row_data2)
|
||||
for col in columns:
|
||||
row1_index = len(data)
|
||||
row2_index = row1_index + 1
|
||||
row_data.append(col['value_getter'](list(char_records)))
|
||||
row_data2.append(col['value_getter2'](list(char_records)))
|
||||
|
||||
styles.extend([('TEXTCOLOR', (0, row2_index), (-1, row2_index), colors.red),
|
||||
('LINEBELOW', (0, row2_index), (-1, row2_index), 0.5, colors.black)])
|
||||
|
||||
data.append(row_data)
|
||||
data.append(row_data2)
|
||||
|
||||
summary_row1 = list()
|
||||
summary_row2 = list()
|
||||
@@ -202,16 +205,16 @@ def populate_columns(lines: List[ADRLine], columns, include_omitted, _page_size)
|
||||
|
||||
|
||||
def output_report(lines: List[ADRLine], reel_list: List[str], include_omitted=False,
|
||||
page_size=portrait(letter)):
|
||||
page_size=portrait(letter), font_name='Helvetica'):
|
||||
columns = build_columns(lines, include_omitted=include_omitted, reel_list=reel_list)
|
||||
data, style, columns_widths = populate_columns(lines, columns, include_omitted, page_size)
|
||||
|
||||
style.append(('FONTNAME', (0, 0), (-1, -1), "Futura"))
|
||||
style.append(('FONTNAME', (0, 0), (-1, -1), font_name))
|
||||
style.append(('FONTSIZE', (0, 0), (-1, -1), 9.))
|
||||
style.append(('LINEBELOW', (0, 0), (-1, 0), 1.0, colors.black))
|
||||
# style.append(('LINEBELOW', (0, 1), (-1, -1), 0.25, colors.gray))
|
||||
|
||||
pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))
|
||||
#pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))
|
||||
|
||||
title = "%s Line Count" % lines[0].title
|
||||
filename = title + '.pdf'
|
||||
@@ -230,7 +233,7 @@ def output_report(lines: List[ADRLine], reel_list: List[str], include_omitted=Fa
|
||||
story = [Spacer(height=0.5 * inch, width=1.), table]
|
||||
|
||||
style = getSampleStyleSheet()['Normal']
|
||||
style.fontName = 'Futura'
|
||||
style.fontName = font_name
|
||||
style.fontSize = 12.
|
||||
style.spaceBefore = 16.
|
||||
style.spaceAfter = 16.
|
||||
|
||||
@@ -34,23 +34,26 @@ def build_aux_data_field(line: ADRLine):
|
||||
elif line.adlib:
|
||||
bg_color = 'purple'
|
||||
tag_field += "<font backColor=%s textColor=%s fontSize=11>%s</font> " % (bg_color, fg_color, "ADLIB")
|
||||
elif line.optional:
|
||||
bg_color = 'green'
|
||||
tag_field += "<font backColor=%s textColor=%s fontSize=11>%s</font>" % (bg_color, fg_color, "OPTIONAL")
|
||||
|
||||
entries.append(tag_field)
|
||||
|
||||
return "<br />".join(entries)
|
||||
|
||||
|
||||
def build_story(lines: List[ADRLine], tc_rate: TimecodeFormat):
|
||||
def build_story(lines: List[ADRLine], tc_rate: TimecodeFormat, font_name='Helvetica'):
|
||||
story = list()
|
||||
|
||||
this_scene = None
|
||||
scene_style = getSampleStyleSheet()['Normal']
|
||||
scene_style.fontName = 'Futura'
|
||||
scene_style.fontName = font_name
|
||||
scene_style.leftIndent = 0.
|
||||
scene_style.leftPadding = 0.
|
||||
scene_style.spaceAfter = 18.
|
||||
line_style = getSampleStyleSheet()['Normal']
|
||||
line_style.fontName = 'Futura'
|
||||
line_style.fontName = font_name
|
||||
|
||||
for line in lines:
|
||||
table_style = [('VALIGN', (0, 0), (-1, -1), 'TOP'),
|
||||
|
||||
@@ -11,11 +11,12 @@ from reportlab.platypus import Paragraph
|
||||
|
||||
from .__init__ import GRect
|
||||
|
||||
from ptulsconv.broadcast_timecode import TimecodeFormat
|
||||
from ptulsconv.broadcast_timecode import TimecodeFormat, footage_to_frame_count
|
||||
from ptulsconv.docparser.adr_entity import ADRLine
|
||||
|
||||
import datetime
|
||||
|
||||
font_name = 'Helvetica'
|
||||
|
||||
def draw_header_block(canvas, rect, record: ADRLine):
|
||||
rect.draw_text_cell(canvas, record.cue_number, "Helvetica", 44, vertical_align='m')
|
||||
@@ -23,19 +24,19 @@ def draw_header_block(canvas, rect, record: ADRLine):
|
||||
|
||||
def draw_character_row(canvas, rect, record: ADRLine):
|
||||
label_frame, value_frame = rect.split_x(1.25 * inch)
|
||||
label_frame.draw_text_cell(canvas, "CHARACTER", "Futura", 10, force_baseline=9.)
|
||||
label_frame.draw_text_cell(canvas, "CHARACTER", font_name, 10, force_baseline=9.)
|
||||
line = "%s / %s" % (record.character_id, record.character_name)
|
||||
if record.actor_name is not None:
|
||||
line = line + " / " + record.actor_name
|
||||
value_frame.draw_text_cell(canvas, line, "Futura", 12, force_baseline=9.)
|
||||
value_frame.draw_text_cell(canvas, line, font_name, 12, force_baseline=9.)
|
||||
rect.draw_border(canvas, ['min_y', 'max_y'])
|
||||
|
||||
|
||||
def draw_cue_number_block(canvas, rect, record: ADRLine):
|
||||
(label_frame, number_frame,), aux_frame = rect.divide_y([0.20 * inch, 0.375 * inch], direction='d')
|
||||
label_frame.draw_text_cell(canvas, "CUE NUMBER", "Futura", 10,
|
||||
label_frame.draw_text_cell(canvas, "CUE NUMBER", font_name, 10,
|
||||
inset_y=5., vertical_align='t')
|
||||
number_frame.draw_text_cell(canvas, record.cue_number, "Futura", 14,
|
||||
number_frame.draw_text_cell(canvas, record.cue_number, font_name, 14,
|
||||
inset_x=10., inset_y=2., draw_baseline=True)
|
||||
|
||||
tags = {'tv': 'TV',
|
||||
@@ -49,7 +50,7 @@ def draw_cue_number_block(canvas, rect, record: ADRLine):
|
||||
if getattr(record, key):
|
||||
tag_field = tag_field + tags[key] + " "
|
||||
|
||||
aux_frame.draw_text_cell(canvas, tag_field, "Futura", 10,
|
||||
aux_frame.draw_text_cell(canvas, tag_field, font_name, 10,
|
||||
inset_x=10., inset_y=2., vertical_align='t')
|
||||
rect.draw_border(canvas, 'max_x')
|
||||
|
||||
@@ -58,13 +59,13 @@ def draw_timecode_block(canvas, rect, record: ADRLine, tc_display_format: Timeco
|
||||
(in_label_frame, in_frame, out_label_frame, out_frame), _ = rect.divide_y(
|
||||
[0.20 * inch, 0.25 * inch, 0.20 * inch, 0.25 * inch], direction='d')
|
||||
|
||||
in_label_frame.draw_text_cell(canvas, "IN", "Futura", 10,
|
||||
in_label_frame.draw_text_cell(canvas, "IN", font_name, 10,
|
||||
vertical_align='t', inset_y=5., inset_x=5.)
|
||||
in_frame.draw_text_cell(canvas, tc_display_format.seconds_to_smpte(record.start), "Futura", 14,
|
||||
in_frame.draw_text_cell(canvas, tc_display_format.seconds_to_smpte(record.start), font_name, 14,
|
||||
inset_x=10., inset_y=2., draw_baseline=True)
|
||||
out_label_frame.draw_text_cell(canvas, "OUT", "Futura", 10,
|
||||
out_label_frame.draw_text_cell(canvas, "OUT", font_name, 10,
|
||||
vertical_align='t', inset_y=5., inset_x=5.)
|
||||
out_frame.draw_text_cell(canvas, tc_display_format.seconds_to_smpte(record.finish), "Futura", 14,
|
||||
out_frame.draw_text_cell(canvas, tc_display_format.seconds_to_smpte(record.finish), font_name, 14,
|
||||
inset_x=10., inset_y=2., draw_baseline=True)
|
||||
|
||||
rect.draw_border(canvas, 'max_x')
|
||||
@@ -75,16 +76,16 @@ def draw_reason_block(canvas, rect, record: ADRLine):
|
||||
reason_label, reason_value = reason_cell.split_x(.75 * inch)
|
||||
notes_label, notes_value = notes_cell.split_x(.75 * inch)
|
||||
|
||||
reason_label.draw_text_cell(canvas, "Reason:", "Futura", 12,
|
||||
reason_label.draw_text_cell(canvas, "Reason:", font_name, 12,
|
||||
inset_x=5., inset_y=5., vertical_align='b')
|
||||
reason_value.draw_text_cell(canvas, record.reason or "", "Futura", 12,
|
||||
reason_value.draw_text_cell(canvas, record.reason or "", font_name, 12,
|
||||
inset_x=5., inset_y=5., draw_baseline=True,
|
||||
vertical_align='b')
|
||||
notes_label.draw_text_cell(canvas, "Note:", "Futura", 12,
|
||||
notes_label.draw_text_cell(canvas, "Note:", font_name, 12,
|
||||
inset_x=5., inset_y=5., vertical_align='t')
|
||||
|
||||
style = getSampleStyleSheet()['BodyText']
|
||||
style.fontName = 'Futura'
|
||||
style.fontName = font_name
|
||||
style.fontSize = 12
|
||||
style.leading = 14
|
||||
|
||||
@@ -96,10 +97,10 @@ def draw_reason_block(canvas, rect, record: ADRLine):
|
||||
def draw_prompt(canvas, rect, prompt=""):
|
||||
label, block = rect.split_y(0.20 * inch, direction='d')
|
||||
|
||||
label.draw_text_cell(canvas, "PROMPT", "Futura", 10, vertical_align='t', inset_y=5., inset_x=0.)
|
||||
label.draw_text_cell(canvas, "PROMPT", font_name, 10, vertical_align='t', inset_y=5., inset_x=0.)
|
||||
|
||||
style = getSampleStyleSheet()['BodyText']
|
||||
style.fontName = 'Futura'
|
||||
style.fontName = font_name
|
||||
style.fontSize = 14
|
||||
|
||||
style.leading = 24
|
||||
@@ -116,10 +117,10 @@ def draw_prompt(canvas, rect, prompt=""):
|
||||
def draw_notes(canvas, rect, note=""):
|
||||
label, block = rect.split_y(0.20 * inch, direction='d')
|
||||
|
||||
label.draw_text_cell(canvas, "NOTES", "Futura", 10, vertical_align='t', inset_y=5., inset_x=0.)
|
||||
label.draw_text_cell(canvas, "NOTES", font_name, 10, vertical_align='t', inset_y=5., inset_x=0.)
|
||||
|
||||
style = getSampleStyleSheet()['BodyText']
|
||||
style.fontName = 'Futura'
|
||||
style.fontName = font_name
|
||||
style.fontSize = 14
|
||||
style.leading = 24
|
||||
|
||||
@@ -175,12 +176,12 @@ def draw_aux_block(canvas, rect, recording_time_sec_this_line, recording_time_se
|
||||
lines, last_line = content_rect.divide_y([12., 12., 24., 24., 24., 24.], direction='d')
|
||||
|
||||
lines[0].draw_text_cell(canvas,
|
||||
"Time for this line: %.1f mins" % (recording_time_sec_this_line / 60.), "Futura", 9.)
|
||||
lines[1].draw_text_cell(canvas, "Running time: %03.1f mins" % (recording_time_sec / 60.), "Futura", 9.)
|
||||
lines[2].draw_text_cell(canvas, "Actual Start: ______________", "Futura", 9., vertical_align='b')
|
||||
lines[3].draw_text_cell(canvas, "Record Date: ______________", "Futura", 9., vertical_align='b')
|
||||
lines[4].draw_text_cell(canvas, "Engineer: ______________", "Futura", 9., vertical_align='b')
|
||||
lines[5].draw_text_cell(canvas, "Location: ______________", "Futura", 9., vertical_align='b')
|
||||
"Time for this line: %.1f mins" % (recording_time_sec_this_line / 60.), font_name, 9.)
|
||||
lines[1].draw_text_cell(canvas, "Running time: %03.1f mins" % (recording_time_sec / 60.), font_name, 9.)
|
||||
lines[2].draw_text_cell(canvas, "Actual Start: ______________", font_name, 9., vertical_align='b')
|
||||
lines[3].draw_text_cell(canvas, "Record Date: ______________", font_name, 9., vertical_align='b')
|
||||
lines[4].draw_text_cell(canvas, "Engineer: ______________", font_name, 9., vertical_align='b')
|
||||
lines[5].draw_text_cell(canvas, "Location: ______________", font_name, 9., vertical_align='b')
|
||||
|
||||
|
||||
def draw_footer(canvas, rect, record: ADRLine, report_date, line_no, total_lines):
|
||||
@@ -189,7 +190,7 @@ def draw_footer(canvas, rect, record: ADRLine, report_date, line_no, total_lines
|
||||
spotting_name = [record.spot] if record.spot is not None else []
|
||||
pages_s = ["Line %i of %i" % (line_no, total_lines)]
|
||||
footer_s = " - ".join(report_date_s + spotting_name + pages_s)
|
||||
rect.draw_text_cell(canvas, footer_s, font_name="Futura", font_size=10., inset_y=2.)
|
||||
rect.draw_text_cell(canvas, footer_s, font_name=font_name, font_size=10., inset_y=2.)
|
||||
|
||||
|
||||
def create_report_for_character(records, report_date, tc_display_format: TimecodeFormat):
|
||||
@@ -200,7 +201,7 @@ def create_report_for_character(records, report_date, tc_display_format: Timecod
|
||||
assert outfile is not None
|
||||
assert outfile[-4:] == '.pdf', "Output file must have 'pdf' extension!"
|
||||
|
||||
pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))
|
||||
#pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))
|
||||
|
||||
page: GRect = GRect(0, 0, letter[0], letter[1])
|
||||
page = page.inset(inch * 0.5)
|
||||
|
||||
@@ -16,15 +16,15 @@ from ..broadcast_timecode import TimecodeFormat
|
||||
from ..docparser.adr_entity import ADRLine
|
||||
|
||||
|
||||
def output_report(lines: List[ADRLine], tc_display_format: TimecodeFormat):
|
||||
def output_report(lines: List[ADRLine], tc_display_format: TimecodeFormat, font_name="Helvetica"):
|
||||
character_numbers = set([n.character_id for n in lines])
|
||||
pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))
|
||||
#pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))
|
||||
|
||||
for n in character_numbers:
|
||||
char_lines = [line for line in lines if not line.omitted and line.character_id == n]
|
||||
character_name = char_lines[0].character_name
|
||||
|
||||
sorted(char_lines, key=lambda line: line.start)
|
||||
char_lines = sorted(char_lines, key=lambda line: line.start)
|
||||
|
||||
title = "%s (%s) %s ADR Script" % (char_lines[0].title, character_name, n)
|
||||
filename = "%s_%s_%s_ADR Script.pdf" % (char_lines[0].title, n, character_name)
|
||||
@@ -39,7 +39,7 @@ def output_report(lines: List[ADRLine], tc_display_format: TimecodeFormat):
|
||||
story = []
|
||||
|
||||
prompt_style = getSampleStyleSheet()['Normal']
|
||||
prompt_style.fontName = 'Futura'
|
||||
prompt_style.fontName = font_name
|
||||
prompt_style.fontSize = 18.
|
||||
|
||||
prompt_style.leading = 24.
|
||||
@@ -47,7 +47,7 @@ def output_report(lines: List[ADRLine], tc_display_format: TimecodeFormat):
|
||||
prompt_style.rightIndent = 1.5 * inch
|
||||
|
||||
number_style = getSampleStyleSheet()['Normal']
|
||||
number_style.fontName = 'Futura'
|
||||
number_style.fontName = font_name
|
||||
number_style.fontSize = 14
|
||||
|
||||
number_style.leading = 24
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
"""
|
||||
Reporting logic. These methods provide reporting methods to the package and
|
||||
take some pains to provide nice-looking escape codes if we're writing to a
|
||||
tty.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
"""
|
||||
Validation logic for enforcing various consistency rules.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from ptulsconv.docparser.adr_entity import ADRLine
|
||||
from typing import Iterator, Optional
|
||||
|
||||
48
pyproject.toml
Normal file
48
pyproject.toml
Normal file
@@ -0,0 +1,48 @@
|
||||
[build-system]
|
||||
requires = ["flit_core >=3.2,<4"]
|
||||
build-backend = "flit_core.buildapi"
|
||||
|
||||
[project]
|
||||
name = "ptulsconv"
|
||||
authors = [
|
||||
{name = "Jamie Hardt", email = "jamiehardt@me.com"},
|
||||
]
|
||||
readme = "README.md"
|
||||
license = { file = "LICENSE" }
|
||||
classifiers = [
|
||||
'License :: OSI Approved :: MIT License',
|
||||
'Topic :: Multimedia',
|
||||
'Topic :: Multimedia :: Sound/Audio',
|
||||
"Programming Language :: Python :: 3.7",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
"Topic :: Text Processing :: Filters"
|
||||
]
|
||||
requires-python = ">=3.7"
|
||||
dynamic = ["version", "description"]
|
||||
keywords = ["text-processing", "parsers", "film",
|
||||
"broadcast", "editing", "editorial"]
|
||||
dependencies = ['parsimonious', 'tqdm', 'reportlab']
|
||||
|
||||
[project.optional-dependencies]
|
||||
doc = [
|
||||
"Sphinx ~= 5.3.0",
|
||||
"sphinx-rtd-theme >= 1.1.1"
|
||||
]
|
||||
|
||||
[tool.flit.module]
|
||||
name = "ptulsconv"
|
||||
|
||||
[project.scripts]
|
||||
ptulsconv = "ptulsconv.__main__:main"
|
||||
|
||||
[project.entry_points.console_scripts]
|
||||
ptulsconv = 'ptulsconv.__main__:main'
|
||||
|
||||
[project.urls]
|
||||
Source = 'https://github.com/iluvcapra/ptulsconv'
|
||||
Issues = 'https://github.com/iluvcapra/ptulsconv/issues'
|
||||
Documentation = 'https://ptulsconv.readthedocs.io/'
|
||||
@@ -1,5 +0,0 @@
|
||||
setuptools~=56.2.0
|
||||
reportlab~=3.5.67
|
||||
ffmpeg~=1.4
|
||||
parsimonious~=0.8.1
|
||||
tqdm~=4.60.0
|
||||
43
setup.py
43
setup.py
@@ -1,43 +0,0 @@
|
||||
from setuptools import setup
|
||||
|
||||
from ptulsconv import __author__, __license__, __version__
|
||||
|
||||
with open("README.md", "r") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
setup(name='ptulsconv',
|
||||
version=__version__,
|
||||
author=__author__,
|
||||
description='Parse and convert Pro Tools text exports',
|
||||
long_description_content_type="text/markdown",
|
||||
long_description=long_description,
|
||||
license=__license__,
|
||||
url='https://github.com/iluvcapra/ptulsconv',
|
||||
project_urls={
|
||||
'Source':
|
||||
'https://github.com/iluvcapra/ptulsconv',
|
||||
'Issues':
|
||||
'https://github.com/iluvcapra/ptulsconv/issues',
|
||||
},
|
||||
classifiers=[
|
||||
'License :: OSI Approved :: MIT License',
|
||||
'Topic :: Multimedia',
|
||||
'Topic :: Multimedia :: Sound/Audio',
|
||||
"Programming Language :: Python :: 3.7",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Development Status :: 4 - Beta",
|
||||
"Topic :: Text Processing :: Filters"],
|
||||
packages=['ptulsconv'],
|
||||
keywords='text-processing parsers film tv editing editorial',
|
||||
install_requires=['parsimonious', 'tqdm', 'reportlab'],
|
||||
package_data={
|
||||
"ptulsconv": ["xslt/*.xsl"]
|
||||
},
|
||||
entry_points={
|
||||
'console_scripts': [
|
||||
'ptulsconv = ptulsconv.__main__:main'
|
||||
]
|
||||
}
|
||||
)
|
||||
@@ -4,7 +4,7 @@ import os.path
|
||||
|
||||
|
||||
class TestRobinHood1(unittest.TestCase):
|
||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood Spotting.txt'
|
||||
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood Spotting.txt'
|
||||
|
||||
def test_header_export(self):
|
||||
|
||||
@@ -4,7 +4,7 @@ import os.path
|
||||
|
||||
|
||||
class TestRobinHood5(unittest.TestCase):
|
||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood Spotting5.txt'
|
||||
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood Spotting5.txt'
|
||||
|
||||
def test_skipped_segments(self):
|
||||
session = parse_document(self.path)
|
||||
@@ -4,7 +4,7 @@ import os.path
|
||||
|
||||
|
||||
class TestRobinHood6(unittest.TestCase):
|
||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood Spotting6.txt'
|
||||
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood Spotting6.txt'
|
||||
|
||||
def test_a_track(self):
|
||||
session = parse_document(self.path)
|
||||
@@ -4,7 +4,7 @@ import os.path
|
||||
|
||||
|
||||
class TestRobinHoodDF(unittest.TestCase):
|
||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood SpottingDF.txt'
|
||||
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood SpottingDF.txt'
|
||||
|
||||
def test_header_export_df(self):
|
||||
session = parse_document(self.path)
|
||||
34
tests/functional/test_pdf_export.py
Normal file
34
tests/functional/test_pdf_export.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import unittest
|
||||
|
||||
import tempfile
|
||||
|
||||
import os.path
|
||||
import os
|
||||
import glob
|
||||
|
||||
from ptulsconv import commands
|
||||
|
||||
class TestPDFExport(unittest.TestCase):
|
||||
def test_report_generation(self):
|
||||
"""
|
||||
Setp through every text file in export_cases and make sure it can
|
||||
be converted into PDF docs without throwing an error
|
||||
"""
|
||||
files = [os.path.dirname(__file__) + "/../export_cases/Robin Hood Spotting.txt"]
|
||||
#files.append(os.path.dirname(__file__) + "/../export_cases/Robin Hood Spotting2.txt")
|
||||
for path in files:
|
||||
tempdir = tempfile.TemporaryDirectory()
|
||||
os.chdir(tempdir.name)
|
||||
try:
|
||||
commands.convert(path, major_mode='doc')
|
||||
except:
|
||||
assert False, "Error processing file %s" % path
|
||||
finally:
|
||||
tempdir.cleanup()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -70,6 +70,16 @@ class TestBroadcastTimecode(unittest.TestCase):
|
||||
s1 = tc_format.seconds_to_smpte(secs)
|
||||
self.assertEqual(s1, "00:00:01:01")
|
||||
|
||||
def test_unparseable_footage(self):
|
||||
time_str = "10.1"
|
||||
s1 = broadcast_timecode.footage_to_frame_count(time_str)
|
||||
self.assertIsNone(s1)
|
||||
|
||||
def test_unparseable_timecode(self):
|
||||
time_str = "11.32-19"
|
||||
s1 = broadcast_timecode.smpte_to_frame_count(time_str, frames_per_logical_second=24)
|
||||
self.assertIsNone(s1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
15
tests/unittests/test_footage.py
Normal file
15
tests/unittests/test_footage.py
Normal file
@@ -0,0 +1,15 @@
|
||||
import unittest
|
||||
from ptulsconv import footage
|
||||
|
||||
class TestFootage(unittest.TestCase):
    """Unit tests for footage-string to seconds conversion."""

    def test_basic_footage(self):
        """"90+0" converts to exactly sixty seconds."""
        seconds = footage.footage_to_seconds("90+0")
        self.assertEqual(60.0, float(seconds or 0))

    def test_feet_and_frames(self):
        """"1+8" (one foot, eight frames) converts to exactly one second."""
        seconds = footage.footage_to_seconds("1+8")
        self.assertEqual(1.0, float(seconds or 0))
|
||||
|
||||
|
||||
@@ -97,14 +97,14 @@ class TestTagCompiler(unittest.TestCase):
|
||||
|
||||
markers = [doc_entity.MarkerDescriptor(number=1,
|
||||
location="01:00:00:00",
|
||||
time_reference=48000 * 60,
|
||||
time_reference=48000 * 3600,
|
||||
units="Samples",
|
||||
name="Marker 1 {Part=1}",
|
||||
comments=""
|
||||
),
|
||||
doc_entity.MarkerDescriptor(number=2,
|
||||
location="01:00:01:00",
|
||||
time_reference=48000 * 61,
|
||||
time_reference=48000 * 3601,
|
||||
units="Samples",
|
||||
name="Marker 2 {Part=2}",
|
||||
comments="[M1]"
|
||||
@@ -1,15 +1,15 @@
|
||||
import unittest
|
||||
from ptulsconv.docparser import doc_entity, doc_parser_visitor, ptuls_grammar, tag_compiler
|
||||
from ptulsconv.docparser import doc_entity, pt_doc_parser, tag_compiler
|
||||
import os.path
|
||||
|
||||
|
||||
class TaggingIntegratedTests(unittest.TestCase):
|
||||
path = os.path.dirname(__file__) + '/export_cases/Tag Tests/Tag Tests.txt'
|
||||
path = os.path.dirname(__file__) + '/../export_cases/Tag Tests/Tag Tests.txt'
|
||||
|
||||
def test_event_list(self):
|
||||
with open(self.path, 'r') as f:
|
||||
document_ast = ptuls_grammar.protools_text_export_grammar.parse(f.read())
|
||||
document: doc_entity.SessionDescriptor = doc_parser_visitor.DocParserVisitor().visit(document_ast)
|
||||
document_ast = pt_doc_parser.protools_text_export_grammar.parse(f.read())
|
||||
document: doc_entity.SessionDescriptor = pt_doc_parser.DocParserVisitor().visit(document_ast)
|
||||
compiler = tag_compiler.TagCompiler()
|
||||
compiler.session = document
|
||||
|
||||
@@ -28,8 +28,8 @@ class TaggingIntegratedTests(unittest.TestCase):
|
||||
|
||||
def test_append(self):
|
||||
with open(self.path, 'r') as f:
|
||||
document_ast = ptuls_grammar.protools_text_export_grammar.parse(f.read())
|
||||
document: doc_entity.SessionDescriptor = doc_parser_visitor.DocParserVisitor().visit(document_ast)
|
||||
document_ast = pt_doc_parser.protools_text_export_grammar.parse(f.read())
|
||||
document: doc_entity.SessionDescriptor = pt_doc_parser.DocParserVisitor().visit(document_ast)
|
||||
compiler = tag_compiler.TagCompiler()
|
||||
compiler.session = document
|
||||
|
||||
@@ -51,8 +51,8 @@ class TaggingIntegratedTests(unittest.TestCase):
|
||||
|
||||
def test_successive_appends(self):
|
||||
with open(self.path, 'r') as f:
|
||||
document_ast = ptuls_grammar.protools_text_export_grammar.parse(f.read())
|
||||
document: doc_entity.SessionDescriptor = doc_parser_visitor.DocParserVisitor().visit(document_ast)
|
||||
document_ast = pt_doc_parser.protools_text_export_grammar.parse(f.read())
|
||||
document: doc_entity.SessionDescriptor = pt_doc_parser.DocParserVisitor().visit(document_ast)
|
||||
compiler = tag_compiler.TagCompiler()
|
||||
compiler.session = document
|
||||
|
||||
Reference in New Issue
Block a user