Mirror of https://github.com/iluvcapra/ptulsconv.git, synced 2025-12-31 17:00:46 +00:00

Compare commits (88 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 32191791c6 | |
| | 1ff910e4d2 | |
| | 0e8a77f548 | |
| | 7058397f6f | |
| | 16d7befd9f | |
| | 2a3f4a7c18 | |
| | d7fff23e5e | |
| | 757c3171e7 | |
| | 48dd7de07e | |
| | dc259d54aa | |
| | 09ed12fc8f | |
| | 2a69bf47c8 | |
| | 01d2374a9b | |
| | 68e7eb89a9 | |
| | 517fe3526a | |
| | 7789c0df4c | |
| | d97dcc3e08 | |
| | 615b2b5a2a | |
| | 4cd6ba1772 | |
| | 1942b323b3 | |
| | 7d297a7564 | |
| | 54fa8f04a7 | |
| | dcc6113a63 | |
| | 04b9e35240 | |
| | f460022160 | |
| | 5c5cd84811 | |
| | ee90697be0 | |
| | 4a0d19ade1 | |
| | df6c783c51 | |
| | f0b232b2b6 | |
| | 519c6403ba | |
| | d29c36eafa | |
| | 2095a1fb75 | |
| | 70defcc46c | |
| | d156b6df89 | |
| | 3ba9d7933e | |
| | b0c40ee0b6 | |
| | 921b0f07af | |
| | 57764bc859 | |
| | 779c93282c | |
| | 9684be6c7e | |
| | 484a70fc8e | |
| | 5aa005c317 | |
| | 454adea3d1 | |
| | 1e6546dab5 | |
| | 8b262d3bfb | |
| | 630e7960dc | |
| | aa7b418121 | |
| | a519a525b2 | |
| | 1412efe509 | |
| | 12a6c05467 | |
| | cf87986014 | |
| | 67533879f8 | |
| | f847b88aa3 | |
| | c3a600c5d7 | |
| | 914783a809 | |
| | c638c673e8 | |
| | 15fe6667af | |
| | d4e23b59eb | |
| | a602b09551 | |
| | 448d93d717 | |
| | 59e7d40d97 | |
| | eaa5fe824f | |
| | 8ebfd32e02 | |
| | 83a9adb48a | |
| | 013ebcbe75 | |
| | c87695e5fe | |
| | 4a8983cbbb | |
| | 9123cbd0b5 | |
| | 4224d106b0 | |
| | ac22fab97f | |
| | 64ca2c6c5c | |
| | c3af30dc6a | |
| | c30f675cec | |
| | 204af7d9cb | |
| | 10fc211e80 | |
| | d56c7df376 | |
| | 7b38449a5f | |
| | 17b87b6e69 | |
| | a636791539 | |
| | dfde3c4493 | |
| | 81909c8a51 | |
| | e2b9a20870 | |
| | 006cec05e5 | |
| | a95f0b5cca | |
| | 70a5206d73 | |
| | 128eed002d | |
| | f8a0d70942 | |
.flake8 (deleted, 4 lines)
@@ -1,4 +0,0 @@
-[flake8]
-per-file-ignores =
-    ptulsconv/__init__.py: F401
-    ptulsconv/docparser/__init__.py: F401
.github/workflows/python-package.yml (vendored, 14 lines changed)
@@ -16,7 +16,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: [3.8, 3.9, "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]

     steps:
     - uses: actions/checkout@v2.5.0
@@ -27,15 +27,11 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
-        python -m pip install flake8 pytest
-        pip install -e .
+        pip install .
+        pip install --group dev .
-    - name: Lint with flake8
+    - name: Lint with ruff
       run: |
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 ptulsconv tests --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 ptulsconv tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+        ruff check src
     - name: Test with pytest
       run: |
         pytest
-        flake8 ptulsconv
.github/workflows/pythonpublish.yml (vendored, 12 lines changed)
@@ -26,14 +26,4 @@ jobs:
     - name: Build package
       run: python -m build
     - name: pypi-publish
-      uses: pypa/gh-action-pypi-publish@v1.8.6
+      uses: pypa/gh-action-pypi-publish@v1.13.0
-    # - name: Report to Mastodon
-    #   uses: cbrgm/mastodon-github-action@v1.0.1
-    #   with:
-    #     message: |
-    #       I just released a new version of ptulsconv, my ADR cue sheet generator!
-    #       #python #protools #pdf #filmmaking
-    #       ${{ github.server_url }}/${{ github.repository }}
-    #   env:
-    #     MASTODON_URL: ${{ secrets.MASTODON_URL }}
-    #     MASTODON_ACCESS_TOKEN: ${{ secrets.MASTODON_ACCESS_TOKEN }}
Read the Docs configuration:
@@ -10,10 +10,15 @@ build:
   os: ubuntu-20.04
   tools:
     python: "3.10"
-  # You can also specify other tool versions:
-  #   nodejs: "16"
-  #   rust: "1.55"
-  #   golang: "1.17"
+  jobs:
+    pre_install:
+      - pip install --upgrade --upgrade-strategy only-if-needed --group doc .
+    build:
+      html:
+        python -m sphinx -T -b html -d _build/doctrees -D language=en . $READTHEDOCS_OUTPUT/html
+    post_build:
+      - echo "Command run at 'post_build' step"
+      - echo `date`

 # Build documentation in the docs/ directory with Sphinx
 sphinx:
@@ -23,10 +28,13 @@ sphinx:
 formats:
   - pdf

+# FIXME: We should be installing the `doc` dependency group and not the `doc`
+# extra.
+
 #Optionally declare the Python requirements required to build your docs
-python:
-   install:
-   - method: pip
-     path: .
-     extra_requirements:
-       - doc
+# python:
+#    install:
+#    - method: pip
+#      path: .
+#      extra_requirements:
+#        - doc
README.md:
@@ -2,7 +2,7 @@
 ![](https://img.shields.io/github/license/iluvcapra/ptulsconv.svg)
 ![](https://img.shields.io/pypi/pyversions/ptulsconv.svg)
 [![](https://img.shields.io/pypi/v/ptulsconv.svg)][pypi]
 ![](https://img.shields.io/pypi/wheel/ptulsconv.svg)
 [![Lint and Test](https://github.com/iluvcapra/ptulsconv/actions/workflows/python-package.yml/badge.svg)](https://github.com/iluvcapra/ptulsconv/actions/workflows/python-package.yml)

 [pypi]: https://pypi.org/project/ptulsconv/
@@ -10,7 +10,7 @@

 # ptulsconv

-Read Pro Tools text exports and generate PDF reports, JSON output.
+Parse Pro Tools text exports and generate PDF reports, JSON output.

 ## Quick Start

@@ -23,6 +23,10 @@ The easiest way to install on your site is to use `pip`:

 % pip3 install ptulsconv

+If you are using `uv` you can also do a tool install...
+
+% uv tool install ptulsconv
+
 This will install the necessary libraries on your host and gives you
 command-line access to the tool through an entry-point `ptulsconv`. In a
 terminal window type `ptulsconv -h` for a list of available options.
Sphinx conf.py:
@@ -3,6 +3,7 @@
 # For the full list of built-in configuration values, see the documentation:
 # https://www.sphinx-doc.org/en/master/usage/configuration.html

+import importlib
 import sys
 import os

@@ -15,9 +16,9 @@ import ptulsconv
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

 project = 'ptulsconv'
-# copyright = ptulsconv.__copyright__
-# author = ptulsconv.__author__
-release = ptulsconv.__version__
+copyright = '2019-2025 Jamie Hardt. All rights reserved'
+version = "Version 2"
+release = importlib.metadata.version("ptulsconv")

 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
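The conf.py change above reads the release string from the installed distribution's metadata instead of importing a module-level `__version__` attribute. A minimal sketch of that pattern outside Sphinx; the only assumption is that the `ptulsconv` distribution is installed in the current environment:

```python
import importlib.metadata

# Ask the installed distribution for its version string; no import of the
# package's own modules is needed at documentation build time.
release = importlib.metadata.version("ptulsconv")
print(release)
```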
docs (usage page):
@@ -6,7 +6,12 @@ Usage Form

 Invocations of ptulsconv take the following form:

-    ptulsconv [options] IN_FILE
+    ptulsconv [options] [IN_FILE]

+
+`IN_FILE` is a Pro Tools text export in UTF-8 encoding. If `IN_FILE` is
+missing, `ptulsconv` will attempt to connect to Pro Tools and read cue data
+from the selected tracks of the currently-open session.
+

 Flags
docs (quick start tutorial page):
@@ -24,19 +24,21 @@ Step 2: Add More Information to Your Spots

 Clips, tracks and markers in your session can contain additional information
 to make your ADR reports more complete and useful. You add this information
-with *tagging*.
+with :ref:`tagging<tags>`.

-* Every ADR clip must have a unique cue number. After the name of each clip,
-  add the letters "$QN=" and then a unique number (any combination of letters
-  or numbers that don't contain a space). You can type these yourself or add
-  them with batch-renaming when you're done spotting.
+* **Every ADR clip must have a unique cue number.** After the name of each
+  clip, add the letters ``$QN=`` and then a unique number (any combination of
+  letters or numbers that don't contain a space). You can type these yourself
+  or add them with batch-renaming when you're done spotting.
 * ADR spots should usually have a reason indicated, so you can remember exactly
-  why you're replacing a particular line. Do this by adding the the text "{R="
-  to your clip names after the prompt and then some short text describing the
-  reason, and then a closing "}". You can type anything, including spaces.
-* If a line is a TV cover line, you can add the text "[TV]" to the end.
+  why you're replacing a particular line. Do this by adding the the text
+  ``{R=`` to your clip names after the prompt and then some short text
+  describing the reason, and then a closing ``}``. You can type anything,
+  including spaces.
+* If, for example, a line is a TV cover line, you can add the text ``[TV]`` to
+  the end.

-So for example, some ADR spot's clip name might look like:
+So for example, some ADR spot's clip name might look like::

     Get to the ladder! {R=Noise} $QN=J1001
     "Forget your feelings! {R=TV Cover} $QN=J1002 [TV]
@@ -45,32 +47,26 @@ These tags can appear in any order.

 * You can add the name of an actor to a character's track, so this information
   will appear on your reports. In the track name, or in the track comments,
-  type "{Actor=xxx}" replacing the xxx with the actor's name.
+  type ``{Actor=xxx}`` replacing the xxx with the actor's name.
 * Characters need to have a number (perhaps from the cast list) to express how
-  they should be collated. Add "$CN=xxx" with a unique number to each track (or
-  the track's comments.)
+  they should be collated. Add ``$CN=xxx`` with
+  a unique number to each track (or the track's comments.)
 * Set the scene for each line with markers. Create a marker at the beginning of
-  a scene and make it's name "{Sc=xxx}", replacing the xxx with the scene
+  a scene and make it's name ``{Sc=xxx}``, replacing the xxx with the scene
   number and name.


-Step 3: Export Tracks from Pro Tools as a Text File
----------------------------------------------------
+Step 3: Run `ptulsconv`
+------------------------

-Export the file as a UTF-8 and be sure to include clips and markers. Export
-using the Timecode time format.
+In Pro Tools, select the tracks that contain your spot clips.

-Do not export crossfades.
+Then, in your Terminal, run the following command::

+    ptulsconv

-Step 4: Run `ptulsconv` on the Text Export
-------------------------------------------
-
-In your Terminal, run the following command:
-
-    ptulsconv path/to/your/TEXT_EXPORT.txt
-
-`ptulsconv` will create a folder named "Title_CURRENT_DATE", and within that
+`ptulsconv` will connect to Pro Tools and read all of the clips on the selected
+track. It will then create a folder named "Title_CURRENT_DATE", and within that
 folder it will create several PDFs and folders:

 - "TITLE ADR Report" 📄 a PDF tabular report of every ADR line you've spotted.
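The Step 2 rules above (every ADR clip carries a `$QN=` cue number, and the numbers must be unique) are easy to sanity-check outside Pro Tools. A hedged sketch of that check, not part of ptulsconv itself; the regex and function name are illustrative only:

```python
import re
from collections import Counter

QN_RE = re.compile(r"\$QN=(\S+)")  # "$QN=" followed by any run of non-space characters

def check_cue_numbers(clip_names):
    """Report $QN= values that are missing or reused across clip names."""
    numbers = []
    for name in clip_names:
        m = QN_RE.search(name)
        if m is None:
            print(f"no cue number: {name!r}")
        else:
            numbers.append(m.group(1))
    for qn, count in Counter(numbers).items():
        if count > 1:
            print(f"cue number {qn} used {count} times")

check_cue_numbers([
    "Get to the ladder! {R=Noise} $QN=J1001",
    '"Forget your feelings! {R=TV Cover} $QN=J1002 [TV]',
])
```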
docs (tagging page):
@@ -4,8 +4,8 @@ Tagging
 =======

 Tags are used to add additional data to a clip in an organized way. The
-tagging system in `ptulsconv` allows is flexible and can be used to add
-any kind of extra data to a clip.
+tagging system in `ptulsconv` is flexible and can be used to add any kind of
+extra data to a clip.

 Fields in Clip Names
 --------------------
@@ -14,7 +14,7 @@ Track names, track comments, and clip names can also contain meta-tags, or
 "fields," to add additional columns to the output. Thus, if a clip has the
 name:::

-    `Fireworks explosion {note=Replace for final} $V=1 [FX] [DESIGN]`
+    Fireworks explosion {note=Replace for final} $V=1 [FX] [DESIGN]

 The row output for this clip will contain columns for the values:

@@ -27,20 +27,24 @@ The row output for this clip will contain columns for the values:


 These fields can be defined in the clip name in three ways:
-* `$NAME=VALUE` creates a field named `NAME` with a one-word value `VALUE`.
-* `{NAME=VALUE}` creates a field named `NAME` with the value `VALUE`. `VALUE`
-  in this case may contain spaces or any chartacter up to the closing bracket.
-* `[NAME]` creates a field named `NAME` with a value `NAME`. This can be used
-  to create a boolean-valued field; in the output, clips with the field
-  will have it, and clips without will have the column with an empty value.
+* ``$NAME=VALUE`` creates a field named ``NAME`` with a one-word value
+  ``VALUE``.
+* ``{NAME=VALUE}`` creates a field named ``NAME`` with the value ``VALUE``.
+  ``VALUE`` in this case may contain spaces or any chartacter up to the
+  closing bracket.
+* ``[NAME]`` creates a field named ``NAME`` with a value ``NAME``. This can
+  be used to create a boolean-valued field; in the output, clips with the
+  field will have it, and clips without will have the column with an empty
+  value.

 For example, if three clips are named:::

-    `"Squad fifty-one, what is your status?" [FUTZ] {Ch=Dispatcher} [ADR]`
+    "Squad fifty-one, what is your status?" [FUTZ] {Ch=Dispatcher} [ADR]

-    `"We are ten-eight at Rampart Hospital." {Ch=Gage} [ADR]`
+    "We are ten-eight at Rampart Hospital." {Ch=Gage} [ADR]

-    `(1M) FC callouts rescuing trapped survivors. {Ch=Group} $QN=1001 [GROUP]`
+    (1M) FC callouts rescuing trapped survivors. {Ch=Group} $QN=1001 [GROUP]

 The output will contain the range:

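The three field forms documented above lend themselves to a simple scanner. A hedged, stand-alone sketch of that idea follows; this is not ptulsconv's actual parser, and the regexes and names are illustrative:

```python
import re

DOLLAR_RE = re.compile(r"\$(\w+)=(\S+)")     # $NAME=VALUE, one-word value
BRACE_RE = re.compile(r"\{(\w+)=([^}]*)\}")  # {NAME=VALUE}, value may contain spaces
BRACKET_RE = re.compile(r"\[(\w+)\]")        # [NAME], boolean-style flag

def extract_fields(clip_name):
    """Return a dict of the fields declared in a clip name."""
    fields = {}
    for name, value in DOLLAR_RE.findall(clip_name):
        fields[name] = value
    for name, value in BRACE_RE.findall(clip_name):
        fields[name] = value
    for name in BRACKET_RE.findall(clip_name):
        fields[name] = name
    return fields

print(extract_fields(
    "Fireworks explosion {note=Replace for final} $V=1 [FX] [DESIGN]"))
# {'V': '1', 'note': 'Replace for final', 'FX': 'FX', 'DESIGN': 'DESIGN'}
```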
@@ -63,7 +67,7 @@ Fields in Track Names and Markers
 ---------------------------------

 Fields set in track names, and in track comments, will be applied to *each*
-clip on that track. If a track comment contains the text `{Dept=Foley}` for
+clip on that track. If a track comment contains the text ``{Dept=Foley}`` for
 example, every clip on that track will have a "Foley" value in a "Dept" column.

 Likewise, fields set on the session name will apply to all clips in the session.
@@ -72,7 +76,10 @@ Fields set in markers, and in marker comments, will be applied to all clips
 whose finish is *after* that marker. Fields in markers are applied cumulatively
 from breakfast to dinner in the session. The latest marker applying to a clip has
 precedence, so if one marker comes after the other, but both define a field, the
-value in the later marker
+value in the later marker.

+All markers on all rulers will be scanned for tags. All markers on tracks will
+be ignored.
+
 An important note here is that, always, fields set on the clip name have the
 highest precedence. If a field is set in a clip name, the same field set on the
@@ -84,17 +91,17 @@ track, the value set on the clip will prevail.
 Apply Fields to a Time Range of Clips
 -------------------------------------

-A clip name beginning with "@" will not be included in the output, but its
+A clip name beginning with ``@`` will not be included in the output, but its
 fields will be applied to clips within its time range on lower tracks.

-If track 1 has a clip named `@ {Sc=1- The House}`, any clips beginning within
-that range on lower tracks will have a field `Sc` with that value.
+If track 1 has a clip named ``@ {Sc=1- The House}``, any clips beginning within
+that range on lower tracks will have a field ``Sc`` with that value.


 Combining Clips with Long Names or Many Tags
 --------------------------------------------

-A clip name beginning with `&` will have its parsed clip name appended to the
+A clip name beginning with ``&`` will have its parsed clip name appended to the
 preceding cue, and the fields of following cues will be applied, earlier clips
 having precedence. The clips need not be touching, and the clips will be
 combined into a single row of the output. The start time of the first clip will
@@ -108,23 +115,24 @@ Setting Document Options
 .. note::
     Document options are not yet implemented.

-A clip beginning with `!` sends a command to `ptulsconv`. These commands can
-appear anywhere in the document and apply to the entire document. Commands are
-a list of words
+..
+    A clip beginning with ``!`` sends a command to `ptulsconv`. These commands can
+    appear anywhere in the document and apply to the entire document. Commands are
+    a list of words

 The following commands are available:

 page $SIZE=`(letter|legal|a4)`
     Sets the PDF page size for the output.

 font {NAME=`name`} {PATH=`path`}
     Sets the primary font for the output.

 sub `replacement text` {FOR=`text_to_replace`} {IN=`tag`}
     Declares a substitution. Whereever text_to_replace is encountered in the
     document it will be replaced with "replacement text".

     If `tag` is set, this substitution will only be applied to the values of
     that tag.

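The precedence behavior described above is essentially a left-to-right dictionary merge, with the clip's own fields merged last. A hedged illustration with made-up field values; the relative ordering of track and marker fields shown here is an assumption, the docs only guarantee that clip-name fields win:

```python
# Later sources override earlier ones; the clip's own fields are merged last.
session_fields = {"Title": "Robin Hood"}
track_fields = {"Dept": "ADR", "Actor": "Errol Flynn"}
marker_fields = {"Sc": "1 - The Forest"}
clip_fields = {"QN": "J1001", "Dept": "Group"}

effective = {**session_fields, **track_fields, **marker_fields, **clip_fields}
print(effective["Dept"])  # "Group": the clip-name value wins over the track value
```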
ptulsconv/__init__.py (deleted; replaced by src/ptulsconv/__init__.py below):
@@ -1,9 +0,0 @@
-"""
-Parse and convert Pro Tools text exports
-"""
-
-__version__ = '2.0.0'
-__author__ = 'Jamie Hardt'
-__license__ = 'MIT'
-__copyright__ = "%s %s (c) 2023 %s. All rights reserved." \
-    % (__name__, __version__, __author__)
pyproject.toml:
@@ -1,52 +1,60 @@
-[build-system]
-requires = ["flit_core >=3.2,<4"]
-build-backend = "flit_core.buildapi"
-
 [project]
 name = "ptulsconv"
-authors = [
-    {name = "Jamie Hardt", email = "jamiehardt@me.com"},
-]
+version = "4.0.0"
+description = "Read Pro Tools Text exports and generate PDF ADR Reports, JSON"
 readme = "README.md"
-license = { file = "LICENSE" }
+requires-python = ">=3.9"
+
+license-files = ["LICENSE"]
+keywords = ["text-processing", "parsers", "film",
+            "broadcast", "editing", "editorial"]
 classifiers = [
     'License :: OSI Approved :: MIT License',
     'Topic :: Multimedia',
     'Topic :: Multimedia :: Sound/Audio',
-    "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
     "Development Status :: 5 - Production/Stable",
     "Topic :: Text Processing :: Filters"
 ]
-requires-python = ">=3.8"
-dynamic = ["version", "description"]
-keywords = ["text-processing", "parsers", "film",
-            "broadcast", "editing", "editorial"]
+authors = [{name = "Jamie Hardt", email = "<jamiehardt@me.com>"}]
 dependencies = [
-    'parsimonious',
-    'tqdm',
-    'reportlab',
-    'py-ptsl >= 101.1.0'
+    "parsimonious>=0.10.0",
+    "py-ptsl>=600.0.0",
+    "reportlab>=4.4.4",
+    "tqdm>=4.67.1",
 ]

-[project.optional-dependencies]
-doc = [
-    "Sphinx ~= 5.3.0",
-    "sphinx-rtd-theme >= 1.1.1"
-]
-
-[tool.flit.module]
-name = "ptulsconv"
-
-[project.scripts]
-ptulsconv = "ptulsconv.__main__:main"
-
-[project.entry_points.console_scripts]
-ptulsconv = 'ptulsconv.__main__:main'
+# [tool.uv.build-backend]
+# data = { headers = "include/headers", scripts = "bin" }

 [project.urls]
 Source = 'https://github.com/iluvcapra/ptulsconv'
 Issues = 'https://github.com/iluvcapra/ptulsconv/issues'
 Documentation = 'https://ptulsconv.readthedocs.io/'

+[project.optional-dependencies]
+doc = [
+    "sphinx>=7.4.7",
+    "sphinx-rtd-theme>=3.0.2",
+]
+
+[project.scripts]
+ptulsconv = "ptulsconv:__main__.main"
+
+[build-system]
+requires = ["uv_build>=0.8.18,<0.9.0"]
+build-backend = "uv_build"
+
+[dependency-groups]
+dev = [
+    "ruff>=0.13.1",
+]
+doc = [
+    "sphinx>=7.4.7",
+    "sphinx-rtd-theme>=3.0.2",
+]
src/ptulsconv/__init__.py (new file, 5 lines):
@@ -0,0 +1,5 @@
+"""
+Parse and convert Pro Tools text exports
+"""
+
+__copyright__ = "ptulsconv (c) 2025 Jamie Hardt. All rights reserved."
src/ptulsconv/__main__.py:
@@ -2,7 +2,10 @@ from optparse import OptionParser, OptionGroup
 import datetime
 import sys

-from ptulsconv import __name__, __copyright__
+import importlib.metadata
+
+from ptulsconv import __name__
+import ptulsconv
 from ptulsconv.commands import convert
 from ptulsconv.reporting import print_status_style, \
     print_banner_style, print_section_header_style, \
@@ -41,6 +44,11 @@ def main():
                       default='doc',
                       help='Set output format, `raw`, `tagged`, `doc`.')

+    parser.add_option('-m', '--movie-opts',
+                      dest='movie_opts',
+                      metavar="MOVIE_OPTS",
+                      help="Set movie options")
+
     warn_options = OptionGroup(title="Warning and Validation Options",
                                parser=parser)

@@ -77,7 +85,9 @@ def main():

     parser.add_option_group(informational_options)

-    print_banner_style(__copyright__)
+    version = importlib.metadata.version(ptulsconv.__name__)
+    print_banner_style(f"{ptulsconv.__name__} - version {version}")
+    print_banner_style(ptulsconv.__copyright__)

     (options, args) = parser.parse_args(sys.argv)

src/ptulsconv/commands.py:
@@ -14,9 +14,9 @@ from fractions import Fraction
 import ptsl

 from .docparser.adr_entity import make_entities, ADRLine
-from .reporting import print_section_header_style, print_status_style,\
+from .reporting import print_section_header_style, print_status_style, \
     print_warning
-from .validations import validate_unique_field, validate_non_empty_field,\
+from .validations import validate_unique_field, validate_non_empty_field, \
     validate_dependent_value

 from ptulsconv.docparser import parse_document
@@ -32,7 +32,7 @@ from ptulsconv.pdf.continuity import output_report as output_continuity
 from json import JSONEncoder


-class MyEncoder(JSONEncoder):
+class FractionEncoder(JSONEncoder):
     """
     A subclass of :class:`JSONEncoder` which encodes :class:`Fraction` objects
     as a dict.
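Per its docstring, the renamed encoder's job is to make `Fraction` values JSON-serializable. A hedged sketch of how such an encoder typically looks; the dict keys here are illustrative and not necessarily the ones ptulsconv emits:

```python
from fractions import Fraction
from json import JSONEncoder, dumps

class FractionEncoder(JSONEncoder):
    """Encode Fraction values as a small dict instead of raising TypeError."""
    def default(self, o):
        if isinstance(o, Fraction):
            # Key names are assumptions for this sketch.
            return {"numerator": o.numerator, "denominator": o.denominator}
        return super().default(o)

print(dumps({"start": Fraction(1001, 24000)}, cls=FractionEncoder))
# {"start": {"numerator": 1001, "denominator": 24000}}
```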
@@ -55,8 +55,8 @@ def output_adr_csv(lines: List[ADRLine], time_format: TimecodeFormat):
     directories for each character number and name pair, and within that
     directory, creates a CSV file for each reel.
     """
-    reels = set([ln.reel for ln in lines])
-
+
+    reels: set[str | None] = set([ln.reel for ln in lines])
     for n, name in [(n.character_id, n.character_name) for n in lines]:
         dir_name = "%s_%s" % (n, name)
         os.makedirs(dir_name, exist_ok=True)
@@ -97,7 +97,7 @@ def output_adr_csv(lines: List[ADRLine], time_format: TimecodeFormat):
     os.chdir("..")


-def generate_documents(session_tc_format, scenes, adr_lines: Iterator[ADRLine],
+def generate_documents(session_tc_format, scenes, adr_lines: List[ADRLine],
                        title):
     """
     Create PDF output.
@@ -112,7 +112,7 @@ def generate_documents(session_tc_format, scenes, adr_lines: Iterator[ADRLine],
     supervisor = next((x.supervisor for x in adr_lines), "")

     output_continuity(scenes=scenes, tc_display_format=session_tc_format,
-                      title=title, client=client,
+                      title=title, client=client or "",
                       supervisor=supervisor)

     reels = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
@@ -187,13 +187,13 @@ def convert(major_mode, input_file=None, output=sys.stdout, warnings=True):
             req.time_type("tc")
             req.dont_show_crossfades()
             req.selected_tracks_only()
-            session_text = req.export_string
+            session_text = req.export_string()

     session = parse_document(session_text)
     session_tc_format = session.header.timecode_format

     if major_mode == 'raw':
-        output.write(MyEncoder().encode(session))
+        output.write(FractionEncoder().encode(session))

     else:
         compiler = TagCompiler()
@@ -201,7 +201,7 @@ def convert(major_mode, input_file=None, output=sys.stdout, warnings=True):
         compiled_events = list(compiler.compile_events())

         if major_mode == 'tagged':
-            output.write(MyEncoder().encode(compiled_events))
+            output.write(FractionEncoder().encode(compiled_events))

         elif major_mode == 'doc':
             generic_events, adr_lines = make_entities(compiled_events)
@@ -225,9 +225,10 @@ def convert(major_mode, input_file=None, output=sys.stdout, warnings=True):
             print_status_style("%i ADR events found." % len(adr_lines))

             if warnings:
-                perform_adr_validations(adr_lines)
+                perform_adr_validations(iter(adr_lines))

-            generate_documents(session_tc_format, scenes, adr_lines, title)
+            generate_documents(session_tc_format, scenes, adr_lines,
+                               title)


 def perform_adr_validations(lines: Iterator[ADRLine]):
src/ptulsconv/docparser/__init__.py:
@@ -2,4 +2,7 @@
 Docparser module
 """

+
 from .pt_doc_parser import parse_document
+
+__all__ = [parse_document]
src/ptulsconv/docparser/doc_entity.py:
@@ -19,8 +19,17 @@ class SessionDescriptor:
         self.tracks = kwargs['tracks']
         self.markers = kwargs['markers']

-    def markers_timed(self) -> Iterator[Tuple['MarkerDescriptor', Fraction]]:
+    def markers_timed(self,
+                      only_ruler_markers: bool = True) -> \
+            Iterator[Tuple['MarkerDescriptor', Fraction]]:
+        """
+        Iterate each marker in the session with its respective time reference.
+        """
         for marker in self.markers:
+
+            if marker.track_marker and only_ruler_markers:
+                continue
+
             marker_time = Fraction(marker.time_reference,
                                    int(self.header.sample_rate))
             # marker_time = self.header.convert_timecode(marker.location)
@@ -28,6 +37,9 @@ class SessionDescriptor:

     def tracks_clips(self) -> Iterator[Tuple['TrackDescriptor',
                                              'TrackClipDescriptor']]:
+        """
+        Iterate each track clip with its respective owning clip.
+        """
         for track in self.tracks:
             for clip in track.clips:
                 yield track, clip
@@ -37,7 +49,10 @@ class SessionDescriptor:
                                        Fraction, Fraction, Fraction]
                                  ]:
         """
-        :return: A Generator that yields track, clip, start time, finish time,
+        Iterate each track clip with its respective owning clip and timing
+        information.
+
+        :returns: A Generator that yields track, clip, start time, finish time,
             and timestamp
         """
         for track, clip in self.tracks_clips():
@@ -115,6 +130,7 @@ class HeaderDescriptor:


 class TrackDescriptor:
+    index: int
     name: str
     comments: str
     user_delay_samples: int
@@ -123,6 +139,7 @@ class TrackDescriptor:
     clips: List["TrackClipDescriptor"]

     def __init__(self, **kwargs):
+        self.index = kwargs['index']
         self.name = kwargs['name']
         self.comments = kwargs['comments']
         self.user_delay_samples = kwargs['user_delay_samples']
@@ -171,6 +188,7 @@ class MarkerDescriptor:
     units: str
     name: str
     comments: str
+    track_marker: bool

     def __init__(self, **kwargs):
         self.number = kwargs['number']
@@ -179,3 +197,4 @@ class MarkerDescriptor:
         self.units = kwargs['units']
         self.name = kwargs['name']
         self.comments = kwargs['comments']
+        self.track_marker = kwargs['track_marker']
src/ptulsconv/docparser/pt_doc_parser.py:
@@ -1,15 +1,16 @@
 from parsimonious.nodes import NodeVisitor
 from parsimonious.grammar import Grammar

-from .doc_entity import SessionDescriptor, HeaderDescriptor, TrackDescriptor,\
-    FileDescriptor, TrackClipDescriptor, ClipDescriptor, PluginDescriptor,\
+from .doc_entity import SessionDescriptor, HeaderDescriptor, TrackDescriptor, \
+    FileDescriptor, TrackClipDescriptor, ClipDescriptor, PluginDescriptor, \
     MarkerDescriptor


 protools_text_export_grammar = Grammar(
     r"""
     document = header files_section? clips_section? plugin_listing?
-               track_listing? markers_listing?
+               track_listing? markers_block?

     header = "SESSION NAME:" fs string_value rs
         "SAMPLE RATE:" fs float_value rs
         "BIT DEPTH:" fs integer_value "-bit" rs
@@ -74,17 +75,33 @@ protools_text_export_grammar = Grammar(

     track_clip_state = ("Muted" / "Unmuted")

-    markers_listing = markers_listing_header markers_column_header
-                      marker_record*
-    markers_listing_header = "M A R K E R S L I S T I N G" rs
-    markers_column_header = "# " fs "LOCATION " fs
-                            "TIME REFERENCE " fs
-                            "UNITS " fs
-                            "NAME " fs
-                            "COMMENTS" rs
+    markers_block = markers_block_header
+                    (markers_list / markers_list_simple)
+
+    markers_list_simple = markers_column_header_simple marker_record_simple*
+
+    markers_list = markers_column_header marker_record*
+
+    markers_block_header = "M A R K E R S L I S T I N G" rs
+
+    markers_column_header_simple =
+        "# LOCATION TIME REFERENCE "
+        "UNITS NAME "
+        "COMMENTS" rs
+
+    markers_column_header =
+        "# LOCATION TIME REFERENCE "
+        "UNITS NAME "
+        "TRACK NAME "
+        "TRACK TYPE COMMENTS" rs
+
+    marker_record_simple = integer_value isp fs string_value fs
+                           integer_value isp fs string_value fs string_value
+                           fs string_value rs

     marker_record = integer_value isp fs string_value fs integer_value isp fs
-                    string_value fs string_value fs string_value rs
+                    string_value fs string_value fs string_value fs
+                    string_value fs string_value rs

     fs = "\t"
     rs = "\n"
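For readers unfamiliar with parsimonious, the grammar above is a PEG: each rule is a name, `=`, and an expression built from quoted literals, other rule names, `?`/`*` repetition, and `/` ordered choice. A hedged, self-contained toy in the same style (the rule names and input here are invented, not the real export format):

```python
from parsimonious.grammar import Grammar

# Tab-separated fields, newline-terminated records, like the export grammar.
toy_grammar = Grammar(r"""
    record = number fs name rs
    number = ~r"\d+"
    name   = ~r"[^\t\n]*"
    fs     = "\t"
    rs     = "\n"
""")

tree = toy_grammar.parse("12\tMarker 1\n")
print(tree.children[0].text)  # "12", the matched `number` node
```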
@@ -108,8 +125,12 @@ def parse_document(session_text: str) -> SessionDescriptor:

 class DocParserVisitor(NodeVisitor):

-    @staticmethod
-    def visit_document(_, visited_children) -> SessionDescriptor:
+    def __init__(self):
+        self.track_index = 0
+
+    # @staticmethod
+    def visit_document(self, _, visited_children) -> SessionDescriptor:
+        self.track_index = 0
         files = next(iter(visited_children[1]), None)
         clips = next(iter(visited_children[2]), None)
         plugins = next(iter(visited_children[3]), None)
@@ -166,8 +187,8 @@ class DocParserVisitor(NodeVisitor):
                         count_instances=child[10]),
                     visited_children[2]))

-    @staticmethod
-    def visit_track_block(_, visited_children):
+    # @staticmethod
+    def visit_track_block(self, _, visited_children):
         track_header, track_clip_list = visited_children
         clips = []
         for clip in track_clip_list:
@@ -179,7 +200,11 @@ class DocParserVisitor(NodeVisitor):
         for plugin in plugin_opt[1]:
             plugins.append(plugin[1])

+        this_index = self.track_index
+        self.track_index += 1
+
         return TrackDescriptor(
+            index=this_index,
             name=track_header[2],
             comments=track_header[6],
             user_delay_samples=track_header[10],
@@ -223,22 +248,37 @@ class DocParserVisitor(NodeVisitor):
         return node.text

     @staticmethod
-    def visit_markers_listing(_, visited_children):
+    def visit_markers_block(_, visited_children):
         markers = []

-        for marker in visited_children[2]:
+        for marker in visited_children[1][0][1]:
             markers.append(marker)

         return markers

     @staticmethod
-    def visit_marker_record(_, visited_children):
+    def visit_marker_record_simple(_, visited_children):

         return MarkerDescriptor(number=visited_children[0],
                                 location=visited_children[3],
                                 time_reference=visited_children[5],
                                 units=visited_children[8],
                                 name=visited_children[10],
-                                comments=visited_children[12])
+                                comments=visited_children[12],
+                                track_marker=False)
+
+    @staticmethod
+    def visit_marker_record(_, visited_children):
+        track_type = visited_children[15]
+        is_track_marker = (track_type == "Track")
+
+        return MarkerDescriptor(number=visited_children[0],
+                                location=visited_children[3],
+                                time_reference=visited_children[5],
+                                units=visited_children[8],
+                                name=visited_children[10],
+                                comments=visited_children[16],
+                                track_marker=is_track_marker)

     @staticmethod
     def visit_formatted_clip_name(_, visited_children):
src/ptulsconv/pdf/__init__.py:
@@ -79,13 +79,13 @@ def make_doc_template(page_size, filename, document_title,
     header_box, page_box = page_box.split_y(0.75 * inch, direction='d')
     title_box, report_box = header_box.split_x(3.5 * inch, direction='r')

-    on_page_lambda = (lambda c, _:
-                      draw_header_footer(c, report_box, title_box,
-                                         footer_box, title=title,
-                                         supervisor=supervisor,
-                                         document_subheader=document_subheader,
-                                         client=client,
-                                         doc_title=document_header))
+    def on_page_lambda(c, _):
+        draw_header_footer(c, report_box, title_box,
+                           footer_box, title=title,
+                           supervisor=supervisor,
+                           document_subheader=document_subheader,
+                           client=client,
+                           doc_title=document_header)

     frames = [Frame(page_box.min_x, page_box.min_y,
                     page_box.width, page_box.height)]
src/ptulsconv/pdf/ (a report module):
@@ -5,7 +5,7 @@ from .__init__ import make_doc_template
 from reportlab.lib.units import inch
 from reportlab.lib.pagesizes import letter

-from reportlab.platypus import Paragraph, Spacer, KeepTogether, Table,\
+from reportlab.platypus import Paragraph, Spacer, KeepTogether, Table, \
     HRFlowable
 from reportlab.lib.styles import getSampleStyleSheet
 from reportlab.lib import colors
tests/export_cases/Test for ptulsconv.txt (new file, 24 lines; tab-delimited in the actual export):
@@ -0,0 +1,24 @@
+SESSION NAME: Test for ptulsconv
+SAMPLE RATE: 48000.000000
+BIT DEPTH: 24-bit
+SESSION START TIMECODE: 00:00:00:00
+TIMECODE FORMAT: 23.976 Frame
+# OF AUDIO TRACKS: 1
+# OF AUDIO CLIPS: 0
+# OF AUDIO FILES: 0
+
+
+T R A C K L I S T I N G
+TRACK NAME: Hamlet
+COMMENTS: {Actor=Laurence Olivier}
+USER DELAY: 0 Samples
+STATE:
+CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
+1 1 Test Line 1 $QN=T1001 00:00:00:00 00:00:02:00 00:00:02:00 Unmuted
+1 2 Test Line 2 $QN=T1002 00:00:04:00 00:00:06:00 00:00:02:00 Unmuted
+
+
+M A R K E R S L I S T I N G
+# LOCATION TIME REFERENCE UNITS NAME TRACK NAME TRACK TYPE COMMENTS
+1 00:00:00:00 0 Samples {Title=Multiple Marker Rulers Project} Markers Ruler
+2 00:00:04:00 192192 Samples Track Marker Hamlet Track
tests (PDF export test module):
@@ -2,33 +2,52 @@ import unittest

 import tempfile

+import sys
 import os.path
 import os
 import glob

 from ptulsconv import commands


 class TestPDFExport(unittest.TestCase):
     def test_report_generation(self):
         """
         Setp through every text file in export_cases and make sure it can
         be converted into PDF docs without throwing an error
         """
-        files = [os.path.dirname(__file__) + "/../export_cases/Robin Hood Spotting.txt"]
-        #files.append(os.path.dirname(__file__) + "/../export_cases/Robin Hood Spotting2.txt")
+        files = []
+        files = [os.path.dirname(__file__) +
+                 "/../export_cases/Robin Hood Spotting.txt"]
         for path in files:
             tempdir = tempfile.TemporaryDirectory()
             os.chdir(tempdir.name)
             try:
                 commands.convert(input_file=path, major_mode='doc')
-            except:
-                assert False, "Error processing file %s" % path
+            except Exception as e:
+                print("Error in test_report_generation")
+                print(f"File: {path}")
+                print(repr(e))
+                raise e
+            finally:
+                tempdir.cleanup()
+
+    def test_report_generation_track_markers(self):
+        files = []
+        files.append(os.path.dirname(__file__) +
+                     "/../export_cases/Test for ptulsconv.txt")
+        for path in files:
+            tempdir = tempfile.TemporaryDirectory()
+            os.chdir(tempdir.name)
+            try:
+                commands.convert(input_file=path, major_mode='doc')
+            except Exception as e:
+                print("Error in test_report_generation_track_markers")
+                print(f"File: {path}")
+                print(repr(e))
+                raise e
             finally:
                 tempdir.cleanup()


 if __name__ == '__main__':
     unittest.main()
tests (tag compiler test module):
@@ -88,7 +88,9 @@ class TestTagCompiler(unittest.TestCase):
                 state='Unmuted',
                 timestamp=None),
         ]
-        test_track = doc_entity.TrackDescriptor(name="Track 1 [A] {Color=Red} $Mode=1",
+        test_track = doc_entity.TrackDescriptor(
+            index=0,
+            name="Track 1 [A] {Color=Red} $Mode=1",
             comments="{Comment=This is some text in the comments}",
             user_delay_samples=0,
             plugins=[],
@@ -100,14 +102,14 @@ class TestTagCompiler(unittest.TestCase):
                                         time_reference=48000 * 3600,
                                         units="Samples",
                                         name="Marker 1 {Part=1}",
-                                        comments=""
+                                        comments="", track_marker=False,
                                         ),
             doc_entity.MarkerDescriptor(number=2,
                                         location="01:00:01:00",
                                         time_reference=48000 * 3601,
                                         units="Samples",
                                         name="Marker 2 {Part=2}",
-                                        comments="[M1]"
+                                        comments="[M1]", track_marker=False,
                                         ),
         ]
