mirror of
https://github.com/iluvcapra/ptulsconv.git
synced 2025-12-31 08:50:48 +00:00
Compare commits
321 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5f29e95ba9 | ||
|
|
82f07b13a6 | ||
|
|
fbcbba1098 | ||
|
|
622f04963f | ||
|
|
5b36dcb5aa | ||
|
|
fd02d962d0 | ||
|
|
2021159666 | ||
|
|
f825b92586 | ||
|
|
4318946596 | ||
|
|
2a98954885 | ||
|
|
79d8cc5b69 | ||
|
|
5785dc3364 | ||
|
|
4e64edcd85 | ||
|
|
58277367c5 | ||
|
|
617f34a515 | ||
|
|
5427b4cfb1 | ||
|
|
408829e820 | ||
|
|
b65401d25f | ||
|
|
50fe3e2c0a | ||
|
|
1c8feec8fe | ||
|
|
f510f98ede | ||
|
|
ddf1948f3c | ||
|
|
1c9d373b40 | ||
|
|
51b2517db1 | ||
|
|
27dd8bc94d | ||
|
|
dd394a8fec | ||
|
|
b5571891cf | ||
|
|
73058e9423 | ||
|
|
a11cda40e5 | ||
|
|
7381a37185 | ||
|
|
065bd26f4c | ||
|
|
7ec983f63f | ||
|
|
944e66728b | ||
|
|
6473c83785 | ||
|
|
8947d409b4 | ||
|
|
0494e771be | ||
|
|
f00bea8702 | ||
|
|
6e82a14e4f | ||
|
|
07669e4eca | ||
|
|
ddc406b1eb | ||
|
|
e07b3bb604 | ||
|
|
c02453d10f | ||
|
|
cdc8a838ac | ||
|
|
e2c7408413 | ||
|
|
a18154edb0 | ||
|
|
f15ee40d37 | ||
|
|
cd26be0c20 | ||
|
|
d50e45882b | ||
|
|
adb80eb174 | ||
|
|
2b91f128b9 | ||
|
|
9f24d45f25 | ||
|
|
3a58fdba75 | ||
|
|
800a4dfb12 | ||
|
|
6bc98063db | ||
|
|
b1bf49ca82 | ||
|
|
61250aaf63 | ||
|
|
43df2c1558 | ||
|
|
17dc868756 | ||
|
|
2e36a789b4 | ||
|
|
1345113a85 | ||
|
|
76c2e24084 | ||
|
|
a5ed16849c | ||
|
|
4c3e103e77 | ||
|
|
dd767b2d41 | ||
|
|
aaf751c1a2 | ||
|
|
91e0da278f | ||
|
|
a7d01779bd | ||
|
|
cb6c0c8895 | ||
|
|
a2a6782214 | ||
|
|
2c78d4a09d | ||
|
|
28cf7b5d09 | ||
|
|
b419814f82 | ||
|
|
967ef5c63a | ||
|
|
fe1a1eebd5 | ||
|
|
dadeab49fe | ||
|
|
900dd5d582 | ||
|
|
5882e01b31 | ||
|
|
e2e86faf54 | ||
|
|
bfdefc8da0 | ||
|
|
2af9317e7e | ||
|
|
9194e5ba54 | ||
|
|
528bd949ca | ||
|
|
5633eb89f0 | ||
|
|
29e1753b18 | ||
|
|
1df0b79ab6 | ||
|
|
68db6c9b09 | ||
|
|
2c664db0dd | ||
|
|
e46ac14118 | ||
|
|
bf3a5c37a8 | ||
|
|
d3b08e9238 | ||
|
|
c0d192e651 | ||
|
|
d3cc9074c4 | ||
|
|
87108c7865 | ||
|
|
04422360f0 | ||
|
|
cd4122ce50 | ||
|
|
a176d3b1f5 | ||
|
|
8a6f5e755b | ||
|
|
b4fef4b13f | ||
|
|
6fc7f26e9c | ||
|
|
09b3f9349b | ||
|
|
f6ee807ede | ||
|
|
f114012d4a | ||
|
|
c03b3dfb8d | ||
|
|
d2da8f1cb0 | ||
|
|
10c0e4f038 | ||
|
|
6703184f8f | ||
|
|
1c11e4d570 | ||
|
|
94317c288f | ||
|
|
9e374df367 | ||
|
|
fc2e823116 | ||
|
|
fbc7531374 | ||
|
|
1fb17b13ea | ||
|
|
21c32e282c | ||
|
|
8407d31333 | ||
|
|
97d6eeda02 | ||
|
|
3bee7a8391 | ||
|
|
68d38f8ed5 | ||
|
|
8e043b7175 | ||
|
|
a7b5adfffb | ||
|
|
da5b743191 | ||
|
|
caa5381306 | ||
|
|
9e2b932cad | ||
|
|
05ea48078f | ||
|
|
c26fa8dd75 | ||
|
|
9f8e3cf824 | ||
|
|
3b438b1399 | ||
|
|
41b1a3185f | ||
|
|
8877982a47 | ||
|
|
bb6fbcfd37 | ||
|
|
434b8816ee | ||
|
|
5ebaf6b473 | ||
|
|
d0f415b38f | ||
|
|
c5d6d82831 | ||
|
|
66a71283d5 | ||
|
|
15ad328edc | ||
|
|
a48eccb0d0 | ||
|
|
fa2cef35b2 | ||
|
|
c8053f65ae | ||
|
|
d9da7317a7 | ||
|
|
ab614cbc32 | ||
|
|
5a75a77f77 | ||
|
|
17f17d592a | ||
|
|
b41d92b842 | ||
|
|
dc11a40530 | ||
|
|
b80e593350 | ||
|
|
961d68df21 | ||
|
|
4daa5f0496 | ||
|
|
de48bcfe24 | ||
|
|
80729443d1 | ||
|
|
c1671f3656 | ||
|
|
6c76827f42 | ||
|
|
1228e2adbe | ||
|
|
88d9aef92a | ||
|
|
e5f94bb506 | ||
|
|
539cae15b7 | ||
|
|
ee407e9e62 | ||
|
|
591d966e64 | ||
|
|
6f2ec325cf | ||
|
|
ed2a916673 | ||
|
|
926072ae3c | ||
|
|
a06b8c8aaa | ||
|
|
7cd86332e8 | ||
|
|
02a96d7143 | ||
|
|
9043d28a21 | ||
|
|
81cdca5452 | ||
|
|
68969e77e2 | ||
|
|
d0da79b31b | ||
|
|
ccf6e65210 | ||
|
|
99c18478e6 | ||
|
|
2656aaaf20 | ||
|
|
3882939833 | ||
|
|
e0b2d00332 | ||
|
|
5559e1b057 | ||
|
|
3cd5a99dbb | ||
|
|
898fd96808 | ||
|
|
80305f6098 | ||
|
|
338e8c8fa6 | ||
|
|
6f37de4f20 | ||
|
|
51eada4cde | ||
|
|
ddd2cdb873 | ||
|
|
b60795fd95 | ||
|
|
d1a5430923 | ||
|
|
5416433c82 | ||
|
|
bda68b9c3b | ||
|
|
40b80b9997 | ||
|
|
2f55b16750 | ||
|
|
be4a4b91c0 | ||
|
|
7ab2907b26 | ||
|
|
55e19a3b8d | ||
|
|
784699050a | ||
|
|
55324a0f82 | ||
|
|
5fb4c389f4 | ||
|
|
b46fc85b16 | ||
|
|
caf4317b76 | ||
|
|
24c5a87358 | ||
|
|
8d4058d026 | ||
|
|
8654fdb847 | ||
|
|
594830144d | ||
|
|
1a43888c43 | ||
|
|
ade1cc463a | ||
|
|
f5acfd2362 | ||
|
|
945ba6102b | ||
|
|
2466db1401 | ||
|
|
76a90363fb | ||
|
|
be7a01cab9 | ||
|
|
32e3cfc594 | ||
|
|
c6be2ba404 | ||
|
|
3502eaddfd | ||
|
|
2e08499f70 | ||
|
|
2cc4f423cf | ||
|
|
7558e8f63c | ||
|
|
f514dde259 | ||
|
|
3dd36a9901 | ||
|
|
d1bb5990b2 | ||
|
|
859a427fc4 | ||
|
|
644c8a6f5d | ||
|
|
23174e3a97 | ||
|
|
3889f871b8 | ||
|
|
f4ad4a5b5d | ||
|
|
a9596c444d | ||
|
|
52bbecb909 | ||
|
|
b50d83c748 | ||
|
|
1294d5e208 | ||
|
|
9633bcdefb | ||
|
|
20b84623ff | ||
|
|
9927488f1e | ||
|
|
2f037ad4db | ||
|
|
f5a80b3bdf | ||
|
|
38fc92183d | ||
|
|
9d2c50b219 | ||
|
|
f2fd0e5da3 | ||
|
|
4ea8330921 | ||
|
|
d92b85897f | ||
|
|
88b32da0bd | ||
|
|
e9a23fb680 | ||
|
|
d6c5026bf0 | ||
|
|
e52bddc2fa | ||
|
|
4d9538b997 | ||
|
|
be92cbe884 | ||
|
|
ccf35283a7 | ||
|
|
d52c063607 | ||
|
|
9a5273bac5 | ||
|
|
ac5e7ffc35 | ||
|
|
f8cadcb9dc | ||
|
|
b1722966c6 | ||
|
|
00a05506d4 | ||
|
|
fe93985041 | ||
|
|
0fa37d7f19 | ||
|
|
90aa01749e | ||
|
|
efcb0acd08 | ||
|
|
35f7672d61 | ||
|
|
67b785c2c2 | ||
|
|
6cb93ea75f | ||
|
|
9a1dff2c3c | ||
|
|
8c95930aaa | ||
|
|
2aca5a3e8f | ||
|
|
8a067984eb | ||
|
|
9e00ba0dab | ||
|
|
a112e73a64 | ||
|
|
0e5b1e6dc9 | ||
|
|
5808f3a4ac | ||
|
|
a7a472d63f | ||
|
|
8b85793826 | ||
|
|
e78e55639d | ||
|
|
294e0732df | ||
|
|
9a88e5ff4e | ||
|
|
f161532768 | ||
|
|
c937a3745b | ||
|
|
d17f6951d6 | ||
|
|
6ad29ccf8b | ||
|
|
bb504ed7ce | ||
|
|
5db8a01271 | ||
|
|
99096f7dec | ||
|
|
d734180010 | ||
|
|
b0e7703303 | ||
|
|
30daec452d | ||
|
|
6ca1e5532d | ||
|
|
a0d386b666 | ||
|
|
3226e63f1d | ||
|
|
3a597b5046 | ||
|
|
b5d9b5acc2 | ||
|
|
9f2a080f6b | ||
|
|
1903e2a1f9 | ||
|
|
69491d98d7 | ||
|
|
7816f08912 | ||
|
|
44388c6b7d | ||
|
|
9daedca4de | ||
|
|
93a014bdc0 | ||
|
|
9bb2ae136a | ||
|
|
3718541e09 | ||
|
|
a58451d225 | ||
|
|
319ef3800d | ||
|
|
1d63234447 | ||
|
|
edb641b7ec | ||
|
|
eaf24ad6a8 | ||
|
|
6d5cd04c50 | ||
|
|
013081ef96 | ||
|
|
d57ee88bc2 | ||
|
|
d29a08dadf | ||
|
|
e806de0c1f | ||
|
|
22edecdbbf | ||
|
|
7c8a74aed9 | ||
|
|
96a4cdb612 | ||
|
|
8720087bb2 | ||
|
|
f734aae227 | ||
|
|
17e9c77ed7 | ||
|
|
fc7dde8fd6 | ||
|
|
3021721299 | ||
|
|
cf9be9abf1 | ||
|
|
73936510cd | ||
|
|
d118554443 | ||
|
|
22c205d638 | ||
|
|
36ac320b44 | ||
|
|
6fe0ff4314 | ||
|
|
a23119eb8c | ||
|
|
af29318a0c | ||
|
|
80f1114f05 | ||
|
|
9e8518a321 | ||
|
|
5ff1df7273 | ||
|
|
e05e56bcb5 | ||
|
|
4eba5b6b17 |
4
.flake8
Normal file
4
.flake8
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
[flake8]
|
||||||
|
per-file-ignores =
|
||||||
|
ptulsconv/__init__.py: F401
|
||||||
|
ptulsconv/docparser/__init__.py: F401
|
||||||
41
.github/workflows/python-package.yml
vendored
Normal file
41
.github/workflows/python-package.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
|
||||||
|
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
|
||||||
|
|
||||||
|
name: Lint and Test
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ master ]
|
||||||
|
pull_request:
|
||||||
|
branches: [ master ]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
python-version: [3.8, 3.9, "3.10", "3.11"]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2.5.0
|
||||||
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
uses: actions/setup-python@v4.3.0
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
python -m pip install flake8 pytest
|
||||||
|
pip install -e .
|
||||||
|
- name: Lint with flake8
|
||||||
|
run: |
|
||||||
|
# stop the build if there are Python syntax errors or undefined names
|
||||||
|
flake8 ptulsconv tests --count --select=E9,F63,F7,F82 --show-source --statistics
|
||||||
|
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
|
||||||
|
flake8 ptulsconv tests --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
|
||||||
|
- name: Test with pytest
|
||||||
|
run: |
|
||||||
|
pytest
|
||||||
|
flake8 ptulsconv
|
||||||
39
.github/workflows/pythonpublish.yml
vendored
Normal file
39
.github/workflows/pythonpublish.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
name: Upload Python Package
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
id-token: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
deploy:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
environment:
|
||||||
|
name: release
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3.5.2
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v4.6.0
|
||||||
|
with:
|
||||||
|
python-version: '3.x'
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install build
|
||||||
|
- name: Build package
|
||||||
|
run: python -m build
|
||||||
|
- name: pypi-publish
|
||||||
|
uses: pypa/gh-action-pypi-publish@v1.8.6
|
||||||
|
# - name: Report to Mastodon
|
||||||
|
# uses: cbrgm/mastodon-github-action@v1.0.1
|
||||||
|
# with:
|
||||||
|
# message: |
|
||||||
|
# I just released a new version of ptulsconv, my ADR cue sheet generator!
|
||||||
|
# #python #protools #pdf #filmmaking
|
||||||
|
# ${{ github.server_url }}/${{ github.repository }}
|
||||||
|
# env:
|
||||||
|
# MASTODON_URL: ${{ secrets.MASTODON_URL }}
|
||||||
|
# MASTODON_ACCESS_TOKEN: ${{ secrets.MASTODON_ACCESS_TOKEN }}
|
||||||
22
.github/workflows/toot.yml
vendored
Normal file
22
.github/workflows/toot.yml
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
name: Test Toot
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
print-tag:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Report to Mastodon
|
||||||
|
uses: cbrgm/mastodon-github-action@v1.0.1
|
||||||
|
env:
|
||||||
|
MASTODON_URL: ${{ secrets.MASTODON_URL }}
|
||||||
|
MASTODON_ACCESS_TOKEN: ${{ secrets.MASTODON_ACCESS_TOKEN }}
|
||||||
|
with:
|
||||||
|
message: |
|
||||||
|
This is a test toot, automatically posted by a github action.
|
||||||
|
|
||||||
|
${{ github.server_url }}/${{ github.repository }}
|
||||||
|
|
||||||
|
${{ github.ref }}
|
||||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -89,6 +89,7 @@ venv/
|
|||||||
ENV/
|
ENV/
|
||||||
env.bak/
|
env.bak/
|
||||||
venv.bak/
|
venv.bak/
|
||||||
|
venv_docs/
|
||||||
|
|
||||||
# Spyder project settings
|
# Spyder project settings
|
||||||
.spyderproject
|
.spyderproject
|
||||||
@@ -102,3 +103,9 @@ venv.bak/
|
|||||||
|
|
||||||
# mypy
|
# mypy
|
||||||
.mypy_cache/
|
.mypy_cache/
|
||||||
|
.DS_Store
|
||||||
|
/example/Charade/Session File Backups/
|
||||||
|
lcov.info
|
||||||
|
|
||||||
|
.vim
|
||||||
|
.vscode
|
||||||
|
|||||||
4
.idea/.gitignore
generated
vendored
4
.idea/.gitignore
generated
vendored
@@ -1,4 +0,0 @@
|
|||||||
|
|
||||||
# Default ignored files
|
|
||||||
/workspace.xml
|
|
||||||
/tasks.xml
|
|
||||||
8
.idea/dictionaries/jamiehardt.xml
generated
8
.idea/dictionaries/jamiehardt.xml
generated
@@ -1,8 +0,0 @@
|
|||||||
<component name="ProjectDictionaryState">
|
|
||||||
<dictionary name="jamiehardt">
|
|
||||||
<words>
|
|
||||||
<w>frac</w>
|
|
||||||
<w>mins</w>
|
|
||||||
</words>
|
|
||||||
</dictionary>
|
|
||||||
</component>
|
|
||||||
6
.idea/inspectionProfiles/profiles_settings.xml
generated
6
.idea/inspectionProfiles/profiles_settings.xml
generated
@@ -1,6 +0,0 @@
|
|||||||
<component name="InspectionProjectProfileManager">
|
|
||||||
<settings>
|
|
||||||
<option name="USE_PROJECT_PROFILE" value="false" />
|
|
||||||
<version value="1.0" />
|
|
||||||
</settings>
|
|
||||||
</component>
|
|
||||||
4
.idea/misc.xml
generated
4
.idea/misc.xml
generated
@@ -1,4 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
|
|
||||||
</project>
|
|
||||||
8
.idea/modules.xml
generated
8
.idea/modules.xml
generated
@@ -1,8 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="ProjectModuleManager">
|
|
||||||
<modules>
|
|
||||||
<module fileurl="file://$PROJECT_DIR$/.idea/ptulsconv.iml" filepath="$PROJECT_DIR$/.idea/ptulsconv.iml" />
|
|
||||||
</modules>
|
|
||||||
</component>
|
|
||||||
</project>
|
|
||||||
11
.idea/ptulsconv.iml
generated
11
.idea/ptulsconv.iml
generated
@@ -1,11 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<module type="PYTHON_MODULE" version="4">
|
|
||||||
<component name="NewModuleRootManager">
|
|
||||||
<content url="file://$MODULE_DIR$" />
|
|
||||||
<orderEntry type="jdk" jdkName="Python 3.7" jdkType="Python SDK" />
|
|
||||||
<orderEntry type="sourceFolder" forTests="false" />
|
|
||||||
</component>
|
|
||||||
<component name="TestRunnerService">
|
|
||||||
<option name="PROJECT_TEST_RUNNER" value="Unittests" />
|
|
||||||
</component>
|
|
||||||
</module>
|
|
||||||
6
.idea/vcs.xml
generated
6
.idea/vcs.xml
generated
@@ -1,6 +0,0 @@
|
|||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="VcsDirectoryMappings">
|
|
||||||
<mapping directory="$PROJECT_DIR$" vcs="Git" />
|
|
||||||
</component>
|
|
||||||
</project>
|
|
||||||
32
.readthedocs.yaml
Normal file
32
.readthedocs.yaml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
# .readthedocs.yaml
|
||||||
|
# Read the Docs configuration file
|
||||||
|
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||||
|
|
||||||
|
# Required
|
||||||
|
version: 2
|
||||||
|
|
||||||
|
# Set the version of Python and other tools you might need
|
||||||
|
build:
|
||||||
|
os: ubuntu-20.04
|
||||||
|
tools:
|
||||||
|
python: "3.10"
|
||||||
|
# You can also specify other tool versions:
|
||||||
|
# nodejs: "16"
|
||||||
|
# rust: "1.55"
|
||||||
|
# golang: "1.17"
|
||||||
|
|
||||||
|
# Build documentation in the docs/ directory with Sphinx
|
||||||
|
sphinx:
|
||||||
|
configuration: docs/source/conf.py
|
||||||
|
|
||||||
|
#If using Sphinx, optionally build your docs in additional formats such as PDF
|
||||||
|
formats:
|
||||||
|
- pdf
|
||||||
|
|
||||||
|
#Optionally declare the Python requirements required to build your docs
|
||||||
|
python:
|
||||||
|
install:
|
||||||
|
- method: pip
|
||||||
|
path: .
|
||||||
|
extra_requirements:
|
||||||
|
- doc
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
language: python
|
|
||||||
python:
|
|
||||||
- "3.7"
|
|
||||||
script:
|
|
||||||
- "python -m unittest discover tests"
|
|
||||||
install:
|
|
||||||
- "pip install setuptools"
|
|
||||||
- "pip install parsimonious tqdm"
|
|
||||||
9
CONTRIBUTING.md
Normal file
9
CONTRIBUTING.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
# Contributing to ptulsconv
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
Before submitting PRs or patches, please make sure your branch passes all of the unit tests by running Pytest.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
~/ptulsconv$ pytest
|
||||||
|
```
|
||||||
2
LICENSE
2
LICENSE
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2019 Jamie Hardt
|
Copyright (c) 2022 Jamie Hardt
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
123
README.md
123
README.md
@@ -1,31 +1,21 @@
|
|||||||
[](https://travis-ci.com/iluvcapra/ptulsconv)
|
[](https://ptulsconv.readthedocs.io/en/latest/?badge=latest)
|
||||||
  [](https://pypi.org/project/ptulsconv/) 
|

|
||||||
|

|
||||||
|
[][pypi]
|
||||||
|

|
||||||
|
[](https://github.com/iluvcapra/ptulsconv/actions/workflows/python-package.yml)
|
||||||
|
|
||||||
|
[pypi]: https://pypi.org/project/ptulsconv/
|
||||||
|
|
||||||
|
|
||||||
# ptulsconv
|
# ptulsconv
|
||||||
Read Pro Tools text exports and generate XML, JSON, reports
|
|
||||||
|
|
||||||
## Quick Example
|
Read Pro Tools text exports and generate PDF reports, JSON output.
|
||||||
|
|
||||||
At this time we're using `ptulsconv` mostly for converting ADR notes in a Pro Tools session
|
## Quick Start
|
||||||
into an XML document we can import into Filemaker Pro.
|
|
||||||
|
For a quick overview of how to cue ADR with `ptulsconv`, check out the [Quickstart][quickstart].
|
||||||
|
|
||||||
% ptulsconv STAR_WARS_IV_R1_ADR_Notes_PT_Text_Export.txt > SW4_r1_ADR_Notes.xml
|
|
||||||
% xmllint --format SW4_r1_ADR_Notes.xml
|
|
||||||
<?xml version="1.0"?>
|
|
||||||
<FMPXMLRESULT xmlns="http://www.filemaker.com/fmpxmlresult">
|
|
||||||
<ERRORCODE>0</ERRORCODE>
|
|
||||||
<PRODUCT NAME="ptulsconv" VERSION="0.0.1"/>
|
|
||||||
<DATABASE DATEFORMAT="MM/dd/yy" LAYOUT="summary"
|
|
||||||
NAME="STAR_WARS_IV_R1_ADR_Notes_PT_Text_Export.txt"
|
|
||||||
RECORDS="84" TIMEFORMAT="hh:mm:ss"/>
|
|
||||||
<METADATA>
|
|
||||||
<FIELD EMPTYOK="YES" MAXREPEAT="1" NAME="Title" TYPE="TEXT"/>
|
|
||||||
<FIELD EMPTYOK="YES" MAXREPEAT="1" NAME="Supervisor" TYPE="TEXT"/>
|
|
||||||
<FIELD EMPTYOK="YES" MAXREPEAT="1" NAME="Client" TYPE="TEXT"/>
|
|
||||||
<FIELD EMPTYOK="YES" MAXREPEAT="1" NAME="Scene" TYPE="TEXT"/>
|
|
||||||
<FIELD EMPTYOK="YES" MAXREPEAT="1" NAME="Version" TYPE="TEXT"/>
|
|
||||||
<FIELD EMPTYOK="YES" MAXREPEAT="1" NAME="Reel" TYPE="TEXT"/>
|
|
||||||
<FIELD EMPTYOK="YES" MAXREPEAT="1" NAME="Start" TYPE="TEXT"/>
|
|
||||||
[... much much more]
|
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
@@ -33,85 +23,8 @@ The easiest way to install on your site is to use `pip`:
|
|||||||
|
|
||||||
% pip3 install ptulsconv
|
% pip3 install ptulsconv
|
||||||
|
|
||||||
This will install the necessary libraries on your host and gives you command-line access to the tool through an
|
This will install the necessary libraries on your host and gives you
|
||||||
entry-point `ptulsconv`. In a terminal window type `ptulsconv -h` for a list of available options.
|
command-line access to the tool through an entry-point `ptulsconv`. In a
|
||||||
|
terminal window type `ptulsconv -h` for a list of available options.
|
||||||
|
|
||||||
## Theory of Operation
|
[quickstart]: https://ptulsconv.readthedocs.io/en/latest/user/quickstart.html
|
||||||
|
|
||||||
[Avid Pro Tools][avp] exports a tab-delimited text file organized in multiple parts with an uneven syntax that usually
|
|
||||||
can't "drop in" to other tools like Excel or Filemaker. This tool accepts a text export from Pro Tools and produces an
|
|
||||||
XML file in the `FMPXMLRESULT` schema which Filemaker Pro can import directly into a new table.
|
|
||||||
|
|
||||||
In the default mode, all of the clips are parsed and converted into a flat list of events, one Filemaker Pro row per
|
|
||||||
clip with a start and finish time, track name, session name, etc. Timecodes are parsed and converted into frame counts
|
|
||||||
and seconds. Text is then parsed for descriptive meta-tags and these are assigned to columns in the output list.
|
|
||||||
|
|
||||||
[avp]: http://www.avid.com/pro-tools
|
|
||||||
|
|
||||||
### Fields in Clip Names
|
|
||||||
|
|
||||||
Track names, track comments, and clip names can also contain meta-tags, or "fields," to add additional columns to the
|
|
||||||
output. Thus, if a clip has the name:
|
|
||||||
|
|
||||||
`Fireworks explosion {note=Replace for final} $V=1 [FX] [DESIGN]`
|
|
||||||
|
|
||||||
The row output for this clip will contain columns for the values:
|
|
||||||
|
|
||||||
|...| PT.Clip.Name| note | V | FX | DESIGN | ...|
|
|
||||||
|---|------------|------|---|----|--------|----|
|
|
||||||
|...| Fireworks explosion| Replace for final | 1 | FX | DESIGN | ... |
|
|
||||||
|
|
||||||
These fields can be defined in the clip name in three ways:
|
|
||||||
* `$NAME=VALUE` creates a field named `NAME` with a one-word value `VALUE`.
|
|
||||||
* `{NAME=VALUE}` creates a field named `NAME` with the value `VALUE`. `VALUE` in this case may contain spaces or any
|
|
||||||
character up to the closing bracket.
|
|
||||||
* `[NAME]` creates a field named `NAME` with a value `NAME`. This can be used to create a boolean-valued field; in the
|
|
||||||
output, clips with the field will have it, and clips without will have the column with an empty value.
|
|
||||||
|
|
||||||
For example, if two clips are named:
|
|
||||||
|
|
||||||
`"Squad fifty-one, what is your status?" [FUTZ] {Ch=Dispatcher} [ADR]`
|
|
||||||
|
|
||||||
`"We are ten-eight at Rampart Hospital." {Ch=Gage} [ADR]`
|
|
||||||
|
|
||||||
The output will contain the range:
|
|
||||||
|
|
||||||
|...| PT.Clip.Name| Ch | FUTZ | ADR | ...|
|
|
||||||
|---|------------|------|---|----|-----|
|
|
||||||
|...| "Squad fifty-one, what is your status?"| Dispatcher | FUTZ | ADR | ... |
|
|
||||||
|...| "We are ten-eight at Rampart Hospital."| Gage | | ADR | ... |
|
|
||||||
|
|
||||||
|
|
||||||
### Fields in Track Names and Markers
|
|
||||||
|
|
||||||
Fields set in track names, and in track comments, will be applied to *each* clip on that track. If a track comment
|
|
||||||
contains the text `{Dept=Foley}` for example, every clip on that track will have a "Foley" value in a "Dept" column.
|
|
||||||
|
|
||||||
Likewise, fields set on the session name will apply to all clips in the session.
|
|
||||||
|
|
||||||
Fields set in markers, and in marker comments, will be applied to all clips whose finish is *after* that marker. Fields
|
|
||||||
in markers are applied cumulatively from breakfast to dinner in the session. The latest marker applying to a clip has
|
|
||||||
precedence, so if one marker comes after the other, but both define a field, the value in the later marker
|
|
||||||
|
|
||||||
An important note here is that, always, fields set on the clip name have the highest precedence. If a field is set in a clip
|
|
||||||
name, the same field set on the track, the value set on the clip will prevail.
|
|
||||||
|
|
||||||
### Using `@` to Apply Fields to a Span of Clips
|
|
||||||
|
|
||||||
A clip name beginning with "@" will not be included in the CSV output, but its fields will be applied to clips within
|
|
||||||
its time range on lower tracks.
|
|
||||||
|
|
||||||
If track 1 has a clip named `@ {Sc=1- The House}`, any clips beginning within that range on lower tracks will have a
|
|
||||||
field `Sc` with that value.
|
|
||||||
|
|
||||||
### Using `&` to Combine Clips
|
|
||||||
|
|
||||||
A clip name beginning with "&" will have its parsed clip name appended to the preceding cue, and the fields of following
|
|
||||||
cues will be applied (later clips having precedence). The clips need not be touching, and the clips will be combined
|
|
||||||
into a single row of the output. The start time of the first clip will become the start time of the row, and the finish
|
|
||||||
time of the last clip will become the finish time of the row.
|
|
||||||
|
|
||||||
## Other Projects
|
|
||||||
|
|
||||||
This project is under construction. Look at [Pro Tools Text](https://github.com/iluvcapra/ProToolsText)
|
|
||||||
for a working solution at this time.
|
|
||||||
|
|||||||
20
docs/Makefile
Normal file
20
docs/Makefile
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
# Minimal makefile for Sphinx documentation
|
||||||
|
#
|
||||||
|
|
||||||
|
# You can set these variables from the command line, and also
|
||||||
|
# from the environment for the first two.
|
||||||
|
SPHINXOPTS ?=
|
||||||
|
SPHINXBUILD ?= sphinx-build
|
||||||
|
SOURCEDIR = source
|
||||||
|
BUILDDIR = build
|
||||||
|
|
||||||
|
# Put it first so that "make" without argument is like "make help".
|
||||||
|
help:
|
||||||
|
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||||
|
|
||||||
|
.PHONY: help Makefile
|
||||||
|
|
||||||
|
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||||
|
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||||
|
%: Makefile
|
||||||
|
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||||
77
docs/source/conf.py
Normal file
77
docs/source/conf.py
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
# Configuration file for the Sphinx documentation builder.
|
||||||
|
#
|
||||||
|
# For the full list of built-in configuration values, see the documentation:
|
||||||
|
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.abspath("../.."))
|
||||||
|
print(sys.path)
|
||||||
|
|
||||||
|
import ptulsconv
|
||||||
|
|
||||||
|
# -- Project information -----------------------------------------------------
|
||||||
|
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
||||||
|
|
||||||
|
project = 'ptulsconv'
|
||||||
|
# copyright = ptulsconv.__copyright__
|
||||||
|
# author = ptulsconv.__author__
|
||||||
|
release = ptulsconv.__version__
|
||||||
|
|
||||||
|
# -- General configuration ---------------------------------------------------
|
||||||
|
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
|
||||||
|
|
||||||
|
extensions = [
|
||||||
|
'sphinx.ext.autodoc',
|
||||||
|
'sphinx.ext.todo',
|
||||||
|
'sphinx.ext.coverage',
|
||||||
|
'sphinx.ext.viewcode',
|
||||||
|
'sphinx.ext.githubpages',
|
||||||
|
]
|
||||||
|
|
||||||
|
templates_path = ['_templates']
|
||||||
|
exclude_patterns = []
|
||||||
|
|
||||||
|
|
||||||
|
master_doc = 'index'
|
||||||
|
|
||||||
|
|
||||||
|
# -- Options for HTML output -------------------------------------------------
|
||||||
|
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
|
||||||
|
|
||||||
|
html_theme = 'sphinx_rtd_theme'
|
||||||
|
html_static_path = ['_static']
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
latex_documents = [
|
||||||
|
(master_doc, 'ptulsconv.tex', u'ptulsconv Documentation',
|
||||||
|
u'Jamie Hardt', 'manual'),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
# -- Options for Epub output -------------------------------------------------
|
||||||
|
|
||||||
|
# Bibliographic Dublin Core info.
|
||||||
|
epub_title = project
|
||||||
|
|
||||||
|
# The unique identifier of the text. This can be a ISBN number
|
||||||
|
# or the project homepage.
|
||||||
|
#
|
||||||
|
# epub_identifier = ''
|
||||||
|
|
||||||
|
# A unique identification for the text.
|
||||||
|
#
|
||||||
|
# epub_uid = ''
|
||||||
|
|
||||||
|
# A list of files that should not be packed into the epub file.
|
||||||
|
epub_exclude_files = ['search.html']
|
||||||
|
|
||||||
|
|
||||||
|
# -- Extension configuration -------------------------------------------------
|
||||||
|
|
||||||
|
# -- Options for todo extension ----------------------------------------------
|
||||||
|
|
||||||
|
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||||
|
todo_include_todos = True
|
||||||
7
docs/source/dev/contributing.rst
Normal file
7
docs/source/dev/contributing.rst
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
Contributing
|
||||||
|
============
|
||||||
|
|
||||||
|
Testing
|
||||||
|
-------
|
||||||
|
|
||||||
|
Before submitting PRs or patches, please make sure your branch passes all of the unit tests by running Pytest.
|
||||||
39
docs/source/dev/modules.rst
Normal file
39
docs/source/dev/modules.rst
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
Auxiliary and Helper Modules
|
||||||
|
============================
|
||||||
|
|
||||||
|
Commands Module
|
||||||
|
---------------
|
||||||
|
|
||||||
|
.. automodule:: ptulsconv.commands
|
||||||
|
:members:
|
||||||
|
|
||||||
|
|
||||||
|
Broadcast Timecode Module
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
.. automodule:: ptulsconv.broadcast_timecode
|
||||||
|
:members:
|
||||||
|
|
||||||
|
|
||||||
|
Footage Module
|
||||||
|
--------------
|
||||||
|
|
||||||
|
.. automodule:: ptulsconv.footage
|
||||||
|
:members:
|
||||||
|
|
||||||
|
|
||||||
|
Reporting Module
|
||||||
|
----------------
|
||||||
|
|
||||||
|
.. automodule:: ptulsconv.reporting
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
|
||||||
|
|
||||||
|
Validations Module
|
||||||
|
------------------
|
||||||
|
|
||||||
|
.. automodule:: ptulsconv.validations
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
|
|
||||||
9
docs/source/dev/parsing.rst
Normal file
9
docs/source/dev/parsing.rst
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
Parsing
|
||||||
|
=======
|
||||||
|
|
||||||
|
Docparser Classes
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
.. autoclass:: ptulsconv.docparser.adr_entity.ADRLine
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
23
docs/source/dev/theory.rst
Normal file
23
docs/source/dev/theory.rst
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
Theory of Operation
|
||||||
|
===================
|
||||||
|
|
||||||
|
Execution Flow When Producing "doc" Output
|
||||||
|
------------------------------------------
|
||||||
|
|
||||||
|
#. The command line argv is read in :py:func:`ptulsconv.__main__.main()`,
|
||||||
|
which calls :py:func:`ptulsconv.commands.convert()`
|
||||||
|
#. :func:`ptulsconv.commands.convert()` reads the input with
|
||||||
|
:func:`ptuslconv.docparser.doc_parser_visitor()`,
|
||||||
|
which uses the ``parsimonious`` library to parse the input into an abstract
|
||||||
|
syntax tree, which the parser visitor uses to convert into a
|
||||||
|
:class:`ptulsconv.docparser.doc_entity.SessionDescriptor`,
|
||||||
|
which structures all of the data in the session output.
|
||||||
|
#. The next action based on the output format. In the
|
||||||
|
case of the "doc" output format, it runs some validations
|
||||||
|
on the input, and calls :func:`ptulsconv.commands.generate_documents()`.
|
||||||
|
#. :func:`ptulsconv.commands.generate_documents()` creates the output folder, creates the
|
||||||
|
Continuity report with :func:`ptulsconv.pdf.continuity.output_continuity()` (this document
|
||||||
|
requires some special-casing), and at the tail calls...
|
||||||
|
#. :func:`ptulsconv.commands.create_adr_reports()`, which creates folders for
|
||||||
|
|
||||||
|
(FIXME finish this)
|
||||||
39
docs/source/index.rst
Normal file
39
docs/source/index.rst
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
.. ptulsconv documentation master file, created by
|
||||||
|
sphinx-quickstart on Fri Nov 18 10:40:33 2022.
|
||||||
|
You can adapt this file completely to your liking, but it should at least
|
||||||
|
contain the root `toctree` directive.
|
||||||
|
|
||||||
|
Welcome to ptulsconv's documentation!
|
||||||
|
=====================================
|
||||||
|
|
||||||
|
`ptulsconv` is a tool for converting Pro Tools text exports into PDF
|
||||||
|
reports for ADR spotting. It can also be used for converting text
|
||||||
|
exports into JSON documents for processing by other applications.
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:numbered:
|
||||||
|
:maxdepth: 2
|
||||||
|
:caption: User Documentation
|
||||||
|
|
||||||
|
user/quickstart
|
||||||
|
user/tagging
|
||||||
|
user/for_adr
|
||||||
|
user/cli_reference
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:numbered:
|
||||||
|
:maxdepth: 1
|
||||||
|
:caption: Developer Documentation
|
||||||
|
|
||||||
|
dev/contributing
|
||||||
|
dev/theory
|
||||||
|
dev/parsing
|
||||||
|
dev/modules
|
||||||
|
|
||||||
|
|
||||||
|
Indices and tables
|
||||||
|
==================
|
||||||
|
|
||||||
|
* :ref:`modindex`
|
||||||
|
* :ref:`genindex`
|
||||||
|
* :ref:`search`
|
||||||
74
docs/source/user/cli_reference.rst
Normal file
74
docs/source/user/cli_reference.rst
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
Command-Line Reference
|
||||||
|
======================
|
||||||
|
|
||||||
|
Usage Form
|
||||||
|
-----------
|
||||||
|
|
||||||
|
Invocations of ptulsconv take the following form:
|
||||||
|
|
||||||
|
ptulsconv [options] IN_FILE
|
||||||
|
|
||||||
|
|
||||||
|
Flags
|
||||||
|
-----
|
||||||
|
|
||||||
|
`-h`, `--help`
|
||||||
|
Show the help message.
|
||||||
|
|
||||||
|
`-f FMT`, `--format=FMT`
|
||||||
|
Select the output format. By default this is `doc`, which will
|
||||||
|
generate :ref:`ADR reports<adr-reports>`.
|
||||||
|
|
||||||
|
The :ref:`other available options<alt-output-options>`
|
||||||
|
are `raw` and `tagged`.
|
||||||
|
|
||||||
|
|
||||||
|
Informational Options
|
||||||
|
"""""""""""""""""""""
|
||||||
|
|
||||||
|
These options display information and exit without processing any
|
||||||
|
input documents.
|
||||||
|
|
||||||
|
`--show-formats`
|
||||||
|
Display information about available output formats.
|
||||||
|
|
||||||
|
`--show-available-tags`
|
||||||
|
Display information about tags that are used by the
|
||||||
|
report generator.
|
||||||
|
|
||||||
|
|
||||||
|
.. _alt-output-options:
|
||||||
|
|
||||||
|
Alternate Output Formats
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
.. _raw-output:
|
||||||
|
|
||||||
|
`raw` Output
|
||||||
|
""""""""""""
|
||||||
|
|
||||||
|
The "raw" output format is a JSON document of the parsed input data.
|
||||||
|
|
||||||
|
The document is a top-level dictionary with keys for the main sections of the text export: `header`,
|
||||||
|
`files`, `clips`, `plugins`, `tracks` and `markers`, and the values for these are a list of section
|
||||||
|
entries, or a dictionary of values, in the case of `header`.
|
||||||
|
|
||||||
|
The text values of each record and field in the text export is read and output verbatim, no further
|
||||||
|
processing is done.
|
||||||
|
|
||||||
|
.. _tagged-output:
|
||||||
|
|
||||||
|
`tagged` Output
|
||||||
|
"""""""""""""""
|
||||||
|
|
||||||
|
The "tagged" output format is also a JSON document based on the parsed input data, after the additional
|
||||||
|
step of processing all of the :ref:`tags<tags>` in the document.
|
||||||
|
|
||||||
|
The document is a top-level array of dictionaries, one for each recognized ADR spotting clip in the
|
||||||
|
session. Each dictionary has a `clip_name`, `track_name` and `session_name` key, a `tags` key that
|
||||||
|
contains a dictionary of every parsed tag (after applying tags from all tracks and markers), and a
|
||||||
|
`start` and `end` key. The `start` and `end` key contain the parsed timecode representations of these
|
||||||
|
values in rational number form, as a dictionary with `numerator` and `denominator` keys.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
129
docs/source/user/for_adr.rst
Normal file
129
docs/source/user/for_adr.rst
Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
.. _adr-reports:
|
||||||
|
|
||||||
|
`ptulsconv` For ADR Report Generation
|
||||||
|
=====================================
|
||||||
|
|
||||||
|
Reports Created by the ADR Report Generator
|
||||||
|
-------------------------------------------
|
||||||
|
|
||||||
|
(FIXME: write this)
|
||||||
|
|
||||||
|
|
||||||
|
Tags Used by the ADR Report Generator
|
||||||
|
-------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
Project-Level Tags
|
||||||
|
""""""""""""""""""
|
||||||
|
|
||||||
|
It usually makes sense to place these either in the session name,
|
||||||
|
or on a :ref:`marker <tag-marker>` at the beginning of the session, so it will apply to
|
||||||
|
all of the clips in the session.
|
||||||
|
|
||||||
|
`Title`
|
||||||
|
The title of the project. This will appear at the top
|
||||||
|
of every report.
|
||||||
|
|
||||||
|
.. warning::
|
||||||
|
`ptulsconv` at this time only supports one title per export. If you attempt to
|
||||||
|
use multiple titles in one export it will fail.
|
||||||
|
|
||||||
|
`Supv`
|
||||||
|
The supervisor of the project. This appears at the bottom
|
||||||
|
of every report.
|
||||||
|
|
||||||
|
`Client`
|
||||||
|
The client of the project. This will often appear under the
|
||||||
|
title on every report.
|
||||||
|
|
||||||
|
`Spot`
|
||||||
|
The date or version number of the spotting report.
|
||||||
|
|
||||||
|
|
||||||
|
Time Range Tags
|
||||||
|
"""""""""""""""
|
||||||
|
|
||||||
|
All of these tags can be set to different values on each clip, but
|
||||||
|
it often makes sense to use these tags in a :ref:`time range<tag-range>`.
|
||||||
|
|
||||||
|
`Sc`
|
||||||
|
The scene description. This appears on the continuity report
|
||||||
|
and is used in the Director's logs.
|
||||||
|
|
||||||
|
`Ver`
|
||||||
|
The picture version. This appears beside the spot timecodes
|
||||||
|
on most reports.
|
||||||
|
|
||||||
|
`Reel`
|
||||||
|
The reel. This appears beside the spot timecodes
|
||||||
|
on most reports and is used to summarize line totals on the
|
||||||
|
line count report.
|
||||||
|
|
||||||
|
|
||||||
|
Line tags
|
||||||
|
"""""""""
|
||||||
|
|
||||||
|
`P`
|
||||||
|
Priority.
|
||||||
|
|
||||||
|
`QN`
|
||||||
|
Cue number. This appears on all reports.
|
||||||
|
|
||||||
|
.. warning::
|
||||||
|
`ptulsconv` will verify that all cue numbers in a given title are unique.
|
||||||
|
|
||||||
|
All lines must have a cue number in order to generate reports; if any lines
|
||||||
|
do not have a cue number set, `ptulsconv` will fail.
|
||||||
|
|
||||||
|
|
||||||
|
`CN`
|
||||||
|
Character number. This is used to collate character records
|
||||||
|
and will appear on the line count and in character-collated
|
||||||
|
reports.
|
||||||
|
|
||||||
|
`Char`
|
||||||
|
Character name. By default, a clip will set this to the
|
||||||
|
name of the track it appears on, but the track name can be
|
||||||
|
overridden here.
|
||||||
|
|
||||||
|
`Actor`
|
||||||
|
Actor name.
|
||||||
|
|
||||||
|
`Line`
|
||||||
|
The prompt to appear for this ADR line. By default, this
|
||||||
|
will be whatever text appears in a clip name prior to the first
|
||||||
|
tag.
|
||||||
|
|
||||||
|
`R`
|
||||||
|
Reason.
|
||||||
|
|
||||||
|
`Mins`
|
||||||
|
Time budget for this line, in minutes. This is used in the
|
||||||
|
line count report to give estimated times for each character. This
|
||||||
|
can be set for the entire project (with a :ref:`marker <tag-marker>`), or for individual
|
||||||
|
actors (with a tag in the :ref:`track comments<tag-track>`), or can be set for
|
||||||
|
individual lines to override these.
|
||||||
|
|
||||||
|
`Shot`
|
||||||
|
Shot. A date or other description indicating the line has been
|
||||||
|
recorded.
|
||||||
|
|
||||||
|
|
||||||
|
Boolean-valued ADR Tag Fields
|
||||||
|
"""""""""""""""""""""""""""""
|
||||||
|
|
||||||
|
`EFF`
|
||||||
|
Effort. Lines with this tag are subtotaled in the line count report.
|
||||||
|
|
||||||
|
`TV`
|
||||||
|
TV line. Lines with this tag are subtotaled in the line count report.
|
||||||
|
|
||||||
|
`TBW`
|
||||||
|
To be written.
|
||||||
|
|
||||||
|
`ADLIB`
|
||||||
|
Ad-lib.
|
||||||
|
|
||||||
|
`OPT`
|
||||||
|
Optional. Lines with this tag are subtotaled in the line count report.
|
||||||
|
|
||||||
91
docs/source/user/quickstart.rst
Normal file
91
docs/source/user/quickstart.rst
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
Quick Start
|
||||||
|
===========
|
||||||
|
|
||||||
|
The workflow for creating ADR reports in `ptulsconv` is similar to other ADR
|
||||||
|
spotting programs: spot ADR lines in Pro Tools with clips using a special
|
||||||
|
code to take notes, export the tracks as text and then run the program.
|
||||||
|
|
||||||
|
|
||||||
|
Step 1: Use Pro Tools to Spot ADR Lines
|
||||||
|
---------------------------------------
|
||||||
|
|
||||||
|
`ptulsconv` can be used to spot ADR lines similarly to other programs.
|
||||||
|
|
||||||
|
#. Create a new Pro Tools session, name this session after your project.
|
||||||
|
#. Create new tracks, one for each character. Name each track after a
|
||||||
|
character.
|
||||||
|
#. On each track, create a clip group (or edit in some audio) at the time you
|
||||||
|
would like an ADR line to appear in the report. Name the clip after the
|
||||||
|
dialogue you are replacing at that time.
|
||||||
|
|
||||||
|
|
||||||
|
Step 2: Add More Information to Your Spots
|
||||||
|
------------------------------------------
|
||||||
|
|
||||||
|
Clips, tracks and markers in your session can contain additional information
|
||||||
|
to make your ADR reports more complete and useful. You add this information
|
||||||
|
with *tagging*.
|
||||||
|
|
||||||
|
* Every ADR clip must have a unique cue number. After the name of each clip,
|
||||||
|
add the letters "$QN=" and then a unique number (any combination of letters
|
||||||
|
or numbers that don't contain a space). You can type these yourself or add
|
||||||
|
them with batch-renaming when you're done spotting.
|
||||||
|
* ADR spots should usually have a reason indicated, so you can remember exactly
|
||||||
|
why you're replacing a particular line. Do this by adding the text "{R="
|
||||||
|
to your clip names after the prompt and then some short text describing the
|
||||||
|
reason, and then a closing "}". You can type anything, including spaces.
|
||||||
|
* If a line is a TV cover line, you can add the text "[TV]" to the end.
|
||||||
|
|
||||||
|
So for example, some ADR spot's clip name might look like:
|
||||||
|
|
||||||
|
Get to the ladder! {R=Noise} $QN=J1001
|
||||||
|
Forget your feelings! {R=TV Cover} $QN=J1002 [TV]
|
||||||
|
|
||||||
|
These tags can appear in any order.
|
||||||
|
|
||||||
|
* You can add the name of an actor to a character's track, so this information
|
||||||
|
will appear on your reports. In the track name, or in the track comments,
|
||||||
|
type "{Actor=xxx}" replacing the xxx with the actor's name.
|
||||||
|
* Characters need to have a number (perhaps from the cast list) to express how
|
||||||
|
they should be collated. Add "$CN=xxx" with a unique number to each track (or
|
||||||
|
the track's comments.)
|
||||||
|
* Set the scene for each line with markers. Create a marker at the beginning of
|
||||||
|
a scene and make its name "{Sc=xxx}", replacing the xxx with the scene
|
||||||
|
number and name.
|
||||||
|
|
||||||
|
|
||||||
|
Step 3: Export Tracks from Pro Tools as a Text File
|
||||||
|
---------------------------------------------------
|
||||||
|
|
||||||
|
Export the file as a UTF-8 and be sure to include clips and markers. Export
|
||||||
|
using the Timecode time format.
|
||||||
|
|
||||||
|
Do not export crossfades.
|
||||||
|
|
||||||
|
|
||||||
|
Step 4: Run `ptulsconv` on the Text Export
|
||||||
|
------------------------------------------
|
||||||
|
|
||||||
|
In your Terminal, run the following command:
|
||||||
|
|
||||||
|
ptulsconv path/to/your/TEXT_EXPORT.txt
|
||||||
|
|
||||||
|
`ptulsconv` will create a folder named "Title_CURRENT_DATE", and within that
|
||||||
|
folder it will create several PDFs and folders:
|
||||||
|
|
||||||
|
- "TITLE ADR Report" 📄 a PDF tabular report of every ADR line you've spotted.
|
||||||
|
- "TITLE Continuity" 📄 a PDF listing every scene you have indicated and its
|
||||||
|
timecode.
|
||||||
|
- "TITLE Line Count" 📄 a PDF tabular report giving line counts by reel, and the
|
||||||
|
time budget per character and reel (if provided in the tagging).
|
||||||
|
- "CSV/" a folder containing CSV documents of all spotted ADR, grouped by
|
||||||
|
character and reel.
|
||||||
|
- "Director Logs/" 📁 a folder containing PDF tabular reports, like the overall
|
||||||
|
report except grouped by character.
|
||||||
|
- "Supervisor Logs/" 📁 a folder containing PDF reports, one page per line,
|
||||||
|
designed for note taking during a session, particularly on an iPad.
|
||||||
|
- "Talent Scripts/" 📁 a folder containing PDF scripts or sides, with the timecode
|
||||||
|
and prompts for each line, grouped by character but with most other
|
||||||
|
information suppressed.
|
||||||
|
|
||||||
|
|
||||||
130
docs/source/user/tagging.rst
Normal file
130
docs/source/user/tagging.rst
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
.. _tags:
|
||||||
|
|
||||||
|
Tagging
|
||||||
|
=======
|
||||||
|
|
||||||
|
Tags are used to add additional data to a clip in an organized way. The
|
||||||
|
tagging system in `ptulsconv` is flexible and can be used to add
|
||||||
|
any kind of extra data to a clip.
|
||||||
|
|
||||||
|
Fields in Clip Names
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
Track names, track comments, and clip names can also contain meta-tags, or
|
||||||
|
"fields," to add additional columns to the output. Thus, if a clip has the
|
||||||
|
name::
|
||||||
|
|
||||||
|
`Fireworks explosion {note=Replace for final} $V=1 [FX] [DESIGN]`
|
||||||
|
|
||||||
|
The row output for this clip will contain columns for the values:
|
||||||
|
|
||||||
|
|
||||||
|
+---------------------+-------------------+---+----+--------+
|
||||||
|
| Clip Name | note | V | FX | DESIGN |
|
||||||
|
+=====================+===================+===+====+========+
|
||||||
|
| Fireworks explosion | Replace for final | 1 | FX | DESIGN |
|
||||||
|
+---------------------+-------------------+---+----+--------+
|
||||||
|
|
||||||
|
|
||||||
|
These fields can be defined in the clip name in three ways:
|
||||||
|
* `$NAME=VALUE` creates a field named `NAME` with a one-word value `VALUE`.
|
||||||
|
* `{NAME=VALUE}` creates a field named `NAME` with the value `VALUE`. `VALUE`
|
||||||
|
in this case may contain spaces or any character up to the closing bracket.
|
||||||
|
* `[NAME]` creates a field named `NAME` with a value `NAME`. This can be used
|
||||||
|
to create a boolean-valued field; in the output, clips with the field
|
||||||
|
will have it, and clips without will have the column with an empty value.
|
||||||
|
|
||||||
|
For example, if three clips are named::
|
||||||
|
|
||||||
|
`"Squad fifty-one, what is your status?" [FUTZ] {Ch=Dispatcher} [ADR]`
|
||||||
|
|
||||||
|
`"We are ten-eight at Rampart Hospital." {Ch=Gage} [ADR]`
|
||||||
|
|
||||||
|
`(1M) FC callouts rescuing trapped survivors. {Ch=Group} $QN=1001 [GROUP]`
|
||||||
|
|
||||||
|
The output will contain the range:
|
||||||
|
|
||||||
|
|
||||||
|
+----------------------------------------------+------------+------+-----+------+-------+
|
||||||
|
| Clip Name | Ch | FUTZ | ADR | QN | GROUP |
|
||||||
|
+==============================================+============+======+=====+======+=======+
|
||||||
|
| "Squad fifty-one, what is your status?" | Dispatcher | FUTZ | ADR | | |
|
||||||
|
+----------------------------------------------+------------+------+-----+------+-------+
|
||||||
|
| "We are ten-eight at Rampart Hospital." | Gage | | ADR | | |
|
||||||
|
+----------------------------------------------+------------+------+-----+------+-------+
|
||||||
|
| (1M) FC callouts rescuing trapped survivors. | Group | | | 1001 | GROUP |
|
||||||
|
+----------------------------------------------+------------+------+-----+------+-------+
|
||||||
|
|
||||||
|
|
||||||
|
.. _tag-track:
|
||||||
|
.. _tag-marker:
|
||||||
|
|
||||||
|
Fields in Track Names and Markers
|
||||||
|
---------------------------------
|
||||||
|
|
||||||
|
Fields set in track names, and in track comments, will be applied to *each*
|
||||||
|
clip on that track. If a track comment contains the text `{Dept=Foley}` for
|
||||||
|
example, every clip on that track will have a "Foley" value in a "Dept" column.
|
||||||
|
|
||||||
|
Likewise, fields set on the session name will apply to all clips in the session.
|
||||||
|
|
||||||
|
Fields set in markers, and in marker comments, will be applied to all clips
|
||||||
|
whose finish is *after* that marker. Fields in markers are applied cumulatively
|
||||||
|
from beginning to end of the session. The latest marker applying to a clip has
|
||||||
|
precedence, so if one marker comes after the other, but both define a field, the
|
||||||
|
value in the later marker takes precedence.
|
||||||
|
|
||||||
|
An important note here is that, always, fields set on the clip name have the
|
||||||
|
highest precedence. If a field is set in a clip name, and the same field is set on the
|
||||||
|
track, the value set on the clip will prevail.
|
||||||
|
|
||||||
|
|
||||||
|
.. _tag-range:
|
||||||
|
|
||||||
|
Apply Fields to a Time Range of Clips
|
||||||
|
-------------------------------------
|
||||||
|
|
||||||
|
A clip name beginning with "@" will not be included in the output, but its
|
||||||
|
fields will be applied to clips within its time range on lower tracks.
|
||||||
|
|
||||||
|
If track 1 has a clip named `@ {Sc=1- The House}`, any clips beginning within
|
||||||
|
that range on lower tracks will have a field `Sc` with that value.
|
||||||
|
|
||||||
|
|
||||||
|
Combining Clips with Long Names or Many Tags
|
||||||
|
--------------------------------------------
|
||||||
|
|
||||||
|
A clip name beginning with `&` will have its parsed clip name appended to the
|
||||||
|
preceding cue, and the fields of following cues will be applied, earlier clips
|
||||||
|
having precedence. The clips need not be touching, and the clips will be
|
||||||
|
combined into a single row of the output. The start time of the first clip will
|
||||||
|
become the start time of the row, and the finish time of the last clip will
|
||||||
|
become the finish time of the row.
|
||||||
|
|
||||||
|
|
||||||
|
Setting Document Options
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Document options are not yet implemented.
|
||||||
|
|
||||||
|
A clip beginning with `!` sends a command to `ptulsconv`. These commands can
|
||||||
|
appear anywhere in the document and apply to the entire document. Commands are
|
||||||
|
a list of words.
|
||||||
|
|
||||||
|
The following commands are available:
|
||||||
|
|
||||||
|
page $SIZE=`(letter|legal|a4)`
|
||||||
|
Sets the PDF page size for the output.
|
||||||
|
|
||||||
|
font {NAME=`name`} {PATH=`path`}
|
||||||
|
Sets the primary font for the output.
|
||||||
|
|
||||||
|
sub `replacement text` {FOR=`text_to_replace`} {IN=`tag`}
|
||||||
|
Declares a substitution. Wherever text_to_replace is encountered in the
|
||||||
|
document it will be replaced with "replacement text".
|
||||||
|
|
||||||
|
If `tag` is set, this substitution will only be applied to the values of
|
||||||
|
that tag.
|
||||||
|
|
||||||
|
|
||||||
Binary file not shown.
BIN
example/Charade/Charade.ptx
Normal file
BIN
example/Charade/Charade.ptx
Normal file
Binary file not shown.
170
example/Charade/Charade.txt
Normal file
170
example/Charade/Charade.txt
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
SESSION NAME: Charade
|
||||||
|
SAMPLE RATE: 48000.000000
|
||||||
|
BIT DEPTH: 24-bit
|
||||||
|
SESSION START TIMECODE: 00:59:00:00
|
||||||
|
TIMECODE FORMAT: 25 Frame
|
||||||
|
# OF AUDIO TRACKS: 13
|
||||||
|
# OF AUDIO CLIPS: 2
|
||||||
|
# OF AUDIO FILES: 1
|
||||||
|
|
||||||
|
|
||||||
|
T R A C K L I S T I N G
|
||||||
|
TRACK NAME: Scenes
|
||||||
|
COMMENTS:
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 @ {Sc=Logos} 01:00:00:00 01:00:17:21 00:00:17:21 Unmuted
|
||||||
|
1 2 @ {Sc=1-2 Ext. French Countryside - Dusk} 01:00:17:21 01:01:00:24 00:00:43:03 Unmuted
|
||||||
|
1 3 @ {Sc=Main Titles} 01:01:00:24 01:03:04:02 00:02:03:02 Unmuted
|
||||||
|
1 4 @ {Sc=6 Ext. Megve - Day} 01:03:04:02 01:04:29:05 00:01:25:02 Unmuted
|
||||||
|
1 5 @ {Sc=8A Swimming Pool - Onto Terrace} 01:04:29:05 01:07:01:14 00:02:32:09 Unmuted
|
||||||
|
1 6 @ {Sc=11-12 Ext. Ave Foch - Lampert Apartment} 01:07:01:14 01:07:28:22 00:00:27:08 Unmuted
|
||||||
|
1 7 @ {Sc=15 Int. Apartment Landing} 01:07:28:22 01:07:39:16 00:00:10:19 Unmuted
|
||||||
|
1 8 @ {Sc=17 In. Lampert House - Empty} 01:07:39:16 01:08:57:21 00:01:18:05 Unmuted
|
||||||
|
1 9 @ {Sc=25 Int. Morgue} 01:08:57:21 01:09:38:23 00:00:41:02 Unmuted
|
||||||
|
1 10 @ {Sc=28 Int. Grandpierre's Office} 01:09:38:23 01:13:23:16 00:03:44:18 Unmuted
|
||||||
|
1 11 @ {Sc=36 Int. Lampert Apartment - Night} 01:13:23:16 01:15:18:13 00:01:54:21 Unmuted
|
||||||
|
1 12 @ {Sc=38A Int. Funeral Chapel - Day} 01:15:18:13 01:18:50:20 00:03:32:07 Unmuted
|
||||||
|
1 13 @ {Sc=63 Ext/Int American Embassy - Establishing} 01:18:50:20 01:19:09:20 00:00:19:00 Unmuted
|
||||||
|
1 14 @ {Sc=70 Int. Barholomew's Office} 01:19:09:20 01:25:12:07 00:06:02:12 Unmuted
|
||||||
|
1 15 @ {Sc=77 Ext. Esplanade des Champs-Elysées} 01:25:12:07 01:26:53:03 00:01:40:20 Unmuted
|
||||||
|
1 16 @ {Sc=88 Int. Nightclub - Night} 01:26:53:03 01:30:07:06 00:03:14:03 Unmuted
|
||||||
|
1 17 @ {Sc=102 Int. Nightclub Lounge - Night} 01:30:07:06 01:31:49:18 00:01:42:12 Unmuted
|
||||||
|
1 18 @ {Sc=108 Int. Hotel Lobby} 01:31:49:18 01:32:44:17 00:00:54:23 Unmuted
|
||||||
|
1 19 @ {Sc=109 Int. Elevator} 01:32:44:17 01:33:05:20 00:00:21:03 Unmuted
|
||||||
|
1 20 @ {Sc=110 Int. Hotel Third Landing} 01:33:05:20 01:33:55:07 00:00:49:12 Unmuted
|
||||||
|
1 21 @ {Sc=112 Int. Reggie's Room - Night} 01:33:55:07 01:34:23:00 00:00:27:17 Unmuted
|
||||||
|
1 22 @ {Sc=116 Int. Hotel Corridor} 01:34:23:00 01:34:46:16 00:00:23:16 Unmuted
|
||||||
|
1 23 @ {Sc=120 Int. Reggie's Room - Night} 01:34:46:16 01:35:25:17 00:00:39:01 Unmuted
|
||||||
|
1 24 @ {Sc=122 Ext. Hotel Window - Night} 01:35:25:17 01:36:49:04 00:01:23:11 Unmuted
|
||||||
|
1 25 @ {Sc=132 Int. Gideon's Hotel Room - Night} 01:36:49:04 01:38:13:08 00:01:24:04 Unmuted
|
||||||
|
1 26 @ {Sc=134 Int. Reggie's Room - Night} 01:38:13:08 01:38:29:16 00:00:16:08 Unmuted
|
||||||
|
1 27 @ {Sc=134 Int. Reggie's Room - Night} 01:38:29:16 01:40:18:07 00:01:48:16 Unmuted
|
||||||
|
1 28 @ {Sc=139/140 Int. Hotel Room/Phone Booth Intercut} 01:40:18:07 01:40:54:18 00:00:36:11 Unmuted
|
||||||
|
1 29 @ {Sc=142 Int. Reggie's Room - Night} 01:40:54:18 01:41:46:15 00:00:51:22 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: PETER
|
||||||
|
COMMENTS: $CN=1 {Actor=Cary Grant} $Mins=5
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 "Does this belong to you?" (alt for "Does HE belong to you?" {R=Replace Line} $QN=P101 01:05:10:16 01:05:11:19 00:00:01:03 Unmuted
|
||||||
|
1 2 "Well I telephones by nobody answered." {R=Off mic} $QN=P102 01:13:47:24 01:13:49:19 00:00:01:19 Unmuted
|
||||||
|
1 3 "It's in all the afternoon papers." {R=Replace Line} {Note=Adding "ALL"} $QN=P103 01:13:59:21 01:14:01:11 00:00:01:14 Unmuted
|
||||||
|
1 4 "Here you are." {R=Replace temp} $QN=P104 01:33:08:00 01:33:09:01 00:00:01:01 Unmuted
|
||||||
|
1 5 "On the street where you live..." {R=Replace temp} $QN=P105 01:33:10:09 01:33:12:03 00:00:01:19 Unmuted
|
||||||
|
1 6 (adlib response to REGGIE) {R=Added/Replaces sync} [ADLIB] $QN=P106 01:34:27:10 01:34:29:03 00:00:01:18 Unmuted
|
||||||
|
1 7 (effort add PUNCH efforts, react to GETTING PUNCHED) {R=Added} [EFF] $QN=P107 01:34:31:11 01:34:41:23 00:00:10:12 Unmuted
|
||||||
|
1 8 "… And close these windows after me." {R=Replace temp} $QN=P108 01:35:19:16 01:35:21:11 00:00:01:20 Unmuted
|
||||||
|
1 9 (effort LEAPING to balcony) [EFF] {R=Added} $QN=P109 01:36:13:02 01:36:15:06 00:00:02:04 Unmuted
|
||||||
|
1 10 "It's me, Peter." {R=Performance} {Note=More voice, call through door} $QN=P110 01:38:32:01 01:38:33:03 00:00:01:02 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: REGGIE
|
||||||
|
COMMENTS: $CN=2 {Actor=Audrey Hepburn} $Mins=5
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 (react to getting squirted by gun) {R=Added} [EFF] $QN=R101 01:03:40:02 01:03:41:19 00:00:01:16 Unmuted
|
||||||
|
1 2 "Look I admit I came to Paris to escape American Provincial but that doesn't mean I'm ready for French Traditional!" {R=Clarity} {Note=Low Priority} $QN=R102 01:04:45:22 01:04:50:15 00:00:04:18 Unmuted
|
||||||
|
1 3 "Oh, no— you see, I don't really love him." {R=Clarity} $QN=R103 01:06:14:17 01:06:16:15 00:00:01:22 Unmuted
|
||||||
|
1 4 (reactions to empty house, turning open cupboards etc.) {R=Added} [EFF] $QN=R104 01:07:41:13 01:08:41:19 00:01:00:06 Unmuted
|
||||||
|
1 5 (effort RUN INTO Grandpierre) {R=Added} [EFF] $QN=R105 01:08:41:19 01:08:45:12 00:00:03:17 Unmuted
|
||||||
|
1 6 "I know, I'm sorry." {R=Replace Sync} {Note=More hesitant} $QN=R106 01:10:36:00 01:10:38:06 00:00:02:06 Unmuted
|
||||||
|
1 7 "Misses Lampert, Misses Charles Lampert." {R=Clarity} {Note=Prounonce P of Lampert harder} $QN=R107 01:19:30:22 01:19:32:18 00:00:01:21 Unmuted
|
||||||
|
1 8 "Mister Bartholomew this is Regina Lampert— Mister Bartholomew I just saw one of those me—" {R=Clarity} $QN=R108 01:30:24:12 01:30:28:16 00:00:04:04 Unmuted
|
||||||
|
1 9 "Where?" {R=Replace temp} $QN=R109 01:33:09:07 01:33:09:24 00:00:00:16 Unmuted
|
||||||
|
1 10 "Peter? … Peter? … Peter are you alright?" {R=More sotto voce} $QN=R110 01:34:53:10 01:35:01:02 00:00:07:16 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: BARTHOLOMEW
|
||||||
|
COMMENTS: $CN=3 {Actor=Walter Matthau} $Mins=8
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 "Is there anything wrong, Miss Tompkins?" {R=Replace offscreen} $QN=B101 01:19:17:07 01:19:19:07 00:00:02:00 Unmuted
|
||||||
|
1 2 "Oh yes, uh, please— uh come in, Misses Lampert." {R=Clarity} {Note=Harder P on Lampert} $QN=B102 01:19:33:02 01:19:37:13 00:00:04:11 Unmuted
|
||||||
|
1 3 "You're Charles Lampert's widow, yes?" {R=Clarity} $QN=B103 01:20:03:06 01:20:04:22 00:00:01:16 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: TEX
|
||||||
|
COMMENTS: $CN=4 {Actor=James Coburn} $Mins=5
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 "But if you do find that money…" {R=Accent} $QN=T101 01:37:50:08 01:37:52:15 00:00:02:07 Unmuted
|
||||||
|
1 2 "You ain't gonna forget to tell your buddies about it are ya?" {R=Accent} $QN=T102 01:37:53:15 01:37:55:24 00:00:02:08 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: SCOBIE
|
||||||
|
COMMENTS: $CN=5 {Actor=George Kennedy} $Mins=5
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 (effort HEAVY BREATHING) {R=Added} [EFF] $QN=SC101 01:34:05:10 01:34:15:04 00:00:09:18 Unmuted
|
||||||
|
1 2 (effort add PUNCH efforts, react to GETTING PUNCHED) {R=Added} [EFF] $QN=SC102 01:34:31:11 01:34:41:23 00:00:10:12 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: SYLVIE
|
||||||
|
COMMENTS: $CN=6 {Actor=Dominique Minot} $Mins=5
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 "It is infuriating that your unhappiness does not turn to fat!" {R=Accent} $QN=SY101 01:04:25:08 01:04:28:19 00:00:03:11 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: GIDEON
|
||||||
|
COMMENTS: $CN=7 {Actor=Ned Glass} $Mins=5
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 (effort) "OWWW!" (kicked in shin) {R=Added} [EFF] $QN=GD101 01:29:55:21 01:29:58:16 00:00:02:19 Unmuted
|
||||||
|
1 2 "Eh" (sotto/closed-mouth reaction) {R=Added} [ADLIB] $QN=GD102 01:38:08:16 01:38:10:07 00:00:01:16 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: JEAN-LOUIS
|
||||||
|
COMMENTS: $CN=8m {Actor=Thomas Chelimsky} $Mins=5
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 "When you get your divorce, are you going back to America?" {R=Revioce} $QN=JL101 01:07:14:07 01:07:17:07 00:00:03:00 Unmuted
|
||||||
|
1 2 "Yes, of course, but if you went back and wrote me a letter—" {R=Revoice} $QN=JL102 01:07:18:20 01:07:21:18 00:00:02:23 Unmuted
|
||||||
|
1 3 "Okay." {R=Revoice} $QN=JL103 01:07:24:13 01:07:25:01 00:00:00:13 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Group
|
||||||
|
COMMENTS: $CN=99g {Char=Group} {Actor=Per LG} $Mins=3
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 ALL (Pool walla, FC "Whoo!" on man diving.) $QN=G101 01:04:29:05 01:04:43:09 00:00:14:04 Unmuted
|
||||||
|
1 2 (1M) "Madame" / "Miss" / "Merci" {R=Replace on-screen} $QN=G102 01:07:35:23 01:07:36:06 00:00:00:07 Unmuted
|
||||||
|
1 3 "D'accord" {R=Replace Futz} $QN=G103 01:10:47:20 01:10:48:08 00:00:00:13 Unmuted
|
||||||
|
1 4 (ALL KIDS) React to Punch and Judy Show, laughter bursts $QN=G104 01:25:12:07 01:25:23:22 00:00:11:15 Unmuted
|
||||||
|
1 5 (ALL) Laugh! Prelap cut $QN=G105 01:25:33:18 01:25:38:10 00:00:04:17 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Group.dup1
|
||||||
|
COMMENTS: $CN=99g {Char=Group} {Actor=Per LG} $Mins=3
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 (2M 2F) Detail reaction to show $QN=G106 01:25:14:03 01:25:15:23 00:00:01:20 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Group.dup2
|
||||||
|
COMMENTS: $CN=99g {Char=Group} {Actor=Per LG} $Mins=3
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 (1M) Boy reacts to show [ADLIB] [TBW] $QN=G107 01:25:15:21 01:25:18:12 00:00:02:16 Unmuted
|
||||||
|
1 2 (1M) Pointing boy $QN=G108 01:25:20:02 01:25:22:16 00:00:02:14 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
M A R K E R S L I S T I N G
|
||||||
|
# LOCATION TIME REFERENCE UNITS NAME COMMENTS
|
||||||
|
1 01:00:00:00 2880000 Samples {Title=Charade} {Client=Stanley Donen Films/Universal} {Supv=Allan Morrison} {Spot=2021-0520} $Reel=R1 [ADR]
|
||||||
|
2 01:18:50:20 57159360 Samples $Reel=R2
|
||||||
|
3 01:36:49:04 108919680 Samples $Reel=R3
|
||||||
@@ -1,7 +1,9 @@
|
|||||||
from .ptuls_grammar import protools_text_export_grammar
|
"""
|
||||||
from .ptuls_parser_visitor import DictionaryParserVisitor
|
Parse and convert Pro Tools text exports
|
||||||
from .transformations import TimecodeInterpreter
|
"""
|
||||||
|
|
||||||
__version__ = '0.0.2'
|
__version__ = '2.0.0'
|
||||||
__author__ = 'Jamie Hardt'
|
__author__ = 'Jamie Hardt'
|
||||||
__license__ = 'MIT'
|
__license__ = 'MIT'
|
||||||
|
__copyright__ = "%s %s (c) 2023 %s. All rights reserved." \
|
||||||
|
% (__name__, __version__, __author__)
|
||||||
|
|||||||
@@ -1,36 +1,120 @@
|
|||||||
from ptulsconv.commands import convert, dump_field_map
|
from optparse import OptionParser, OptionGroup
|
||||||
from optparse import OptionParser
|
import datetime
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
from ptulsconv import __name__, __copyright__
|
||||||
|
from ptulsconv.commands import convert
|
||||||
|
from ptulsconv.reporting import print_status_style, \
|
||||||
|
print_banner_style, print_section_header_style, \
|
||||||
|
print_fatal_error
|
||||||
|
|
||||||
|
|
||||||
|
def dump_field_map(output=sys.stdout):
    """Write the tag-mapping rules for each report entity type to *output*.

    Prints the rules for :class:`GenericEvent` first, then :class:`ADRLine`.
    Imports are deferred to call time to keep module import lightweight.
    """
    from ptulsconv.docparser.tag_mapping import TagMapping
    from ptulsconv.docparser.adr_entity import ADRLine, GenericEvent

    # Order matters: generic-event rules are documented before ADR-line rules.
    for entity_type in (GenericEvent, ADRLine):
        TagMapping.print_rules(entity_type, output=output)
|
||||||
|
|
||||||
|
|
||||||
|
def dump_formats():
|
||||||
|
print_section_header_style("`raw` format:")
|
||||||
|
sys.stderr.write("A JSON document of the parsed Pro Tools export.\n")
|
||||||
|
print_section_header_style("`tagged` Format:")
|
||||||
|
sys.stderr.write(
|
||||||
|
"A JSON document containing one record for each clip, with\n"
|
||||||
|
"all tags parsed and all tagging rules applied. \n")
|
||||||
|
print_section_header_style("`doc` format:")
|
||||||
|
sys.stderr.write("Creates a directory with folders for different types\n"
|
||||||
|
"of ADR reports.\n\n")
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
"""Entry point for the command-line invocation"""
|
||||||
parser = OptionParser()
|
parser = OptionParser()
|
||||||
parser.usage = "ptulsconv TEXT_EXPORT.txt"
|
parser.usage = "ptulsconv [options] [TEXT_EXPORT.txt]"
|
||||||
parser.add_option('-i', dest='in_time', help="Don't output events occurring before this timecode, and offset"
|
|
||||||
" all events relative to this timecode.", metavar='TC')
|
|
||||||
parser.add_option('-o', dest='out_time', help="Don't output events occurring after this timecode.", metavar='TC')
|
|
||||||
parser.add_option('-P', '--progress', default=False, action='store_true', dest='show_progress',
|
|
||||||
help='Show progress bar.')
|
|
||||||
parser.add_option('-m', '--include-muted', default=False, action='store_true', dest='include_muted',
|
|
||||||
help='Read muted clips.')
|
|
||||||
|
|
||||||
parser.add_option('--show-tags', dest='show_tags',
|
parser.add_option('-f', '--format',
|
||||||
action='store_true',
|
dest='output_format',
|
||||||
default=False, help='Display tag mappings for the FMP XML output style and exit.')
|
metavar='FMT',
|
||||||
|
choices=['raw', 'tagged', 'doc'],
|
||||||
|
default='doc',
|
||||||
|
help='Set output format, `raw`, `tagged`, `doc`.')
|
||||||
|
|
||||||
|
warn_options = OptionGroup(title="Warning and Validation Options",
|
||||||
|
parser=parser)
|
||||||
|
|
||||||
|
warn_options.add_option('-W', action='store_false',
|
||||||
|
dest='warnings',
|
||||||
|
default=True,
|
||||||
|
help='Suppress warnings for common '
|
||||||
|
'errors (missing code numbers etc.)')
|
||||||
|
|
||||||
|
parser.add_option_group(warn_options)
|
||||||
|
|
||||||
|
informational_options = OptionGroup(title="Informational Options",
|
||||||
|
parser=parser,
|
||||||
|
description='Print useful '
|
||||||
|
'information '
|
||||||
|
'and exit without processing '
|
||||||
|
'input files.')
|
||||||
|
|
||||||
|
informational_options.add_option(
|
||||||
|
'--show-formats',
|
||||||
|
dest='show_formats',
|
||||||
|
action='store_true',
|
||||||
|
default=False,
|
||||||
|
help='Display helpful information about the available '
|
||||||
|
'output formats.')
|
||||||
|
|
||||||
|
informational_options.add_option(
|
||||||
|
'--show-available-tags',
|
||||||
|
dest='show_tags',
|
||||||
|
action='store_true',
|
||||||
|
default=False,
|
||||||
|
help='Display tag mappings for the FMP XML output style '
|
||||||
|
'and exit.')
|
||||||
|
|
||||||
|
parser.add_option_group(informational_options)
|
||||||
|
|
||||||
|
print_banner_style(__copyright__)
|
||||||
|
|
||||||
(options, args) = parser.parse_args(sys.argv)
|
(options, args) = parser.parse_args(sys.argv)
|
||||||
|
|
||||||
|
print_section_header_style("Startup")
|
||||||
|
print_status_style("This run started %s" %
|
||||||
|
(datetime.datetime.now().isoformat()))
|
||||||
|
|
||||||
if options.show_tags:
|
if options.show_tags:
|
||||||
dump_field_map('ADR')
|
dump_field_map()
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
if len(args) < 2:
|
elif options.show_formats:
|
||||||
print("Error: No input file", file=sys.stderr)
|
dump_formats()
|
||||||
parser.print_help(sys.stderr)
|
sys.exit(0)
|
||||||
sys.exit(22)
|
try:
|
||||||
|
major_mode = options.output_format
|
||||||
|
|
||||||
convert(input_file=args[1], start=options.in_time, end=options.out_time, include_muted=options.include_muted,
|
if len(args) < 2:
|
||||||
progress=options.show_progress, output=sys.stdout)
|
print_status_style(
|
||||||
|
"No input file provided, will connect to Pro Tools "
|
||||||
|
"with PTSL...")
|
||||||
|
convert(major_mode=major_mode,
|
||||||
|
warnings=options.warnings)
|
||||||
|
else:
|
||||||
|
convert(input_file=args[1],
|
||||||
|
major_mode=major_mode,
|
||||||
|
warnings=options.warnings)
|
||||||
|
|
||||||
|
except FileNotFoundError as e:
|
||||||
|
print_fatal_error("Error trying to read input file")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
import traceback
|
||||||
|
print_fatal_error("Error trying to convert file")
|
||||||
|
print("\033[31m" + e.__repr__() + "\033[0m", file=sys.stderr)
|
||||||
|
print(traceback.format_exc())
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|||||||
@@ -1,26 +1,58 @@
|
|||||||
from fractions import Fraction
|
"""
|
||||||
import re
|
Useful functions for parsing and working with timecode.
|
||||||
|
"""
|
||||||
|
|
||||||
import math
|
import math
|
||||||
|
import re
|
||||||
|
from collections import namedtuple
|
||||||
|
from fractions import Fraction
|
||||||
|
from typing import Optional, SupportsFloat
|
||||||
|
|
||||||
|
|
||||||
def smpte_to_frame_count(smpte_rep_string: str, frames_per_logical_second: int, drop_frame_hint=False,
|
class TimecodeFormat(namedtuple("_TimecodeFormat",
|
||||||
include_fractional=False):
|
"frame_duration logical_fps drop_frame")):
|
||||||
|
"""
|
||||||
|
A struct reperesenting a timecode datum.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def smpte_to_seconds(self, smpte: str) -> Optional[Fraction]:
|
||||||
|
frame_count = smpte_to_frame_count(
|
||||||
|
smpte, self.logical_fps, drop_frame_hint=self.drop_frame)
|
||||||
|
if frame_count is None:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return frame_count * self.frame_duration
|
||||||
|
|
||||||
|
def seconds_to_smpte(self, seconds: SupportsFloat) -> str:
|
||||||
|
frame_count = int(seconds / self.frame_duration)
|
||||||
|
return frame_count_to_smpte(frame_count, self.logical_fps,
|
||||||
|
self.drop_frame)
|
||||||
|
|
||||||
|
|
||||||
|
def smpte_to_frame_count(smpte_rep_string: str, frames_per_logical_second: int,
|
||||||
|
drop_frame_hint=False) -> Optional[int]:
|
||||||
"""
|
"""
|
||||||
Convert a string with a SMPTE timecode representation into a frame count.
|
Convert a string with a SMPTE timecode representation into a frame count.
|
||||||
|
|
||||||
:param smpte_rep_string: The timecode string
|
:param smpte_rep_string: The timecode string
|
||||||
:param frames_per_logical_second: Num of frames in a logical second. This is asserted to be
|
:param frames_per_logical_second: Num of frames in a logical second. This
|
||||||
in one of `[24,25,30,48,50,60]`
|
is asserted to be in one of `[24,25,30,48,50,60]`
|
||||||
:param drop_frame_hint: `True` if the timecode rep is drop frame. This is ignored (and implied `True`) if
|
:param drop_frame_hint: `True` if the timecode rep is drop frame. This is
|
||||||
the last separator in the timecode string is a semicolon. This is ignored (and implied `False`) if
|
ignored (and implied `True`) if the last separator in the timecode
|
||||||
`frames_per_logical_second` is not 30 or 60.
|
string is a semicolon. This is ignored (and implied `False`) if
|
||||||
:param include_fractional: If `True` fractional frames will be parsed and returned as a second retval in a tuple
|
`frames_per_logical_second` is not 30 or 60.
|
||||||
"""
|
"""
|
||||||
assert frames_per_logical_second in [24, 25, 30, 48, 50, 60]
|
assert frames_per_logical_second in [24, 25, 30, 48, 50, 60]
|
||||||
|
|
||||||
m = re.search("(\d?\d)[:;](\d\d)[:;](\d\d)([:;])(\d\d)(\.\d+)?", smpte_rep_string)
|
m = re.search(
|
||||||
|
r'(\d?\d)[:;](\d\d)[:;](\d\d)([:;])(\d\d)(\.\d+)?', smpte_rep_string)
|
||||||
|
|
||||||
|
if m is None:
|
||||||
|
return None
|
||||||
|
|
||||||
hh, mm, ss, sep, ff, frac = m.groups()
|
hh, mm, ss, sep, ff, frac = m.groups()
|
||||||
hh, mm, ss, ff, frac = int(hh), int(mm), int(ss), int(ff), float(frac or 0.0)
|
hh, mm, ss, ff, frac = int(hh), int(
|
||||||
|
mm), int(ss), int(ff), float(frac or 0.0)
|
||||||
|
|
||||||
drop_frame = drop_frame_hint
|
drop_frame = drop_frame_hint
|
||||||
if sep == ";":
|
if sep == ";":
|
||||||
@@ -29,25 +61,23 @@ def smpte_to_frame_count(smpte_rep_string: str, frames_per_logical_second: int,
|
|||||||
if frames_per_logical_second not in [30, 60]:
|
if frames_per_logical_second not in [30, 60]:
|
||||||
drop_frame = False
|
drop_frame = False
|
||||||
|
|
||||||
raw_frames = hh * 3600 * frames_per_logical_second + mm * 60 * frames_per_logical_second + \
|
raw_frames = hh * 3600 * frames_per_logical_second + mm * 60 * \
|
||||||
ss * frames_per_logical_second + ff
|
frames_per_logical_second + ss * frames_per_logical_second + ff
|
||||||
|
|
||||||
frames = raw_frames
|
frames = raw_frames
|
||||||
if drop_frame is True:
|
if drop_frame is True:
|
||||||
frames_dropped_per_inst = (frames_per_logical_second / 15)
|
frames_dropped_per_inst = (frames_per_logical_second / 15)
|
||||||
mins = hh * 60 + mm
|
mins = hh * 60 + mm
|
||||||
inst_count = mins - math.floor(mins / 10)
|
inst_count = mins - math.floor(mins / 10)
|
||||||
dropped_frames = frames_dropped_per_inst * inst_count
|
dropped_frames = int(frames_dropped_per_inst) * inst_count
|
||||||
frames = raw_frames - dropped_frames
|
frames = raw_frames - dropped_frames
|
||||||
|
|
||||||
if include_fractional:
|
return frames
|
||||||
return frames, frac
|
|
||||||
else:
|
|
||||||
return frames
|
|
||||||
|
|
||||||
|
|
||||||
def frame_count_to_smpte(frame_count: int, frames_per_logical_second: int, drop_frame: bool = False,
|
def frame_count_to_smpte(frame_count: int, frames_per_logical_second: int,
|
||||||
fractional_frame: float = None):
|
drop_frame: bool = False,
|
||||||
|
fractional_frame: Optional[float] = None) -> str:
|
||||||
assert frames_per_logical_second in [24, 25, 30, 48, 50, 60]
|
assert frames_per_logical_second in [24, 25, 30, 48, 50, 60]
|
||||||
assert fractional_frame is None or fractional_frame < 1.0
|
assert fractional_frame is None or fractional_frame < 1.0
|
||||||
|
|
||||||
@@ -68,29 +98,24 @@ def frame_count_to_smpte(frame_count: int, frames_per_logical_second: int, drop_
|
|||||||
|
|
||||||
hh = hh % 24
|
hh = hh % 24
|
||||||
if fractional_frame is not None and fractional_frame > 0:
|
if fractional_frame is not None and fractional_frame > 0:
|
||||||
return "%02i:%02i:%02i%s%02i%s" % (hh, mm, ss, separator, ff, ("%.3f" % fractional_frame)[1:])
|
return "%02i:%02i:%02i%s%02i%s" % (hh, mm, ss, separator, ff,
|
||||||
|
("%.3f" % fractional_frame)[1:])
|
||||||
else:
|
else:
|
||||||
return "%02i:%02i:%02i%s%02i" % (hh, mm, ss, separator, ff)
|
return "%02i:%02i:%02i%s%02i" % (hh, mm, ss, separator, ff)
|
||||||
|
|
||||||
|
|
||||||
def footage_to_frame_count(footage_string, include_fractional=False):
|
def footage_to_frame_count(footage_string) -> Optional[int]:
|
||||||
m = re.search("(\d+)\+(\d+)(\.\d+)?", footage_string)
|
m = re.search(r'(\d+)\+(\d+)(\.\d+)?', footage_string)
|
||||||
|
if m is None:
|
||||||
|
return None
|
||||||
feet, frm, frac = m.groups()
|
feet, frm, frac = m.groups()
|
||||||
feet, frm, frac = int(feet), int(frm), float(frac or 0.0)
|
feet, frm, frac = int(feet), int(frm), float(frac or 0.0)
|
||||||
|
|
||||||
frames = feet * 16 + frm
|
frames = feet * 16 + frm
|
||||||
|
|
||||||
if include_fractional:
|
return frames
|
||||||
return frames, frac
|
|
||||||
else:
|
|
||||||
return frames
|
|
||||||
|
|
||||||
|
|
||||||
def frame_count_to_footage(frame_count, fractional_frames=None):
|
def frame_count_to_footage(frame_count):
|
||||||
assert fractional_frames is None or fractional_frames < 1.0
|
|
||||||
feet, frm = divmod(frame_count, 16)
|
feet, frm = divmod(frame_count, 16)
|
||||||
|
return "%i+%02i" % (feet, frm)
|
||||||
if fractional_frames is None:
|
|
||||||
return "%i+%02i" % (feet, frm)
|
|
||||||
else:
|
|
||||||
return "%i+%02i%s" % (feet, frm, ("%.3f" % fractional_frames)[1:])
|
|
||||||
|
|||||||
@@ -1,137 +1,254 @@
|
|||||||
import io
|
"""
|
||||||
import json
|
This module provides the main input document parsing and transform
|
||||||
import os.path
|
implementation.
|
||||||
|
"""
|
||||||
|
import datetime
|
||||||
|
import os
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
from xml.etree.ElementTree import TreeBuilder, tostring
|
from itertools import chain
|
||||||
import ptulsconv
|
import csv
|
||||||
|
from typing import List, Optional, Iterator
|
||||||
|
from fractions import Fraction
|
||||||
|
|
||||||
# field_map maps tags in the text export to fields in FMPXMLRESULT
|
import ptsl
|
||||||
# - tuple field 0 is a list of tags, the first tag with contents will be used as source
|
|
||||||
# - tuple field 1 is the field in FMPXMLRESULT
|
from .docparser.adr_entity import make_entities, ADRLine
|
||||||
# - tuple field 2 the constructor/type of the field
|
from .reporting import print_section_header_style, print_status_style,\
|
||||||
adr_field_map = ((['Title', 'PT.Session.Name'], 'Title', str),
|
print_warning
|
||||||
(['Supv'], 'Supervisor', str),
|
from .validations import validate_unique_field, validate_non_empty_field,\
|
||||||
(['Client'], 'Client', str),
|
validate_dependent_value
|
||||||
(['Sc'], 'Scene', str),
|
|
||||||
(['Ver'], 'Version', str),
|
from ptulsconv.docparser import parse_document
|
||||||
(['Reel'], 'Reel', str),
|
from ptulsconv.docparser.tag_compiler import TagCompiler
|
||||||
(['PT.Clip.Start'], 'Start', str),
|
from ptulsconv.broadcast_timecode import TimecodeFormat
|
||||||
(['PT.Clip.Finish'], 'Finish', str),
|
|
||||||
(['PT.Clip.Start_Seconds'], 'Start Seconds', float),
|
from ptulsconv.pdf.supervisor_1pg import output_report as output_supervisor_1pg
|
||||||
(['PT.Clip.Finish_Seconds'], 'Finish Seconds', float),
|
from ptulsconv.pdf.line_count import output_report as output_line_count
|
||||||
(['PT.Clip.Start_Frames'], 'Start Frames', int),
|
from ptulsconv.pdf.talent_sides import output_report as output_talent_sides
|
||||||
(['PT.Clip.Finish_Frames'], 'Finish Frames', int),
|
from ptulsconv.pdf.summary_log import output_report as output_summary
|
||||||
(['P'], 'Priority', int),
|
from ptulsconv.pdf.continuity import output_report as output_continuity
|
||||||
(['QN'], 'Cue Number', str),
|
|
||||||
(['Char', 'PT.Track.Name'], 'Character Name', str),
|
from json import JSONEncoder
|
||||||
(['Actor'], 'Actor Name', str),
|
|
||||||
(['CN'], 'Character Number', str),
|
|
||||||
(['R'], 'Reason', str),
|
|
||||||
(['Rq'], 'Requested by', str),
|
|
||||||
(['Spot'], 'Spot', str),
|
|
||||||
(['PT.Clip.Name', 'Line'], 'Line', str),
|
|
||||||
(['Shot'], 'Shot', str),
|
|
||||||
(['Note'], 'Note', str),
|
|
||||||
(['Mins'], 'Time Budget Mins', float),
|
|
||||||
(['EFF'], 'Effort', str),
|
|
||||||
(['TV'], 'TV', str),
|
|
||||||
(['TBW'], 'To Be Written', str),
|
|
||||||
(['OMIT'], 'Omit', str),
|
|
||||||
(['ADLIB'], 'Adlib', str),
|
|
||||||
(['OPT'], 'Optional', str))
|
|
||||||
|
|
||||||
|
|
||||||
def fmp_dump(data, input_file_name, output):
|
class MyEncoder(JSONEncoder):
|
||||||
doc = TreeBuilder(element_factory=None)
|
"""
|
||||||
|
A subclass of :class:`JSONEncoder` which encodes :class:`Fraction` objects
|
||||||
|
as a dict.
|
||||||
|
"""
|
||||||
|
force_denominator: Optional[int]
|
||||||
|
|
||||||
doc.start('FMPXMLRESULT', {'xmlns': 'http://www.filemaker.com/fmpxmlresult'})
|
def default(self, o):
|
||||||
|
"""
|
||||||
|
|
||||||
doc.start('ERRORCODE')
|
"""
|
||||||
doc.data('0')
|
if isinstance(o, Fraction):
|
||||||
doc.end('ERRORCODE')
|
return dict(numerator=o.numerator, denominator=o.denominator)
|
||||||
|
else:
|
||||||
doc.start('PRODUCT', {'NAME': ptulsconv.__name__, 'VERSION': ptulsconv.__version__})
|
return o.__dict__
|
||||||
doc.end('PRODUCT')
|
|
||||||
|
|
||||||
doc.start('DATABASE', {'DATEFORMAT': 'MM/dd/yy', 'LAYOUT': 'summary', 'TIMEFORMAT': 'hh:mm:ss',
|
|
||||||
'RECORDS': str(len(data['events'])), 'NAME': os.path.basename(input_file_name)})
|
|
||||||
doc.end('DATABASE')
|
|
||||||
|
|
||||||
doc.start('METADATA')
|
|
||||||
for field in adr_field_map:
|
|
||||||
tp = field[2]
|
|
||||||
ft = 'TEXT'
|
|
||||||
if tp is int or tp is float:
|
|
||||||
ft = 'NUMBER'
|
|
||||||
|
|
||||||
doc.start('FIELD', {'EMPTYOK': 'YES', 'MAXREPEAT': '1', 'NAME': field[1], 'TYPE': ft})
|
|
||||||
doc.end('FIELD')
|
|
||||||
doc.end('METADATA')
|
|
||||||
|
|
||||||
doc.start('RESULTSET', {'FOUND': str(len(data['events']))})
|
|
||||||
for event in data['events']:
|
|
||||||
doc.start('ROW')
|
|
||||||
for field in adr_field_map:
|
|
||||||
doc.start('COL')
|
|
||||||
doc.start('DATA')
|
|
||||||
for key_attempt in field[0]:
|
|
||||||
if key_attempt in event.keys():
|
|
||||||
doc.data(str(event[key_attempt]))
|
|
||||||
break
|
|
||||||
doc.end('DATA')
|
|
||||||
doc.end('COL')
|
|
||||||
doc.end('ROW')
|
|
||||||
doc.end('RESULTSET')
|
|
||||||
|
|
||||||
doc.end('FMPXMLRESULT')
|
|
||||||
docelem = doc.close()
|
|
||||||
xmlstr = tostring(docelem, encoding='unicode', method='xml')
|
|
||||||
output.write(xmlstr)
|
|
||||||
|
|
||||||
|
|
||||||
def dump_field_map(field_map_name, output=sys.stdout):
|
def output_adr_csv(lines: List[ADRLine], time_format: TimecodeFormat):
|
||||||
output.write("# Map of Tag fields to XML output columns\n")
|
"""
|
||||||
output.write("# (in order of precedence)\n")
|
Writes ADR lines as CSV to the current working directory. Creates
|
||||||
output.write("# \n")
|
directories for each character number and name pair, and within that
|
||||||
field_map = []
|
directory, creates a CSV file for each reel.
|
||||||
if field_map_name == 'ADR':
|
"""
|
||||||
field_map = adr_field_map
|
reels = set([ln.reel for ln in lines])
|
||||||
output.write("# ADR Table Fields\n")
|
|
||||||
|
|
||||||
output.write("# \n")
|
for n, name in [(n.character_id, n.character_name) for n in lines]:
|
||||||
output.write("# Tag Name | FMPXMLRESULT Column | Type | Column \n")
|
dir_name = "%s_%s" % (n, name)
|
||||||
output.write("# -------------------------+----------------------+---------+--------\n")
|
os.makedirs(dir_name, exist_ok=True)
|
||||||
|
os.chdir(dir_name)
|
||||||
|
for reel in reels:
|
||||||
|
these_lines = [ln for ln in lines
|
||||||
|
if ln.character_id == n and ln.reel == reel]
|
||||||
|
|
||||||
for n, field in enumerate(field_map):
|
if len(these_lines) == 0:
|
||||||
for tag in field[0]:
|
continue
|
||||||
output.write("# %-24s-> %-20s | %-8s| %-7i\n" % (tag[:24], field[1][:20], field[2].__name__, n+1 ))
|
|
||||||
|
outfile_name = "%s_%s_%s_%s.csv" % (these_lines[0].title, n,
|
||||||
|
these_lines[0].character_name,
|
||||||
|
reel,)
|
||||||
|
|
||||||
|
with open(outfile_name, mode='w', newline='') as outfile:
|
||||||
|
writer = csv.writer(outfile, dialect='excel')
|
||||||
|
writer.writerow(['Title', 'Character Name', 'Cue Number',
|
||||||
|
'Reel', 'Version',
|
||||||
|
'Start', 'Finish',
|
||||||
|
'Start Seconds', 'Finish Seconds',
|
||||||
|
'Prompt',
|
||||||
|
'Reason', 'Note', 'TV'])
|
||||||
|
|
||||||
|
for event in these_lines:
|
||||||
|
this_start = event.start or 0
|
||||||
|
this_finish = event.finish or 0
|
||||||
|
this_row = [event.title, event.character_name,
|
||||||
|
event.cue_number, event.reel, event.version,
|
||||||
|
time_format.seconds_to_smpte(this_start),
|
||||||
|
time_format.seconds_to_smpte(this_finish),
|
||||||
|
float(this_start), float(this_finish),
|
||||||
|
event.prompt,
|
||||||
|
event.reason, event.note, "TV"
|
||||||
|
if event.tv else ""]
|
||||||
|
|
||||||
|
writer.writerow(this_row)
|
||||||
|
os.chdir("..")
|
||||||
|
|
||||||
|
|
||||||
def convert(input_file, output_format='fmpxml', start=None, end=None, progress=False, include_muted=False,
|
def generate_documents(session_tc_format, scenes, adr_lines: Iterator[ADRLine],
|
||||||
output=sys.stdout):
|
title):
|
||||||
with open(input_file, 'r') as file:
|
"""
|
||||||
ast = ptulsconv.protools_text_export_grammar.parse(file.read())
|
Create PDF output.
|
||||||
dict_parser = ptulsconv.DictionaryParserVisitor()
|
"""
|
||||||
parsed = dict_parser.visit(ast)
|
print_section_header_style("Creating PDF Reports")
|
||||||
|
report_date = datetime.datetime.now()
|
||||||
|
reports_dir = "%s_%s" % (title, report_date.strftime("%Y-%m-%d_%H%M%S"))
|
||||||
|
os.makedirs(reports_dir, exist_ok=False)
|
||||||
|
os.chdir(reports_dir)
|
||||||
|
|
||||||
tcxform = ptulsconv.transformations.TimecodeInterpreter()
|
client = next((x.client for x in adr_lines), "")
|
||||||
tagxform = ptulsconv.transformations.TagInterpreter(show_progress=progress, ignore_muted=(not include_muted))
|
supervisor = next((x.supervisor for x in adr_lines), "")
|
||||||
|
|
||||||
parsed = tagxform.transform(tcxform.transform(parsed))
|
output_continuity(scenes=scenes, tc_display_format=session_tc_format,
|
||||||
|
title=title, client=client,
|
||||||
|
supervisor=supervisor)
|
||||||
|
|
||||||
if start is not None and end is not None:
|
reels = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
|
||||||
start_fs = tcxform.convert_time(start,
|
|
||||||
frame_rate=parsed['header']['timecode_format'],
|
|
||||||
drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
|
|
||||||
|
|
||||||
end_fs = tcxform.convert_time(end,
|
if len(adr_lines) == 0:
|
||||||
frame_rate=parsed['header']['timecode_format'],
|
print_status_style("No ADR lines were found in the input document. "
|
||||||
drop_frame=parsed['header']['timecode_drop_frame'])['frame_count']
|
"ADR reports will not be generated.")
|
||||||
|
|
||||||
subclipxform = ptulsconv.transformations.SubclipOfSequence(start=start_fs, end=end_fs)
|
else:
|
||||||
parsed = subclipxform.transform(parsed)
|
create_adr_reports(adr_lines, tc_display_format=session_tc_format,
|
||||||
|
reel_list=sorted(reels))
|
||||||
|
|
||||||
if output_format == 'json':
|
|
||||||
json.dump(parsed, output)
|
def create_adr_reports(lines: List[ADRLine], tc_display_format: TimecodeFormat,
|
||||||
elif output_format == 'fmpxml':
|
reel_list: List[str]):
|
||||||
fmp_dump(parsed, input_file, output)
|
"""
|
||||||
|
Creates a directory heirarchy and a respective set of ADR reports,
|
||||||
|
given a list of lines.
|
||||||
|
"""
|
||||||
|
|
||||||
|
print_status_style("Creating ADR Report")
|
||||||
|
output_summary(lines, tc_display_format=tc_display_format)
|
||||||
|
|
||||||
|
print_status_style("Creating Line Count")
|
||||||
|
output_line_count(lines, reel_list=reel_list)
|
||||||
|
|
||||||
|
print_status_style("Creating Supervisor Logs directory and reports")
|
||||||
|
os.makedirs("Supervisor Logs", exist_ok=True)
|
||||||
|
os.chdir("Supervisor Logs")
|
||||||
|
output_supervisor_1pg(lines, tc_display_format=tc_display_format)
|
||||||
|
os.chdir("..")
|
||||||
|
|
||||||
|
print_status_style("Creating Director's Logs director and reports")
|
||||||
|
os.makedirs("Director Logs", exist_ok=True)
|
||||||
|
os.chdir("Director Logs")
|
||||||
|
output_summary(lines, tc_display_format=tc_display_format,
|
||||||
|
by_character=True)
|
||||||
|
os.chdir("..")
|
||||||
|
|
||||||
|
print_status_style("Creating CSV outputs")
|
||||||
|
os.makedirs("CSV", exist_ok=True)
|
||||||
|
os.chdir("CSV")
|
||||||
|
output_adr_csv(lines, time_format=tc_display_format)
|
||||||
|
os.chdir("..")
|
||||||
|
|
||||||
|
print_status_style("Creating Scripts directory and reports")
|
||||||
|
os.makedirs("Talent Scripts", exist_ok=True)
|
||||||
|
os.chdir("Talent Scripts")
|
||||||
|
output_talent_sides(lines, tc_display_format=tc_display_format)
|
||||||
|
|
||||||
|
|
||||||
|
def convert(major_mode, input_file=None, output=sys.stdout, warnings=True):
|
||||||
|
"""
|
||||||
|
Primary worker function, accepts the input file and decides
|
||||||
|
what to do with it based on the `major_mode`.
|
||||||
|
|
||||||
|
:param input_file: a path to the input file.
|
||||||
|
:param major_mode: the selected output mode, 'raw', 'tagged' or 'doc'.
|
||||||
|
"""
|
||||||
|
session_text = ""
|
||||||
|
if input_file is not None:
|
||||||
|
with open(input_file, "r") as file:
|
||||||
|
session_text = file.read()
|
||||||
|
else:
|
||||||
|
with ptsl.open_engine(
|
||||||
|
company_name="The ptulsconv developers",
|
||||||
|
application_name="ptulsconv") as engine:
|
||||||
|
req = engine.export_session_as_text()
|
||||||
|
req.utf8_encoding()
|
||||||
|
req.include_track_edls()
|
||||||
|
req.include_markers()
|
||||||
|
req.time_type("tc")
|
||||||
|
req.dont_show_crossfades()
|
||||||
|
req.selected_tracks_only()
|
||||||
|
session_text = req.export_string
|
||||||
|
|
||||||
|
session = parse_document(session_text)
|
||||||
|
session_tc_format = session.header.timecode_format
|
||||||
|
|
||||||
|
if major_mode == 'raw':
|
||||||
|
output.write(MyEncoder().encode(session))
|
||||||
|
|
||||||
|
else:
|
||||||
|
compiler = TagCompiler()
|
||||||
|
compiler.session = session
|
||||||
|
compiled_events = list(compiler.compile_events())
|
||||||
|
|
||||||
|
if major_mode == 'tagged':
|
||||||
|
output.write(MyEncoder().encode(compiled_events))
|
||||||
|
|
||||||
|
elif major_mode == 'doc':
|
||||||
|
generic_events, adr_lines = make_entities(compiled_events)
|
||||||
|
|
||||||
|
scenes = sorted([s for s in compiler.compile_all_time_spans()
|
||||||
|
if s[0] == 'Sc'],
|
||||||
|
key=lambda x: x[2])
|
||||||
|
|
||||||
|
# TODO: Breakdown by titles
|
||||||
|
titles = set([x.title for x in (generic_events + adr_lines)])
|
||||||
|
if len(titles) != 1:
|
||||||
|
print_warning("Multiple titles per export is not supported, "
|
||||||
|
"found multiple titles: %s Exiting." % titles)
|
||||||
|
exit(-1)
|
||||||
|
|
||||||
|
title = list(titles)[0]
|
||||||
|
|
||||||
|
print_status_style(
|
||||||
|
"%i generic events found." % len(generic_events)
|
||||||
|
)
|
||||||
|
print_status_style("%i ADR events found." % len(adr_lines))
|
||||||
|
|
||||||
|
if warnings:
|
||||||
|
perform_adr_validations(adr_lines)
|
||||||
|
|
||||||
|
generate_documents(session_tc_format, scenes, adr_lines, title)
|
||||||
|
|
||||||
|
|
||||||
|
def perform_adr_validations(lines: Iterator[ADRLine]):
|
||||||
|
"""
|
||||||
|
Performs validations on the input.
|
||||||
|
"""
|
||||||
|
for warning in chain(
|
||||||
|
validate_unique_field(lines,
|
||||||
|
field='cue_number',
|
||||||
|
scope='title'),
|
||||||
|
validate_non_empty_field(lines,
|
||||||
|
field='cue_number'),
|
||||||
|
validate_non_empty_field(lines,
|
||||||
|
field='character_id'),
|
||||||
|
validate_non_empty_field(lines,
|
||||||
|
field='title'),
|
||||||
|
validate_dependent_value(lines,
|
||||||
|
key_field='character_id',
|
||||||
|
dependent_field='character_name'),
|
||||||
|
validate_dependent_value(lines,
|
||||||
|
key_field='character_id',
|
||||||
|
dependent_field='actor_name')):
|
||||||
|
|
||||||
|
print_warning(warning.report_message())
|
||||||
|
|||||||
5
ptulsconv/docparser/__init__.py
Normal file
5
ptulsconv/docparser/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
"""
|
||||||
|
Docparser module
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .pt_doc_parser import parse_document
|
||||||
137
ptulsconv/docparser/adr_entity.py
Normal file
137
ptulsconv/docparser/adr_entity.py
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
"""
|
||||||
|
This module defines classes and methods for converting :class:`Event` objects
|
||||||
|
into :class:`ADRLine` objects.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from ptulsconv.docparser.tag_compiler import Event
|
||||||
|
from typing import Optional, List, Tuple
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from fractions import Fraction
|
||||||
|
|
||||||
|
from ptulsconv.docparser.tag_mapping import TagMapping
|
||||||
|
|
||||||
|
|
||||||
|
def make_entities(from_events: List[Event]) -> Tuple[List['GenericEvent'],
|
||||||
|
List['ADRLine']]:
|
||||||
|
"""
|
||||||
|
Accepts a list of Events and converts them into either ADRLine events or
|
||||||
|
GenricEvents by calling :func:`make_entity` on each member.
|
||||||
|
|
||||||
|
:param from_events: A list of `Event` objects.
|
||||||
|
|
||||||
|
:returns: A tuple of two lists, the first containing :class:`GenericEvent`
|
||||||
|
and the second containing :class:`ADRLine`.
|
||||||
|
"""
|
||||||
|
generic_events = list()
|
||||||
|
adr_lines = list()
|
||||||
|
|
||||||
|
for event in from_events:
|
||||||
|
result = make_entity(event)
|
||||||
|
if type(result) is ADRLine:
|
||||||
|
adr_lines.append(result)
|
||||||
|
elif type(result) is GenericEvent:
|
||||||
|
generic_events.append(result)
|
||||||
|
|
||||||
|
return generic_events, adr_lines
|
||||||
|
|
||||||
|
|
||||||
|
def make_entity(from_event: Event) -> Optional[object]:
|
||||||
|
"""
|
||||||
|
Accepts an event and creates either an :class:`ADRLine` or a
|
||||||
|
:class:`GenericEvent`. An event is an "ADRLine" if it has a cue number/"QN"
|
||||||
|
tag field.
|
||||||
|
|
||||||
|
:param from_event: An :class:`Event`.
|
||||||
|
|
||||||
|
"""
|
||||||
|
instance = GenericEvent
|
||||||
|
tag_map = GenericEvent.tag_mapping
|
||||||
|
if 'QN' in from_event.tags.keys():
|
||||||
|
instance = ADRLine
|
||||||
|
tag_map += ADRLine.tag_mapping
|
||||||
|
|
||||||
|
new = instance()
|
||||||
|
TagMapping.apply_rules(tag_map, from_event.tags,
|
||||||
|
from_event.clip_name, from_event.track_name,
|
||||||
|
from_event.session_name, new)
|
||||||
|
|
||||||
|
new.start = from_event.start
|
||||||
|
new.finish = from_event.finish
|
||||||
|
return new
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class GenericEvent:
|
||||||
|
title: str = ""
|
||||||
|
supervisor: Optional[str] = None
|
||||||
|
client: Optional[str] = None
|
||||||
|
scene: Optional[str] = None
|
||||||
|
version: Optional[str] = None
|
||||||
|
reel: Optional[str] = None
|
||||||
|
start: Fraction = Fraction(0, 1)
|
||||||
|
finish: Fraction = Fraction(0, 1)
|
||||||
|
omitted: bool = False
|
||||||
|
note: Optional[str] = None
|
||||||
|
requested_by: Optional[str] = None
|
||||||
|
|
||||||
|
tag_mapping = [
|
||||||
|
TagMapping(source='Title', target="title",
|
||||||
|
alt=TagMapping.ContentSource.Session),
|
||||||
|
TagMapping(source="Supv", target="supervisor"),
|
||||||
|
TagMapping(source="Client", target="client"),
|
||||||
|
TagMapping(source="Sc", target="scene"),
|
||||||
|
TagMapping(source="Ver", target="version"),
|
||||||
|
TagMapping(source="Reel", target="reel"),
|
||||||
|
TagMapping(source="Note", target="note"),
|
||||||
|
TagMapping(source="Rq", target="requested_by"),
|
||||||
|
TagMapping(source="OMIT", target="omitted",
|
||||||
|
formatter=(lambda x: len(x) > 0)),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ADRLine(GenericEvent):
|
||||||
|
|
||||||
|
priority: Optional[int] = None
|
||||||
|
cue_number: Optional[str] = None
|
||||||
|
character_id: Optional[str] = None
|
||||||
|
character_name: Optional[str] = None
|
||||||
|
actor_name: Optional[str] = None
|
||||||
|
prompt: Optional[str] = None
|
||||||
|
reason: Optional[str] = None
|
||||||
|
time_budget_mins: Optional[float] = None
|
||||||
|
spot: Optional[str] = None
|
||||||
|
shot: Optional[str] = None
|
||||||
|
effort: bool = False
|
||||||
|
tv: bool = False
|
||||||
|
tbw: bool = False
|
||||||
|
adlib: bool = False
|
||||||
|
optional: bool = False
|
||||||
|
|
||||||
|
tag_mapping = [
|
||||||
|
|
||||||
|
TagMapping(source="P", target="priority"),
|
||||||
|
TagMapping(source="QN", target="cue_number"),
|
||||||
|
TagMapping(source="CN", target="character_id"),
|
||||||
|
TagMapping(source="Char", target="character_name",
|
||||||
|
alt=TagMapping.ContentSource.Track),
|
||||||
|
TagMapping(source="Actor", target="actor_name"),
|
||||||
|
TagMapping(source="Line", target="prompt",
|
||||||
|
alt=TagMapping.ContentSource.Clip),
|
||||||
|
TagMapping(source="R", target="reason"),
|
||||||
|
TagMapping(source="Mins", target="time_budget_mins",
|
||||||
|
formatter=(lambda n: float(n))),
|
||||||
|
TagMapping(source="Spot", target="spot"),
|
||||||
|
TagMapping(source="Shot", target="shot"),
|
||||||
|
TagMapping(source="EFF", target="effort",
|
||||||
|
formatter=(lambda x: len(x) > 0)),
|
||||||
|
TagMapping(source="TV", target="tv",
|
||||||
|
formatter=(lambda x: len(x) > 0)),
|
||||||
|
TagMapping(source="TBW", target="tbw",
|
||||||
|
formatter=(lambda x: len(x) > 0)),
|
||||||
|
|
||||||
|
TagMapping(source="ADLIB", target="adlib",
|
||||||
|
formatter=(lambda x: len(x) > 0)),
|
||||||
|
TagMapping(source="OPT", target="optional",
|
||||||
|
formatter=(lambda x: len(x) > 0))
|
||||||
|
]
|
||||||
181
ptulsconv/docparser/doc_entity.py
Normal file
181
ptulsconv/docparser/doc_entity.py
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
from fractions import Fraction
|
||||||
|
from ptulsconv.broadcast_timecode import TimecodeFormat
|
||||||
|
from typing import Tuple, List, Iterator
|
||||||
|
|
||||||
|
|
||||||
|
class SessionDescriptor:
    """
    The parsed form of an entire Pro Tools text export: the session
    header plus its files, clips, plug-ins, tracks and markers sections.
    """
    header: "HeaderDescriptor"
    files: List["FileDescriptor"]
    clips: List["ClipDescriptor"]
    plugins: List["PluginDescriptor"]
    tracks: List["TrackDescriptor"]
    markers: List["MarkerDescriptor"]

    def __init__(self, **kwargs):
        """Populate every section from the like-named keyword argument."""
        for attr in ("header", "files", "clips",
                     "plugins", "tracks", "markers"):
            setattr(self, attr, kwargs[attr])

    def markers_timed(self) -> Iterator[Tuple['MarkerDescriptor', Fraction]]:
        """Yield each marker paired with its time in seconds, derived
        from its sample time reference and the session sample rate."""
        for m in self.markers:
            yield m, Fraction(m.time_reference, int(self.header.sample_rate))

    def tracks_clips(self) -> Iterator[Tuple['TrackDescriptor',
                                             'TrackClipDescriptor']]:
        """Yield a (track, clip) pair for every clip on every track."""
        for t in self.tracks:
            yield from ((t, c) for c in t.clips)

    def track_clips_timed(self) -> Iterator[Tuple["TrackDescriptor",
                                                  "TrackClipDescriptor",
                                                  Fraction, Fraction, Fraction]
                                            ]:
        """
        :return: A Generator that yields track, clip, start time, finish time,
            and timestamp
        """
        for t, c in self.tracks_clips():
            begin = self.header.convert_timecode(c.start_timecode)
            end = self.header.convert_timecode(c.finish_timecode)
            if c.timestamp is not None:
                stamp = self.header.convert_timecode(c.timestamp)
            else:
                stamp = None

            yield t, c, begin, end, stamp
|
||||||
|
|
||||||
|
|
||||||
|
class HeaderDescriptor:
    """
    The parsed session-header block of a Pro Tools text export:
    session name, audio format, timecode settings and object counts.
    """
    session_name: str
    sample_rate: float
    bit_depth: int
    start_timecode: str
    timecode_fps: str
    timecode_drop_frame: bool
    count_audio_tracks: int
    count_clips: int
    count_files: int

    def __init__(self, **kwargs):
        self.session_name = kwargs['session_name']
        self.sample_rate = kwargs['sample_rate']
        self.bit_depth = kwargs['bit_depth']
        self.start_timecode = kwargs['start_timecode']
        # The constructor kwarg is `timecode_format` (the raw fps
        # string, e.g. "29.97"); it is stored as `timecode_fps` because
        # the `timecode_format` *property* below returns a
        # TimecodeFormat object.
        self.timecode_fps = kwargs['timecode_format']
        self.timecode_drop_frame = kwargs['timecode_drop_frame']
        self.count_audio_tracks = kwargs['count_audio_tracks']
        self.count_clips = kwargs['count_clips']
        self.count_files = kwargs['count_files']

    @property
    def timecode_format(self):
        """A `TimecodeFormat` for this session's timecode settings."""
        return TimecodeFormat(frame_duration=self.frame_duration,
                              logical_fps=self.logical_fps,
                              drop_frame=self.timecode_drop_frame)

    def convert_timecode(self, tc_string: str) -> Fraction:
        """Convert a SMPTE timecode string into seconds."""
        return self.timecode_format.smpte_to_seconds(tc_string)

    @property
    def start_time(self) -> Fraction:
        """
        The start time of this session.

        :return: Start time in seconds
        """
        return self.convert_timecode(self.start_timecode)

    @property
    def logical_fps(self) -> int:
        """The integer frame count used for timecode arithmetic."""
        return self._get_tc_format_params[0]

    @property
    def frame_duration(self) -> Fraction:
        """The exact duration of one frame, in seconds."""
        return self._get_tc_format_params[1]

    @property
    def _get_tc_format_params(self) -> Tuple[int, Fraction]:
        """
        Look up (logical fps, frame duration) for this session's frame
        rate string.

        :raises ValueError: if the frame rate string is not recognized.
        """
        frame_rates = {"23.976": (24, Fraction(1001, 24_000)),
                       "24": (24, Fraction(1, 24)),
                       "25": (25, Fraction(1, 25)),
                       "29.97": (30, Fraction(1001, 30_000)),
                       "30": (30, Fraction(1, 30)),
                       "59.94": (60, Fraction(1001, 60_000)),
                       "60": (60, Fraction(1, 60))
                       }

        if self.timecode_fps in frame_rates:
            return frame_rates[self.timecode_fps]
        else:
            # BUG FIX: this previously interpolated `self.timecode_format`,
            # a property that depends on this method, so an unrecognized
            # rate raised RecursionError instead of the intended
            # ValueError.
            raise ValueError("Unrecognized TC rate (%s)" %
                             self.timecode_fps)
|
||||||
|
|
||||||
|
|
||||||
|
class TrackDescriptor:
    """One track block from the session's track listing."""
    name: str
    comments: str
    user_delay_samples: int
    state: List[str]
    plugins: List[str]
    clips: List["TrackClipDescriptor"]

    def __init__(self, **kwargs):
        """Populate every field from the like-named keyword argument."""
        for attr in ("name", "comments", "user_delay_samples",
                     "state", "plugins", "clips"):
            setattr(self, attr, kwargs[attr])
|
||||||
|
|
||||||
|
|
||||||
|
class FileDescriptor(dict):
    """A record from the files listing; a plain dict of its parsed
    columns (filename, path)."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class TrackClipDescriptor:
    """One clip row within a track block of the track listing."""
    channel: int
    event: int
    clip_name: str
    start_timecode: str
    finish_timecode: str
    duration: str
    timestamp: str
    state: str

    def __init__(self, **kwargs):
        """Populate fields from the parser's keyword arguments.

        The parser supplies `start_time`/`finish_time`, which are stored
        under the `start_timecode`/`finish_timecode` attribute names.
        """
        field_to_kwarg = {
            "channel": "channel",
            "event": "event",
            "clip_name": "clip_name",
            "start_timecode": "start_time",
            "finish_timecode": "finish_time",
            "duration": "duration",
            "timestamp": "timestamp",
            "state": "state",
        }
        for attr, key in field_to_kwarg.items():
            setattr(self, attr, kwargs[key])
|
||||||
|
|
||||||
|
|
||||||
|
class ClipDescriptor(dict):
    """A record from the clips listing; a plain dict of its parsed
    columns (clip_name, file, channel)."""
    pass


class PluginDescriptor(dict):
    """A record from the plug-ins listing; a plain dict of its parsed
    columns (manufacturer, plugin_name, version, etc.)."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class MarkerDescriptor:
    """One row from the session's markers listing."""
    number: int
    location: str
    time_reference: int
    units: str
    name: str
    comments: str

    def __init__(self, **kwargs):
        """Populate every field from the like-named keyword argument."""
        for attr in ("number", "location", "time_reference",
                     "units", "name", "comments"):
            setattr(self, attr, kwargs[attr])
|
||||||
1
ptulsconv/docparser/generic_entity.py
Normal file
1
ptulsconv/docparser/generic_entity.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# from dataclasses import dataclass
|
||||||
267
ptulsconv/docparser/pt_doc_parser.py
Normal file
267
ptulsconv/docparser/pt_doc_parser.py
Normal file
@@ -0,0 +1,267 @@
|
|||||||
|
from parsimonious.nodes import NodeVisitor
|
||||||
|
from parsimonious.grammar import Grammar
|
||||||
|
|
||||||
|
from .doc_entity import SessionDescriptor, HeaderDescriptor, TrackDescriptor,\
|
||||||
|
FileDescriptor, TrackClipDescriptor, ClipDescriptor, PluginDescriptor,\
|
||||||
|
MarkerDescriptor
|
||||||
|
|
||||||
|
|
||||||
|
# PEG grammar (parsimonious) for the Pro Tools "Export Session Info as
# Text" format: a fixed header followed by optional files, clips,
# plug-in, track and marker sections.  Columns are tab-separated (`fs`)
# and records newline-terminated (`rs`); a blank line (`block_ending`)
# closes a section.  The numeric child indices in DocParserVisitor are
# positional offsets into these rules -- keep them in sync.
protools_text_export_grammar = Grammar(
    r"""
    document = header files_section? clips_section? plugin_listing?
               track_listing? markers_listing?
    header = "SESSION NAME:" fs string_value rs
             "SAMPLE RATE:" fs float_value rs
             "BIT DEPTH:" fs integer_value "-bit" rs
             "SESSION START TIMECODE:" fs string_value rs
             "TIMECODE FORMAT:" fs frame_rate " Drop"? " Frame" rs
             "# OF AUDIO TRACKS:" fs integer_value rs
             "# OF AUDIO CLIPS:" fs integer_value rs
             "# OF AUDIO FILES:" fs integer_value rs block_ending

    frame_rate = ("60" / "59.94" / "30" / "29.97" / "25" / "24" /
                  "23.976")
    files_section = files_header files_column_header file_record*
                    block_ending
    files_header = "F I L E S I N S E S S I O N" rs
    files_column_header = "Filename" isp fs "Location" rs
    file_record = string_value fs string_value rs

    clips_section = clips_header clips_column_header clip_record*
                    block_ending
    clips_header = "O N L I N E C L I P S I N S E S S I O N" rs
    clips_column_header = string_value fs string_value rs
    clip_record = string_value fs string_value
                  (fs "[" integer_value "]")? rs

    plugin_listing = plugin_header plugin_column_header plugin_record*
                     block_ending
    plugin_header = "P L U G - I N S L I S T I N G" rs
    plugin_column_header = "MANUFACTURER " fs
                           "PLUG-IN NAME " fs
                           "VERSION " fs
                           "FORMAT " fs
                           "STEMS " fs
                           "NUMBER OF INSTANCES" rs
    plugin_record = string_value fs string_value fs string_value fs
                    string_value fs string_value fs string_value rs

    track_listing = track_listing_header track_block*
    track_block = track_list_top ( track_clip_entry / block_ending )*

    track_listing_header = "T R A C K L I S T I N G" rs
    track_list_top = "TRACK NAME:" fs string_value rs
                     "COMMENTS:" fs string_value rs
                     "USER DELAY:" fs integer_value " Samples" rs
                     "STATE: " track_state_list rs
                     ("PLUG-INS: " ( fs string_value )* rs)?
                     "CHANNEL " fs "EVENT " fs
                     "CLIP NAME " fs
                     "START TIME " fs "END TIME " fs
                     "DURATION " fs
                     ("TIMESTAMP " fs)? "STATE" rs

    track_state_list = (track_state " ")*

    track_state = "Solo" / "Muted" / "Inactive" / "Hidden"

    track_clip_entry = integer_value isp fs
                       integer_value isp fs
                       string_value fs
                       string_value fs string_value fs string_value fs
                       (string_value fs)?
                       track_clip_state rs

    track_clip_state = ("Muted" / "Unmuted")

    markers_listing = markers_listing_header markers_column_header
                      marker_record*
    markers_listing_header = "M A R K E R S L I S T I N G" rs
    markers_column_header = "# " fs "LOCATION " fs
                            "TIME REFERENCE " fs
                            "UNITS " fs
                            "NAME " fs
                            "COMMENTS" rs

    marker_record = integer_value isp fs string_value fs integer_value isp fs
                    string_value fs string_value fs string_value rs

    fs = "\t"
    rs = "\n"
    block_ending = rs rs
    string_value = ~r"[^\t\n]*"
    integer_value = ~r"\d+"
    float_value = ~r"\d+(\.\d+)?"
    isp = ~r"[^\d\t\n]*"
    """)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_document(session_text: str) -> SessionDescriptor:
    """
    Parse a Pro Tools text export.

    :param session_text: Pro Tools session text export
    :return: the session descriptor
    """
    # Build the parse tree with the PEG grammar, then fold it into
    # descriptor objects with the NodeVisitor below.
    ast = protools_text_export_grammar.parse(session_text)
    return DocParserVisitor().visit(ast)
|
||||||
|
|
||||||
|
|
||||||
|
class DocParserVisitor(NodeVisitor):
    """
    Folds the parse tree produced by `protools_text_export_grammar`
    into a `SessionDescriptor`.

    NOTE: the numeric indices below are positional offsets into each
    rule's visited children (literals and separators included), so they
    are tightly coupled to the grammar definition above.
    """

    @staticmethod
    def visit_document(_, visited_children) -> SessionDescriptor:
        # Optional sections arrive as zero- or one-element lists;
        # next(iter(...), None) unwraps them to the section value or
        # None when the section was absent from the export.
        files = next(iter(visited_children[1]), None)
        clips = next(iter(visited_children[2]), None)
        plugins = next(iter(visited_children[3]), None)
        tracks = next(iter(visited_children[4]), None)
        markers = next(iter(visited_children[5]), None)

        return SessionDescriptor(header=visited_children[0],
                                 files=files,
                                 clips=clips,
                                 plugins=plugins,
                                 tracks=tracks,
                                 markers=markers)

    @staticmethod
    def visit_header(_, visited_children):
        # Child 20 corresponds to the optional " Drop" literal in the
        # header rule; it is non-empty exactly when the session uses
        # drop-frame timecode.
        tc_drop = False
        for _ in visited_children[20]:
            tc_drop = True

        return HeaderDescriptor(session_name=visited_children[2],
                                sample_rate=visited_children[6],
                                bit_depth=visited_children[10],
                                start_timecode=visited_children[15],
                                timecode_format=visited_children[19],
                                timecode_drop_frame=tc_drop,
                                count_audio_tracks=visited_children[25],
                                count_clips=visited_children[29],
                                count_files=visited_children[33])

    @staticmethod
    def visit_files_section(_, visited_children):
        # For each file_record: child[0] is the filename column and
        # child[2] the location column (child[1] is the tab separator).
        return list(map(
            lambda child: FileDescriptor(filename=child[0], path=child[2]),
            visited_children[2]))

    @staticmethod
    def visit_clips_section(_, visited_children):
        # NOTE(review): `channel` is read once from
        # visited_children[2][3] -- i.e. from the *record list*, not
        # from each record's optional "[n]" group -- and then applied to
        # every clip.  This looks suspect (and would raise IndexError
        # with fewer than four records); confirm against real exports.
        channel = next(iter(visited_children[2][3]), 1)

        return list(map(
            lambda child: ClipDescriptor(clip_name=child[0], file=child[2],
                                         channel=channel),
            visited_children[2]))

    @staticmethod
    def visit_plugin_listing(_, visited_children):
        # Even child indices within a plugin_record are the tab
        # separators; odd values carry the six columns.
        return list(map(lambda child:
                        PluginDescriptor(manufacturer=child[0],
                                         plugin_name=child[2],
                                         version=child[4],
                                         format=child[6],
                                         stems=child[8],
                                         count_instances=child[10]),
                        visited_children[2]))

    @staticmethod
    def visit_track_block(_, visited_children):
        track_header, track_clip_list = visited_children
        # The (track_clip_entry / block_ending)* alternation yields
        # one-element lists; block_ending visits return None (see
        # visit_block_ending), so only real clip entries are kept.
        clips = []
        for clip in track_clip_list:
            if clip[0] is not None:
                clips.append(clip[0])

        # Flatten the optional "PLUG-INS:" line's (fs string_value)*
        # repetitions into a flat list of plug-in names.
        plugins = []
        for plugin_opt in track_header[16]:
            for plugin in plugin_opt[1]:
                plugins.append(plugin[1])

        return TrackDescriptor(
            name=track_header[2],
            comments=track_header[6],
            user_delay_samples=track_header[10],
            state=track_header[14],
            plugins=plugins,
            clips=clips
        )

    @staticmethod
    def visit_frame_rate(node, _):
        # Return the matched rate string (e.g. "23.976") verbatim.
        return node.text

    @staticmethod
    def visit_track_listing(_, visited_children):
        # Child 0 is the section header; child 1 the track blocks.
        return visited_children[1]

    @staticmethod
    def visit_track_clip_entry(_, visited_children):
        # The TIMESTAMP column is optional; when present it arrives as
        # a nested list at index 14.
        timestamp = None
        if isinstance(visited_children[14], list):
            timestamp = visited_children[14][0][0]

        return TrackClipDescriptor(channel=visited_children[0],
                                   event=visited_children[3],
                                   clip_name=visited_children[6],
                                   start_time=visited_children[8],
                                   finish_time=visited_children[10],
                                   duration=visited_children[12],
                                   timestamp=timestamp,
                                   state=visited_children[15])

    @staticmethod
    def visit_track_state_list(_, visited_children):
        # Collect the literal text of each matched track_state token.
        states = []
        for next_state in visited_children:
            states.append(next_state[0][0].text)
        return states

    @staticmethod
    def visit_track_clip_state(node, _):
        # "Muted" or "Unmuted", verbatim.
        return node.text

    @staticmethod
    def visit_markers_listing(_, visited_children):
        # Child 2 is the list of marker_record results.
        markers = []

        for marker in visited_children[2]:
            markers.append(marker)

        return markers

    @staticmethod
    def visit_marker_record(_, visited_children):
        return MarkerDescriptor(number=visited_children[0],
                                location=visited_children[3],
                                time_reference=visited_children[5],
                                units=visited_children[8],
                                name=visited_children[10],
                                comments=visited_children[12])

    @staticmethod
    def visit_formatted_clip_name(_, visited_children):
        # NOTE(review): no `formatted_clip_name` rule exists in the
        # grammar above; this visitor appears to be dead code.
        return visited_children[1].text

    @staticmethod
    def visit_string_value(node, _):
        # Column values are space-padded in the export; trim them.
        return node.text.strip(" ")

    @staticmethod
    def visit_integer_value(node, _):
        return int(node.text)

    # def visit_timecode_value(self, node, visited_children):
    #     return node.text.strip(" ")

    @staticmethod
    def visit_float_value(node, _):
        return float(node.text)

    def visit_block_ending(self, node, visited_children):
        # Returns None deliberately, so callers (visit_track_block) can
        # distinguish blank-line matches from real records.
        pass

    def generic_visit(self, node, visited_children):
        """ The generic visit method. """
        return visited_children or node
|
||||||
222
ptulsconv/docparser/tag_compiler.py
Normal file
222
ptulsconv/docparser/tag_compiler.py
Normal file
@@ -0,0 +1,222 @@
|
|||||||
|
from collections import namedtuple
|
||||||
|
from fractions import Fraction
|
||||||
|
from typing import Iterator, Tuple, Callable, Generator, Dict, List
|
||||||
|
|
||||||
|
import ptulsconv.docparser.doc_entity as doc_entity
|
||||||
|
from .tagged_string_parser_visitor import parse_tags, TagPreModes
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Event:
    """
    A fully-resolved clip event: the de-tagged clip/track/session
    content strings, the merged tag dictionary, and the clip's time
    span in seconds.
    """
    clip_name: str
    track_name: str
    session_name: str
    tags: Dict[str, str]
    start: Fraction
    finish: Fraction
|
||||||
|
|
||||||
|
|
||||||
|
class TagCompiler:
    """
    Uses a `SessionDescriptor` as a data source to produce `Intermediate`
    items and, ultimately, fully-tagged `Event`s.
    """

    # One parsed clip occurrence: de-tagged track/clip content, the tag
    # dicts parsed from the track name, track comments and clip name,
    # the clip's tag mode, and its start/finish times in seconds.
    Intermediate = namedtuple('Intermediate',
                              'track_content track_tags track_comment_tags '
                              'clip_content clip_tags clip_tag_mode start '
                              'finish')

    session: doc_entity.SessionDescriptor

    def compile_all_time_spans(self) -> List[Tuple[str, str, Fraction,
                                                   Fraction]]:
        """
        Collect every (tag, value) pair declared by a TIMESPAN ('@')
        clip, with the clip's time range.

        :returns: A `List` of (key: str, value: str, start: Fraction,
            finish: Fraction)
        """
        ret_list = list()
        for element in self.parse_data():
            if element.clip_tag_mode == TagPreModes.TIMESPAN:
                for k in element.clip_tags.keys():
                    ret_list.append((k, element.clip_tags[k], element.start,
                                     element.finish))

        return ret_list

    def compile_tag_list(self) -> Dict[str, List[str]]:
        """
        Collect every tag key used anywhere in the session (clips,
        tracks, track comments, session name and markers) mapped to the
        collection of values it takes.  (Note: values are accumulated
        in `set`s, the annotation notwithstanding.)
        """
        tags_dict = dict()

        def update_tags_dict(other_dict: dict):
            # Fold other_dict into tags_dict, accumulating values.
            for k in other_dict.keys():
                if k not in tags_dict.keys():
                    tags_dict[k] = set()
                tags_dict[k].add(other_dict[k])

        for parsed in self.parse_data():
            update_tags_dict(parsed.clip_tags)
            update_tags_dict(parsed.track_tags)
            update_tags_dict(parsed.track_comment_tags)

        session_tags = parse_tags(self.session.header.session_name).tag_dict
        update_tags_dict(session_tags)

        for m in self.session.markers:
            marker_tags = parse_tags(m.name).tag_dict
            marker_comment_tags = parse_tags(m.comments).tag_dict
            update_tags_dict(marker_tags)
            update_tags_dict(marker_comment_tags)

        return tags_dict

    def compile_events(self) -> Iterator[Event]:
        """
        Run the full pipeline: parse clips, drop directives, apply '&'
        appends, collect '@' time spans, then merge every tag source
        into finished `Event`s.
        """
        step0 = self.parse_data()
        step1 = self.filter_out_directives(step0)
        step2 = self.apply_appends(step1)
        step3 = self.collect_time_spans(step2)
        step4 = self.apply_tags(step3)
        for datum in step4:
            yield Event(clip_name=datum[0], track_name=datum[1],
                        session_name=datum[2], tags=datum[3], start=datum[4],
                        finish=datum[5])

    def _marker_tags(self, at):
        """
        Merge the tags of every marker at or before time `at`; later
        markers override earlier ones, and a marker's name tags
        override its comment tags.
        """
        retval = dict()

        applicable = [(m, t) for (m, t) in
                      self.session.markers_timed() if t <= at]

        for marker, _ in sorted(applicable, key=lambda x: x[1]):
            retval.update(parse_tags(marker.comments or "").tag_dict)
            retval.update(parse_tags(marker.name or "").tag_dict)

        return retval

    def filter_out_directives(self,
                              clips: Iterator[Intermediate]) \
            -> Iterator[Intermediate]:
        """Yield every clip whose tag mode is not DIRECTIVE ('!')."""
        for clip in clips:
            # BUG FIX: this previously compared against the *string*
            # 'Directive'.  clip_tag_mode is a TagPreModes enum member
            # (see parse_data / parse_tags), which never compares equal
            # to a plain string, so directives were never filtered out.
            if clip.clip_tag_mode == TagPreModes.DIRECTIVE:
                continue
            else:
                yield clip

    @staticmethod
    def _coalesce_tags(clip_tags: dict, track_tags: dict,
                       track_comment_tags: dict,
                       timespan_tags: dict,
                       marker_tags: dict, session_tags: dict):
        """Merge all tag sources, most specific last: session < marker
        < timespan < track comments < track < clip."""
        effective_tags = dict()
        effective_tags.update(session_tags)
        effective_tags.update(marker_tags)
        effective_tags.update(timespan_tags)
        effective_tags.update(track_comment_tags)
        effective_tags.update(track_tags)
        effective_tags.update(clip_tags)
        return effective_tags

    def parse_data(self) -> Iterator[Intermediate]:
        """Parse every un-muted clip on every track into an
        `Intermediate`."""
        for track, clip, start, finish, _ in self.session.track_clips_timed():
            if clip.state == 'Muted':
                continue

            track_parsed = parse_tags(track.name)
            track_comments_parsed = parse_tags(track.comments)
            clip_parsed = parse_tags(clip.clip_name)

            yield TagCompiler.Intermediate(
                track_content=track_parsed.content,
                track_tags=track_parsed.tag_dict,
                track_comment_tags=track_comments_parsed.tag_dict,
                clip_content=clip_parsed.content,
                clip_tags=clip_parsed.tag_dict,
                clip_tag_mode=clip_parsed.mode,
                start=start, finish=finish)

    @staticmethod
    def apply_appends(parsed: Iterator[Intermediate]) -> \
            Iterator[Intermediate]:
        """Merge clips marked APPEND ('&') into the preceding clip,
        joining content with a space and letting the appended clip's
        tags override."""

        def should_append(a, b):
            # Only append a '&' clip that starts at or after the end of
            # the clip it would extend.
            return b.clip_tag_mode == TagPreModes.APPEND and \
                   b.start >= a.finish

        def do_append(a, b):
            merged_tags = dict(a.clip_tags)
            merged_tags.update(b.clip_tags)
            return TagCompiler.Intermediate(
                track_content=a.track_content,
                track_tags=a.track_tags,
                track_comment_tags=a.track_comment_tags,
                clip_content=a.clip_content + ' ' + b.clip_content,
                clip_tags=merged_tags, clip_tag_mode=a.clip_tag_mode,
                start=a.start, finish=b.finish)

        yield from apply_appends(parsed, should_append, do_append)

    @staticmethod
    def collect_time_spans(parsed: Iterator[Intermediate]) -> \
            Iterator[Tuple[Intermediate, Tuple[dict, Fraction, Fraction]]]:
        """Accumulate TIMESPAN ('@') clips as (tags, start, finish)
        triples; yield every other clip with a snapshot of the spans
        seen so far."""

        time_spans = list()

        for item in parsed:
            if item.clip_tag_mode == TagPreModes.TIMESPAN:
                time_spans.append((item.clip_tags, item.start, item.finish))
            else:
                # Yield a copy so later spans don't mutate snapshots
                # already handed out.
                yield item, list(time_spans)

    @staticmethod
    def _time_span_tags(at_time: Fraction, applicable_spans) -> dict:
        """Merge the tags of every span covering `at_time`; earlier
        spans override later ones (reversed order)."""
        retval = dict()
        for tags in reversed([a[0] for a in applicable_spans
                              if a[1] <= at_time <= a[2]]):
            retval.update(tags)

        return retval

    def apply_tags(self, parsed_with_time_spans) ->\
            Iterator[Tuple[str, str, str, dict, Fraction, Fraction]]:
        """Resolve the final tag dict for each event from every tag
        source, yielding (clip content, track content, session content,
        tags, start, finish)."""

        session_parsed = parse_tags(self.session.header.session_name)

        for event, time_spans in parsed_with_time_spans:
            event: 'TagCompiler.Intermediate'
            marker_tags = self._marker_tags(event.start)
            time_span_tags = self._time_span_tags(event.start, time_spans)
            tags = self._coalesce_tags(
                clip_tags=event.clip_tags,
                track_tags=event.track_tags,
                track_comment_tags=event.track_comment_tags,
                timespan_tags=time_span_tags,
                marker_tags=marker_tags,
                session_tags=session_parsed.tag_dict)

            yield (event.clip_content, event.track_content,
                   session_parsed.content, tags, event.start, event.finish)
|
||||||
|
|
||||||
|
|
||||||
|
def apply_appends(source: Iterator,
                  should_append: Callable,
                  do_append: Callable) -> Generator:
    """
    Merge runs of consecutive elements of `source`.

    :param source: an iterator of elements to (possibly) merge
    :param should_append: called with two consecutive elements (a, b);
        return True if b should be appended to a
    :param do_append: called with two elements (a, b); your function
        should return the merged element, which then becomes the
        candidate for further appends
    :returns: A Generator
    """
    # BUG FIX: a bare next() on an empty source raised StopIteration
    # inside this generator, which PEP 479 converts to a RuntimeError.
    # An empty source now simply yields nothing.
    _sentinel = object()
    pending = next(source, _sentinel)
    if pending is _sentinel:
        return

    for element in source:
        if should_append(pending, element):
            pending = do_append(pending, element)
        else:
            yield pending
            pending = element

    yield pending
|
||||||
84
ptulsconv/docparser/tag_mapping.py
Normal file
84
ptulsconv/docparser/tag_mapping.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
import sys
|
||||||
|
from enum import Enum
|
||||||
|
from typing import Optional, Callable, Any, List
|
||||||
|
|
||||||
|
|
||||||
|
class TagMapping:
    """
    A rule that moves one tag's value (or a fallback content string)
    onto an attribute of a target object, optionally transforming it
    with a formatter.
    """

    class ContentSource(Enum):
        """Fallback content sources used when the tag is absent."""
        # BUG FIX: these members previously carried trailing commas,
        # making each value a 1-tuple like (1,).  Members are only ever
        # compared by identity/equality here, so plain ints are safe.
        Session = 1
        Track = 2
        Clip = 3

    source: str          # tag key to look up
    target: str          # attribute name set on the destination object
    alternate_source: Optional[ContentSource]
    formatter: Callable[[str], Any]

    @staticmethod
    def print_rules(for_type: object, output=sys.stdout):
        """
        Print a table of `for_type`'s tag_mapping rules.

        :param for_type: a class with a `tag_mapping` list and
            annotated target attributes (e.g. ADRLine)
        :param output: a writable text stream
        """
        format_str = "%-20s | %-20s | %-25s"
        hr = "%s+%s+%s" % ("-" * 21, "-" * 23, "-" * 26)
        # BUG FIX: the title and both rules were printed to sys.stdout
        # even when a different `output` stream was supplied.
        print("Tag mapping for %s" % for_type.__name__, file=output)
        print(hr, file=output)
        print(format_str % ("Tag Source", "Target", "Type"),
              file=output)
        print(hr, file=output)
        for rule in for_type.tag_mapping:
            t = for_type.__annotations__[rule.target]
            print(format_str % (rule.source, rule.target, t),
                  file=output)
            # Show the fallback source, when one is configured.
            if rule.alternate_source is TagMapping.ContentSource.Session:
                print(format_str % (" - (Session Name)", rule.target, t),
                      file=output)
            elif rule.alternate_source is TagMapping.ContentSource.Track:
                print(format_str % (" - (Track Name)", rule.target, t),
                      file=output)
            elif rule.alternate_source is TagMapping.ContentSource.Clip:
                print(format_str % (" - (Clip Name)", rule.target, t),
                      file=output)

    @staticmethod
    def apply_rules(rules: List['TagMapping'],
                    tags: dict,
                    clip_content: str,
                    track_content: str,
                    session_content: str,
                    to: object):
        """
        Apply each rule in `rules` to `to`, at most one per target:
        once a rule has set a target attribute, later rules for the
        same target are skipped.
        """
        done = set()
        for rule in rules:
            if rule.target in done:
                continue
            if rule.apply(tags, clip_content, track_content, session_content,
                          to):
                # BUG FIX: was `done.update(rule.target)`, which added
                # each *character* of the target name to the set and so
                # wrongly suppressed any later rule whose target's
                # letters were already covered.
                done.add(rule.target)

    def __init__(self, source: str,
                 target: str,
                 alt: Optional[ContentSource] = None,
                 formatter=None):
        """
        :param source: tag key to read
        :param target: attribute to set on the destination object
        :param alt: optional fallback content source
        :param formatter: optional converter for the raw string value
            (defaults to the identity function)
        """
        self.source = source
        self.target = target
        self.alternate_source = alt
        self.formatter = formatter or (lambda x: x)

    def apply(self, tags: dict,
              clip_content: str,
              track_content: str,
              session_content: str, to: object) -> bool:
        """
        Set `self.target` on `to` from `tags[self.source]`, or from the
        configured alternate content string when the tag is absent.

        :returns: True if a value was set, False otherwise.
        """
        new_value = None

        if self.source in tags.keys():
            new_value = tags[self.source]
        elif self.alternate_source == TagMapping.ContentSource.Session:
            new_value = session_content
        elif self.alternate_source == TagMapping.ContentSource.Track:
            new_value = track_content
        elif self.alternate_source == TagMapping.ContentSource.Clip:
            new_value = clip_content

        if new_value is not None:
            setattr(to, self.target, self.formatter(new_value))
            return True
        else:
            return False
|
||||||
101
ptulsconv/docparser/tagged_string_parser_visitor.py
Normal file
101
ptulsconv/docparser/tagged_string_parser_visitor.py
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
from parsimonious import NodeVisitor, Grammar
|
||||||
|
from typing import Dict
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
|
||||||
|
class TagPreModes(Enum):
    """
    The handling mode selected by a tagged string's leading modifier
    character (see TagListVisitor.visit_modifier): no modifier ->
    NORMAL, '&' -> APPEND, '@' -> TIMESPAN, '!' -> DIRECTIVE.
    """
    NORMAL = 'Normal'
    APPEND = 'Append'
    TIMESPAN = 'Timespan'
    DIRECTIVE = 'Directive'
|
||||||
|
|
||||||
|
|
||||||
|
# PEG grammar for tagged strings (track/clip/marker names): an optional
# leading modifier ('@', '&' or '!'), free text content, then a list of
# tags in three forms -- "[KEY]" (value is the key itself),
# "$KEY=word" (single-word value) and "{KEY=value}" (free-text value).
tag_grammar = Grammar(
    r"""
    document = modifier? line? word_sep? tag_list?
    line = word (word_sep word)*
    tag_list = tag*
    tag = key_tag / short_tag / full_text_tag / tag_junk
    key_tag = "[" key "]" word_sep?
    short_tag = "$" key "=" word word_sep?
    full_text_tag = "{" key "=" value "}" word_sep?
    key = ~r"[A-Za-z][A-Za-z0-9_]*"
    value = ~r"[^}]+"
    tag_junk = word word_sep?
    word = ~r"[^ \[\{\$][^ ]*"
    word_sep = ~r" +"
    modifier = ("@" / "&" /"!") word_sep?
    """
)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_tags(prompt: str) -> "TaggedStringResult":
    """
    Parse a tagged string (e.g. a clip or track name) into its content,
    tag dictionary and pre-mode.

    :param prompt: the string to parse
    :return: the parsed result
    """
    ast = tag_grammar.parse(prompt)
    return TagListVisitor().visit(ast)
|
||||||
|
|
||||||
|
|
||||||
|
class TaggedStringResult:
    """
    The product of `parse_tags`: the de-tagged content string, the
    parsed tag dictionary, and the mode selected by the string's
    leading modifier.
    """
    content: str
    tag_dict: Dict[str, str]
    mode: TagPreModes

    def __init__(self, content, tag_dict, mode):
        """Store the parsed components verbatim."""
        self.mode = mode
        self.tag_dict = tag_dict
        self.content = content
|
||||||
|
|
||||||
|
|
||||||
|
class TagListVisitor(NodeVisitor):
    """
    Folds a `tag_grammar` parse tree into a `TaggedStringResult`.

    NOTE: the numeric child indices below are positional offsets into
    each rule's visited children and are coupled to the grammar.
    """

    @staticmethod
    def visit_document(_, visited_children) -> TaggedStringResult:
        # The optional components arrive as zero- or one-element lists;
        # next(iter(...), default) unwraps them.  When a modifier is
        # present, visit_modifier already produced a TagPreModes member
        # (TagPreModes(member) is the member itself); otherwise the
        # 'Normal' default maps to TagPreModes.NORMAL.
        modifier_opt, line_opt, _, tag_list_opt = visited_children

        return TaggedStringResult(content=next(iter(line_opt), None),
                                  tag_dict=next(iter(tag_list_opt), dict()),
                                  mode=TagPreModes(
                                      next(iter(modifier_opt), 'Normal'))
                                  )

    @staticmethod
    def visit_line(node, _):
        # The free-text content, with surrounding spaces trimmed.
        return str.strip(node.text, " ")

    @staticmethod
    def visit_modifier(node, _):
        # Map the leading modifier character to its mode.
        if node.text.startswith('@'):
            return TagPreModes.TIMESPAN
        elif node.text.startswith('&'):
            return TagPreModes.APPEND
        elif node.text.startswith('!'):
            return TagPreModes.DIRECTIVE
        else:
            return TagPreModes.NORMAL

    @staticmethod
    def visit_tag_list(_, visited_children):
        # Each child is a one-element list holding either a (key, value)
        # pair or None (tag_junk); fold the pairs into a dict.
        retdict = dict()
        for child in visited_children:
            if child[0] is not None:
                k, v = child[0]
                retdict[k] = v
        return retdict

    @staticmethod
    def visit_key_tag(_, children):
        # A bare "[KEY]" tag maps the key to itself.
        return children[1].text, children[1].text

    @staticmethod
    def visit_short_tag(_, children):
        # "$KEY=word": children[1] is the key, children[3] the word.
        return children[1].text, children[3].text

    @staticmethod
    def visit_full_text_tag(_, children):
        # "{KEY=value}": children[1] is the key, children[3] the value.
        return children[1].text, children[3].text

    @staticmethod
    def visit_tag_junk(_node, _visited_children):
        # Unrecognized words among the tags are discarded.
        return None

    def generic_visit(self, node, visited_children) -> object:
        return visited_children or node
|
||||||
30
ptulsconv/footage.py
Normal file
30
ptulsconv/footage.py
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
"""
|
||||||
|
Methods for converting string reprentations of film footage.
|
||||||
|
"""
|
||||||
|
from fractions import Fraction
|
||||||
|
import re
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
def footage_to_seconds(footage: str) -> Optional[Fraction]:
    """
    Convert a string representation of a footage (35mm, 24fps) into a
    :class:`Fraction` giving a number of seconds.

    :param footage: a footage string of the form "FEET+FRAMES" with an
        optional fractional frame part, e.g. "90+01" or "90+01.5".
    :returns: the duration in seconds, or ``None`` if `footage` does not
        parse.
    """
    m = re.match(r'(\d+)\+(\d+)(\.\d+)?', footage)
    if m is None:
        return None

    feet_str, frames_str, frac_str = m.groups()

    fps = 24            # 24 frames per second
    frames_per_foot = 16  # 35mm 4-perf film

    total_frames = int(feet_str) * frames_per_foot + int(frames_str)
    # FIX: the fractional-frames group was captured but discarded, which
    # silently truncated inputs like "90+01.5"; fold it into the total.
    if frac_str is not None:
        total_frames = total_frames + Fraction('0' + frac_str)

    return Fraction(total_frames) / fps
|
||||||
16
ptulsconv/movie_export.py
Normal file
16
ptulsconv/movie_export.py
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# import ffmpeg # ffmpeg-python
|
||||||
|
|
||||||
|
# TODO: Implement movie export
|
||||||
|
|
||||||
|
# def create_movie(event):
|
||||||
|
# start = event['Movie.Start_Offset_Seconds']
|
||||||
|
# duration = event['PT.Clip.Finish_Seconds'] -
|
||||||
|
# event['PT.Clip.Start_Seconds']
|
||||||
|
# input_movie = event['Movie.Filename']
|
||||||
|
# print("Will make movie starting at {}, dur {} from movie {}"
|
||||||
|
# .format(start, duration, input_movie))
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# def export_movies(events):
|
||||||
|
# for event in events:
|
||||||
|
# create_movie(event)
|
||||||
375
ptulsconv/pdf/__init__.py
Normal file
375
ptulsconv/pdf/__init__.py
Normal file
@@ -0,0 +1,375 @@
|
|||||||
|
import datetime

from typing import List, Optional

from reportlab.lib.units import inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.pdfmetrics import (getAscent, getDescent)
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
from reportlab.platypus.doctemplate import BaseDocTemplate, PageTemplate
from reportlab.platypus.frames import Frame
|
||||||
|
|
||||||
|
# TODO: A Generic report useful for spotting
|
||||||
|
# TODO: A report useful for M&E mixer's notes
|
||||||
|
# TODO: Use a default font that doesn't need to be installed
|
||||||
|
|
||||||
|
# This is from https://code.activestate.com/recipes/576832/ for
|
||||||
|
# generating page count messages
|
||||||
|
|
||||||
|
|
||||||
|
class ReportCanvas(canvas.Canvas):
    """A :class:`canvas.Canvas` that defers page emission so every page can
    be stamped with "Page X of Y" once the total page count is known.

    Technique from https://code.activestate.com/recipes/576832/: each call
    to :meth:`showPage` snapshots the canvas state instead of writing the
    page; :meth:`save` replays the snapshots with the footer drawn in.
    """

    def __init__(self, *args, **kwargs):
        canvas.Canvas.__init__(self, *args, **kwargs)
        self._saved_page_states = []  # one __dict__ snapshot per finished page
        self._report_date = datetime.datetime.now()  # stamped in the footer

    def showPage(self):
        # Capture the page's state for later replay instead of emitting it.
        self._saved_page_states.append(dict(self.__dict__))
        self._startPage()

    def save(self):
        """add page info to each page (page x of y)"""
        num_pages = len(self._saved_page_states)
        for state in self._saved_page_states:
            # Restore the saved page, stamp its footer, then emit it.
            self.__dict__.update(state)
            self.draw_page_number(num_pages)
            canvas.Canvas.showPage(self)
        canvas.Canvas.save(self)

    def draw_page_number(self, page_count):
        # Footer: page counter at left, report date at right, ruled above.
        self.saveState()
        self.setFont('Helvetica', 10)  # FIXME make this customizable
        self.drawString(0.5 * inch, 0.5 * inch,
                        "Page %d of %d" % (self._pageNumber, page_count))
        right_edge = self._pagesize[0] - 0.5 * inch
        self.drawRightString(right_edge, 0.5 * inch,
                             self._report_date.strftime("%m/%d/%Y %H:%M"))

        # Horizontal rule separating the footer from the page body.
        top_line = self.beginPath()
        top_line.moveTo(0.5 * inch, 0.75 * inch)
        top_line.lineTo(right_edge, 0.75 * inch)
        self.setLineWidth(0.5)
        self.drawPath(top_line)
        self.restoreState()
|
||||||
|
|
||||||
|
|
||||||
|
class ADRDocTemplate(BaseDocTemplate):
    # Document template whose only customization is defaulting the
    # canvasmaker to ReportCanvas, enabling "Page X of Y" footers.
    def build(self, flowables, filename=None, canvasmaker=ReportCanvas):
        BaseDocTemplate.build(self, flowables, filename, canvasmaker)
|
||||||
|
|
||||||
|
|
||||||
|
def make_doc_template(page_size, filename, document_title,
                      title: str,
                      supervisor: str,
                      document_header: str,
                      client: str,
                      document_subheader: str,
                      left_margin=0.5 * inch,
                      fonts: Optional[List[TTFont]] = None) -> ADRDocTemplate:
    """
    Create an :class:`ADRDocTemplate` with the standard report page layout:
    half-inch margins, a ruled header (project title and client at right,
    report name and subheader at left) and a footer line.

    :param page_size: (width, height) of the page in points.
    :param filename: path of the PDF file to create.
    :param document_title: title stored in the PDF metadata.
    :param title: project title drawn in the page header.
    :param supervisor: drawn centered in the footer; also the PDF author.
    :param document_header: report name drawn at the top left.
    :param client: client name drawn beneath the title.
    :param document_subheader: secondary line beneath the report name.
    :param left_margin: left margin in points.
    :param fonts: optional TTFonts to register before building.
    """
    # FIX: the original declared `fonts: List[TTFont] = []` — a mutable
    # default argument shared between calls; use None as the sentinel.
    if fonts is None:
        fonts = []

    right_margin = top_margin = bottom_margin = 0.5 * inch

    # Trim the margins off the page, then carve out footer and header bands.
    page_box = GRect(0., 0., page_size[0], page_size[1])
    _, page_box = page_box.split_x(left_margin, direction='l')
    _, page_box = page_box.split_x(right_margin, direction='r')
    _, page_box = page_box.split_y(bottom_margin, direction='u')
    _, page_box = page_box.split_y(top_margin, direction='d')

    footer_box, page_box = page_box.split_y(0.25 * inch, direction='u')
    header_box, page_box = page_box.split_y(0.75 * inch, direction='d')
    title_box, report_box = header_box.split_x(3.5 * inch, direction='r')

    # Drawn on every page by the PageTemplate.
    on_page_lambda = (lambda c, _:
                      draw_header_footer(c, report_box, title_box,
                                         footer_box, title=title,
                                         supervisor=supervisor,
                                         document_subheader=document_subheader,
                                         client=client,
                                         doc_title=document_header))

    frames = [Frame(page_box.min_x, page_box.min_y,
                    page_box.width, page_box.height)]

    page_template = PageTemplate(id="Main",
                                 frames=frames,
                                 onPage=on_page_lambda)

    for font in fonts:
        pdfmetrics.registerFont(font)

    doc = ADRDocTemplate(filename,
                         title=document_title,
                         author=supervisor,
                         pagesize=page_size,
                         leftMargin=left_margin, rightMargin=right_margin,
                         topMargin=top_margin, bottomMargin=bottom_margin)

    doc.addPageTemplates([page_template])

    return doc
|
||||||
|
|
||||||
|
|
||||||
|
def time_format(mins, zero_str="-"):
    """
    Format a duration in minutes as a short string.

    ``None`` and (by default) zero render as `zero_str`; under an hour
    renders as "Nm"; otherwise as "H:MM".
    """
    if mins is None:
        return zero_str
    if mins == 0. and zero_str is not None:
        return zero_str

    whole_minutes = round(mins)
    if mins < 60.:
        return "%im" % whole_minutes

    hours, minutes = divmod(whole_minutes, 60)
    return "%i:%02i" % (hours, minutes)
|
||||||
|
|
||||||
|
|
||||||
|
def draw_header_footer(a_canvas: ReportCanvas, left_box, right_box,
                       footer_box, title: str, supervisor: str,
                       document_subheader: str, client: str, doc_title="",
                       font_name='Helvetica'):
    """Draw the per-page header (report name at left, project title and
    client at right, separated by rules) and the footer (supervisor name).

    `left_box`, `right_box` and `footer_box` are :class:`GRect` regions
    computed by :func:`make_doc_template`.
    """

    # Right side: stack title over client. divide_y returns the carved
    # cells plus the remainder (used here as the title cell).
    (_supervisor_box, client_box,), title_box = \
        right_box.divide_y([16., 16., ])
    title_box.draw_text_cell(a_canvas, title, font_name, 18,
                             inset_y=2., inset_x=5.)
    client_box.draw_text_cell(a_canvas, client, font_name, 11,
                              inset_y=2., inset_x=5.)

    # Rules: one under the whole header, one between left and right halves.
    a_canvas.saveState()
    a_canvas.setLineWidth(0.5)
    tline = a_canvas.beginPath()
    tline.moveTo(left_box.min_x, right_box.min_y)
    tline.lineTo(right_box.max_x, right_box.min_y)
    a_canvas.drawPath(tline)

    tline2 = a_canvas.beginPath()
    tline2.moveTo(right_box.min_x, left_box.min_y)
    tline2.lineTo(right_box.min_x, left_box.max_y)
    a_canvas.drawPath(tline2)
    a_canvas.restoreState()

    # Left side: document title over the optional subheader, carved from
    # the top edge down (direction='d').
    (doc_title_cell, spotting_version_cell,), _ = \
        left_box.divide_y([18., 14], direction='d')

    doc_title_cell.draw_text_cell(a_canvas, doc_title, font_name, 14.,
                                  inset_y=2.)

    if document_subheader is not None:
        spotting_version_cell.draw_text_cell(a_canvas, document_subheader,
                                             font_name, 12., inset_y=2.)

    # Footer: supervisor name centered between the margins.
    if supervisor is not None:
        a_canvas.setFont(font_name, 11.)
        a_canvas.drawCentredString(footer_box.min_x + footer_box.width / 2.,
                                   footer_box.min_y, supervisor)
|
||||||
|
|
||||||
|
|
||||||
|
class GRect:
    """A rectangle helper for laying out PDF pages.

    Coordinates follow the ReportLab convention: origin at bottom-left,
    y increasing upward. Negative extents are normalized away at
    construction (the origin shifts so width/height are positive).
    """

    def __init__(self, x, y, width, height, debug_name=None):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.debug_name = debug_name  # label used by draw_debug()
        self.normalize()

    @property
    def min_x(self):
        return self.x

    @property
    def min_y(self):
        return self.y

    @property
    def max_x(self):
        return self.x + self.width

    @property
    def max_y(self):
        return self.y + self.height

    @property
    def center_x(self):
        return self.x + self.width / 2

    @property
    def center_y(self):
        return self.y + self.height / 2

    def normalize(self):
        # Fold negative extents into positive sizes with a shifted origin.
        if self.width < 0.:
            self.width = abs(self.width)
            self.x = self.x - self.width

        if self.height < 0.:
            self.height = abs(self.height)
            self.y = self.y - self.height

    def split_x(self, at, direction='l'):
        """Split off a vertical strip `at` points wide, measured from the
        left ('l') or right (any other value) edge.

        :returns: (strip, remainder); one of them is None when `at` falls
            outside (0, width).
        """
        if at >= self.width:
            return None, self
        elif at <= 0:
            return self, None
        else:
            if direction == 'l':
                return (GRect(self.min_x, self.min_y, at, self.height),
                        GRect(self.min_x + at, self.y,
                              self.width - at, self.height))
            else:
                return (GRect(self.max_x - at, self.y, at, self.height),
                        GRect(self.min_x, self.y,
                              self.width - at, self.height))

    def split_y(self, at, direction='u'):
        """Split off a horizontal band `at` points tall, measured from the
        bottom ('u') or top (any other value) edge.

        :returns: (band, remainder); one of them is None when `at` falls
            outside (0, height).
        """
        if at >= self.height:
            return None, self
        elif at <= 0:
            return self, None
        else:
            if direction == 'u':
                return (GRect(self.x, self.y, self.width, at),
                        GRect(self.x, self.y + at,
                              self.width, self.height - at))
            else:
                return (GRect(self.x, self.max_y - at, self.width, at),
                        GRect(self.x, self.y,
                              self.width, self.height - at))

    def inset_xy(self, dx, dy):
        """Return a new rect shrunk by `dx` on each side and `dy` on top
        and bottom."""
        return GRect(self.x + dx, self.y + dy,
                     self.width - dx * 2, self.height - dy * 2)

    def inset(self, d):
        """Return a new rect shrunk by `d` on all four sides."""
        return self.inset_xy(d, d)

    def __repr__(self):
        return "<GRect x=%f y=%f width=%f height=%f>" % \
               (self.x, self.y, self.width, self.height)

    def divide_x(self, x_list, direction='l'):
        """Repeatedly split_x by each width in `x_list`.

        :returns: (list of carved strips, remainder).
        """
        ret_list = list()

        rem = self
        for item in x_list:
            s, rem = rem.split_x(item, direction)
            ret_list.append(s)

        return ret_list, rem

    def divide_y(self, y_list, direction='u'):
        """Repeatedly split_y by each height in `y_list`.

        :returns: (list of carved bands, remainder).
        """
        ret_list = list()

        rem = self
        for item in y_list:
            s, rem = rem.split_y(item, direction)
            ret_list.append(s)

        return ret_list, rem

    def draw_debug(self, a_canvas):
        # Outline the rect and label it, for layout debugging.
        a_canvas.saveState()
        a_canvas.setFont("Courier", 8)
        a_canvas.rect(self.x, self.y, self.width, self.height)
        a_canvas.drawString(self.x, self.y, self.debug_name or self.__repr__())
        a_canvas.restoreState()

    def draw_border(self, a_canvas, edge):
        """Stroke one or more edges; `edge` is 'min_x'/'max_x'/'min_y'/
        'max_y' or a list of those (unknown names are ignored)."""

        def draw_border_impl(en):
            if en == 'min_x':
                coordinates = ((self.min_x, self.min_y),
                               (self.min_x, self.max_y))
            elif en == 'max_x':
                coordinates = ((self.max_x, self.min_y),
                               (self.max_x, self.max_y))
            elif en == 'min_y':
                coordinates = ((self.min_x, self.min_y),
                               (self.max_x, self.min_y))
            elif en == 'max_y':
                coordinates = ((self.min_x, self.max_y),
                               (self.max_x, self.max_y))
            else:
                return

            s = a_canvas.beginPath()
            s.moveTo(*coordinates[0])
            s.lineTo(*coordinates[1])
            a_canvas.drawPath(s)

        if type(edge) is str:
            edge = [edge]

        for e in edge:
            draw_border_impl(e)

    def draw_text_cell(self, a_canvas, text, font_name, font_size,
                       vertical_align='t', force_baseline=None, inset_x=0.,
                       inset_y=0., draw_baseline=False):
        """Draw a single line of text clipped to this rect.

        `vertical_align` is 'm' (middle), 't' (top) or anything else for
        bottom; `force_baseline` overrides the computed baseline.
        """
        if text is None:
            return

        a_canvas.saveState()

        inset_rect = self.inset_xy(inset_x, inset_y)

        if vertical_align == 'm':
            y = inset_rect.center_y - getAscent(font_name, font_size) / 2.
        elif vertical_align == 't':
            y = inset_rect.max_y - getAscent(font_name, font_size)
        else:
            # getDescent is negative, so subtracting it raises the baseline.
            y = inset_rect.min_y - getDescent(font_name, font_size)

        if force_baseline is not None:
            y = self.min_y + force_baseline

        # Clip to the (un-inset) cell so long text cannot overflow.
        cp = a_canvas.beginPath()
        cp.rect(self.min_x, self.min_y, self.width, self.height)
        a_canvas.clipPath(cp, stroke=0, fill=0)

        a_canvas.setFont(font_name, font_size)
        tx = a_canvas.beginText()
        tx.setTextOrigin(inset_rect.min_x, y)
        tx.textLine(text)
        a_canvas.drawText(tx)

        if draw_baseline:
            a_canvas.setDash([3.0, 1.0, 2.0, 1.0])
            a_canvas.setLineWidth(0.5)
            bl = a_canvas.beginPath()
            bl.moveTo(inset_rect.min_x, y - 1.)
            bl.lineTo(inset_rect.max_x, y - 1.)
            a_canvas.drawPath(bl)

        a_canvas.restoreState()

    def draw_flowable(self, a_canvas, flowable, inset_x=0.,
                      inset_y=0., draw_baselines=False):
        """Wrap and draw a Platypus flowable clipped to this rect,
        optionally ruling its baselines (for handwriting-in space)."""
        a_canvas.saveState()

        inset_rect = self.inset_xy(inset_x, inset_y)

        cp = a_canvas.beginPath()
        cp.rect(self.min_x, self.min_y, self.width, self.height)
        a_canvas.clipPath(cp, stroke=0, fill=0)

        w, h = flowable.wrap(inset_rect.width, inset_rect.height)

        flowable.drawOn(a_canvas, inset_rect.x, inset_rect.max_y - h)

        if draw_baselines:
            a_canvas.setDash([3.0, 1.0, 2.0, 1.0])
            a_canvas.setLineWidth(0.5)
            leading = flowable.style.leading

            y = inset_rect.max_y - flowable.style.fontSize - 1.
            # FIX: the loop bound compared the vertical cursor against
            # min_x; baselines must stop at the bottom edge (min_y).
            while y > inset_rect.min_y:
                bl = a_canvas.beginPath()
                bl.moveTo(inset_rect.min_x, y)
                bl.lineTo(inset_rect.max_x, y)
                a_canvas.drawPath(bl)
                y = y - leading

        a_canvas.restoreState()
|
||||||
58
ptulsconv/pdf/continuity.py
Normal file
58
ptulsconv/pdf/continuity.py
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
from fractions import Fraction
|
||||||
|
from typing import Tuple, List
|
||||||
|
|
||||||
|
from reportlab.lib.pagesizes import portrait, letter
|
||||||
|
from reportlab.lib.styles import getSampleStyleSheet
|
||||||
|
from reportlab.lib.units import inch
|
||||||
|
from reportlab.platypus import Paragraph, Table
|
||||||
|
|
||||||
|
from ptulsconv.broadcast_timecode import TimecodeFormat
|
||||||
|
from ptulsconv.pdf import make_doc_template
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: A Continuity
|
||||||
|
|
||||||
|
def table_for_scene(scene, tc_format, font_name='Helvetica'):
    """Build a one-row table for a single scene: a timecode column
    (start over finish) beside the scene title column.

    `scene` is a (number, title, start_seconds, finish_seconds) tuple.
    """
    para_style = getSampleStyleSheet()['Normal']
    para_style.fontName = font_name
    para_style.leftIndent = 0.
    para_style.leftPadding = 0.
    para_style.spaceAfter = 18.

    start_tc = tc_format.seconds_to_smpte(scene[2])
    finish_tc = tc_format.seconds_to_smpte(scene[3])
    tc_markup = "<em>%s</em><br />%s" % (start_tc, finish_tc)

    cells = [
        Paragraph(tc_markup, para_style),
        Paragraph(scene[1], para_style),
    ]

    table_style = [('VALIGN', (0, 0), (-1, -1), 'TOP'),
                   ('LEFTPADDING', (0, 0), (0, 0), 0.0),
                   ('BOTTOMPADDING', (0, 0), (-1, -1), 12.),
                   ('FONTNAME', (0, 0), (-1, -1), font_name)]

    return Table(data=[cells], style=table_style,
                 colWidths=[1.0 * inch, 6.5 * inch])
|
||||||
|
|
||||||
|
|
||||||
|
def output_report(scenes: List[Tuple[str, str, Fraction, Fraction]],
                  tc_display_format: TimecodeFormat,
                  title: str, client: str, supervisor, paper_size=letter):
    """Write "<title> Continuity.pdf", one table row per scene with its
    start/finish timecodes and heading."""
    doc = make_doc_template(page_size=portrait(paper_size),
                            filename="%s Continuity.pdf" % title,
                            document_title="Continuity",
                            title=title,
                            client=client,
                            document_subheader="",
                            supervisor=supervisor,
                            document_header="Continuity",
                            left_margin=0.5 * inch)

    story = [table_for_scene(scene, tc_display_format) for scene in scenes]
    doc.build(story)
|
||||||
304
ptulsconv/pdf/line_count.py
Normal file
304
ptulsconv/pdf/line_count.py
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
# from reportlab.pdfbase import pdfmetrics
|
||||||
|
# from reportlab.pdfbase.ttfonts import TTFont
|
||||||
|
|
||||||
|
from reportlab.lib.units import inch
|
||||||
|
from reportlab.lib.pagesizes import letter, portrait
|
||||||
|
from reportlab.lib import colors
|
||||||
|
|
||||||
|
from reportlab.platypus import Table, Paragraph, Spacer
|
||||||
|
from reportlab.lib.styles import getSampleStyleSheet
|
||||||
|
|
||||||
|
from .__init__ import time_format, make_doc_template
|
||||||
|
from ..docparser.adr_entity import ADRLine
|
||||||
|
|
||||||
|
|
||||||
|
def build_columns(lines: List[ADRLine], reel_list: Optional[List[str]],
                  show_priorities=False, include_omitted=False):
    """Build the line-count table's column descriptors.

    Each descriptor is a dict with: 'heading'; 'value_getter' /
    'value_getter2' (called with a list of ADR records for the first and
    second row of each character pair); 'style_getter' (column styles,
    given the column index); 'width'; and optionally 'summarize' (False
    suppresses the column from the summary rows — see populate_columns).

    :param lines: all ADR lines (used to infer reel numbers).
    :param reel_list: explicit reel ordering, or None to derive it from
        `lines`.
    :param show_priorities: add P1..P5 and >P5 count columns.
    :param include_omitted: add an "Omit" count column.
    """
    columns = list()
    reel_numbers = reel_list or sorted(
        set([x.reel for x in lines if x.reel is not None])
    )

    num_column_width = 15. / 32. * inch

    columns.append({
        'heading': '#',
        'value_getter': lambda recs: recs[0].character_id,
        'value_getter2': lambda recs: "",
        'style_getter': lambda col_index: [],
        'width': 0.375 * inch,
        'summarize': False
    })

    columns.append({
        'heading': 'Role',
        'value_getter': lambda recs: recs[0].character_name,
        'value_getter2': lambda recs: recs[0].actor_name or "",
        'style_getter': lambda col_index: [('LINEAFTER',
                                            (col_index, 0),
                                            (col_index, -1),
                                            1.0, colors.black)],
        'width': 1.75 * inch,
        'summarize': False
    })

    columns.append({
        'heading': 'TV',
        'value_getter': lambda recs: len([r for r in recs if r.tv]),
        'value_getter2': (lambda recs:
                          time_format(sum([r.time_budget_mins or 0.
                                           for r in recs if r.tv]))
                          ),
        'style_getter': (lambda col_index:
                         [('ALIGN', (col_index, 0), (col_index, -1),
                           'CENTER'),
                          ('LINEBEFORE', (col_index, 0), (col_index, -1),
                           1., colors.black),
                          ('LINEAFTER', (col_index, 0), (col_index, -1),
                           .5, colors.gray)]
                         ),
        'width': num_column_width
    })

    columns.append({
        'heading': 'Opt',
        'value_getter': lambda recs: len([r for r in recs if r.optional]),
        'value_getter2': (lambda recs:
                          time_format(sum([r.time_budget_mins or 0.
                                           for r in recs if r.optional]))
                          ),
        'style_getter': (lambda col_index:
                         [('ALIGN', (col_index, 0), (col_index, -1),
                           'CENTER'),
                          ('LINEAFTER', (col_index, 0), (col_index, -1),
                           .5, colors.gray)]
                         ),
        'width': num_column_width
    })

    columns.append({
        'heading': 'Eff',
        'value_getter': lambda recs: len([r for r in recs if r.effort]),
        'value_getter2': (lambda recs:
                          time_format(sum([r.time_budget_mins or 0.
                                           for r in recs if r.effort]))
                          ),
        'style_getter': (lambda col_index:
                         [('ALIGN', (col_index, 0), (col_index, -1),
                           'CENTER')]
                         ),
        'width': num_column_width
    })

    # Thin separator column between the tag counts and the reel counts.
    columns.append({
        'heading': '',
        'value_getter': lambda _: '',
        'value_getter2': lambda _: '',
        'style_getter': lambda col_index: [
            ('LINEBEFORE', (col_index, 0), (col_index, -1), 1., colors.black),
            ('LINEAFTER', (col_index, 0), (col_index, -1), 1., colors.black),
        ],
        'width': 2.
    })

    if len(reel_numbers) > 0:
        for n in reel_numbers:
            # n1=n binds the loop variable eagerly for each lambda.
            columns.append({
                'heading': n,
                'value_getter': (lambda recs, n1=n:
                                 len([r for r in recs if r.reel == n1])
                                 ),
                'value_getter2': (lambda recs, n1=n:
                                  time_format(sum([r.time_budget_mins or 0.
                                                   for r in recs
                                                   if r.reel == n1]))
                                  ),
                'style_getter': (lambda col_index:
                                 [('ALIGN', (col_index, 0), (col_index, -1),
                                   'CENTER'),
                                  ('LINEAFTER', (col_index, 0),
                                   (col_index, -1),
                                   .5, colors.gray)]
                                 ),
                'width': num_column_width
            })

    if show_priorities:
        for n in range(1, 6,):
            # FIX: the original lambdas closed over `n` late (no n1=n
            # default, unlike the reel loop above), so every priority
            # column counted priority 5 after the loop finished.
            columns.append({
                'heading': 'P%i' % n,
                'value_getter': (lambda recs, n1=n:
                                 len([r for r in recs
                                      if r.priority == n1])),
                'value_getter2': (lambda recs, n1=n:
                                  time_format(sum([r.time_budget_mins or 0.
                                                   for r in recs
                                                   if r.priority == n1]))
                                  ),
                'style_getter': lambda col_index: [],
                'width': num_column_width
            })

        columns.append({
            'heading': '>P5',
            'value_getter': lambda recs: len([r for r in recs
                                              if (r.priority or 5) > 5]),
            'value_getter2': (lambda recs:
                              time_format(sum([r.time_budget_mins or 0.
                                               for r in recs
                                               if (r.priority or 5) > 5]))
                              ),
            'style_getter': lambda col_index: [],
            'width': num_column_width
        })

    if include_omitted:
        columns.append({
            'heading': 'Omit',
            'value_getter': lambda recs: len([r for r in recs if r.omitted]),
            'value_getter2': (lambda recs:
                              time_format(sum([r.time_budget_mins or 0.
                                               for r in recs if r.omitted]))),
            'style_getter': (lambda col_index:
                             [('ALIGN', (col_index, 0), (col_index, -1),
                               'CENTER')]
                             ),
            'width': num_column_width
        })

    columns.append({
        'heading': 'Total',
        'value_getter': lambda recs: len([r for r in recs if not r.omitted]),
        'value_getter2': (lambda recs:
                          time_format(sum([r.time_budget_mins or 0.
                                           for r in recs if not r.omitted]))
                          ),
        'style_getter': (lambda col_index:
                         [('LINEBEFORE', (col_index, 0), (col_index, -1),
                           1.0, colors.black),
                          ('ALIGN', (col_index, 0), (col_index, -1),
                           'CENTER')]
                         ),
        'width': 0.5 * inch
    })

    return columns
|
||||||
|
|
||||||
|
|
||||||
|
def populate_columns(lines: List[ADRLine], columns, include_omitted,
                     _page_size):
    """Evaluate the column descriptors from build_columns over `lines`.

    Produces two table rows per character (counts, then red time totals),
    a heading row, and a final two-row summary over all lines.

    :returns: (data, styles, columns_widths) ready to pass to a
        reportlab Table.
    """
    data = list()
    styles = list()
    columns_widths = list()

    # Characters appear in lexicographic order of their ids.
    sorted_character_numbers: List[str] = sorted(
        set([x.character_id for x in lines]),
        key=lambda x: str(x))

    # construct column styles

    for i, c in enumerate(columns):
        styles.extend(c['style_getter'](i))
        columns_widths.append(c['width'])

    data.append(list(map(lambda x: x['heading'], columns)))

    if not include_omitted:
        lines = [x for x in lines if not x.omitted]

    for n in sorted_character_numbers:
        char_records = [x for x in lines if x.character_id == n]
        if len(char_records) > 0:
            row_data = list()
            row_data2 = list()

            # NOTE(review): row1_index/row2_index are loop-invariant here
            # (len(data) doesn't change until the appends below); they are
            # recomputed per column without effect.
            for col in columns:
                row1_index = len(data)
                row2_index = row1_index + 1
                row_data.append(col['value_getter'](list(char_records)))
                row_data2.append(col['value_getter2'](list(char_records)))

            # Second row of each pair: red text with a rule beneath.
            styles.extend([('TEXTCOLOR', (0, row2_index), (-1, row2_index),
                            colors.red),
                           ('LINEBELOW', (0, row2_index), (-1, row2_index),
                            0.5, colors.black)])

            data.append(row_data)
            data.append(row_data2)

    # Summary rows over every (possibly filtered) line; columns with
    # 'summarize': False contribute blanks.
    summary_row1 = list()
    summary_row2 = list()
    row1_index = len(data)

    for col in columns:
        if col.get('summarize', True):
            summary_row1.append(col['value_getter'](lines))
            summary_row2.append(col['value_getter2'](lines))
        else:
            summary_row1.append("")
            summary_row2.append("")

    # Heavy rule separating the body from the summary.
    styles.append(('LINEABOVE', (0, row1_index), (-1, row1_index), 2.0,
                   colors.black))

    data.append(summary_row1)
    data.append(summary_row2)

    return data, styles, columns_widths
|
||||||
|
|
||||||
|
|
||||||
|
# def build_header(column_widths):
|
||||||
|
# pass
|
||||||
|
|
||||||
|
|
||||||
|
def output_report(lines: List[ADRLine], reel_list: List[str],
                  include_omitted=False, page_size=portrait(letter),
                  font_name='Helvetica'):
    """Write "<title> Line Count.pdf": per-character count/time table with
    an optional note about excluded omitted lines."""
    columns = build_columns(lines, include_omitted=include_omitted,
                            reel_list=reel_list)
    data, table_style, col_widths = populate_columns(lines, columns,
                                                     include_omitted,
                                                     page_size)

    table_style.extend([
        ('FONTNAME', (0, 0), (-1, -1), font_name),
        ('FONTSIZE', (0, 0), (-1, -1), 9.),
        ('LINEBELOW', (0, 0), (-1, 0), 1.0, colors.black),
    ])

    title = "%s Line Count" % lines[0].title
    doc = make_doc_template(page_size=page_size, filename=title + '.pdf',
                            document_title=title, title=lines[0].title,
                            document_subheader=lines[0].spot,
                            client=lines[0].client,
                            supervisor=lines[0].supervisor,
                            document_header='Line Count')

    story = [Spacer(height=0.5 * inch, width=1.),
             Table(data=data, style=table_style, colWidths=col_widths)]

    # Footnote style for the omitted-lines notice.
    note_style = getSampleStyleSheet()['Normal']
    note_style.fontName = font_name
    note_style.fontSize = 12.
    note_style.spaceBefore = 16.
    note_style.spaceAfter = 16.

    omitted_count = len([x for x in lines if x.omitted])
    if not include_omitted and omitted_count > 0:
        story.append(Paragraph("* %i Omitted lines are excluded." %
                               omitted_count, note_style))

    doc.build(story)
|
||||||
6
ptulsconv/pdf/recordist_log.py
Normal file
6
ptulsconv/pdf/recordist_log.py
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
# TODO: Complete Recordist Log

def output_report(records):
    """Placeholder for the recordist log report; not yet implemented."""
    # order by start
    pass
|
||||||
157
ptulsconv/pdf/summary_log.py
Normal file
157
ptulsconv/pdf/summary_log.py
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from .__init__ import time_format, make_doc_template
|
||||||
|
from reportlab.lib.units import inch
|
||||||
|
from reportlab.lib.pagesizes import letter, portrait
|
||||||
|
|
||||||
|
from reportlab.platypus import Paragraph, Spacer, KeepTogether, Table
|
||||||
|
from reportlab.lib.styles import getSampleStyleSheet
|
||||||
|
|
||||||
|
from typing import List
|
||||||
|
from ptulsconv.docparser.adr_entity import ADRLine
|
||||||
|
from ptulsconv.broadcast_timecode import TimecodeFormat
|
||||||
|
|
||||||
|
|
||||||
|
def build_aux_data_field(line: ADRLine):
    """Build the <br />-joined markup for a line's auxiliary-data column:
    any of reason/note/requested-by/shot, followed by one colored badge
    for the first matching flag (EFF, TV, ADLIB or OPTIONAL)."""
    entries = list()
    for label, value in (("Reason", line.reason),
                         ("Note", line.note),
                         ("Requested by", line.requested_by),
                         ("Shot", line.shot)):
        if value is not None:
            entries.append(label + ": " + value)

    # Only the first matching flag produces a badge; EFF/TV/ADLIB carry a
    # trailing space, OPTIONAL does not (preserved from the original).
    if line.effort:
        badge = ('red', 'EFF', ' ')
    elif line.tv:
        badge = ('blue', 'TV', ' ')
    elif line.adlib:
        badge = ('purple', 'ADLIB', ' ')
    elif line.optional:
        badge = ('green', 'OPTIONAL', '')
    else:
        badge = None

    if badge is None:
        tag_field = ""
    else:
        bg_color, label, trailer = badge
        tag_field = ("<font backColor=%s textColor=white fontSize=11>"
                     "%s</font>%s" % (bg_color, label, trailer))

    entries.append(tag_field)

    return "<br />".join(entries)
|
||||||
|
|
||||||
|
|
||||||
|
def build_story(lines: List[ADRLine], tc_rate: TimecodeFormat,
                font_name='Helvetica'):
    """
    Assemble the platypus flowables for the ADR summary report.

    Lines are emitted in the order given; whenever the scene name changes
    a spacer and an underlined scene heading are kept together with the
    first line table of that scene.

    :param lines: ADR lines to lay out.
    :param tc_rate: timecode format used to render start/finish times.
    :param font_name: font family applied to every paragraph style.
    :returns: list of flowables suitable for ``DocTemplate.build``.
    """
    scene_style = getSampleStyleSheet()['Normal']
    scene_style.fontName = font_name
    scene_style.leftIndent = 0.
    scene_style.leftPadding = 0.
    scene_style.spaceAfter = 18.

    body_style = getSampleStyleSheet()['Normal']
    body_style.fontName = font_name

    story = list()
    current_scene = None

    for line in lines:
        cell_style = [('VALIGN', (0, 0), (-1, -1), 'TOP'),
                      ('LEFTPADDING', (0, 0), (0, 0), 0.0),
                      ('BOTTOMPADDING', (0, 0), (-1, -1), 24.)]

        number_cell = "%s<br /><font fontSize=7>%s</font>" \
                      % (line.cue_number, line.character_name)

        time_cell = time_format(line.time_budget_mins)
        if line.priority is not None:
            time_cell = time_cell + "<br />" + "P: " + line.priority

        aux_cell = build_aux_data_field(line)
        tc_cell = build_tc_data(line, tc_rate)

        row_data = [[Paragraph(number_cell, body_style),
                     Paragraph(tc_cell, body_style),
                     Paragraph(line.prompt, body_style),
                     Paragraph(time_cell, body_style),
                     Paragraph(aux_cell, body_style)]]

        line_table = Table(data=row_data,
                           colWidths=[inch * 0.75, inch, inch * 3.,
                                      0.5 * inch, inch * 2.],
                           style=cell_style)

        scene_name = line.scene or "[No Scene]"
        if scene_name != current_scene:
            current_scene = scene_name
            story.append(KeepTogether([
                Spacer(1., 0.25 * inch),
                Paragraph("<u>" + current_scene + "</u>", scene_style),
                line_table]))
        else:
            line_table.setStyle(cell_style)
            story.append(KeepTogether([line_table]))

    return story
|
||||||
|
|
||||||
|
|
||||||
|
def build_tc_data(line: "ADRLine", tc_format: "TimecodeFormat"):
    """
    Render the start/finish timecodes for a line, plus an optional third
    line with the reel and picture version.

    :param line: the ADR line whose times to render.
    :param tc_format: timecode format with a ``seconds_to_smpte`` method.
    :returns: reportlab para-markup string.
    """
    tc_data = "<br />".join([tc_format.seconds_to_smpte(line.start),
                             tc_format.seconds_to_smpte(line.finish)])

    extras = []
    if line.reel is not None:
        # Reel names already beginning with "R" are shown as-is.
        if line.reel.startswith('R'):
            extras.append("%s" % line.reel)
        else:
            extras.append("Reel %s" % line.reel)
    if line.version is not None:
        extras.append("(%s)" % line.version)

    if extras:
        tc_data = tc_data + "<br/>" + " ".join(extras)
    return tc_data
|
||||||
|
|
||||||
|
|
||||||
|
def generate_report(page_size, lines: List[ADRLine], tc_rate: TimecodeFormat,
                    character_number=None, include_omitted=True):
    """
    Build and write one summary ADR report PDF to the working directory.

    The output filename is the report title plus ".pdf".

    :param page_size: reportlab page size tuple.
    :param lines: every ADR line available.
    :param tc_rate: timecode format for rendering times.
    :param character_number: if given, restrict the report to that
        character and include the character name in the title.
    :param include_omitted: when False, omitted lines are dropped.
    """
    if character_number is not None:
        lines = [each for each in lines
                 if each.character_id == character_number]
        title = "%s ADR Report (%s)" % (lines[0].title,
                                        lines[0].character_name)
        document_header = "%s ADR Report" % lines[0].character_name
    else:
        title = "%s ADR Report" % lines[0].title
        document_header = 'ADR Report'

    if not include_omitted:
        lines = [each for each in lines if not each.omitted]

    lines = sorted(lines, key=lambda each: each.start)

    doc = make_doc_template(page_size=page_size,
                            filename=title + ".pdf",
                            document_title=title,
                            document_header=document_header,
                            title=lines[0].title,
                            supervisor=lines[0].supervisor,
                            client=lines[0].client,
                            document_subheader=lines[0].spot,
                            left_margin=0.75 * inch)
    doc.build(build_story(lines, tc_rate))
|
||||||
|
|
||||||
|
|
||||||
|
def output_report(lines: List[ADRLine], tc_display_format: TimecodeFormat,
                  page_size=portrait(letter), by_character=False):
    """
    Entry point for the summary report: one combined PDF, or one PDF per
    character when `by_character` is set.
    """
    if not by_character:
        generate_report(page_size, lines, tc_display_format)
        return

    for character in {line.character_id for line in lines}:
        generate_report(page_size, lines, tc_display_format, character)
|
||||||
293
ptulsconv/pdf/supervisor_1pg.py
Normal file
293
ptulsconv/pdf/supervisor_1pg.py
Normal file
@@ -0,0 +1,293 @@
|
|||||||
|
from reportlab.pdfgen.canvas import Canvas
|
||||||
|
|
||||||
|
# from reportlab.pdfbase import pdfmetrics
|
||||||
|
# from reportlab.pdfbase.ttfonts import TTFont
|
||||||
|
|
||||||
|
from reportlab.lib.units import inch
|
||||||
|
from reportlab.lib.pagesizes import letter
|
||||||
|
|
||||||
|
from reportlab.lib.styles import getSampleStyleSheet
|
||||||
|
from reportlab.platypus import Paragraph
|
||||||
|
|
||||||
|
from .__init__ import GRect
|
||||||
|
|
||||||
|
from ptulsconv.broadcast_timecode import TimecodeFormat
|
||||||
|
from ptulsconv.docparser.adr_entity import ADRLine
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
font_name = 'Helvetica'
|
||||||
|
|
||||||
|
|
||||||
|
def draw_header_block(canvas, rect, record: ADRLine):
    """Draw the large cue-number banner cell at the top of the page."""
    # Fixed Helvetica here (not the module-level font_name) at banner size.
    rect.draw_text_cell(canvas, record.cue_number, "Helvetica", 44,
                        vertical_align='m')
|
||||||
|
|
||||||
|
|
||||||
|
def draw_character_row(canvas, rect, record: ADRLine):
    """
    Draw the CHARACTER row — "id / name" plus " / actor" when known —
    with rules above and below.
    """
    label_cell, value_cell = rect.split_x(1.25 * inch)
    label_cell.draw_text_cell(canvas, "CHARACTER", font_name, 10,
                              force_baseline=9.)

    parts = ["%s" % record.character_id, "%s" % record.character_name]
    if record.actor_name is not None:
        parts.append(record.actor_name)
    value_cell.draw_text_cell(canvas, " / ".join(parts), font_name, 12,
                              force_baseline=9.)

    rect.draw_border(canvas, ['min_y', 'max_y'])
|
||||||
|
|
||||||
|
|
||||||
|
def draw_cue_number_block(canvas, rect, record: ADRLine):
    """
    Draw the CUE NUMBER cell and, underneath it, a row of status badges
    (TV / OPT / ADLIB / EFF / TBW / OMIT) for each set flag.
    """
    (label_cell, number_cell,), badge_cell = \
        rect.divide_y([0.20 * inch, 0.375 * inch], direction='d')
    label_cell.draw_text_cell(canvas, "CUE NUMBER", font_name, 10,
                              inset_y=5., vertical_align='t')
    number_cell.draw_text_cell(canvas, record.cue_number, font_name, 14,
                               inset_x=10., inset_y=2., draw_baseline=True)

    # Attribute name on the record -> printed badge text.
    badges = (('tv', 'TV'),
              ('optional', 'OPT'),
              ('adlib', 'ADLIB'),
              ('effort', 'EFF'),
              ('tbw', 'TBW'),
              ('omitted', 'OMIT'))
    tag_field = ""
    for attr, badge in badges:
        if getattr(record, attr):
            tag_field = tag_field + badge + " "

    badge_cell.draw_text_cell(canvas, tag_field, font_name, 10,
                              inset_x=10., inset_y=2., vertical_align='t')
    rect.draw_border(canvas, 'max_x')
|
||||||
|
|
||||||
|
|
||||||
|
def draw_timecode_block(canvas, rect, record: ADRLine,
                        tc_display_format: TimecodeFormat):
    """Draw the stacked IN and OUT timecode cells in the data row."""
    cells, _ = rect.divide_y(
        [0.20 * inch, 0.25 * inch, 0.20 * inch, 0.25 * inch], direction='d')
    in_label, in_cell, out_label, out_cell = cells

    for label_cell, value_cell, caption, seconds in (
            (in_label, in_cell, "IN", record.start),
            (out_label, out_cell, "OUT", record.finish)):
        label_cell.draw_text_cell(canvas, caption, font_name, 10,
                                  vertical_align='t', inset_y=5., inset_x=5.)
        value_cell.draw_text_cell(
            canvas, tc_display_format.seconds_to_smpte(seconds),
            font_name, 14, inset_x=10., inset_y=2., draw_baseline=True)

    rect.draw_border(canvas, 'max_x')
|
||||||
|
|
||||||
|
|
||||||
|
def draw_reason_block(canvas, rect, record: ADRLine):
    """Draw the "Reason:" line and the wrapped "Note:" paragraph."""
    reason_row, notes_row = rect.split_y(24., direction='d')
    reason_label, reason_value = reason_row.split_x(.75 * inch)
    notes_label, notes_value = notes_row.split_x(.75 * inch)

    reason_label.draw_text_cell(canvas, "Reason:", font_name, 12,
                                inset_x=5., inset_y=5., vertical_align='b')
    reason_value.draw_text_cell(canvas, record.reason or "", font_name, 12,
                                inset_x=5., inset_y=5., draw_baseline=True,
                                vertical_align='b')
    notes_label.draw_text_cell(canvas, "Note:", font_name, 12,
                               inset_x=5., inset_y=5., vertical_align='t')

    note_style = getSampleStyleSheet()['BodyText']
    note_style.fontName = font_name
    note_style.fontSize = 12
    note_style.leading = 14

    notes_value.draw_flowable(canvas,
                              Paragraph(record.note or "", note_style),
                              draw_baselines=True, inset_x=5., inset_y=5.)
|
||||||
|
|
||||||
|
|
||||||
|
def draw_prompt(canvas, rect, prompt=""):
    """Draw the PROMPT label and the prompt text as a wrapped paragraph."""
    label_cell, body_cell = rect.split_y(0.20 * inch, direction='d')

    label_cell.draw_text_cell(canvas, "PROMPT", font_name, 10,
                              vertical_align='t', inset_y=5., inset_x=0.)

    prompt_style = getSampleStyleSheet()['BodyText']
    prompt_style.fontName = font_name
    prompt_style.fontSize = 14
    prompt_style.leading = 24
    prompt_style.leftIndent = 1.5 * inch
    prompt_style.rightIndent = 1.5 * inch

    body_cell.draw_flowable(canvas, Paragraph(prompt, prompt_style),
                            draw_baselines=True)

    rect.draw_border(canvas, 'max_y')
|
||||||
|
|
||||||
|
|
||||||
|
def draw_notes(canvas, rect, note=""):
    """Draw the NOTES label and note body, ruled above and below."""
    label_cell, body_cell = rect.split_y(0.20 * inch, direction='d')

    label_cell.draw_text_cell(canvas, "NOTES", font_name, 10,
                              vertical_align='t', inset_y=5., inset_x=0.)

    note_style = getSampleStyleSheet()['BodyText']
    note_style.fontName = font_name
    note_style.fontSize = 14
    note_style.leading = 24

    body_cell.draw_flowable(canvas, Paragraph(note, note_style),
                            draw_baselines=True)

    rect.draw_border(canvas, ['max_y', 'min_y'])
|
||||||
|
|
||||||
|
|
||||||
|
def draw_take_grid(canvas, rect):
    """
    Draw the ruled take grid: 10 columns by 6 rows, clipped to `rect`.

    Every fifth column rule and every second row rule is drawn solid; the
    rest are dashed.
    """
    canvas.saveState()

    # Clip all rules to the grid rectangle.
    cp = canvas.beginPath()
    cp.rect(rect.min_x, rect.min_y, rect.width, rect.height)
    canvas.clipPath(cp, stroke=0, fill=0)

    canvas.setDash([3.0, 2.0])

    # 9 interior rules for 10 columns.
    for xi in range(1, 10):
        x = xi * (rect.width / 10)
        if xi % 5 == 0:
            canvas.setDash(1, 0)
        else:
            canvas.setDash([2, 5])

        ln = canvas.beginPath()
        ln.moveTo(rect.min_x + x, rect.min_y)
        ln.lineTo(rect.min_x + x, rect.max_y)
        canvas.drawPath(ln)

    # 5 interior rules for 6 rows.  The grid height is divided by 6, so
    # iterating to 10 (as the column loop does) would emit rules above the
    # rect that the clip path then discards — wasted path objects.
    for yi in range(1, 6):
        y = yi * (rect.height / 6)
        if yi % 2 == 0:
            canvas.setDash(1, 0)
        else:
            canvas.setDash([2, 5])

        ln = canvas.beginPath()
        ln.moveTo(rect.min_x, rect.min_y + y)
        ln.lineTo(rect.max_x, rect.min_y + y)
        canvas.drawPath(ln)

    rect.draw_border(canvas, 'max_x')

    canvas.restoreState()
|
||||||
|
|
||||||
|
|
||||||
|
def draw_aux_block(canvas, rect, recording_time_sec_this_line,
                   recording_time_sec):
    """
    Draw per-line timing statistics and the blank session-info fields
    (start, date, engineer, location) in the right-hand take column.
    """
    rect.draw_border(canvas, 'min_x')

    content = rect.inset_xy(10., 10.)
    rows, _ = content.divide_y([12., 12., 24., 24., 24., 24.],
                               direction='d')

    rows[0].draw_text_cell(canvas,
                           "Time for this line: %.1f mins" %
                           (recording_time_sec_this_line / 60.),
                           font_name, 9.)
    rows[1].draw_text_cell(canvas, "Running time: %03.1f mins" %
                           (recording_time_sec / 60.), font_name, 9.)

    fill_ins = ("Actual Start: ______________",
                "Record Date: ______________",
                "Engineer: ______________",
                "Location: ______________")
    for row, caption in zip(rows[2:], fill_ins):
        row.draw_text_cell(canvas, caption, font_name, 9.,
                           vertical_align='b')
|
||||||
|
|
||||||
|
|
||||||
|
def draw_footer(canvas, rect, record: ADRLine, report_date, line_no,
                total_lines):
    """Draw the page footer: date - [spotting name] - "Line i of n"."""
    rect.draw_border(canvas, 'max_y')

    segments = [report_date.strftime("%c")]
    if record.spot is not None:
        segments.append(record.spot)
    segments.append("Line %i of %i" % (line_no, total_lines))

    rect.draw_text_cell(canvas, " - ".join(segments), font_name=font_name,
                        font_size=10., inset_y=2.)
|
||||||
|
|
||||||
|
|
||||||
|
def create_report_for_character(records, report_date,
                                tc_display_format: TimecodeFormat):
    """
    Write a supervisor's log PDF — one page per ADR line — for a single
    character.

    The output file is named "<title>_<char id>_<char name>_Log.pdf".

    :param records: ADR lines, all belonging to the same character.
    :param report_date: datetime stamped into every page footer.
    :param tc_display_format: timecode format for the IN/OUT cells.
    """
    outfile = "%s_%s_%s_Log.pdf" % (records[0].title,
                                    records[0].character_id,
                                    records[0].character_name,)
    assert outfile is not None
    assert outfile[-4:] == '.pdf', "Output file must have 'pdf' extension!"

    # pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))

    # Carve the page into fixed frames once; the same geometry is reused
    # for every record's page.
    page: GRect = GRect(0, 0, letter[0], letter[1])
    page = page.inset(inch * 0.5)
    (header_row, char_row, data_row,
     prompt_row, notes_row, takes_row), footer = \
        page.divide_y([0.875 * inch, 0.375 * inch, inch,
                       3.0 * inch, 1.5 * inch, 3 * inch], direction='d')

    cue_header_block, title_header_block = header_row.split_x(4.0 * inch)
    (cue_number_block, timecode_block), reason_block = \
        data_row.divide_x([1.5 * inch, 1.5 * inch])
    take_grid_block, aux_block = takes_row.split_x(5.25 * inch)

    c = Canvas(outfile, pagesize=letter,)
    c.setTitle("%s %s (%s) Supervisor's Log" % (records[0].title,
                                                records[0].character_name,
                                                records[0].character_id))
    c.setAuthor(records[0].supervisor)

    running_time_sec = 0.0
    total_lines = len(records)

    for line_n, record in enumerate(records, start=1):
        record: ADRLine
        # Default six-minute budget when none was tagged on the line.
        line_time_sec: float = (record.time_budget_mins or 6.0) * 60.0
        running_time_sec = running_time_sec + line_time_sec

        draw_header_block(c, cue_header_block, record)
        # FIXME: Draw the title
        # TODO: Integrate this report into the common DocTemplate api

        # draw_title_box(c, title_header_block, record)
        draw_character_row(c, char_row, record)
        draw_cue_number_block(c, cue_number_block, record)
        draw_timecode_block(c, timecode_block, record,
                            tc_display_format=tc_display_format)
        draw_reason_block(c, reason_block, record)
        draw_prompt(c, prompt_row, prompt=record.prompt or "")
        draw_notes(c, notes_row, note="")
        draw_take_grid(c, take_grid_block)
        draw_aux_block(c, aux_block, line_time_sec, running_time_sec)

        draw_footer(c, footer, record, report_date, line_no=line_n,
                    total_lines=total_lines)

        c.showPage()

    c.save()
|
||||||
|
|
||||||
|
|
||||||
|
def output_report(lines, tc_display_format: TimecodeFormat):
    """Emit one supervisor's log PDF per character found in `lines`."""
    report_date = datetime.datetime.now()
    by_start = sorted(lines, key=lambda line: line.start)

    for character in {line.character_id for line in lines}:
        create_report_for_character(
            [line for line in by_start if line.character_id == character],
            report_date,
            tc_display_format=tc_display_format)
|
||||||
83
ptulsconv/pdf/talent_sides.py
Normal file
83
ptulsconv/pdf/talent_sides.py
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
from .__init__ import make_doc_template
|
||||||
|
from reportlab.lib.units import inch
|
||||||
|
from reportlab.lib.pagesizes import letter
|
||||||
|
|
||||||
|
from reportlab.platypus import Paragraph, Spacer, KeepTogether, Table,\
|
||||||
|
HRFlowable
|
||||||
|
from reportlab.lib.styles import getSampleStyleSheet
|
||||||
|
from reportlab.lib import colors
|
||||||
|
|
||||||
|
# from reportlab.pdfbase import pdfmetrics
|
||||||
|
# from reportlab.pdfbase.ttfonts import TTFont
|
||||||
|
|
||||||
|
from ..broadcast_timecode import TimecodeFormat
|
||||||
|
from ..docparser.adr_entity import ADRLine
|
||||||
|
|
||||||
|
|
||||||
|
def output_report(lines: List[ADRLine], tc_display_format: TimecodeFormat,
                  font_name="Helvetica"):
    """
    Write a talent-facing ADR script ("sides") PDF for each character.

    One file, "<title>_<char id>_<char name>_ADR Script.pdf", is produced
    per character; omitted lines are excluded.

    :param lines: every ADR line in the project.
    :param tc_display_format: timecode format for each cue's time range.
    :param font_name: font family for all text.
    """
    character_numbers = set([n.character_id for n in lines])
    # pdfmetrics.registerFont(TTFont('Futura', 'Futura.ttc'))

    for n in character_numbers:
        char_lines = [line for line in lines
                      if not line.omitted and line.character_id == n]

        # A character whose every line is omitted has no script to print;
        # without this guard char_lines[0] below raises IndexError.
        if not char_lines:
            continue

        character_name = char_lines[0].character_name

        char_lines = sorted(char_lines, key=lambda line: line.start)

        title = "%s (%s) %s ADR Script" % (char_lines[0].title,
                                           character_name, n)
        filename = "%s_%s_%s_ADR Script.pdf" % (char_lines[0].title,
                                                n, character_name)

        doc = make_doc_template(page_size=letter, filename=filename,
                                document_title=title,
                                title=char_lines[0].title,
                                document_subheader=char_lines[0].spot or "",
                                supervisor=char_lines[0].supervisor or "",
                                client=char_lines[0].client or "",
                                document_header=character_name or "")

        story = []

        prompt_style = getSampleStyleSheet()['Normal']
        prompt_style.fontName = font_name
        prompt_style.fontSize = 18.
        prompt_style.leading = 24.
        prompt_style.leftIndent = 1.5 * inch
        prompt_style.rightIndent = 1.5 * inch

        number_style = getSampleStyleSheet()['Normal']
        number_style.fontName = font_name
        number_style.fontSize = 14
        number_style.leading = 24
        number_style.leftIndent = 0.
        number_style.rightIndent = 0.

        for line in char_lines:
            start_tc = tc_display_format.seconds_to_smpte(line.start)
            finish_tc = tc_display_format.seconds_to_smpte(line.finish)
            data_block = [[Paragraph(line.cue_number, number_style),
                           Paragraph(start_tc + " - " + finish_tc,
                                     number_style)
                           ]]

            # RIGHTWARDS ARROW →
            # Unicode: U+2192, UTF-8: E2 86 92
            story.append(
                KeepTogether(
                    [HRFlowable(width='50%', color=colors.black),
                     Table(data=data_block,
                           colWidths=[1.5 * inch, 6. * inch],
                           style=[('LEFTPADDING', (0, 0), (-1, -1), 0.)]),
                     Paragraph(line.prompt, prompt_style),
                     Spacer(1., inch * 1.5)]
                )
            )

        doc.build(story)
|
||||||
@@ -1,73 +0,0 @@
|
|||||||
from parsimonious.grammar import Grammar
|
|
||||||
|
|
||||||
# PEG grammar (parsimonious) for Pro Tools' "Export Session Info as Text"
# output: a header block followed by optional FILES, CLIPS, PLUG-INS,
# TRACK and MARKERS sections, tab-separated (fs) and newline-terminated
# (rs), with blank-line block endings.
# NOTE(review): intra-literal spacing in the banner strings (e.g.
# "F I L E S I N S E S S I O N") may have been collapsed in transit —
# verify against an actual Pro Tools export before relying on it.
protools_text_export_grammar = Grammar(
    r"""
    document = header files_section? clips_section? plugin_listing? track_listing? markers_listing?
    header = "SESSION NAME:" fs string_value rs
             "SAMPLE RATE:" fs float_value rs
             "BIT DEPTH:" fs integer_value "-bit" rs
             "SESSION START TIMECODE:" fs string_value rs
             "TIMECODE FORMAT:" fs float_value " Drop"? " Frame" rs
             "# OF AUDIO TRACKS:" fs integer_value rs
             "# OF AUDIO CLIPS:" fs integer_value rs
             "# OF AUDIO FILES:" fs integer_value rs block_ending

    files_section = files_header files_column_header file_record* block_ending
    files_header = "F I L E S I N S E S S I O N" rs
    files_column_header = "Filename" isp fs "Location" rs
    file_record = string_value fs string_value rs

    clips_section = clips_header clips_column_header clip_record* block_ending
    clips_header = "O N L I N E C L I P S I N S E S S I O N" rs
    clips_column_header = string_value fs string_value rs
    clip_record = string_value fs string_value (fs "[" integer_value "]")? rs

    plugin_listing = plugin_header plugin_column_header plugin_record* block_ending
    plugin_header = "P L U G - I N S L I S T I N G" rs
    plugin_column_header = "MANUFACTURER " fs "PLUG-IN NAME " fs
                           "VERSION " fs "FORMAT " fs "STEMS " fs
                           "NUMBER OF INSTANCES" rs
    plugin_record = string_value fs string_value fs string_value fs
                    string_value fs string_value fs string_value rs

    track_listing = track_listing_header track_block*
    track_block = track_list_top ( track_clip_entry / block_ending )*

    track_listing_header = "T R A C K L I S T I N G" rs
    track_list_top = "TRACK NAME:" fs string_value rs
                     "COMMENTS:" fs string_value rs
                     "USER DELAY:" fs integer_value " Samples" rs
                     "STATE: " track_state_list rs
                     ("PLUG-INS: " ( fs string_value )* rs)?
                     "CHANNEL " fs "EVENT " fs "CLIP NAME " fs
                     "START TIME " fs "END TIME " fs "DURATION " fs
                     ("TIMESTAMP " fs)? "STATE" rs

    track_state_list = (track_state " ")*

    track_state = "Solo" / "Muted" / "Inactive"

    track_clip_entry = integer_value isp fs
                       integer_value isp fs
                       string_value fs
                       string_value fs string_value fs string_value fs (string_value fs)?
                       track_clip_state rs

    track_clip_state = ("Muted" / "Unmuted")

    markers_listing = markers_listing_header markers_column_header marker_record*
    markers_listing_header = "M A R K E R S L I S T I N G" rs
    markers_column_header = "# " fs "LOCATION " fs "TIME REFERENCE " fs
                            "UNITS " fs "NAME " fs "COMMENTS" rs

    marker_record = integer_value isp fs string_value fs integer_value isp fs
                    string_value fs string_value fs string_value rs

    fs = "\t"
    rs = "\n"
    block_ending = rs rs
    string_value = ~"[^\t\n]*"
    integer_value = ~"\d+"
    float_value = ~"\d+(\.\d+)"
    isp = ~"[^\d\t\n]*"
    """)
|
|
||||||
@@ -1,152 +0,0 @@
|
|||||||
from parsimonious.nodes import NodeVisitor, Node
|
|
||||||
|
|
||||||
|
|
||||||
class DictionaryParserVisitor(NodeVisitor):
    """
    Reduce the parsimonious parse tree of a Pro Tools text export into
    plain dicts and lists.

    The numeric indexes into ``visited_children`` mirror the position of
    each terminal and sub-rule in the corresponding grammar production;
    they must be kept in sync with the grammar.
    """

    def visit_document(self, node: Node, visited_children) -> dict:
        """Fold the top-level sections into one session dict."""
        # Each optional section parses to a zero-or-one element sequence.
        files = next(iter(visited_children[1]), None)
        clips = next(iter(visited_children[2]), None)
        plugins = next(iter(visited_children[3]), None)
        tracks = next(iter(visited_children[4]), None)
        markers = next(iter(visited_children[5]), None)

        return dict(header=visited_children[0],
                    files=files,
                    clips=clips,
                    plugins=plugins,
                    tracks=tracks,
                    markers=markers)

    @staticmethod
    def visit_header(node, visited_children):
        """Decode the session header block."""
        # The optional " Drop" token yields an empty or one-element
        # sequence; any element at all means drop-frame timecode.
        tc_drop = False
        for _ in visited_children[20]:
            tc_drop = True

        return dict(session_name=visited_children[2],
                    sample_rate=visited_children[6],
                    bit_depth=visited_children[10],
                    start_timecode=visited_children[15],
                    timecode_format=visited_children[19],
                    timecode_drop_frame=tc_drop,
                    count_audio_tracks=visited_children[25],
                    count_clips=visited_children[29],
                    count_files=visited_children[33])

    @staticmethod
    def visit_files_section(node, visited_children):
        """Return the FILES section as a list of filename/path dicts."""
        return list(map(lambda child: dict(filename=child[0], path=child[2]), visited_children[2]))

    @staticmethod
    def visit_clips_section(node, visited_children):
        """Return the CLIPS section as a list of clip dicts."""
        # NOTE(review): channel is read from the first clip record's
        # optional "[n]" suffix and applied to every clip — confirm this
        # is the intended sharing, and that an empty section cannot occur.
        channel = next(iter(visited_children[2][3]), 1)

        return list(map(lambda child: dict(clip_name=child[0], file=child[2], channel=channel),
                        visited_children[2]))

    @staticmethod
    def visit_plugin_listing(node, visited_children):
        """Return the PLUG-INS section as a list of plug-in dicts."""
        return list(map(lambda child: dict(manufacturer=child[0],
                                           plugin_name=child[2],
                                           version=child[4],
                                           format=child[6],
                                           stems=child[8],
                                           count_instances=child[10]),
                        visited_children[2]))

    @staticmethod
    def visit_track_block(node, visited_children):
        """Decode one track: its header fields, plug-ins and clip rows."""
        track_header, track_clip_list = visited_children
        clips = []
        for clip in track_clip_list:
            # Entries produced by the block_ending alternative are None;
            # use an identity test, not equality, against None.
            if clip[0] is not None:
                clips.append(clip[0])

        plugins = []
        for plugin_opt in track_header[16]:
            for plugin in plugin_opt[1]:
                plugins.append(plugin[1])

        return dict(
            name=track_header[2],
            comments=track_header[6],
            user_delay_samples=track_header[10],
            state=track_header[14],
            plugins=plugins,
            clips=clips
        )

    @staticmethod
    def visit_track_listing(node, visited_children):
        """Return the list of decoded track blocks."""
        return visited_children[1]

    @staticmethod
    def visit_track_clip_entry(node, visited_children):
        """Decode one clip row on a track."""
        # The TIMESTAMP column is optional; it parses to a nested list
        # when present and a Node otherwise.
        timestamp = None
        if isinstance(visited_children[14], list):
            timestamp = visited_children[14][0][0]

        return dict(channel=visited_children[0],
                    event=visited_children[3],
                    clip_name=visited_children[6],
                    start_time=visited_children[8],
                    end_time=visited_children[10],
                    duration=visited_children[12],
                    timestamp=timestamp,
                    state=visited_children[15])

    @staticmethod
    def visit_track_state_list(node, visited_children):
        """Collect the track state keywords (Solo/Muted/Inactive)."""
        states = []
        for next_state in visited_children:
            states.append(next_state[0][0].text)
        return states

    @staticmethod
    def visit_track_clip_state(node, visited_children):
        """Return the clip state keyword verbatim."""
        return node.text

    @staticmethod
    def visit_markers_listing(node, visited_children):
        """Return the decoded marker records."""
        markers = []

        for marker in visited_children[2]:
            markers.append(marker)

        return markers

    @staticmethod
    def visit_marker_record(node, visited_children):
        """Decode one marker row."""
        return dict(number=visited_children[0],
                    location=visited_children[3],
                    time_reference=visited_children[5],
                    units=visited_children[8],
                    name=visited_children[10],
                    comments=visited_children[12])

    @staticmethod
    def visit_formatted_clip_name(_, visited_children):
        """Return the inner text of a formatted clip name."""
        return visited_children[1].text

    @staticmethod
    def visit_string_value(node, visited_children):
        """Strip the column padding from a string field."""
        return node.text.strip(" ")

    @staticmethod
    def visit_integer_value(node, visited_children):
        """Parse an integer field."""
        return int(node.text)

    # def visit_timecode_value(self, node, visited_children):
    #     return node.text.strip(" ")

    @staticmethod
    def visit_float_value(node, visited_children):
        """Parse a float field."""
        return float(node.text)

    def visit_block_ending(self, node, visited_children):
        """Block endings carry no data."""
        pass

    def generic_visit(self, node, visited_children):
        """ The generic visit method. """
        return visited_children or node
|
|
||||||
71
ptulsconv/reporting.py
Normal file
71
ptulsconv/reporting.py
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
"""
|
||||||
|
Reporting logic. These methods provide reporting methods to the package and
|
||||||
|
take some pains to provide nice-looking escape codes if we're writing to a
|
||||||
|
tty.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
def print_banner_style(message):
    """Write `message` to stderr as a banner; bold when stderr is a tty."""
    if sys.stderr.isatty():
        template = "\n\033[1m%s\033[0m\n\n"
    else:
        template = "\n%s\n\n"
    sys.stderr.write(template % message)
|
||||||
|
|
||||||
|
|
||||||
|
def print_section_header_style(message):
    """Write a section header to stderr; underlined when stderr is a tty."""
    if sys.stderr.isatty():
        template = "\n\033[4m%s\033[0m\n\n"
    else:
        template = "%s\n\n"
    sys.stderr.write(template % message)
|
||||||
|
|
||||||
|
|
||||||
|
def print_status_style(message):
    """Write a status bullet to stderr; italic when stderr is a tty."""
    if sys.stderr.isatty():
        template = "\033[3m - %s\033[0m\n"
    else:
        template = " - %s\n"
    sys.stderr.write(template % message)
|
||||||
|
|
||||||
|
|
||||||
|
def print_warning(warning_string):
    """Write a warning bullet to stderr; italic when stderr is a tty.

    Currently formatted identically to `print_status_style`.
    """
    if sys.stderr.isatty():
        template = "\033[3m - %s\033[0m\n"
    else:
        template = " - %s\n"
    sys.stderr.write(template % warning_string)
|
||||||
|
|
||||||
|
|
||||||
|
def print_advisory_tagging_error(failed_string, position,
                                 parent_track_name=None, clip_time=None):
    """
    Report a clip-name tagging parse failure on stderr.

    :param failed_string: the clip name that failed to parse.
    :param position: character offset at which parsing stopped.
    :param parent_track_name: optional name of the track the clip is on.
    :param clip_time: optional timecode location of the clip.
    """
    tty = sys.stderr.isatty()
    if tty:
        sys.stderr.write("\n")
        sys.stderr.write(" ! \033[33;1mTagging error: \033[0m")
        # Green for the prefix that parsed, bold red for the remainder.
        sys.stderr.write("\033[32m\"%s\033[31;1m%s\"\033[0m\n" %
                         (failed_string[:position],
                          failed_string[position:]))
        if parent_track_name is not None:
            sys.stderr.write(" ! > On track \"%s\"\n" % parent_track_name)
        if clip_time is not None:
            sys.stderr.write(" ! > In clip name at %s\n" % clip_time)
    else:
        sys.stderr.write("\n")
        sys.stderr.write(" ! Tagging error: \"%s\"\n" % failed_string)
        sys.stderr.write(" ! %s _______________⬆\n" % (" " * position))
        if parent_track_name is not None:
            sys.stderr.write(" ! > On track \"%s\"\n" % parent_track_name)
        if clip_time is not None:
            sys.stderr.write(" ! > In clip name at %s\n" % clip_time)
        sys.stderr.write("\n")
|
||||||
|
|
||||||
|
|
||||||
|
def print_fatal_error(message):
    """Write a fatal-error banner to stderr; blinking red on a tty."""
    if sys.stderr.isatty():
        template = "\n\033[5;31;1m*** %s ***\033[0m\n"
    else:
        template = "\n%s\n"
    sys.stderr.write(template % message)
|
||||||
@@ -1,257 +0,0 @@
|
|||||||
from . import broadcast_timecode
|
|
||||||
from parsimonious import Grammar, NodeVisitor
|
|
||||||
from parsimonious.exceptions import IncompleteParseError
|
|
||||||
import math
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from tqdm import tqdm
|
|
||||||
|
|
||||||
class Transformation:
    """Base class for dict-to-dict session transformations.

    The default implementation is the identity; subclasses override
    `transform` to enrich or rewrite the parsed session dictionary.
    """

    def transform(self, input_dict) -> dict:
        # Identity by default; subclasses call super().transform() and
        # then modify the result.
        return input_dict
|
|
||||||
|
|
||||||
|
|
||||||
class TimecodeInterpreter(Transformation):
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.apply_session_start = False
|
|
||||||
|
|
||||||
def transform(self, input_dict: dict) -> dict:
|
|
||||||
retval = super().transform(input_dict)
|
|
||||||
rate = input_dict['header']['timecode_format']
|
|
||||||
start_tc = self.convert_time(input_dict['header']['start_timecode'], rate,
|
|
||||||
drop_frame=input_dict['header']['timecode_drop_frame'])
|
|
||||||
|
|
||||||
retval['header']['start_timecode_decoded'] = start_tc
|
|
||||||
retval['tracks'] = self.convert_tracks(input_dict['tracks'], timecode_rate=rate,
|
|
||||||
drop_frame=retval['header']['timecode_drop_frame'])
|
|
||||||
|
|
||||||
for marker in retval['markers']:
|
|
||||||
marker['location_decoded'] = self.convert_time(marker['location'], rate,
|
|
||||||
drop_frame=retval['header']['timecode_drop_frame'])
|
|
||||||
|
|
||||||
return retval
|
|
||||||
|
|
||||||
def convert_tracks(self, tracks, timecode_rate, drop_frame):
|
|
||||||
for track in tracks:
|
|
||||||
new_clips = []
|
|
||||||
for clip in track['clips']:
|
|
||||||
new_clips.append(self.convert_clip(clip, drop_frame=drop_frame, timecode_rate=timecode_rate))
|
|
||||||
|
|
||||||
track['clips'] = new_clips
|
|
||||||
|
|
||||||
return tracks
|
|
||||||
|
|
||||||
def convert_clip(self, clip, timecode_rate, drop_frame):
|
|
||||||
time_fields = ['start_time', 'end_time', 'duration', 'timestamp']
|
|
||||||
|
|
||||||
for time_field in time_fields:
|
|
||||||
if clip[time_field] is not None:
|
|
||||||
clip[time_field + "_decoded"] = self.convert_time(clip[time_field], drop_frame=drop_frame,
|
|
||||||
frame_rate=timecode_rate)
|
|
||||||
return clip
|
|
||||||
|
|
||||||
def convert_time(self, time_string, frame_rate, drop_frame=False):
|
|
||||||
lfps = math.ceil(frame_rate)
|
|
||||||
|
|
||||||
frame_count = broadcast_timecode.smpte_to_frame_count(time_string, lfps, drop_frame_hint=drop_frame)
|
|
||||||
|
|
||||||
return dict(frame_count=frame_count, logical_fps=lfps, drop_frame=drop_frame)
|
|
||||||
|
|
||||||
|
|
||||||
class TagInterpreter(Transformation):
|
|
||||||
tag_grammar = Grammar(
|
|
||||||
r"""
|
|
||||||
document = modifier? line? word_sep? tag_list?
|
|
||||||
line = word (word_sep word)*
|
|
||||||
tag_list = tag*
|
|
||||||
tag = key_tag / short_tag / full_text_tag / tag_junk
|
|
||||||
key_tag = "[" key "]" word_sep?
|
|
||||||
short_tag = "$" key "=" word word_sep?
|
|
||||||
full_text_tag = "{" key "=" value "}" word_sep?
|
|
||||||
key = ~"[A-Za-z][A-Za-z0-9_]*"
|
|
||||||
value = ~"[^}]+"
|
|
||||||
tag_junk = word word_sep?
|
|
||||||
word = ~"[^ \[\{\$][^ ]*"
|
|
||||||
word_sep = ~" +"
|
|
||||||
modifier = ("@" / "&") word_sep?
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
|
|
||||||
class TagListVisitor(NodeVisitor):
|
|
||||||
def visit_document(self, _, visited_children):
|
|
||||||
modifier_opt, line_opt, _, tag_list_opt = visited_children
|
|
||||||
|
|
||||||
return dict(line=next(iter(line_opt), None),
|
|
||||||
tags=next(iter(tag_list_opt), None),
|
|
||||||
mode=next(iter(modifier_opt), 'Normal')
|
|
||||||
)
|
|
||||||
|
|
||||||
def visit_line(self, node, _):
|
|
||||||
return str.strip(node.text, " ")
|
|
||||||
|
|
||||||
def visit_modifier(self, node, _):
|
|
||||||
if node.text.startswith('@'):
|
|
||||||
return 'Timespan'
|
|
||||||
elif node.text.startswith('&'):
|
|
||||||
return 'Append'
|
|
||||||
else:
|
|
||||||
return 'Normal'
|
|
||||||
|
|
||||||
def visit_tag_list(self, _, visited_children):
|
|
||||||
retdict = dict()
|
|
||||||
for child in visited_children:
|
|
||||||
if child[0] is not None:
|
|
||||||
k, v = child[0]
|
|
||||||
retdict[k] = v
|
|
||||||
return retdict
|
|
||||||
|
|
||||||
def visit_key_tag(self, _, children):
|
|
||||||
return children[1].text, children[1].text
|
|
||||||
|
|
||||||
def visit_short_tag(self, _, children):
|
|
||||||
return children[1].text, children[3].text
|
|
||||||
|
|
||||||
def visit_full_text_tag(self, _, children):
|
|
||||||
return children[1].text, children[3].text
|
|
||||||
|
|
||||||
def visit_tag_junk(self, node, _):
|
|
||||||
return None
|
|
||||||
|
|
||||||
def generic_visit(self, node, visited_children):
|
|
||||||
return visited_children or node
|
|
||||||
|
|
||||||
def __init__(self, ignore_muted=True, show_progress=False):
|
|
||||||
self.visitor = TagInterpreter.TagListVisitor()
|
|
||||||
self.ignore_muted = ignore_muted
|
|
||||||
self.show_progress = show_progress
|
|
||||||
|
|
||||||
def transform(self, input_dict: dict) -> dict:
|
|
||||||
transformed = list()
|
|
||||||
timespan_rules = list()
|
|
||||||
|
|
||||||
title_tags = self.parse_tags(input_dict['header']['session_name'], "<Session Name>")
|
|
||||||
markers = sorted(input_dict['markers'], key=lambda m: m['location_decoded']['frame_count'])
|
|
||||||
|
|
||||||
if self.show_progress:
|
|
||||||
track_iter = tqdm(input_dict['tracks'], desc="Reading tracks...", unit='Track')
|
|
||||||
else:
|
|
||||||
track_iter = input_dict['tracks']
|
|
||||||
|
|
||||||
for track in track_iter:
|
|
||||||
if 'Muted' in track['state'] and self.ignore_muted:
|
|
||||||
continue
|
|
||||||
|
|
||||||
track_tags = self.parse_tags(track['name'], "<Track %s>" % (track['name']))
|
|
||||||
comment_tags = self.parse_tags(track['comments'], "<Track %s>" % (track['name']))
|
|
||||||
track_context_tags = track_tags['tags']
|
|
||||||
track_context_tags.update(comment_tags['tags'])
|
|
||||||
|
|
||||||
for clip in track['clips']:
|
|
||||||
if clip['state'] == 'Muted' and self.ignore_muted:
|
|
||||||
continue
|
|
||||||
|
|
||||||
clip_tags = self.parse_tags(clip['clip_name'],
|
|
||||||
"<Track %s/Clip event number %i at %s>" % (track['name'], clip['event'], clip['start_time']))
|
|
||||||
clip_start = clip['start_time_decoded']['frame_count']
|
|
||||||
if clip_tags['mode'] == 'Normal':
|
|
||||||
event = dict()
|
|
||||||
event.update(title_tags['tags'])
|
|
||||||
event.update(track_context_tags)
|
|
||||||
event.update(self.effective_timespan_tags_at_time(timespan_rules, clip_start))
|
|
||||||
event.update(self.effective_marker_tags_at_time(markers, clip_start))
|
|
||||||
|
|
||||||
event.update(clip_tags['tags'])
|
|
||||||
|
|
||||||
event['PT.Track.Name'] = track_tags['line']
|
|
||||||
event['PT.Session.Name'] = title_tags['line']
|
|
||||||
event['PT.Clip.Number'] = clip['event']
|
|
||||||
event['PT.Clip.Name'] = clip_tags['line']
|
|
||||||
event['PT.Clip.Start'] = clip['start_time']
|
|
||||||
event['PT.Clip.Finish'] = clip['end_time']
|
|
||||||
event['PT.Clip.Start_Frames'] = clip_start
|
|
||||||
event['PT.Clip.Finish_Frames'] = clip['end_time_decoded']['frame_count']
|
|
||||||
event['PT.Clip.Start_Seconds'] = clip_start / input_dict['header']['timecode_format']
|
|
||||||
event['PT.Clip.Finish_Seconds'] = clip['end_time_decoded']['frame_count'] / input_dict['header'][
|
|
||||||
'timecode_format']
|
|
||||||
transformed.append(event)
|
|
||||||
|
|
||||||
elif clip_tags['mode'] == 'Append':
|
|
||||||
assert len(transformed) > 0, "First clip is in '&'-Append mode, fatal error."
|
|
||||||
|
|
||||||
transformed[-1].update(clip_tags['tags'])
|
|
||||||
transformed[-1]['event_name'] = transformed[-1]['event_name'] + " " + clip_tags['line']
|
|
||||||
transformed[-1]['PT.Clip.End_Frames'] = clip['end_time_decoded']['frame_count']
|
|
||||||
|
|
||||||
elif clip_tags['mode'] == 'Timespan':
|
|
||||||
rule = dict(start_time=clip_start,
|
|
||||||
end_time=clip['end_time_decoded']['frame_count'],
|
|
||||||
tags=clip_tags['tags'])
|
|
||||||
timespan_rules.append(rule)
|
|
||||||
|
|
||||||
return dict(header=input_dict['header'], events=transformed)
|
|
||||||
|
|
||||||
def effective_timespan_tags_at_time(_, rules, time) -> dict:
|
|
||||||
retval = dict()
|
|
||||||
|
|
||||||
for rule in rules:
|
|
||||||
if rule['start_time'] <= time <= rule['end_time']:
|
|
||||||
retval.update(rule['tags'])
|
|
||||||
|
|
||||||
return retval
|
|
||||||
|
|
||||||
def effective_marker_tags_at_time(self, markers, time):
|
|
||||||
retval = dict()
|
|
||||||
|
|
||||||
for marker in markers:
|
|
||||||
marker_name_tags = self.parse_tags(marker['name'], "Marker %i" % (marker['number']))
|
|
||||||
marker_comment_tags = self.parse_tags(marker['comments'], "Marker %i" % (marker['number']))
|
|
||||||
effective_tags = marker_name_tags['tags']
|
|
||||||
effective_tags.update(marker_comment_tags['tags'])
|
|
||||||
|
|
||||||
if marker['location_decoded']['frame_count'] <= time:
|
|
||||||
retval.update(effective_tags)
|
|
||||||
else:
|
|
||||||
break
|
|
||||||
return retval
|
|
||||||
|
|
||||||
def report(self, mesg, *args):
|
|
||||||
print(mesg % ( args) , file=sys.stderr)
|
|
||||||
sys.stderr.write("\033[F")
|
|
||||||
sys.stderr.write("\033[K")
|
|
||||||
|
|
||||||
def parse_tags(self, source, context_str=None):
|
|
||||||
try:
|
|
||||||
parse_tree = self.tag_grammar.parse(source)
|
|
||||||
return self.visitor.visit(parse_tree)
|
|
||||||
except IncompleteParseError as e:
|
|
||||||
if context_str is not None:
|
|
||||||
self.report("Error reading tags in: ")
|
|
||||||
|
|
||||||
trimmed_source = source[:e.pos]
|
|
||||||
parse_tree = self.tag_grammar.parse(trimmed_source)
|
|
||||||
return self.visitor.visit(parse_tree)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class SubclipOfSequence(Transformation):
|
|
||||||
|
|
||||||
def __init__(self, start, end):
|
|
||||||
self.start = start
|
|
||||||
self.end = end
|
|
||||||
|
|
||||||
def transform(self, input_dict: dict) -> dict:
|
|
||||||
out_events = []
|
|
||||||
offset = self.start
|
|
||||||
offset_sec = self.start / input_dict['header']['timecode_format']
|
|
||||||
for event in input_dict['events']:
|
|
||||||
if self.start <= event['PT.Clip.Start_Frames'] <= self.end:
|
|
||||||
e = event
|
|
||||||
e['PT.Clip.Start_Frames'] = event['PT.Clip.Start_Frames'] - offset
|
|
||||||
e['PT.Clip.Finish_Frames'] = event['PT.Clip.Finish_Frames'] - offset
|
|
||||||
e['PT.Clip.Start_Seconds'] = event['PT.Clip.Start_Seconds'] - offset_sec
|
|
||||||
e['PT.Clip.Finish_Seconds'] = event['PT.Clip.Finish_Seconds'] - offset_sec
|
|
||||||
out_events.append(e)
|
|
||||||
|
|
||||||
return dict(events=out_events)
|
|
||||||
85
ptulsconv/validations.py
Normal file
85
ptulsconv/validations.py
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
"""
|
||||||
|
Validation logic for enforcing various consistency rules.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from ptulsconv.docparser.adr_entity import ADRLine
|
||||||
|
from typing import Iterator, Optional
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ValidationError:
|
||||||
|
message: str
|
||||||
|
event: Optional[ADRLine] = None
|
||||||
|
|
||||||
|
def report_message(self):
|
||||||
|
if self.event is not None:
|
||||||
|
return (f"{self.message}: event at {self.event.start} with number"
|
||||||
|
"{self.event.cue_number}")
|
||||||
|
else:
|
||||||
|
return self.message
|
||||||
|
|
||||||
|
|
||||||
|
def validate_unique_count(input_lines: Iterator[ADRLine], field='title',
|
||||||
|
count=1):
|
||||||
|
values = set(list(map(lambda e: getattr(e, field), input_lines)))
|
||||||
|
if len(values) > count:
|
||||||
|
yield ValidationError(
|
||||||
|
message="Field {} has too many values (max={}): {}"
|
||||||
|
.format(field, count, values)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_value(input_lines: Iterator[ADRLine], key_field, predicate):
|
||||||
|
for event in input_lines:
|
||||||
|
val = getattr(event, key_field)
|
||||||
|
if not predicate(val):
|
||||||
|
yield ValidationError(message='Field {} not in range'.format(val),
|
||||||
|
event=event)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_unique_field(input_lines: Iterator[ADRLine], field='cue_number',
|
||||||
|
scope=None):
|
||||||
|
values = dict()
|
||||||
|
for event in input_lines:
|
||||||
|
this = getattr(event, field)
|
||||||
|
if scope is not None:
|
||||||
|
key = getattr(event, scope)
|
||||||
|
else:
|
||||||
|
key = '_values'
|
||||||
|
|
||||||
|
values.setdefault(key, set())
|
||||||
|
if this in values[key]:
|
||||||
|
yield ValidationError(message='Re-used {}'.format(field),
|
||||||
|
event=event)
|
||||||
|
else:
|
||||||
|
values[key].update(this)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_non_empty_field(input_lines: Iterator[ADRLine],
|
||||||
|
field='cue_number'):
|
||||||
|
for event in input_lines:
|
||||||
|
if getattr(event, field, None) is None:
|
||||||
|
yield ValidationError(message='Empty field {}'.format(field),
|
||||||
|
event=event)
|
||||||
|
|
||||||
|
|
||||||
|
def validate_dependent_value(input_lines: Iterator[ADRLine], key_field,
|
||||||
|
dependent_field):
|
||||||
|
"""
|
||||||
|
Validates that two events with the same value in `key_field` always have
|
||||||
|
the same value in `dependent_field`
|
||||||
|
"""
|
||||||
|
key_values = set((getattr(x, key_field) for x in input_lines))
|
||||||
|
|
||||||
|
for key_value in key_values:
|
||||||
|
rows = [(getattr(x, key_field), getattr(x, dependent_field))
|
||||||
|
for x in input_lines
|
||||||
|
if getattr(x, key_field) == key_value]
|
||||||
|
unique_rows = set(rows)
|
||||||
|
if len(unique_rows) > 1:
|
||||||
|
message = "Non-unique values for key {} = ".format(key_field)
|
||||||
|
for u in unique_rows:
|
||||||
|
message = message + "\n - {} -> {}".format(u[0], u[1])
|
||||||
|
|
||||||
|
yield ValidationError(message=message, event=None)
|
||||||
183
ptulsconv/xml/common.py
Normal file
183
ptulsconv/xml/common.py
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import pathlib
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import glob
|
||||||
|
import datetime
|
||||||
|
from xml.etree.ElementTree import TreeBuilder, tostring
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
import ptulsconv
|
||||||
|
from ptulsconv.docparser.adr_entity import ADRLine
|
||||||
|
|
||||||
|
# TODO Get a third-party test for Avid Marker lists
|
||||||
|
|
||||||
|
|
||||||
|
def avid_marker_list(lines: List[ADRLine], report_date=datetime.datetime.now(),
|
||||||
|
reel_start_frame=0, fps=24):
|
||||||
|
doc = TreeBuilder(element_factory=None)
|
||||||
|
|
||||||
|
doc.start('Avid:StreamItems', {'xmlns:Avid': 'http://www.avid.com'})
|
||||||
|
doc.start('Avid:XMLFileData', {})
|
||||||
|
doc.start('AvProp', {'name': 'DomainMagic', 'type': 'string'})
|
||||||
|
doc.data("Domain")
|
||||||
|
doc.end('AvProp')
|
||||||
|
doc.start('AvProp', {'name': 'DomainKey', 'type': 'string'})
|
||||||
|
doc.data("58424a44")
|
||||||
|
doc.end('AvProp')
|
||||||
|
|
||||||
|
def insert_elem(kind, attb, atype, name, value):
|
||||||
|
doc.start('ListElem', {})
|
||||||
|
doc.start('AvProp', {'id': 'ATTR',
|
||||||
|
'name': 'OMFI:ATTB:Kind',
|
||||||
|
'type': 'int32'})
|
||||||
|
doc.data(kind)
|
||||||
|
doc.end('AvProp')
|
||||||
|
|
||||||
|
doc.start('AvProp', {'id': 'ATTR',
|
||||||
|
'name': 'OMFI:ATTB:Name',
|
||||||
|
'type': 'string'})
|
||||||
|
doc.data(name)
|
||||||
|
doc.end('AvProp')
|
||||||
|
|
||||||
|
doc.start('AvProp', {'id': 'ATTR',
|
||||||
|
'name': attb,
|
||||||
|
'type': atype})
|
||||||
|
doc.data(value)
|
||||||
|
doc.end('AvProp')
|
||||||
|
|
||||||
|
doc.end('ListElem')
|
||||||
|
|
||||||
|
for line in lines:
|
||||||
|
doc.start('AvClass', {'id': 'ATTR'})
|
||||||
|
doc.start('AvProp', {'id': 'ATTR',
|
||||||
|
'name': '__OMFI:ATTR:NumItems',
|
||||||
|
'type': 'int32'})
|
||||||
|
doc.data('7')
|
||||||
|
doc.end('AvProp')
|
||||||
|
|
||||||
|
doc.start('List', {'id': 'OMFI:ATTR:AttrRefs'})
|
||||||
|
|
||||||
|
insert_elem('1', 'OMFI:ATTB:IntAttribute', 'int32',
|
||||||
|
'_ATN_CRM_LONG_CREATE_DATE', report_date.strftime("%s"))
|
||||||
|
insert_elem('2', 'OMFI:ATTB:StringAttribute', 'string',
|
||||||
|
'_ATN_CRM_COLOR', 'yellow')
|
||||||
|
insert_elem('2', 'OMFI:ATTB:StringAttribute', 'string',
|
||||||
|
'_ATN_CRM_USER', line.supervisor or "")
|
||||||
|
|
||||||
|
marker_name = "%s: %s" % (line.cue_number, line.prompt)
|
||||||
|
insert_elem('2', 'OMFI:ATTB:StringAttribute', 'string',
|
||||||
|
'_ATN_CRM_COM', marker_name)
|
||||||
|
|
||||||
|
start_frame = int(line.start * fps)
|
||||||
|
|
||||||
|
insert_elem('2', "OMFI:ATTB:StringAttribute", 'string',
|
||||||
|
'_ATN_CRM_TC',
|
||||||
|
str(start_frame - reel_start_frame))
|
||||||
|
|
||||||
|
insert_elem('2', "OMFI:ATTB:StringAttribute", 'string',
|
||||||
|
'_ATN_CRM_TRK', 'V1')
|
||||||
|
insert_elem('1', "OMFI:ATTB:IntAttribute", 'int32',
|
||||||
|
'_ATN_CRM_LENGTH', '1')
|
||||||
|
|
||||||
|
doc.start('ListElem', {})
|
||||||
|
doc.end('ListElem')
|
||||||
|
|
||||||
|
doc.end('List')
|
||||||
|
doc.end('AvClass')
|
||||||
|
|
||||||
|
doc.end('Avid:XMLFileData')
|
||||||
|
doc.end('Avid:StreamItems')
|
||||||
|
|
||||||
|
|
||||||
|
def dump_fmpxml(data, input_file_name, output, adr_field_map):
|
||||||
|
doc = TreeBuilder(element_factory=None)
|
||||||
|
|
||||||
|
doc.start('FMPXMLRESULT', {'xmlns':
|
||||||
|
'http://www.filemaker.com/fmpxmlresult'})
|
||||||
|
|
||||||
|
doc.start('ERRORCODE', {})
|
||||||
|
doc.data('0')
|
||||||
|
doc.end('ERRORCODE')
|
||||||
|
|
||||||
|
doc.start('PRODUCT', {'NAME': ptulsconv.__name__,
|
||||||
|
'VERSION': ptulsconv.__version__})
|
||||||
|
doc.end('PRODUCT')
|
||||||
|
|
||||||
|
doc.start('DATABASE', {'DATEFORMAT': 'MM/dd/yy',
|
||||||
|
'LAYOUT': 'summary',
|
||||||
|
'TIMEFORMAT': 'hh:mm:ss',
|
||||||
|
'RECORDS': str(len(data['events'])),
|
||||||
|
'NAME': os.path.basename(input_file_name)})
|
||||||
|
doc.end('DATABASE')
|
||||||
|
|
||||||
|
doc.start('METADATA', {})
|
||||||
|
for field in adr_field_map:
|
||||||
|
tp = field[2]
|
||||||
|
ft = 'TEXT'
|
||||||
|
if tp is int or tp is float:
|
||||||
|
ft = 'NUMBER'
|
||||||
|
|
||||||
|
doc.start('FIELD', {'EMPTYOK': 'YES', 'MAXREPEAT': '1',
|
||||||
|
'NAME': field[1], 'TYPE': ft})
|
||||||
|
doc.end('FIELD')
|
||||||
|
doc.end('METADATA')
|
||||||
|
|
||||||
|
doc.start('RESULTSET', {'FOUND': str(len(data['events']))})
|
||||||
|
for event in data['events']:
|
||||||
|
doc.start('ROW', {})
|
||||||
|
for field in adr_field_map:
|
||||||
|
doc.start('COL', {})
|
||||||
|
doc.start('DATA', {})
|
||||||
|
for key_attempt in field[0]:
|
||||||
|
if key_attempt in event.keys():
|
||||||
|
doc.data(str(event[key_attempt]))
|
||||||
|
break
|
||||||
|
doc.end('DATA')
|
||||||
|
doc.end('COL')
|
||||||
|
doc.end('ROW')
|
||||||
|
doc.end('RESULTSET')
|
||||||
|
|
||||||
|
doc.end('FMPXMLRESULT')
|
||||||
|
docelem = doc.close()
|
||||||
|
xmlstr = tostring(docelem, encoding='unicode', method='xml')
|
||||||
|
output.write(xmlstr)
|
||||||
|
|
||||||
|
|
||||||
|
xslt_path = os.path.join(pathlib.Path(__file__).parent.absolute(), 'xslt')
|
||||||
|
|
||||||
|
|
||||||
|
def xform_options():
|
||||||
|
return glob.glob(os.path.join(xslt_path, "*.xsl"))
|
||||||
|
|
||||||
|
|
||||||
|
def dump_xform_options(output=sys.stdout):
|
||||||
|
print("# Available transforms:", file=output)
|
||||||
|
print("# Transform dir: %s" % xslt_path, file=output)
|
||||||
|
for f in xform_options():
|
||||||
|
base = os.path.basename(f)
|
||||||
|
name, _ = os.path.splitext(base)
|
||||||
|
print("# " + name, file=output)
|
||||||
|
|
||||||
|
|
||||||
|
def fmp_transformed_dump(data, input_file, xsl_name, output, adr_field_map):
|
||||||
|
from ptulsconv.reporting import print_status_style
|
||||||
|
import io
|
||||||
|
|
||||||
|
pipe = io.StringIO()
|
||||||
|
|
||||||
|
print_status_style("Generating base XML")
|
||||||
|
dump_fmpxml(data, input_file, pipe, adr_field_map)
|
||||||
|
|
||||||
|
str_data = pipe.getvalue()
|
||||||
|
print_status_style("Base XML size %i" % (len(str_data)))
|
||||||
|
|
||||||
|
print_status_style("Running xsltproc")
|
||||||
|
|
||||||
|
xsl_path = os.path.join(pathlib.Path(__file__).parent.absolute(), 'xslt',
|
||||||
|
xsl_name + ".xsl")
|
||||||
|
print_status_style("Using xsl: %s" % xsl_path)
|
||||||
|
subprocess.run(['xsltproc', xsl_path, '-'],
|
||||||
|
input=str_data, text=True,
|
||||||
|
stdout=output, shell=False, check=True)
|
||||||
@@ -37,8 +37,16 @@
|
|||||||
<AvProp id="ATTR" name="OMFI:ATTB:Kind" type="int32">2</AvProp>
|
<AvProp id="ATTR" name="OMFI:ATTB:Kind" type="int32">2</AvProp>
|
||||||
<AvProp id="ATTR" name="OMFI:ATTB:Name" type="string">_ATN_CRM_COM</AvProp>
|
<AvProp id="ATTR" name="OMFI:ATTB:Name" type="string">_ATN_CRM_COM</AvProp>
|
||||||
<AvProp id="ATTR" name="OMFI:ATTB:StringAttribute" type="string">
|
<AvProp id="ATTR" name="OMFI:ATTB:StringAttribute" type="string">
|
||||||
<xsl:value-of select="concat(fmp:COL[15]/fmp:DATA, ': ', fmp:COL[21]/fmp:DATA)"/>
|
<xsl:value-of select="concat('(',fmp:COL[14]/fmp:DATA,') ',fmp:COL[15]/fmp:DATA, ': ', fmp:COL[21]/fmp:DATA, ' ')"/>
|
||||||
[Reason: <xsl:value-of select="fmp:COL[18]/fmp:DATA" />]</AvProp>
|
<xsl:choose>
|
||||||
|
<xsl:when test="fmp:COL[18]/fmp:DATA != ''">[Reason: <xsl:value-of select="fmp:COL[18]/fmp:DATA" />]
|
||||||
|
</xsl:when>
|
||||||
|
<xsl:otherwise> </xsl:otherwise>
|
||||||
|
</xsl:choose>
|
||||||
|
<xsl:choose>
|
||||||
|
<xsl:when test="fmp:COL[23]/fmp:DATA != ''">[Note: <xsl:value-of select="fmp:COL[23]/fmp:DATA" />]</xsl:when>
|
||||||
|
</xsl:choose>
|
||||||
|
</AvProp>
|
||||||
</ListElem>
|
</ListElem>
|
||||||
<ListElem>
|
<ListElem>
|
||||||
<AvProp id="ATTR" name="OMFI:ATTB:Kind" type="int32">2</AvProp>
|
<AvProp id="ATTR" name="OMFI:ATTB:Kind" type="int32">2</AvProp>
|
||||||
30
ptulsconv/xslt/SRT.xsl
Normal file
30
ptulsconv/xslt/SRT.xsl
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<xsl:transform version="1.0"
|
||||||
|
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||||
|
xmlns:fmp="http://www.filemaker.com/fmpxmlresult">
|
||||||
|
|
||||||
|
<xsl:output method="text" encoding="windows-1252"/>
|
||||||
|
<xsl:template match="/">
|
||||||
|
|
||||||
|
<xsl:for-each select="/fmp:FMPXMLRESULT/fmp:RESULTSET/fmp:ROW">
|
||||||
|
<xsl:sort data-type="number" select="number(fmp:COL[9]/fmp:DATA)" />
|
||||||
|
<xsl:value-of select="concat(position() ,'
')" />
|
||||||
|
<xsl:value-of select="concat(format-number(floor(number(fmp:COL[9]/fmp:DATA) div 3600),'00'), ':')" />
|
||||||
|
<xsl:value-of select="concat(format-number(floor(number(fmp:COL[9]/fmp:DATA) div 60),'00'), ':')" />
|
||||||
|
<xsl:value-of select="concat(format-number(floor(number(fmp:COL[9]/fmp:DATA) mod 60),'00'), ',')" />
|
||||||
|
<xsl:value-of select="format-number((number(fmp:COL[9]/fmp:DATA) - floor(number(fmp:COL[9]/fmp:DATA))) * 1000,'000')" />
|
||||||
|
<xsl:text> --> </xsl:text>
|
||||||
|
|
||||||
|
<xsl:value-of select="concat(format-number(floor(number(fmp:COL[10]/fmp:DATA) div 3600),'00'), ':')" />
|
||||||
|
<xsl:value-of select="concat(format-number(floor(number(fmp:COL[10]/fmp:DATA) div 60),'00'), ':')" />
|
||||||
|
<xsl:value-of select="concat(format-number(floor(number(fmp:COL[10]/fmp:DATA) mod 60),'00'), ',')" />
|
||||||
|
<xsl:value-of select="format-number((number(fmp:COL[10]/fmp:DATA) - floor(number(fmp:COL[10]/fmp:DATA))) * 1000,'000')" />
|
||||||
|
|
||||||
|
<xsl:value-of select="concat('
',fmp:COL[15]/fmp:DATA, ': ', fmp:COL[21]/fmp:DATA)"/>
|
||||||
|
<xsl:value-of select="'

'" />
|
||||||
|
|
||||||
|
</xsl:for-each>
|
||||||
|
</xsl:template>
|
||||||
|
|
||||||
|
|
||||||
|
</xsl:transform>
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
#!/bin/zsh
|
|
||||||
python3 setup.py build
|
|
||||||
python3 setup.py sdist bdist_wheel
|
|
||||||
python3 -m twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
|
|
||||||
52
pyproject.toml
Normal file
52
pyproject.toml
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
[build-system]
|
||||||
|
requires = ["flit_core >=3.2,<4"]
|
||||||
|
build-backend = "flit_core.buildapi"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "ptulsconv"
|
||||||
|
authors = [
|
||||||
|
{name = "Jamie Hardt", email = "jamiehardt@me.com"},
|
||||||
|
]
|
||||||
|
readme = "README.md"
|
||||||
|
license = { file = "LICENSE" }
|
||||||
|
classifiers = [
|
||||||
|
'License :: OSI Approved :: MIT License',
|
||||||
|
'Topic :: Multimedia',
|
||||||
|
'Topic :: Multimedia :: Sound/Audio',
|
||||||
|
"Programming Language :: Python :: 3.8",
|
||||||
|
"Programming Language :: Python :: 3.9",
|
||||||
|
"Programming Language :: Python :: 3.10",
|
||||||
|
"Programming Language :: Python :: 3.11",
|
||||||
|
"Development Status :: 5 - Production/Stable",
|
||||||
|
"Topic :: Text Processing :: Filters"
|
||||||
|
]
|
||||||
|
requires-python = ">=3.8"
|
||||||
|
dynamic = ["version", "description"]
|
||||||
|
keywords = ["text-processing", "parsers", "film",
|
||||||
|
"broadcast", "editing", "editorial"]
|
||||||
|
dependencies = [
|
||||||
|
'parsimonious',
|
||||||
|
'tqdm',
|
||||||
|
'reportlab',
|
||||||
|
'py-ptsl >= 101.1.0'
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.optional-dependencies]
|
||||||
|
doc = [
|
||||||
|
"Sphinx ~= 5.3.0",
|
||||||
|
"sphinx-rtd-theme >= 1.1.1"
|
||||||
|
]
|
||||||
|
|
||||||
|
[tool.flit.module]
|
||||||
|
name = "ptulsconv"
|
||||||
|
|
||||||
|
[project.scripts]
|
||||||
|
ptulsconv = "ptulsconv.__main__:main"
|
||||||
|
|
||||||
|
[project.entry_points.console_scripts]
|
||||||
|
ptulsconv = 'ptulsconv.__main__:main'
|
||||||
|
|
||||||
|
[project.urls]
|
||||||
|
Source = 'https://github.com/iluvcapra/ptulsconv'
|
||||||
|
Issues = 'https://github.com/iluvcapra/ptulsconv/issues'
|
||||||
|
Documentation = 'https://ptulsconv.readthedocs.io/'
|
||||||
62
reaper/Export Items as Text.py
Normal file
62
reaper/Export Items as Text.py
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# Export Items as Text.py
|
||||||
|
# (c) 2021 Jamie Hardt. All rights reserved.
|
||||||
|
#
|
||||||
|
#
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os.path
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
item_records = list()
|
||||||
|
|
||||||
|
for i in range(0, RPR_CountMediaItems(0) ):
|
||||||
|
this_item = RPR_GetMediaItem(0, i)
|
||||||
|
|
||||||
|
item_record = {}
|
||||||
|
item_record["mute"] = True if RPR_GetMediaItemInfo_Value(this_item, "B_MUTE_ACTUAL") > 0. else False
|
||||||
|
|
||||||
|
item_record["duration"] = RPR_GetMediaItemInfo_Value(this_item, "D_LENGTH")
|
||||||
|
_, item_record["duration_tc"], _, _, _ = RPR_format_timestr_len(item_record["duration"], "", 128, 0., 5)
|
||||||
|
|
||||||
|
item_record["position"] = RPR_GetMediaItemInfo_Value(this_item, "D_POSITION")
|
||||||
|
_, item_record["position_tc"], _, _ = RPR_format_timestr_pos(item_record["position"], "", 128, 5)
|
||||||
|
|
||||||
|
item_record["selected"] = True if RPR_GetMediaItemInfo_Value(this_item, "B_UISEL") > 0. else False
|
||||||
|
_, _, _, item_record["notes"], _ = RPR_GetSetMediaItemInfo_String(this_item, "P_NOTES", "", False)
|
||||||
|
_, _, _, item_record["item_guid"], _ = RPR_GetSetMediaItemInfo_String(this_item, "GUID", "", False)
|
||||||
|
|
||||||
|
active_take = RPR_GetActiveTake(this_item)
|
||||||
|
_, _, _, item_record["active_take_name"], _ = RPR_GetSetMediaItemTakeInfo_String(active_take, "P_NAME", "", False)
|
||||||
|
_, _, _, item_record["active_take_guid"], _ = RPR_GetSetMediaItemTakeInfo_String(active_take, "GUID", "", False)
|
||||||
|
|
||||||
|
item_track = RPR_GetMediaItemTrack(this_item)
|
||||||
|
_, _, _, item_record["track_name"], _ = RPR_GetSetMediaTrackInfo_String(item_track, "P_NAME", "", False)
|
||||||
|
_, _, _, item_record["track_guid"], _ = RPR_GetSetMediaTrackInfo_String(item_track, "GUID", "", False)
|
||||||
|
item_record["track_index"] = RPR_GetMediaTrackInfo_Value(item_track, "IP_TRACKNUMBER")
|
||||||
|
item_record["track_muted"] = True if RPR_GetMediaTrackInfo_Value(item_track, "B_MUTE") > 0. else False
|
||||||
|
|
||||||
|
item_records = item_records + [item_record]
|
||||||
|
|
||||||
|
output = dict()
|
||||||
|
output["items"] = item_records
|
||||||
|
_, output["project_title"], _ = RPR_GetProjectName(0, "", 1024)
|
||||||
|
_, _, output["project_author"], _ = RPR_GetSetProjectAuthor(0, False, "", 1024)
|
||||||
|
output["project_frame_rate"], _, output["project_drop_frame"] = RPR_TimeMap_curFrameRate(0, True)
|
||||||
|
|
||||||
|
output_path, _ = RPR_GetProjectPath("", 1024)
|
||||||
|
|
||||||
|
now = datetime.datetime.now()
|
||||||
|
output_title = output["project_title"]
|
||||||
|
|
||||||
|
if output_title == "":
|
||||||
|
output_title = "unsaved project"
|
||||||
|
|
||||||
|
output_file_name = "%s Text Export %s.txt" % (output_title, now.strftime('%Y%m%d_%H%M'))
|
||||||
|
output_path = output_path + "/" + output_file_name
|
||||||
|
|
||||||
|
with open(output_path, "w") as f:
|
||||||
|
json.dump(output, f, allow_nan=True, indent=4)
|
||||||
|
|
||||||
|
RPR_ShowMessageBox("Exported text file \"%s\" to project folder." % output_file_name, "Text Export Complete", 0)
|
||||||
|
|
||||||
|
#RPR_ShowConsoleMsg(output_path)
|
||||||
38
setup.py
38
setup.py
@@ -1,38 +0,0 @@
|
|||||||
from setuptools import setup
|
|
||||||
|
|
||||||
from ptulsconv import __author__, __license__, __version__
|
|
||||||
|
|
||||||
with open("README.md", "r") as fh:
|
|
||||||
long_description = fh.read()
|
|
||||||
|
|
||||||
setup(name='ptulsconv',
|
|
||||||
version=__version__,
|
|
||||||
author=__author__,
|
|
||||||
description='Parse and convert Pro Tools text exports',
|
|
||||||
long_description_content_type="text/markdown",
|
|
||||||
long_description=long_description,
|
|
||||||
license=__license__,
|
|
||||||
url='https://github.com/iluvcapra/ptulsconv',
|
|
||||||
project_urls={
|
|
||||||
'Source':
|
|
||||||
'https://github.com/iluvcapra/ptulsconv',
|
|
||||||
'Issues':
|
|
||||||
'https://github.com/iluvcapra/ptulsconv/issues',
|
|
||||||
},
|
|
||||||
classifiers=[
|
|
||||||
'License :: OSI Approved :: MIT License',
|
|
||||||
'Topic :: Multimedia',
|
|
||||||
'Topic :: Multimedia :: Sound/Audio',
|
|
||||||
"Programming Language :: Python :: 3.7",
|
|
||||||
"Development Status :: 4 - Beta",
|
|
||||||
"Topic :: Text Processing :: Filters",
|
|
||||||
"Topic :: Text Processing :: Markup :: XML"],
|
|
||||||
packages=['ptulsconv'],
|
|
||||||
keywords='text-processing parsers film tv editing editorial',
|
|
||||||
install_requires=['parsimonious','tqdm'],
|
|
||||||
entry_points={
|
|
||||||
'console_scripts': [
|
|
||||||
'ptulsconv = ptulsconv.__main__:main'
|
|
||||||
]
|
|
||||||
}
|
|
||||||
)
|
|
||||||
BIN
tests/export_cases/Tag Tests/Tag Tests.ptx
Normal file
BIN
tests/export_cases/Tag Tests/Tag Tests.ptx
Normal file
Binary file not shown.
100
tests/export_cases/Tag Tests/Tag Tests.txt
Normal file
100
tests/export_cases/Tag Tests/Tag Tests.txt
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
SESSION NAME: Tag Tests
|
||||||
|
SAMPLE RATE: 48000.000000
|
||||||
|
BIT DEPTH: 24-bit
|
||||||
|
SESSION START TIMECODE: 01:00:00:00
|
||||||
|
TIMECODE FORMAT: 23.976 Frame
|
||||||
|
# OF AUDIO TRACKS: 8
|
||||||
|
# OF AUDIO CLIPS: 0
|
||||||
|
# OF AUDIO FILES: 0
|
||||||
|
|
||||||
|
|
||||||
|
P L U G - I N S L I S T I N G
|
||||||
|
MANUFACTURER PLUG-IN NAME VERSION FORMAT STEMS NUMBER OF INSTANCES
|
||||||
|
|
||||||
|
|
||||||
|
T R A C K L I S T I N G
|
||||||
|
TRACK NAME: Audio 1
|
||||||
|
COMMENTS:
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 Clip Name {X=300} 01:00:00:00 01:00:05:03 00:00:05:03 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Audio 2 $A=1
|
||||||
|
COMMENTS:
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 Lorem ipsum {X=301} 01:00:00:00 01:00:05:03 00:00:05:03 Unmuted
|
||||||
|
1 2 Dolor sic amet {X=302} 01:00:10:00 01:00:20:00 00:00:10:00 Unmuted
|
||||||
|
1 3 & the rain in spain [ABC] 01:00:20:00 01:00:25:00 00:00:05:00 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Audio 3 $A=2
|
||||||
|
COMMENTS: {B=100}
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 A 01:00:15:00 01:00:25:00 00:00:10:00 Unmuted
|
||||||
|
1 2 & B 01:00:25:00 01:00:35:00 00:00:10:00 Unmuted
|
||||||
|
1 3 & C 01:00:35:00 01:00:45:00 00:00:10:00 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Audio 4 $A=3
|
||||||
|
COMMENTS: $A=4
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 Silver Bridge 01:00:00:00 01:00:05:00 00:00:05:00 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Audio 5
|
||||||
|
COMMENTS:
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 @ {D=100} 01:00:00:00 01:00:10:00 00:00:10:00 Unmuted
|
||||||
|
1 2 @ {D=101} 01:00:10:00 01:00:20:00 00:00:10:00 Unmuted
|
||||||
|
1 3 @ {D=102} 01:00:20:00 01:00:30:00 00:00:10:00 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Audio 6
|
||||||
|
COMMENTS:
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 Region 02 01:00:02:00 01:00:03:00 00:00:01:00 Unmuted
|
||||||
|
1 2 Region 12 01:00:12:00 01:00:13:00 00:00:01:00 Unmuted
|
||||||
|
1 3 Region 22 01:00:22:00 01:00:23:00 00:00:01:00 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Audio 7
|
||||||
|
COMMENTS:
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 @ {D=200} {E=101} 01:00:00:00 01:00:10:00 00:00:10:00 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
TRACK NAME: Audio 8
|
||||||
|
COMMENTS:
|
||||||
|
USER DELAY: 0 Samples
|
||||||
|
STATE:
|
||||||
|
PLUG-INS:
|
||||||
|
CHANNEL EVENT CLIP NAME START TIME END TIME DURATION STATE
|
||||||
|
1 1 Region 04 01:00:04:00 01:00:05:00 00:00:01:00 Unmuted
|
||||||
|
|
||||||
|
|
||||||
|
M A R K E R S L I S T I N G
|
||||||
|
# LOCATION TIME REFERENCE UNITS NAME COMMENTS
|
||||||
|
3 01:00:05:00 240240 Samples Marker $M=0
|
||||||
|
1 01:00:10:00 480480 Samples $M=1
|
||||||
|
2 01:00:22:00 1057056 Samples $M=2
|
||||||
90
tests/export_tests/test_robinhood1.py
Normal file
90
tests/export_tests/test_robinhood1.py
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
import unittest
|
||||||
|
from ptulsconv.docparser import parse_document
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
|
||||||
|
class TestRobinHood1(unittest.TestCase):
|
||||||
|
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood Spotting.txt'
|
||||||
|
|
||||||
|
def test_header_export(self):
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertIsNotNone(session.header)
|
||||||
|
self.assertEqual(session.header.session_name, 'Robin Hood Spotting')
|
||||||
|
self.assertEqual(session.header.sample_rate, 48000.0)
|
||||||
|
self.assertEqual(session.header.bit_depth, 24)
|
||||||
|
self.assertEqual(session.header.timecode_fps, '29.97')
|
||||||
|
self.assertEqual(session.header.timecode_drop_frame, False)
|
||||||
|
|
||||||
|
def test_all_sections(self):
|
||||||
|
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertIsNotNone(session.header)
|
||||||
|
self.assertIsNotNone(session.files)
|
||||||
|
self.assertIsNotNone(session.clips)
|
||||||
|
self.assertIsNotNone(session.plugins)
|
||||||
|
self.assertIsNotNone(session.tracks)
|
||||||
|
self.assertIsNotNone(session.markers)
|
||||||
|
|
||||||
|
def test_tracks(self):
|
||||||
|
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertEqual(len(session.tracks), 14)
|
||||||
|
self.assertListEqual(["Scenes", "Robin", "Will", "Marian", "John",
|
||||||
|
"Guy", "Much", "Butcher", "Town Crier",
|
||||||
|
"Soldier 1", "Soldier 2", "Soldier 3",
|
||||||
|
"Priest", "Guest at Court"],
|
||||||
|
list(map(lambda t: t.name, session.tracks)))
|
||||||
|
self.assertListEqual(["", "[ADR] {Actor=Errol Flynn} $CN=1",
|
||||||
|
"[ADR] {Actor=Patrick Knowles} $CN=2",
|
||||||
|
"[ADR] {Actor=Olivia DeHavilland} $CN=3",
|
||||||
|
"[ADR] {Actor=Claude Raines} $CN=4",
|
||||||
|
"[ADR] {Actor=Basil Rathbone} $CN=5",
|
||||||
|
"[ADR] {Actor=Herbert Mundin} $CN=6",
|
||||||
|
"[ADR] {Actor=George Bunny} $CN=101",
|
||||||
|
"[ADR] {Actor=Leonard Mundie} $CN=102",
|
||||||
|
"[ADR] $CN=103",
|
||||||
|
"[ADR] $CN=104",
|
||||||
|
"[ADR] $CN=105",
|
||||||
|
"[ADR] {Actor=Thomas R. Mills} $CN=106",
|
||||||
|
"[ADR] $CN=107"],
|
||||||
|
list(map(lambda t: t.comments, session.tracks)))
|
||||||
|
|
||||||
|
def test_a_track(self):
|
||||||
|
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
guy_track = session.tracks[5]
|
||||||
|
self.assertEqual(guy_track.name, 'Guy')
|
||||||
|
self.assertEqual(guy_track.comments, '[ADR] {Actor=Basil Rathbone} $CN=5')
|
||||||
|
self.assertEqual(guy_track.user_delay_samples, 0)
|
||||||
|
self.assertListEqual(guy_track.state, [])
|
||||||
|
self.assertEqual(len(guy_track.clips), 16)
|
||||||
|
self.assertEqual(guy_track.clips[5].channel, 1)
|
||||||
|
self.assertEqual(guy_track.clips[5].event, 6)
|
||||||
|
self.assertEqual(guy_track.clips[5].clip_name, "\"What's your name? You Saxon dog!\" $QN=GY106")
|
||||||
|
self.assertEqual(guy_track.clips[5].start_timecode, "01:04:19:15")
|
||||||
|
self.assertEqual(guy_track.clips[5].finish_timecode, "01:04:21:28")
|
||||||
|
self.assertEqual(guy_track.clips[5].duration, "00:00:02:13")
|
||||||
|
self.assertEqual(guy_track.clips[5].timestamp, None)
|
||||||
|
self.assertEqual(guy_track.clips[5].state, 'Unmuted')
|
||||||
|
|
||||||
|
def test_memory_locations(self):
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertEqual(len(session.markers), 1)
|
||||||
|
self.assertEqual(session.markers[0].number, 1)
|
||||||
|
self.assertEqual(session.markers[0].location, "01:00:00:00")
|
||||||
|
self.assertEqual(session.markers[0].time_reference, 0)
|
||||||
|
self.assertEqual(session.markers[0].units, "Samples")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
52
tests/export_tests/test_robinhood5.py
Normal file
52
tests/export_tests/test_robinhood5.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
import unittest
|
||||||
|
from ptulsconv.docparser import parse_document
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
|
||||||
|
class TestRobinHood5(unittest.TestCase):
|
||||||
|
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood Spotting5.txt'
|
||||||
|
|
||||||
|
def test_skipped_segments(self):
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertIsNone(session.files)
|
||||||
|
self.assertIsNone(session.clips)
|
||||||
|
|
||||||
|
def test_plugins(self):
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertEqual(len(session.plugins), 2)
|
||||||
|
|
||||||
|
def test_stereo_track(self):
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertEqual(session.tracks[1].name, 'MX WT (Stereo)')
|
||||||
|
self.assertEqual(len(session.tracks[1].clips), 2)
|
||||||
|
self.assertEqual(session.tracks[1].clips[0].clip_name, 'RobinHood.1-01.L')
|
||||||
|
self.assertEqual(session.tracks[1].clips[1].clip_name, 'RobinHood.1-01.R')
|
||||||
|
|
||||||
|
def test_a_track(self):
|
||||||
|
with open(self.path,"r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
guy_track = session.tracks[8]
|
||||||
|
self.assertEqual(guy_track.name, 'Guy')
|
||||||
|
self.assertEqual(guy_track.comments, '[ADR] {Actor=Basil Rathbone} $CN=5')
|
||||||
|
self.assertEqual(guy_track.user_delay_samples, 0)
|
||||||
|
self.assertListEqual(guy_track.state, ['Solo'])
|
||||||
|
self.assertEqual(len(guy_track.clips), 16)
|
||||||
|
self.assertEqual(guy_track.clips[5].channel, 1)
|
||||||
|
self.assertEqual(guy_track.clips[5].event, 6)
|
||||||
|
self.assertEqual(guy_track.clips[5].clip_name, "\"What's your name? You Saxon dog!\" $QN=GY106")
|
||||||
|
self.assertEqual(guy_track.clips[5].start_timecode, "01:04:19:15.00")
|
||||||
|
self.assertEqual(guy_track.clips[5].finish_timecode, "01:04:21:28.00")
|
||||||
|
self.assertEqual(guy_track.clips[5].duration, "00:00:02:13.00")
|
||||||
|
self.assertEqual(guy_track.clips[5].timestamp, "01:04:19:09.70")
|
||||||
|
self.assertEqual(guy_track.clips[5].state, 'Unmuted')
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
33
tests/export_tests/test_robinhood6.py
Normal file
33
tests/export_tests/test_robinhood6.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
import unittest
|
||||||
|
from ptulsconv.docparser import parse_document
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
|
||||||
|
class TestRobinHood6(unittest.TestCase):
|
||||||
|
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood Spotting6.txt'
|
||||||
|
|
||||||
|
def test_a_track(self):
|
||||||
|
with open(self.path, "r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
|
||||||
|
marian_track = session.tracks[6]
|
||||||
|
self.assertEqual(marian_track.name, 'Marian')
|
||||||
|
self.assertEqual(marian_track.comments, '[ADR] {Actor=Olivia DeHavilland} $CN=3')
|
||||||
|
self.assertEqual(marian_track.user_delay_samples, 0)
|
||||||
|
self.assertListEqual(marian_track.state, ['Solo'])
|
||||||
|
self.assertEqual(len(marian_track.clips), 4)
|
||||||
|
self.assertListEqual(marian_track.plugins, ['Channel Strip (mono)', 'ReVibe II (mono/5.1)'])
|
||||||
|
self.assertEqual(marian_track.clips[2].channel, 1)
|
||||||
|
self.assertEqual(marian_track.clips[2].event, 3)
|
||||||
|
self.assertEqual(marian_track.clips[2].clip_name,
|
||||||
|
"\"Isn't that reason enough for a Royal Ward who must obey her guardian?\" $QN=M103")
|
||||||
|
self.assertEqual(marian_track.clips[2].start_timecode, "01:08:01:11")
|
||||||
|
self.assertEqual(marian_track.clips[2].finish_timecode, "01:08:04:24")
|
||||||
|
self.assertEqual(marian_track.clips[2].duration, "00:00:03:12")
|
||||||
|
self.assertEqual(marian_track.clips[2].timestamp, "01:08:01:11")
|
||||||
|
self.assertEqual(marian_track.clips[2].state, 'Unmuted')
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
34
tests/export_tests/test_robinhooddf.py
Normal file
34
tests/export_tests/test_robinhooddf.py
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
import unittest
|
||||||
|
from ptulsconv.docparser import parse_document
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
|
||||||
|
class TestRobinHoodDF(unittest.TestCase):
|
||||||
|
path = os.path.dirname(__file__) + '/../export_cases/Robin Hood SpottingDF.txt'
|
||||||
|
|
||||||
|
def test_header_export_df(self):
|
||||||
|
|
||||||
|
with open(self.path, "r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
self.assertEqual(session.header.timecode_drop_frame, True)
|
||||||
|
|
||||||
|
def test_a_track(self):
|
||||||
|
|
||||||
|
with open(self.path, "r") as file:
|
||||||
|
session = parse_document(file.read())
|
||||||
|
|
||||||
|
guy_track = session.tracks[4]
|
||||||
|
self.assertEqual(guy_track.name, 'Robin')
|
||||||
|
self.assertEqual(guy_track.comments, '[ADR] {Actor=Errol Flynn} $CN=1')
|
||||||
|
self.assertEqual(guy_track.user_delay_samples, 0)
|
||||||
|
self.assertListEqual(guy_track.state, [])
|
||||||
|
self.assertEqual(len(guy_track.clips), 10)
|
||||||
|
self.assertEqual(guy_track.clips[5].channel, 1)
|
||||||
|
self.assertEqual(guy_track.clips[5].event, 6)
|
||||||
|
self.assertEqual(guy_track.clips[5].clip_name, "\"Hold there! What's his fault?\" $QN=R106")
|
||||||
|
self.assertEqual(guy_track.clips[5].start_timecode, "01:05:30;15")
|
||||||
|
self.assertEqual(guy_track.clips[5].finish_timecode, "01:05:32;01")
|
||||||
|
self.assertEqual(guy_track.clips[5].duration, "00:00:01;16")
|
||||||
|
self.assertEqual(guy_track.clips[5].timestamp, None)
|
||||||
|
self.assertEqual(guy_track.clips[5].state, 'Unmuted')
|
||||||
34
tests/functional/test_pdf_export.py
Normal file
34
tests/functional/test_pdf_export.py
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
import unittest
|
||||||
|
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
import os.path
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
|
||||||
|
from ptulsconv import commands
|
||||||
|
|
||||||
|
class TestPDFExport(unittest.TestCase):
|
||||||
|
def test_report_generation(self):
|
||||||
|
"""
|
||||||
|
Setp through every text file in export_cases and make sure it can
|
||||||
|
be converted into PDF docs without throwing an error
|
||||||
|
"""
|
||||||
|
files = [os.path.dirname(__file__) + "/../export_cases/Robin Hood Spotting.txt"]
|
||||||
|
#files.append(os.path.dirname(__file__) + "/../export_cases/Robin Hood Spotting2.txt")
|
||||||
|
for path in files:
|
||||||
|
tempdir = tempfile.TemporaryDirectory()
|
||||||
|
os.chdir(tempdir.name)
|
||||||
|
try:
|
||||||
|
commands.convert(input_file=path, major_mode='doc')
|
||||||
|
except:
|
||||||
|
assert False, "Error processing file %s" % path
|
||||||
|
finally:
|
||||||
|
tempdir.cleanup()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
@@ -1,110 +0,0 @@
|
|||||||
import unittest
|
|
||||||
import ptulsconv
|
|
||||||
#import pprint
|
|
||||||
import os.path
|
|
||||||
|
|
||||||
|
|
||||||
class TestRobinHood1(unittest.TestCase):
|
|
||||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood Spotting.txt'
|
|
||||||
|
|
||||||
def test_header_export(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
|
|
||||||
self.assertTrue('header' in parsed.keys())
|
|
||||||
self.assertEqual(parsed['header']['session_name'], 'Robin Hood Spotting')
|
|
||||||
self.assertEqual(parsed['header']['sample_rate'], 48000.0)
|
|
||||||
self.assertEqual(parsed['header']['bit_depth'], 24)
|
|
||||||
self.assertEqual(parsed['header']['timecode_format'], 29.97)
|
|
||||||
self.assertEqual(parsed['header']['timecode_drop_frame'], False)
|
|
||||||
|
|
||||||
def test_all_sections(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
|
|
||||||
self.assertIn('header', parsed.keys())
|
|
||||||
self.assertIn('files', parsed.keys())
|
|
||||||
self.assertIn('clips', parsed.keys())
|
|
||||||
self.assertIn('plugins', parsed.keys())
|
|
||||||
self.assertIn('tracks', parsed.keys())
|
|
||||||
self.assertIn('markers', parsed.keys())
|
|
||||||
|
|
||||||
def test_tracks(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
self.assertEqual(len(parsed['tracks']), 14)
|
|
||||||
self.assertListEqual(["Scenes", "Robin", "Will", "Marian", "John",
|
|
||||||
"Guy", "Much", "Butcher", "Town Crier",
|
|
||||||
"Soldier 1", "Soldier 2", "Soldier 3",
|
|
||||||
"Priest", "Guest at Court"],
|
|
||||||
list(map(lambda n: n['name'], parsed['tracks'])))
|
|
||||||
self.assertListEqual(["", "[ADR] {Actor=Errol Flynn} $CN=1",
|
|
||||||
"[ADR] {Actor=Patrick Knowles} $CN=2",
|
|
||||||
"[ADR] {Actor=Olivia DeHavilland} $CN=3",
|
|
||||||
"[ADR] {Actor=Claude Raines} $CN=4",
|
|
||||||
"[ADR] {Actor=Basil Rathbone} $CN=5",
|
|
||||||
"[ADR] {Actor=Herbert Mundin} $CN=6",
|
|
||||||
"[ADR] {Actor=George Bunny} $CN=101",
|
|
||||||
"[ADR] {Actor=Leonard Mundie} $CN=102",
|
|
||||||
"[ADR] $CN=103",
|
|
||||||
"[ADR] $CN=104",
|
|
||||||
"[ADR] $CN=105",
|
|
||||||
"[ADR] {Actor=Thomas R. Mills} $CN=106",
|
|
||||||
"[ADR] $CN=107"],
|
|
||||||
list(map(lambda n: n['comments'], parsed['tracks'])))
|
|
||||||
|
|
||||||
def test_a_track(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
guy_track = parsed['tracks'][5]
|
|
||||||
self.assertEqual(guy_track['name'], 'Guy')
|
|
||||||
self.assertEqual(guy_track['comments'], '[ADR] {Actor=Basil Rathbone} $CN=5')
|
|
||||||
self.assertEqual(guy_track['user_delay_samples'], 0)
|
|
||||||
self.assertListEqual(guy_track['state'], [])
|
|
||||||
self.assertEqual(len(guy_track['clips']), 16)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['channel'], 1)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['event'], 6)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['clip_name'], "\"What's your name? You Saxon dog!\" $QN=GY106")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['start_time'], "01:04:19:15")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['end_time'], "01:04:21:28")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['duration'], "00:00:02:13")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['timestamp'], None)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['state'], 'Unmuted')
|
|
||||||
|
|
||||||
def test_memory_locations(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
|
|
||||||
self.assertEqual(len(parsed['markers']),1)
|
|
||||||
self.assertEqual(parsed['markers'][0]['number'], 1)
|
|
||||||
self.assertEqual(parsed['markers'][0]['location'], "01:00:00:00")
|
|
||||||
self.assertEqual(parsed['markers'][0]['time_reference'], 0)
|
|
||||||
self.assertEqual(parsed['markers'][0]['units'], "Samples")
|
|
||||||
|
|
||||||
def test_transform_timecode(self):
|
|
||||||
parsed = dict()
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed = visitor.visit(result)
|
|
||||||
|
|
||||||
xformer = ptulsconv.TimecodeInterpreter()
|
|
||||||
xformer.apply_session_start = True
|
|
||||||
|
|
||||||
xformed = xformer.transform(parsed)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
import unittest
|
|
||||||
import ptulsconv
|
|
||||||
import os.path
|
|
||||||
|
|
||||||
|
|
||||||
class TestRobinHood5(unittest.TestCase):
|
|
||||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood Spotting5.txt'
|
|
||||||
|
|
||||||
def test_skipped_segments(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
self.assertIsNone(parsed['files'])
|
|
||||||
self.assertIsNone(parsed['clips'])
|
|
||||||
|
|
||||||
def test_plugins(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
self.assertEqual(len(parsed['plugins']), 2)
|
|
||||||
|
|
||||||
def test_stereo_track(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
self.assertEqual(parsed['tracks'][1]['name'], 'MX WT (Stereo)')
|
|
||||||
self.assertEqual(len(parsed['tracks'][1]['clips']), 2)
|
|
||||||
self.assertEqual(parsed['tracks'][1]['clips'][0]['clip_name'], 'RobinHood.1-01.L')
|
|
||||||
self.assertEqual(parsed['tracks'][1]['clips'][1]['clip_name'], 'RobinHood.1-01.R')
|
|
||||||
|
|
||||||
def test_a_track(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
|
|
||||||
guy_track = parsed['tracks'][8]
|
|
||||||
self.assertEqual(guy_track['name'], 'Guy')
|
|
||||||
self.assertEqual(guy_track['comments'], '[ADR] {Actor=Basil Rathbone} $CN=5')
|
|
||||||
self.assertEqual(guy_track['user_delay_samples'], 0)
|
|
||||||
self.assertListEqual(guy_track['state'], ['Solo'])
|
|
||||||
self.assertEqual(len(guy_track['clips']), 16)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['channel'], 1)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['event'], 6)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['clip_name'], "\"What's your name? You Saxon dog!\" $QN=GY106")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['start_time'], "01:04:19:15.00")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['end_time'], "01:04:21:28.00")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['duration'], "00:00:02:13.00")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['timestamp'], "01:04:19:09.70")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['state'], 'Unmuted')
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
import unittest
|
|
||||||
import ptulsconv
|
|
||||||
import os.path
|
|
||||||
|
|
||||||
|
|
||||||
class TestRobinHood6(unittest.TestCase):
|
|
||||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood Spotting6.txt'
|
|
||||||
|
|
||||||
def test_a_track(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
marian_track = parsed['tracks'][6]
|
|
||||||
self.assertEqual(marian_track['name'], 'Marian')
|
|
||||||
self.assertEqual(marian_track['comments'], '[ADR] {Actor=Olivia DeHavilland} $CN=3')
|
|
||||||
self.assertEqual(marian_track['user_delay_samples'], 0)
|
|
||||||
self.assertListEqual(marian_track['state'], ['Solo'])
|
|
||||||
self.assertEqual(len(marian_track['clips']), 4)
|
|
||||||
self.assertListEqual(marian_track['plugins'], ['Channel Strip (mono)', 'ReVibe II (mono/5.1)'])
|
|
||||||
self.assertEqual(marian_track['clips'][2]['channel'], 1)
|
|
||||||
self.assertEqual(marian_track['clips'][2]['event'], 3)
|
|
||||||
self.assertEqual(marian_track['clips'][2]['clip_name'], "\"Isn't that reason enough for a Royal Ward who must obey her guardian?\" $QN=M103")
|
|
||||||
self.assertEqual(marian_track['clips'][2]['start_time'], "01:08:01:11")
|
|
||||||
self.assertEqual(marian_track['clips'][2]['end_time'], "01:08:04:24")
|
|
||||||
self.assertEqual(marian_track['clips'][2]['duration'], "00:00:03:12")
|
|
||||||
self.assertEqual(marian_track['clips'][2]['timestamp'], "01:08:01:11")
|
|
||||||
self.assertEqual(marian_track['clips'][2]['state'], 'Unmuted')
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
import unittest
|
|
||||||
import ptulsconv
|
|
||||||
import os.path
|
|
||||||
|
|
||||||
|
|
||||||
class TestRobinHoodDF(unittest.TestCase):
|
|
||||||
path = os.path.dirname(__file__) + '/export_cases/Robin Hood SpottingDF.txt'
|
|
||||||
|
|
||||||
def test_header_export_df(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
|
|
||||||
self.assertTrue('header' in parsed.keys())
|
|
||||||
self.assertEqual(parsed['header']['timecode_drop_frame'], True)
|
|
||||||
|
|
||||||
def test_a_track(self):
|
|
||||||
with open(self.path, 'r') as f:
|
|
||||||
visitor = ptulsconv.DictionaryParserVisitor()
|
|
||||||
result = ptulsconv.protools_text_export_grammar.parse(f.read())
|
|
||||||
parsed: dict = visitor.visit(result)
|
|
||||||
guy_track = parsed['tracks'][4]
|
|
||||||
self.assertEqual(guy_track['name'], 'Robin')
|
|
||||||
self.assertEqual(guy_track['comments'], '[ADR] {Actor=Errol Flynn} $CN=1')
|
|
||||||
self.assertEqual(guy_track['user_delay_samples'], 0)
|
|
||||||
self.assertListEqual(guy_track['state'], [])
|
|
||||||
self.assertEqual(len(guy_track['clips']), 10)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['channel'], 1)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['event'], 6)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['clip_name'], "\"Hold there! What's his fault?\" $QN=R106")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['start_time'], "01:05:30;15")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['end_time'], "01:05:32;01")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['duration'], "00:00:01;16")
|
|
||||||
self.assertEqual(guy_track['clips'][5]['timestamp'], None)
|
|
||||||
self.assertEqual(guy_track['clips'][5]['state'], 'Unmuted')
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
import unittest
|
|
||||||
|
|
||||||
from ptulsconv.transformations import TagInterpreter
|
|
||||||
|
|
||||||
class TestTagInterpreter(unittest.TestCase):
|
|
||||||
def test_line(self):
|
|
||||||
ti = TagInterpreter()
|
|
||||||
s1 = ti.parse_tags("this is a test")
|
|
||||||
self.assertEqual(s1['line'], "this is a test")
|
|
||||||
self.assertEqual(s1['mode'], 'Normal')
|
|
||||||
self.assertEqual(len(s1['tags']), 0)
|
|
||||||
|
|
||||||
s2 = ti.parse_tags("this! IS! Me! ** Typing! 123 <> |||")
|
|
||||||
self.assertEqual(s2['line'], "this! IS! Me! ** Typing! 123 <> |||")
|
|
||||||
self.assertEqual(s2['mode'], 'Normal')
|
|
||||||
self.assertEqual(len(s2['tags']), 0)
|
|
||||||
|
|
||||||
def test_tags(self):
|
|
||||||
ti = TagInterpreter()
|
|
||||||
s1 = ti.parse_tags("{a=100}")
|
|
||||||
self.assertIn('tags', s1)
|
|
||||||
self.assertEqual(s1['tags']['a'], "100")
|
|
||||||
|
|
||||||
s2 = ti.parse_tags("{b=This is a test} [option] $X=9")
|
|
||||||
self.assertEqual(s2['tags']['b'], 'This is a test')
|
|
||||||
self.assertEqual(s2['tags']['option'], 'option')
|
|
||||||
self.assertEqual(s2['tags']['X'], "9")
|
|
||||||
|
|
||||||
def test_modes(self):
|
|
||||||
ti = TagInterpreter()
|
|
||||||
s1 = ti.parse_tags("@ Monday Tuesday {a=1}")
|
|
||||||
self.assertEqual(s1['mode'], 'Timespan')
|
|
||||||
|
|
||||||
s2 = ti.parse_tags("Monday Tuesday {a=1}")
|
|
||||||
self.assertEqual(s2['mode'], 'Normal')
|
|
||||||
|
|
||||||
s3 = ti.parse_tags("&Monday Tuesday {a=1}")
|
|
||||||
self.assertEqual(s3['mode'], 'Append')
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
38
tests/unittests/test_adr_entity.py
Normal file
38
tests/unittests/test_adr_entity.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
import unittest
|
||||||
|
|
||||||
|
from ptulsconv.docparser.tag_compiler import Event
|
||||||
|
from ptulsconv.docparser.adr_entity import ADRLine, make_entity
|
||||||
|
from fractions import Fraction
|
||||||
|
|
||||||
|
|
||||||
|
class TestADREntity(unittest.TestCase):
|
||||||
|
def test_event2line(self):
|
||||||
|
tags = {
|
||||||
|
'Ver': '1.0',
|
||||||
|
'Actor': "Bill",
|
||||||
|
'CN': "1",
|
||||||
|
'QN': 'J1001',
|
||||||
|
'R': 'Noise',
|
||||||
|
'EFF': 'EFF'
|
||||||
|
}
|
||||||
|
event = Event(clip_name='"This is a test." (sotto voce)',
|
||||||
|
track_name="Justin",
|
||||||
|
session_name="Test Project",
|
||||||
|
tags=tags,
|
||||||
|
start=Fraction(0, 1), finish=Fraction(1, 1))
|
||||||
|
|
||||||
|
line = make_entity(event)
|
||||||
|
|
||||||
|
self.assertIsInstance(line, ADRLine)
|
||||||
|
self.assertEqual('Bill', line.actor_name)
|
||||||
|
self.assertEqual('Justin', line.character_name)
|
||||||
|
self.assertEqual('"This is a test." (sotto voce)', line.prompt)
|
||||||
|
self.assertEqual('Noise', line.reason)
|
||||||
|
self.assertEqual('J1001', line.cue_number)
|
||||||
|
self.assertEqual(True, line.effort)
|
||||||
|
self.assertEqual('Test Project', line.title)
|
||||||
|
self.assertEqual('1.0', line.version)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
@@ -1,9 +1,9 @@
|
|||||||
import unittest
|
import unittest
|
||||||
from ptulsconv import broadcast_timecode
|
from ptulsconv import broadcast_timecode
|
||||||
|
from fractions import Fraction
|
||||||
|
|
||||||
class TestBroadcastTimecode(unittest.TestCase):
|
class TestBroadcastTimecode(unittest.TestCase):
|
||||||
def test_basic_to_framecount(self):
|
def test_basic_to_frame_count(self):
|
||||||
r1 = "01:00:00:00"
|
r1 = "01:00:00:00"
|
||||||
f1 = broadcast_timecode.smpte_to_frame_count(r1, 24, False)
|
f1 = broadcast_timecode.smpte_to_frame_count(r1, 24, False)
|
||||||
self.assertEqual(f1, 86_400)
|
self.assertEqual(f1, 86_400)
|
||||||
@@ -32,19 +32,7 @@ class TestBroadcastTimecode(unittest.TestCase):
|
|||||||
s1 = broadcast_timecode.frame_count_to_smpte(c1, 30, drop_frame=True)
|
s1 = broadcast_timecode.frame_count_to_smpte(c1, 30, drop_frame=True)
|
||||||
self.assertEqual(s1, "01:00:03;18")
|
self.assertEqual(s1, "01:00:03;18")
|
||||||
|
|
||||||
def test_fractional_to_framecount(self):
|
def test_drop_frame_to_frame_count(self):
|
||||||
s1 = "00:00:01:04.105"
|
|
||||||
c1, f1 = broadcast_timecode.smpte_to_frame_count(s1, 24, drop_frame_hint=False, include_fractional=True)
|
|
||||||
self.assertEqual(c1, 28)
|
|
||||||
self.assertEqual(f1, 0.105)
|
|
||||||
|
|
||||||
def test_fractional_to_string(self):
|
|
||||||
c1 = 99
|
|
||||||
f1 = .145
|
|
||||||
s1 = broadcast_timecode.frame_count_to_smpte(c1, 25, drop_frame=False, fractional_frame=f1)
|
|
||||||
self.assertEqual(s1, "00:00:03:24.145")
|
|
||||||
|
|
||||||
def test_drop_frame_to_framecount(self):
|
|
||||||
r1 = "01:00:00;00"
|
r1 = "01:00:00;00"
|
||||||
z1 = broadcast_timecode.smpte_to_frame_count(r1, 30, drop_frame_hint=True)
|
z1 = broadcast_timecode.smpte_to_frame_count(r1, 30, drop_frame_hint=True)
|
||||||
self.assertEqual(z1, 107_892)
|
self.assertEqual(z1, 107_892)
|
||||||
@@ -61,17 +49,13 @@ class TestBroadcastTimecode(unittest.TestCase):
|
|||||||
f3 = broadcast_timecode.smpte_to_frame_count(r3, 30, True)
|
f3 = broadcast_timecode.smpte_to_frame_count(r3, 30, True)
|
||||||
self.assertEqual(f3, 1799)
|
self.assertEqual(f3, 1799)
|
||||||
|
|
||||||
def test_footage_to_framecount(self):
|
def test_footage_to_frame_count(self):
|
||||||
s1 = "194+11"
|
s1 = "194+11"
|
||||||
f1 = broadcast_timecode.footage_to_frame_count(s1)
|
f1 = broadcast_timecode.footage_to_frame_count(s1)
|
||||||
self.assertEqual(f1, 3115)
|
self.assertEqual(f1, 3115)
|
||||||
|
|
||||||
s2 = "1+1.014"
|
|
||||||
f2 = broadcast_timecode.footage_to_frame_count(s2, include_fractional=True)
|
|
||||||
self.assertEqual(f2, (17, 0.014))
|
|
||||||
|
|
||||||
s3 = "0+0.1"
|
s3 = "0+0.1"
|
||||||
f3 = broadcast_timecode.footage_to_frame_count(s3, include_fractional=False)
|
f3 = broadcast_timecode.footage_to_frame_count(s3)
|
||||||
self.assertEqual(f3, 0)
|
self.assertEqual(f3, 0)
|
||||||
|
|
||||||
def test_frame_count_to_footage(self):
|
def test_frame_count_to_footage(self):
|
||||||
@@ -79,10 +63,23 @@ class TestBroadcastTimecode(unittest.TestCase):
|
|||||||
s1 = broadcast_timecode.frame_count_to_footage(c1)
|
s1 = broadcast_timecode.frame_count_to_footage(c1)
|
||||||
self.assertEqual(s1, "1+03")
|
self.assertEqual(s1, "1+03")
|
||||||
|
|
||||||
c2 = 24
|
def test_seconds_to_smpte(self):
|
||||||
f2 = .1
|
secs = Fraction(25, 24)
|
||||||
s2 = broadcast_timecode.frame_count_to_footage(c2, fractional_frames=f2)
|
frame_duration = Fraction(1, 24)
|
||||||
self.assertEqual(s2, "1+08.100")
|
tc_format = broadcast_timecode.TimecodeFormat(frame_duration=frame_duration, logical_fps=24, drop_frame=False)
|
||||||
|
s1 = tc_format.seconds_to_smpte(secs)
|
||||||
|
self.assertEqual(s1, "00:00:01:01")
|
||||||
|
|
||||||
|
def test_unparseable_footage(self):
|
||||||
|
time_str = "10.1"
|
||||||
|
s1 = broadcast_timecode.footage_to_frame_count(time_str)
|
||||||
|
self.assertIsNone(s1)
|
||||||
|
|
||||||
|
def test_unparseable_timecode(self):
|
||||||
|
time_str = "11.32-19"
|
||||||
|
s1 = broadcast_timecode.smpte_to_frame_count(time_str, frames_per_logical_second=24)
|
||||||
|
self.assertIsNone(s1)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
24
tests/unittests/test_doc_entities.py
Normal file
24
tests/unittests/test_doc_entities.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
import unittest
|
||||||
|
from ptulsconv.docparser.doc_entity import HeaderDescriptor
|
||||||
|
from fractions import Fraction
|
||||||
|
|
||||||
|
|
||||||
|
class DocParserTestCase(unittest.TestCase):
|
||||||
|
|
||||||
|
def test_header(self):
|
||||||
|
header = HeaderDescriptor(session_name="Test Session",
|
||||||
|
sample_rate=48000.0,
|
||||||
|
bit_depth=24,
|
||||||
|
start_timecode="00:59:52:00",
|
||||||
|
timecode_format="30",
|
||||||
|
timecode_drop_frame=False,
|
||||||
|
count_audio_tracks=0,
|
||||||
|
count_clips=0,
|
||||||
|
count_files=0)
|
||||||
|
|
||||||
|
self.assertEqual(header.session_name, "Test Session")
|
||||||
|
self.assertEqual(header.start_time, Fraction((59 * 60 + 52) * 30, 30))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
15
tests/unittests/test_footage.py
Normal file
15
tests/unittests/test_footage.py
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
import unittest
|
||||||
|
from ptulsconv import footage
|
||||||
|
|
||||||
|
class TestFootage(unittest.TestCase):
|
||||||
|
def test_basic_footage(self):
|
||||||
|
r1 = "90+0"
|
||||||
|
f1 = footage.footage_to_seconds(r1)
|
||||||
|
self.assertEqual(float(f1 or 0), 60.0)
|
||||||
|
|
||||||
|
def test_feet_and_frames(self):
|
||||||
|
r1 = "1+8"
|
||||||
|
f1 = footage.footage_to_seconds(r1)
|
||||||
|
self.assertEqual(float(f1 or 0), 1.0)
|
||||||
|
|
||||||
|
|
||||||
124
tests/unittests/test_tag_compiler.py
Normal file
124
tests/unittests/test_tag_compiler.py
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
import unittest
|
||||||
|
|
||||||
|
import ptulsconv.docparser.tag_compiler
|
||||||
|
from ptulsconv.docparser import doc_entity
|
||||||
|
from fractions import Fraction
|
||||||
|
|
||||||
|
|
||||||
|
class TestTagCompiler(unittest.TestCase):
|
||||||
|
|
||||||
|
def test_one_track(self):
|
||||||
|
c = ptulsconv.docparser.tag_compiler.TagCompiler()
|
||||||
|
|
||||||
|
test_session = self.make_test_session()
|
||||||
|
|
||||||
|
c.session = test_session
|
||||||
|
|
||||||
|
events = c.compile_events()
|
||||||
|
event1 = next(events)
|
||||||
|
self.assertEqual('This is clip 1', event1.clip_name)
|
||||||
|
self.assertEqual('Track 1', event1.track_name)
|
||||||
|
self.assertEqual('Test Session', event1.session_name)
|
||||||
|
self.assertEqual(dict(A='A',
|
||||||
|
Color='Blue',
|
||||||
|
Ver='1.1',
|
||||||
|
Mode='2',
|
||||||
|
Comment='This is some text in the comments',
|
||||||
|
Part='1'), event1.tags)
|
||||||
|
self.assertEqual(Fraction(3600, 1), event1.start)
|
||||||
|
|
||||||
|
event2 = next(events)
|
||||||
|
self.assertEqual("This is the second clip ...and this is the last clip", event2.clip_name)
|
||||||
|
self.assertEqual('Track 1', event2.track_name)
|
||||||
|
self.assertEqual('Test Session', event2.session_name)
|
||||||
|
self.assertEqual(dict(R='Noise', A='A', B='B',
|
||||||
|
Color='Red',
|
||||||
|
Comment='This is some text in the comments',
|
||||||
|
N='1', Mode='2',
|
||||||
|
Ver='1.1',
|
||||||
|
M1='M1',
|
||||||
|
Part='2'), event2.tags)
|
||||||
|
|
||||||
|
self.assertEqual(c.session.header.convert_timecode('01:00:01:10'), event2.start)
|
||||||
|
self.assertEqual(c.session.header.convert_timecode('01:00:03:00'), event2.finish)
|
||||||
|
|
||||||
|
self.assertIsNone(next(events, None))
|
||||||
|
|
||||||
|
def test_tag_list(self):
|
||||||
|
session = self.make_test_session()
|
||||||
|
c = ptulsconv.docparser.tag_compiler.TagCompiler()
|
||||||
|
c.session = session
|
||||||
|
|
||||||
|
all_tags = c.compile_tag_list()
|
||||||
|
|
||||||
|
self.assertTrue(all_tags['Mode'] == {'2', '1'})
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def make_test_session():
|
||||||
|
test_header = doc_entity.HeaderDescriptor(session_name="Test Session $Ver=1.1",
|
||||||
|
sample_rate=48000,
|
||||||
|
timecode_format="24",
|
||||||
|
timecode_drop_frame=False,
|
||||||
|
bit_depth=24,
|
||||||
|
start_timecode='00:59:00:00',
|
||||||
|
count_audio_tracks=1,
|
||||||
|
count_clips=3,
|
||||||
|
count_files=0
|
||||||
|
)
|
||||||
|
test_clips = [
|
||||||
|
doc_entity.TrackClipDescriptor(channel=1, event=1,
|
||||||
|
clip_name='This is clip 1 {Color=Blue} $Mode=2',
|
||||||
|
start_time='01:00:00:00',
|
||||||
|
finish_time='01:00:01:03',
|
||||||
|
duration='00:00:01:03',
|
||||||
|
state='Unmuted',
|
||||||
|
timestamp=None),
|
||||||
|
doc_entity.TrackClipDescriptor(channel=1, event=2,
|
||||||
|
clip_name='This is the second clip {R=Noise} [B] $Mode=2',
|
||||||
|
start_time='01:00:01:10',
|
||||||
|
finish_time='01:00:02:00',
|
||||||
|
duration='00:00:00:14',
|
||||||
|
state='Unmuted',
|
||||||
|
timestamp=None),
|
||||||
|
doc_entity.TrackClipDescriptor(channel=1, event=3,
|
||||||
|
clip_name='& ...and this is the last clip $N=1 $Mode=2',
|
||||||
|
start_time='01:00:02:00',
|
||||||
|
finish_time='01:00:03:00',
|
||||||
|
duration='00:00:01:00',
|
||||||
|
state='Unmuted',
|
||||||
|
timestamp=None),
|
||||||
|
]
|
||||||
|
test_track = doc_entity.TrackDescriptor(name="Track 1 [A] {Color=Red} $Mode=1",
|
||||||
|
comments="{Comment=This is some text in the comments}",
|
||||||
|
user_delay_samples=0,
|
||||||
|
plugins=[],
|
||||||
|
state=[],
|
||||||
|
clips=test_clips)
|
||||||
|
|
||||||
|
markers = [doc_entity.MarkerDescriptor(number=1,
|
||||||
|
location="01:00:00:00",
|
||||||
|
time_reference=48000 * 3600,
|
||||||
|
units="Samples",
|
||||||
|
name="Marker 1 {Part=1}",
|
||||||
|
comments=""
|
||||||
|
),
|
||||||
|
doc_entity.MarkerDescriptor(number=2,
|
||||||
|
location="01:00:01:00",
|
||||||
|
time_reference=48000 * 3601,
|
||||||
|
units="Samples",
|
||||||
|
name="Marker 2 {Part=2}",
|
||||||
|
comments="[M1]"
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
test_session = doc_entity.SessionDescriptor(header=test_header,
|
||||||
|
tracks=[test_track],
|
||||||
|
clips=[],
|
||||||
|
files=[],
|
||||||
|
markers=markers,
|
||||||
|
plugins=[])
|
||||||
|
return test_session
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
39
tests/unittests/test_tag_interpreter.py
Normal file
39
tests/unittests/test_tag_interpreter.py
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
import unittest
|
||||||
|
|
||||||
|
from ptulsconv.docparser.tagged_string_parser_visitor import parse_tags, TagPreModes
|
||||||
|
|
||||||
|
|
||||||
|
class TestTagInterpreter(unittest.TestCase):
|
||||||
|
def test_line(self):
|
||||||
|
s1 = parse_tags("this is a test")
|
||||||
|
self.assertEqual(s1.content, "this is a test")
|
||||||
|
self.assertEqual(s1.mode, TagPreModes.NORMAL)
|
||||||
|
self.assertEqual(len(s1.tag_dict), 0)
|
||||||
|
|
||||||
|
s2 = parse_tags("this! IS! Me! ** Typing! 123 <> |||")
|
||||||
|
self.assertEqual(s2.content, "this! IS! Me! ** Typing! 123 <> |||")
|
||||||
|
self.assertEqual(s2.mode, TagPreModes.NORMAL)
|
||||||
|
self.assertEqual(len(s2.tag_dict), 0)
|
||||||
|
|
||||||
|
def test_tags(self):
|
||||||
|
s1 = parse_tags("{a=100}")
|
||||||
|
self.assertEqual(s1.tag_dict['a'], "100")
|
||||||
|
|
||||||
|
s2 = parse_tags("{b=This is a test} [option] $X=9")
|
||||||
|
self.assertEqual(s2.tag_dict['b'], 'This is a test')
|
||||||
|
self.assertEqual(s2.tag_dict['option'], 'option')
|
||||||
|
self.assertEqual(s2.tag_dict['X'], "9")
|
||||||
|
|
||||||
|
def test_modes(self):
|
||||||
|
s1 = parse_tags("@ Monday Tuesday {a=1}")
|
||||||
|
self.assertEqual(s1.mode, TagPreModes.TIMESPAN)
|
||||||
|
|
||||||
|
s2 = parse_tags("Monday Tuesday {a=1}")
|
||||||
|
self.assertEqual(s2.mode, TagPreModes.NORMAL)
|
||||||
|
|
||||||
|
s3 = parse_tags("&Monday Tuesday {a=1}")
|
||||||
|
self.assertEqual(s3.mode, TagPreModes.APPEND)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
70
tests/unittests/test_tagging.py
Normal file
70
tests/unittests/test_tagging.py
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
import unittest
|
||||||
|
from ptulsconv.docparser import doc_entity, pt_doc_parser, tag_compiler
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
|
||||||
|
class TaggingIntegratedTests(unittest.TestCase):
|
||||||
|
path = os.path.dirname(__file__) + '/../export_cases/Tag Tests/Tag Tests.txt'
|
||||||
|
|
||||||
|
def test_event_list(self):
|
||||||
|
with open(self.path, 'r') as f:
|
||||||
|
document_ast = pt_doc_parser.protools_text_export_grammar.parse(f.read())
|
||||||
|
document: doc_entity.SessionDescriptor = pt_doc_parser.DocParserVisitor().visit(document_ast)
|
||||||
|
compiler = tag_compiler.TagCompiler()
|
||||||
|
compiler.session = document
|
||||||
|
|
||||||
|
events = list(compiler.compile_events())
|
||||||
|
|
||||||
|
self.assertEqual(9, len(events))
|
||||||
|
self.assertEqual("Clip Name", events[0].clip_name)
|
||||||
|
self.assertEqual("Lorem ipsum", events[1].clip_name)
|
||||||
|
self.assertEqual("Dolor sic amet the rain in spain", events[2].clip_name)
|
||||||
|
self.assertEqual("A B C", events[3].clip_name)
|
||||||
|
self.assertEqual("Silver Bridge", events[4].clip_name)
|
||||||
|
self.assertEqual("Region 02", events[5].clip_name)
|
||||||
|
self.assertEqual("Region 12", events[6].clip_name)
|
||||||
|
self.assertEqual("Region 22", events[7].clip_name)
|
||||||
|
self.assertEqual("Region 04", events[8].clip_name)
|
||||||
|
|
||||||
|
def test_append(self):
|
||||||
|
with open(self.path, 'r') as f:
|
||||||
|
document_ast = pt_doc_parser.protools_text_export_grammar.parse(f.read())
|
||||||
|
document: doc_entity.SessionDescriptor = pt_doc_parser.DocParserVisitor().visit(document_ast)
|
||||||
|
compiler = tag_compiler.TagCompiler()
|
||||||
|
compiler.session = document
|
||||||
|
|
||||||
|
events = list(compiler.compile_events())
|
||||||
|
|
||||||
|
self.assertTrue(len(events) > 2)
|
||||||
|
|
||||||
|
self.assertEqual("Dolor sic amet the rain in spain", events[2].clip_name)
|
||||||
|
|
||||||
|
self.assertEqual(document.header.convert_timecode("01:00:10:00"), events[2].start)
|
||||||
|
self.assertEqual(document.header.convert_timecode("01:00:25:00"), events[2].finish)
|
||||||
|
|
||||||
|
self.assertIn('X', events[2].tags.keys())
|
||||||
|
self.assertIn('ABC', events[2].tags.keys())
|
||||||
|
self.assertIn('A', events[2].tags.keys())
|
||||||
|
self.assertEqual('302', events[2].tags['X'])
|
||||||
|
self.assertEqual('ABC', events[2].tags['ABC'])
|
||||||
|
self.assertEqual('1', events[2].tags['A'])
|
||||||
|
|
||||||
|
def test_successive_appends(self):
|
||||||
|
with open(self.path, 'r') as f:
|
||||||
|
document_ast = pt_doc_parser.protools_text_export_grammar.parse(f.read())
|
||||||
|
document: doc_entity.SessionDescriptor = pt_doc_parser.DocParserVisitor().visit(document_ast)
|
||||||
|
compiler = tag_compiler.TagCompiler()
|
||||||
|
compiler.session = document
|
||||||
|
|
||||||
|
events = list(compiler.compile_events())
|
||||||
|
|
||||||
|
self.assertTrue(len(events) > 3)
|
||||||
|
|
||||||
|
self.assertEqual("A B C", events[3].clip_name)
|
||||||
|
|
||||||
|
self.assertEqual(document.header.convert_timecode("01:00:15:00"), events[3].start)
|
||||||
|
self.assertEqual(document.header.convert_timecode("01:00:45:00"), events[3].finish)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
20
tests/unittests/test_utils.py
Normal file
20
tests/unittests/test_utils.py
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
import unittest
|
||||||
|
|
||||||
|
from ptulsconv.docparser.tag_compiler import apply_appends
|
||||||
|
|
||||||
|
|
||||||
|
class MyTestCase(unittest.TestCase):
|
||||||
|
def test_something(self):
|
||||||
|
v = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
|
||||||
|
expected = [1, 2, 7, 5, 6, 15, 9, 10]
|
||||||
|
|
||||||
|
should = (lambda x, y: y % 4 == 0)
|
||||||
|
do_combine = (lambda x, y: x + y)
|
||||||
|
|
||||||
|
r = apply_appends(iter(v), should, do_combine)
|
||||||
|
r1 = list(r)
|
||||||
|
self.assertEqual(r1, expected)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
Reference in New Issue
Block a user