Compare commits

12 Commits

Author SHA1 Message Date
8964bb030b Renovating this code 2025-11-05 19:58:51 -08:00
f7c9def9bf Making some changes to modernize this code. 2025-11-05 19:57:28 -08:00
Jamie Hardt
381ec6f820 Important note 2022-11-25 10:45:39 -08:00
Jamie Hardt
79fa79e706 twiddle 2021-07-15 23:00:09 -07:00
Jamie Hardt
b3b960c1da Added particle-to-speakers converter stub 2021-07-12 22:13:34 -07:00
Jamie Hardt
85470ac367 Updated compatible blender version 2021-07-11 14:14:46 -07:00
Jamie Hardt
37f1c70e57 Removed dead panel 2021-02-14 22:05:23 -08:00
Jamie Hardt
526b798e02 Update README.md 2020-10-03 12:05:38 -07:00
Jamie Hardt
877c0aeaf0 Update README.md 2020-10-03 12:02:59 -07:00
Jamie Hardt
4e0b34edfe Update README.md 2020-10-03 12:02:35 -07:00
Jamie Hardt
2169fbb994 Merge pull request #1 from iluvcapra/add-license-1
Create LICENSE
2020-10-02 22:55:51 -07:00
Jamie Hardt
966eaecbbd Create LICENSE 2020-10-02 22:55:39 -07:00
10 changed files with 229 additions and 26 deletions

1
.python-version Normal file
View File

@@ -0,0 +1 @@
3.11

29
LICENSE Normal file
View File

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2020, Jamie Hardt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,8 +1,14 @@
# soundobjects Blender Add-On
**NOTE**: _Avid made some changes to ADM file import in Pro Tools and it no longer accepts ADMs made by
this plugin. It may still work with other DAWs._
This add-on adds three operators for working with immersive 3D audio in [Blender][blender], specifically it allows you to create ADM Broadcast
WAVE files for use with [Dolby Atmos][atmos] or other object-based sound mixing workflows.
[Here](https://vimeo.com/464569386) you can see a short demo of how to add sounds to an animated Blender scene and import the resulting file
into Pro Tools and then play them into a Dolby DAPS Renderer.
[blender]: https://www.blender.org
[atmos]: https://www.dolby.com/technologies/dolby-atmos/
@@ -28,7 +34,7 @@ with a Dolby Atmos rendering workflow. This produces a multichannel WAV file wit
panning information to the client. (Has been tested and works with Avid Pro Tools 2020).
## Important Note
## Requirements
This add-on requires that the [EBU Audio Renderer](https://github.com/ebu/ebu_adm_renderer) (`ear` v2.0) Python package
be installed to Blender's Python.

View File

@@ -10,13 +10,25 @@ bl_info = {
"author": "Jamie Hardt",
"version": (0, 1),
"warning": "Requires `ear` EBU ADM Renderer package to be installed",
"blender": (2, 90, 0),
"blender": (2, 93, 1),
"category": "Import-Export",
"support": "TESTING",
"tracker_url": "https://github.com/iluvcapra/soundobjects_blender_addon/issues",
"wiki_url": ""
}
# class SoundObjectAttachmentPanel(bpy.types.Panel):
# bl_idname = "OBJECT_PT_sound_object_attachment_panel"
# bl_space_type = "VIEW_3D"
# bl_label = "Attach Sounds"
# bl_region_type = "UI"
# bl_category = "Tools"
# bl_context = "object"
# bl_options = {"DEFAULT_CLOSED"}
# def draw(self, context):
# self.layout.label(text="Attach Sounds")
def import_wav_menu_callback(self, context):
self.layout.operator(ImportWav.bl_idname, text="WAV Audio Files (.wav)")
@@ -39,7 +51,7 @@ def register():
bpy.types.TOPBAR_MT_file_export.append(export_adm_menu_callback)
bpy.types.VIEW3D_MT_object.append(add_sound_to_mesh_menu_callback)
bpy.utils.register_class(SoundObjectAttachmentPanel)
# bpy.utils.register_class(SoundObjectAttachmentPanel)
def unregister():
@@ -51,4 +63,4 @@ def unregister():
bpy.types.TOPBAR_MT_file_export.remove(export_adm_menu_callback)
bpy.types.VIEW3D_MT_object.remove(add_sound_to_mesh_menu_callback)
bpy.utils.unregister_class(SoundObjectAttachmentPanel)
# bpy.utils.unregister_class(SoundObjectAttachmentPanel)

View File

@@ -27,16 +27,20 @@ class SpatialEnvelope:
exits_range: int
def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj, considered_range: float) -> SpatialEnvelope:
def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj,
considered_range: float) -> SpatialEnvelope:
min_dist = sys.float_info.max
min_dist_frame = scene.frame_start
enters_range_frame = None
exits_range_frame = None
assert scene.camera
in_range = False
for frame in range(scene.frame_start, scene.frame_end + 1):
scene.frame_set(frame)
rel = speaker_obj.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()
rel = speaker_obj.matrix_world.to_translation() \
- scene.camera.matrix_world.to_translation()
dist = norm(rel)
if dist < considered_range and not in_range:
@@ -44,7 +48,7 @@ def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj, considere
in_range = True
if dist < min_dist:
min_dist = dist
min_dist = float(dist)
min_dist_frame = frame
if dist > considered_range and in_range:
@@ -52,6 +56,9 @@ def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj, considere
in_range = False
break
assert enters_range_frame
assert exits_range_frame
return SpatialEnvelope(considered_range=considered_range,
enters_range=enters_range_frame,
exits_range=exits_range_frame,
@@ -64,7 +71,8 @@ def closest_approach_to_camera(scene, speaker_object):
at_time = scene.frame_start
for frame in range(scene.frame_start, scene.frame_end + 1):
scene.frame_set(frame)
rel = speaker_object.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()
rel = speaker_object.matrix_world.to_translation(
) - scene.camera.matrix_world.to_translation()
dist = norm(rel)
if dist < max_dist:
@@ -74,7 +82,7 @@ def closest_approach_to_camera(scene, speaker_object):
return (max_dist, at_time)
def track_speaker_to_camera(speaker, camera):
def track_speaker_to_camera(speaker):
camera_lock = speaker.constraints.new('TRACK_TO')
camera_lock.target = bpy.context.scene.camera
camera_lock.use_target_z = True
@@ -89,7 +97,8 @@ def spot_audio(context, speaker, trigger_mode, sync_peak, sound_peak, sound_leng
audio_scene_in = envelope.closest_range
elif trigger_mode == TriggerMode.RANDOM:
audio_scene_in = floor(uniform(context.scene.frame_start, context.scene.frame_end))
audio_scene_in = floor(
uniform(context.scene.frame_start, context.scene.frame_end))
elif trigger_mode == TriggerMode.RANDOM_GAUSSIAN:
mean = (context.scene.frame_end - context.scene.frame_start) / 2
audio_scene_in = floor(gauss(mean, gaussian_stddev))
@@ -127,11 +136,6 @@ def constrain_speaker_to_mesh(speaker_obj, mesh):
location_loc.target = mesh
location_loc.target = mesh
def apply_gain_envelope(speaker_obj, envelope):
pass
def add_speakers_to_meshes(meshes, context, sound=None,
sound_name_prefix=None,
sync_peak=False,
@@ -146,7 +150,8 @@ def add_speakers_to_meshes(meshes, context, sound=None,
print("object is not mesh")
continue
envelope = sound_camera_spatial_envelope(context.scene, mesh, considered_range=5.)
envelope = sound_camera_spatial_envelope(
context.scene, mesh, considered_range=5.)
speaker_obj = next((spk for spk in context.scene.objects
if spk.type == 'SPEAKER' and spk.constraints['Copy Location'].target == mesh), None)
@@ -156,7 +161,7 @@ def add_speakers_to_meshes(meshes, context, sound=None,
speaker_obj = context.selected_objects[0]
constrain_speaker_to_mesh(speaker_obj, mesh)
track_speaker_to_camera(speaker_obj, context.scene.camera)
track_speaker_to_camera(speaker_obj)
if sound_name_prefix is not None:
sound = sound_bank.random_sound()

View File

@@ -1,7 +1,5 @@
import bpy
from contextlib import contextmanager
import lxml
import uuid
from fractions import Fraction
@@ -33,6 +31,17 @@ from .speaker_utils import (all_speakers)
def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
"""
Accepts a list of speakers and a scene, and returns a list of lists.
Each child list contains speaker objects whose sounds are guaranteed
not to overlap with one another, ordered ascending by start time.
Speakers are allocated to lists on the basis of their minimum distance to
the camera according to `speakers_by_min_distance`. Closer sounds will
appear on the earliest list if there is no overlap.
"""
def list_can_accept_speaker(speaker_list, speaker_to_test):
test_range = speaker_active_time_range(speaker_to_test)
@@ -62,7 +71,7 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
return ret_val
def adm_for_object(scene, sound_object: ObjectMix, room_size, adm_builder, object_index):
def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size, adm_builder, object_index):
fps = scene.render.fps
frame_start = scene.frame_start
frame_end = scene.frame_end
@@ -79,7 +88,7 @@ def adm_for_object(scene, sound_object: ObjectMix, room_size, adm_builder, objec
created.track_uid.bitDepth = sound_object.bits_per_sample
def adm_for_scene(scene, sound_objects: List[ObjectMix], room_size):
def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix], room_size):
adm_builder = ADMBuilder()
frame_start = scene.frame_start
@@ -92,7 +101,7 @@ def adm_for_scene(scene, sound_objects: List[ObjectMix], room_size):
adm_builder.create_content(audioContentName="Objects")
for object_index, sound_object in enumerate(sound_objects):
for object_index, sound_object in enumerate(sound_object_mixes):
adm_for_object(scene, sound_object, room_size, adm_builder, object_index)
adm = adm_builder.adm
@@ -178,11 +187,19 @@ def print_partition_results(object_groups, sound_sources, too_far_speakers):
print(" - %s" % source.name)
def partition_sounds_to_objects(scene, max_objects):
def partition_sounds_to_objects(scene, max_objects) -> \
tuple[list[list[bpy.types.Speaker]], list[bpy.types.Speaker]]:
"""
Allocates sounds in the scene into non-overlapping lists of sounds. The
second return value is the list of sounds that could not be allocated
because the max_objects limit was exceeded.
Sounds are allocated to lists according to `group_speakers`.
"""
sound_sources = all_speakers(scene)
if len(sound_sources) == 0:
return []
return [], []
object_groups = group_speakers(sound_sources, scene)
too_far_speakers = []
@@ -196,7 +213,8 @@ def partition_sounds_to_objects(scene, max_objects):
return object_groups, too_far_speakers
def generate_adm(context: bpy.types.Context, filepath: str, room_size: float, max_objects: int):
def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
max_objects: int) -> dict:
scene = context.scene
object_groups, _ = partition_sounds_to_objects(scene, max_objects)

View File

@@ -105,6 +105,10 @@ def speaker_active_time_range(speaker) -> FrameInterval:
def speakers_by_min_distance(scene, speakers):
"""
Sorts a list of speaker objects in ascending order by their closest
approach to the camera, so the objects that come nearest appear first.
"""
def min_distance(speaker):
return closest_approach_to_camera(scene, speaker)[0]

View File

@@ -1,4 +1,6 @@
def all_speakers(scene):
import bpy
def all_speakers(scene: bpy.types.Scene) -> list[bpy.types.Object]:
return [obj for obj in scene.objects if obj.type == 'SPEAKER']

View File

@@ -0,0 +1,86 @@
## This is copied from
## https://blender.stackexchange.com/questions/4956/convert-particle-system-to-animated-meshes?answertab=active#tab-top
#
# And needs to be adapted
import bpy
# Feature switches for keyframe_obj(): set any of these to False to skip
# keyframing that property group on the duplicated particle objects.
KEYFRAME_LOCATION = True
KEYFRAME_ROTATION = True
KEYFRAME_SCALE = True
KEYFRAME_VISIBILITY = True # Viewport and render visibility.
def create_objects_for_particles(ps, obj):
    """Create one duplicate of *obj* for every particle in *ps*.

    The duplicates are instances sharing *obj*'s mesh datablock (not full
    copies) and are linked into a new "particles" collection attached to
    the current scene's root collection. Returns the list of duplicates.
    """
    shared_mesh = obj.data

    collection = bpy.data.collections.new(name="particles")
    bpy.context.scene.collection.children.link(collection)

    duplicates = []
    for index in range(len(ps.particles)):
        duplicate = bpy.data.objects.new(
            name="particle.{:03d}".format(index),
            object_data=shared_mesh)
        collection.objects.link(duplicate)
        duplicates.append(duplicate)

    return duplicates
def match_and_keyframe_objects(ps, obj_list, start_frame, end_frame):
    """Animate the duplicate objects from the particle simulation.

    Steps the scene through every frame in [start_frame, end_frame] and,
    for each particle/object pair, copies the particle's state onto the
    object and inserts keyframes for it.
    """
    for current_frame in range(start_frame, end_frame + 1):
        print("frame {} processed".format(current_frame))
        bpy.context.scene.frame_set(current_frame)
        for particle, duplicate in zip(ps.particles, obj_list):
            match_object_to_particle(particle, duplicate)
            keyframe_obj(duplicate)
def match_object_to_particle(p, obj):
    """Copy the state of particle *p* onto object *obj*.

    Sets the object's location, rotation (as a quaternion), uniform scale
    from the particle size, and visibility: the object is visible only
    while the particle's alive_state is 'ALIVE'.
    """
    obj.location = p.location

    # Particle rotations are quaternions; switch the object's rotation
    # mode so the assignment below is interpreted correctly.
    obj.rotation_mode = 'QUATERNION'
    obj.rotation_quaternion = p.rotation

    obj.scale = (p.size, p.size, p.size)

    # Was a verbose if/else assigning True/False; a direct comparison
    # expresses the same thing.
    visible = p.alive_state == 'ALIVE'
    # "hide_viewport"/"hide_render" replaced "hide" in Blender >= 2.80.
    obj.hide_viewport = not visible
    obj.hide_render = not visible
def keyframe_obj(obj):
    """Insert keyframes on *obj* for each property group whose
    module-level KEYFRAME_* switch is enabled.
    """
    keyed_props = [
        (KEYFRAME_LOCATION, ("location",)),
        (KEYFRAME_ROTATION, ("rotation_quaternion",)),
        (KEYFRAME_SCALE, ("scale",)),
        # "hide_viewport" was called "hide" in Blender <= 2.79.
        (KEYFRAME_VISIBILITY, ("hide_viewport", "hide_render")),
    ]
    for enabled, props in keyed_props:
        if enabled:
            for prop in props:
                obj.keyframe_insert(prop)
def main():
    """Convert the active object's particle system into keyframed
    duplicates of the other selected object.
    """
    # Blender >= 2.80: particle data must be read from the evaluated
    # dependency graph, not the raw object, so animation and modifiers
    # are reflected.
    depsgraph = bpy.context.evaluated_depsgraph_get()

    # Convention: exactly two objects are selected, and the active one
    # carries the particle system.
    emitter = bpy.context.object
    evaluated_emitter = depsgraph.objects[emitter.name]
    template = [o for o in bpy.context.selected_objects if o != emitter][0]

    # Assume the emitter has exactly one particle system.
    system = evaluated_emitter.particle_systems[0]

    first_frame = bpy.context.scene.frame_start
    last_frame = bpy.context.scene.frame_end

    duplicates = create_objects_for_particles(system, template)
    match_and_keyframe_objects(system, duplicates, first_frame, last_frame)


if __name__ == '__main__':
    main()

40
requirements.txt Normal file
View File

@@ -0,0 +1,40 @@
asttokens==3.0.0
attrs==21.4.0
certifi==2025.10.5
charset-normalizer==3.4.4
cython==3.2.0
decorator==5.2.1
ear==2.1.0
executing==2.2.1
fake-bpy-module-4-3==20250130
idna==3.11
ipython==9.7.0
ipython-pygments-lexers==1.1.1
jedi==0.19.2
lxml==4.9.4
mathutils==3.3.0
matplotlib-inline==0.2.1
multipledispatch==0.6.0
mypy==1.18.2
mypy-extensions==1.1.0
numpy==1.26.4
parso==0.8.5
pathspec==0.12.1
pexpect==4.9.0
pip==25.0.1
prompt-toolkit==3.0.52
ptyprocess==0.7.0
pure-eval==0.2.3
pygments==2.19.2
requests==2.32.5
ruamel-yaml==0.18.16
ruamel-yaml-clib==0.2.14
scipy==1.16.3
setuptools==78.1.0
six==1.17.0
stack-data==0.6.3
traitlets==5.14.3
typing-extensions==4.15.0
urllib3==2.5.0
wcwidth==0.2.14
zstandard==0.25.0