OpenShot Library | libopenshot 0.6.0
Loading...
Searching...
No Matches
Timeline.cpp
Go to the documentation of this file.
1
9// Copyright (c) 2008-2019 OpenShot Studios, LLC
10//
11// SPDX-License-Identifier: LGPL-3.0-or-later
12
13#include "Timeline.h"
14
15#include "CacheBase.h"
16#include "CacheDisk.h"
17#include "CacheMemory.h"
18#include "CrashHandler.h"
19#include "FrameMapper.h"
20#include "Exceptions.h"
21#include "effects/Mask.h"
22
23#include <algorithm>
24#include <QDir>
25#include <QFileInfo>
26#include <QRegularExpression>
27#include <unordered_map>
28#include <cmath>
29#include <cstdint>
30
31using namespace openshot;
32
// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
    is_open(false), auto_map_clips(true), managed_cache(true), path(""), max_time(0.0), cache_epoch(0), safe_edit_frames_remaining(0)
{
    // Create CrashHandler and Attach (incase of errors)
    // NOTE(review): the CrashHandler attach call appears missing from this capture — confirm against upstream source

    // Init viewport size (curve based, because it can be animated)
    viewport_scale = Keyframe(100.0);
    viewport_x = Keyframe(0.0);
    viewport_y = Keyframe(0.0);

    // Init background color (keyframed, defaults to black)
    color.red = Keyframe(0.0);
    color.green = Keyframe(0.0);
    color.blue = Keyframe(0.0);

    // Init FileInfo struct from the constructor arguments
    info.width = width;
    info.height = height;
    info.fps = fps;
    info.sample_rate = sample_rate;
    info.channels = channels;
    info.channel_layout = channel_layout;
    info.duration = 60 * 30; // 30 minute default duration
    info.has_audio = true;
    info.has_video = true;
    info.display_ratio = openshot::Fraction(width, height);
    // Synthetic codec names marking this reader as a timeline (not a real decoder)
    info.acodec = "openshot::timeline";
    info.vcodec = "openshot::timeline";

    // Init max image size
    // NOTE(review): the SetMaxSize(...) call appears missing from this capture — confirm against upstream source

    // Init cache: in-memory final cache sized from the timeline's dimensions,
    // holding at least CACHE_MIN_FRAMES (or 4 frames per CPU) frames
    final_cache = new CacheMemory();
    const int cache_frames = std::max(Settings::Instance()->CACHE_MIN_FRAMES, OPEN_MP_NUM_PROCESSORS * 4);
    final_cache->SetMaxBytesFromInfo(cache_frames, info.width, info.height, info.sample_rate, info.channels);
}
78
79// Delegating constructor that copies parameters from a provided ReaderInfo
81 info.width, info.height, info.fps, info.sample_rate,
82 info.channels, info.channel_layout) {}
83
// Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline)
Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths) :
    is_open(false), auto_map_clips(true), managed_cache(true), path(projectPath), max_time(0.0), cache_epoch(0), safe_edit_frames_remaining(0) {

    // Create CrashHandler and Attach (incase of errors)
    // NOTE(review): the CrashHandler attach call appears missing from this capture — confirm against upstream source

    // Init final cache as NULL (will be created after loading json)
    final_cache = NULL;

    // Init viewport size (curve based, because it can be animated)
    viewport_scale = Keyframe(100.0);
    viewport_x = Keyframe(0.0);
    viewport_y = Keyframe(0.0);

    // Init background color (keyframed, defaults to black)
    color.red = Keyframe(0.0);
    color.green = Keyframe(0.0);
    color.blue = Keyframe(0.0);

    // Check if path exists
    QFileInfo filePath(QString::fromStdString(path));
    if (!filePath.exists()) {
        throw InvalidFile("Timeline project file could not be opened.", path);
    }

    // Check OpenShot Install Path exists
    // NOTE(review): `s` is used below but its declaration (likely `Settings *s = Settings::Instance();`) appears missing from this capture
    QDir openshotPath(QString::fromStdString(s->PATH_OPENSHOT_INSTALL));
    if (!openshotPath.exists()) {
        throw InvalidFile("PATH_OPENSHOT_INSTALL could not be found.", s->PATH_OPENSHOT_INSTALL);
    }
    QDir openshotTransPath(openshotPath.filePath("transitions"));
    if (!openshotTransPath.exists()) {
        throw InvalidFile("PATH_OPENSHOT_INSTALL/transitions could not be found.", openshotTransPath.path().toStdString());
    }

    // Determine asset path: sibling folder "<first 30 chars of project name>_assets"
    QString asset_name = filePath.baseName().left(30) + "_assets";
    QDir asset_folder(filePath.dir().filePath(asset_name));
    if (!asset_folder.exists()) {
        // Create directory if needed
        asset_folder.mkpath(".");
    }

    // Load UTF-8 project file into QString
    QFile projectFile(QString::fromStdString(path));
    projectFile.open(QFile::ReadOnly);
    QString projectContents = QString::fromUtf8(projectFile.readAll());

    // Convert all relative paths into absolute paths (if requested)
    if (convert_absolute_paths) {

        // Find all "image" or "path" references in JSON (using regex). Must loop through match results
        // due to our path matching needs, which are not possible with the QString::replace() function.
        QRegularExpression allPathsRegex(QStringLiteral("\"(image|path)\":.*?\"(.*?)\""));
        std::vector<QRegularExpressionMatch> matchedPositions;
        QRegularExpressionMatchIterator i = allPathsRegex.globalMatch(projectContents);
        while (i.hasNext()) {
            QRegularExpressionMatch match = i.next();
            if (match.hasMatch()) {
                // Push all match objects into a vector (so we can reverse them later)
                matchedPositions.push_back(match);
            }
        }

        // Reverse the matches (bottom of file to top, so our replacements don't break our match positions)
        std::vector<QRegularExpressionMatch>::reverse_iterator itr;
        for (itr = matchedPositions.rbegin(); itr != matchedPositions.rend(); itr++) {
            QRegularExpressionMatch match = *itr;
            QString relativeKey = match.captured(1); // image or path
            QString relativePath = match.captured(2); // relative file path
            QString absolutePath = "";

            // Find absolute path of all path, image (including special replacements of @assets and @transitions)
            if (relativePath.startsWith("@assets")) {
                absolutePath = QFileInfo(asset_folder.absoluteFilePath(relativePath.replace("@assets", "."))).canonicalFilePath();
            } else if (relativePath.startsWith("@transitions")) {
                absolutePath = QFileInfo(openshotTransPath.absoluteFilePath(relativePath.replace("@transitions", "."))).canonicalFilePath();
            } else {
                absolutePath = QFileInfo(filePath.absoluteDir().absoluteFilePath(relativePath)).canonicalFilePath();
            }

            // Replace path in JSON content, if an absolute path was successfully found
            // (canonicalFilePath() returns "" when the target does not exist)
            if (!absolutePath.isEmpty()) {
                projectContents.replace(match.capturedStart(0), match.capturedLength(0), "\"" + relativeKey + "\": \"" + absolutePath + "\"");
            }
        }
        // Clear matches
        matchedPositions.clear();
    }

    // Set JSON of project (parses clips/effects/settings into this timeline)
    SetJson(projectContents.toStdString());

    // Calculate valid duration and set has_audio and has_video
    // based on content inside this Timeline's clips.
    float calculated_duration = 0.0;
    for (auto clip : clips)
    {
        // Track the latest clip end time (position + duration) seen so far
        float clip_last_frame = clip->Position() + clip->Duration();
        if (clip_last_frame > calculated_duration)
            calculated_duration = clip_last_frame;
        if (clip->Reader() && clip->Reader()->info.has_audio)
            info.has_audio = true;
        if (clip->Reader() && clip->Reader()->info.has_video)
            info.has_video = true;

    }
    info.video_length = calculated_duration * info.fps.ToFloat();
    info.duration = calculated_duration;

    // Init FileInfo settings (synthetic codec names; timeline always exposes A/V)
    info.acodec = "openshot::timeline";
    info.vcodec = "openshot::timeline";
    info.has_video = true;
    info.has_audio = true;

    // Init max image size
    // NOTE(review): the SetMaxSize(...) call appears missing from this capture — confirm against upstream source

    // Init cache (sized from timeline dimensions; at least CACHE_MIN_FRAMES frames)
    final_cache = new CacheMemory();
    const int cache_frames = std::max(Settings::Instance()->CACHE_MIN_FRAMES, OPEN_MP_NUM_PROCESSORS * 4);
    final_cache->SetMaxBytesFromInfo(cache_frames, info.width, info.height, info.sample_rate, info.channels);
}
211
    // Destructor: closes the timeline, frees all owned clips/effects/mappers,
    // and deletes the final cache when this timeline owns (manages) it.
    // NOTE(review): the signature line (Timeline::~Timeline() {) appears missing from this capture
    if (is_open) {
        // Auto Close if not already
        Close();
    }

    // Remove all clips, effects, and frame mappers
    Clear();

    // Destroy previous cache (if managed by timeline)
    if (managed_cache && final_cache) {
        delete final_cache;
        final_cache = NULL;
    }
}
227
228// Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
229void Timeline::AddTrackedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
230
231 // Search for the tracked object on the map
232 auto iterator = tracked_objects.find(trackedObject->Id());
233
234 if (iterator != tracked_objects.end()){
235 // Tracked object's id already present on the map, overwrite it
236 iterator->second = trackedObject;
237 }
238 else{
239 // Tracked object's id not present -> insert it on the map
240 tracked_objects[trackedObject->Id()] = trackedObject;
241 }
242
243 return;
244}
245
246// Return tracked object pointer by it's id
247std::shared_ptr<openshot::TrackedObjectBase> Timeline::GetTrackedObject(std::string id) const{
248
249 // Search for the tracked object on the map
250 auto iterator = tracked_objects.find(id);
251
252 if (iterator != tracked_objects.end()){
253 // Id found, return the pointer to the tracked object
254 std::shared_ptr<openshot::TrackedObjectBase> trackedObject = iterator->second;
255 return trackedObject;
256 }
257 else {
258 // Id not found, return a null pointer
259 return nullptr;
260 }
261}
262
263// Return the ID's of the tracked objects as a list of strings
264std::list<std::string> Timeline::GetTrackedObjectsIds() const{
265
266 // Create a list of strings
267 std::list<std::string> trackedObjects_ids;
268
269 // Iterate through the tracked_objects map
270 for (auto const& it: tracked_objects){
271 // Add the IDs to the list
272 trackedObjects_ids.push_back(it.first);
273 }
274
275 return trackedObjects_ids;
276}
277
#ifdef USE_OPENCV
// Return the trackedObject's properties as a JSON string:
// corner coordinates (x1,y1)-(x2,y2) plus rotation angle for the given frame.
std::string Timeline::GetTrackedObjectValues(std::string id, int64_t frame_number) const {

    // JSON payload built below and returned as styled text
    Json::Value trackedObjectJson;

    // Convert a bounding box (center + size + angle) into corner coordinates
    // and write it into the payload (this was previously duplicated verbatim
    // in two branches).
    auto writeBox = [&trackedObjectJson](const BBox& box) {
        trackedObjectJson["x1"] = box.cx - (box.width / 2);
        trackedObjectJson["y1"] = box.cy - (box.height / 2);
        trackedObjectJson["x2"] = box.cx + (box.width / 2);
        trackedObjectJson["y2"] = box.cy + (box.height / 2);
        trackedObjectJson["rotation"] = box.angle;
    };

    // Write an all-zero box (used when no usable data exists)
    auto writeZeros = [&trackedObjectJson]() {
        trackedObjectJson["x1"] = 0;
        trackedObjectJson["y1"] = 0;
        trackedObjectJson["x2"] = 0;
        trackedObjectJson["y2"] = 0;
        trackedObjectJson["rotation"] = 0;
    };

    // Search for the tracked object on the map
    auto iterator = tracked_objects.find(id);
    if (iterator != tracked_objects.end())
    {
        // Id found, cast the stored pointer to a TrackedObjectBBox
        std::shared_ptr<TrackedObjectBBox> trackedObject = std::static_pointer_cast<TrackedObjectBBox>(iterator->second);

        if (trackedObject->ExactlyContains(frame_number)) {
            // Box exists at the exact requested frame
            writeBox(trackedObject->GetBox(frame_number));
        } else if (!trackedObject->BoxVec.empty()) {
            // Fall back to the object's first recorded box
            // (previously dereferenced BoxVec.begin() with no empty-check — UB on an empty container)
            writeBox(trackedObject->BoxVec.begin()->second);
        } else {
            // No boxes recorded for this object at all
            writeZeros();
        }
    }
    else {
        // Id not found, return all 0 values
        writeZeros();
    }

    return trackedObjectJson.toStyledString();
}
#endif
336
// Add an openshot::Clip to the timeline
// NOTE(review): the signature line (likely `void Timeline::AddClip(Clip* clip)`) appears missing from this capture
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Assign timeline to clip, so the clip can resolve timeline-level settings
    clip->ParentTimeline(this);

    // Clear cache of clip and nested reader (if any), so stale frames rendered
    // under different settings are not reused
    if (clip->Reader() && clip->Reader()->GetCache())
        clip->Reader()->GetCache()->Clear();

    // All clips should be converted to the frame rate of this timeline
    if (auto_map_clips) {
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);
    }

    // Add clip to list
    clips.push_back(clip);

    // Sort clips and refresh the cached min/max timeline times
    sort_clips();
}
362
// Add an effect to the timeline
// NOTE(review): the signature line (likely `void Timeline::AddEffect(EffectBase* effect)`) appears missing from this capture
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Assign timeline to effect
    effect->ParentTimeline(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects and refresh the cached min/max timeline times
    sort_effects();
}
378
// Remove an effect from the timeline
// NOTE(review): the signature line (likely `void Timeline::RemoveEffect(EffectBase* effect)`) appears missing from this capture
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Detach the effect from the timeline's list
    effects.remove(effect);

    // Delete effect object (if timeline allocated it)
    if (allocated_effects.count(effect)) {
        allocated_effects.erase(effect); // erase before nulling the pointer
        delete effect;
        effect = NULL;
    }

    // Re-sort remaining effects and refresh cached min/max times
    sort_effects();
}
397
// Remove an openshot::Clip to the timeline
// NOTE(review): the signature line (likely `void Timeline::RemoveClip(Clip* clip)`) appears missing from this capture
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Detach the clip from the timeline's list
    clips.remove(clip);

    // Delete clip object (if timeline allocated it)
    if (allocated_clips.count(clip)) {
        allocated_clips.erase(clip); // erase before nulling the pointer
        delete clip;
        clip = NULL;
    }

    // Re-sort remaining clips and refresh cached min/max times
    sort_clips();
}
416
417// Look up a clip
418openshot::Clip* Timeline::GetClip(const std::string& id)
419{
420 // Find the matching clip (if any)
421 for (const auto& clip : clips) {
422 if (clip->Id() == id) {
423 return clip;
424 }
425 }
426 return nullptr;
427}
428
// Look up a timeline effect
// NOTE(review): the signature line (likely `openshot::EffectBase* Timeline::GetEffect(const std::string& id)`) appears missing from this capture
{
    // Find the matching effect (if any); linear scan over timeline-level effects
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    // No timeline-level effect with this id
    return nullptr;
}
440
// Look up an effect owned by any clip, by effect ID
// NOTE(review): the signature line (likely `openshot::EffectBase* Timeline::GetClipEffect(const std::string& id)`) appears missing from this capture
{
    // Search all clips for matching effect ID
    for (const auto& clip : clips) {
        const auto e = clip->GetEffect(id);
        if (e != nullptr) {
            return e;
        }
    }
    // No clip owns an effect with this id
    return nullptr;
}
452
453// Return the list of effects on all clips
454std::list<openshot::EffectBase*> Timeline::ClipEffects() const {
455
456 // Initialize the list
457 std::list<EffectBase*> timelineEffectsList;
458
459 // Loop through all clips
460 for (const auto& clip : clips) {
461
462 // Get the clip's list of effects
463 std::list<EffectBase*> clipEffectsList = clip->Effects();
464
465 // Append the clip's effects to the list
466 timelineEffectsList.insert(timelineEffectsList.end(), clipEffectsList.begin(), clipEffectsList.end());
467 }
468
469 return timelineEffectsList;
470}
471
// Compute the end time of the latest timeline element
// NOTE(review): the signature line (likely `double Timeline::GetMaxTime()`) appears missing from this capture
    // Return cached max_time variable (threadsafe; maintained by calculate_max_duration)
    return max_time;
}
477
// Compute the highest frame# based on the latest time and FPS
// NOTE(review): the signature line (likely `int64_t Timeline::GetMaxFrame()`) appears missing from this capture
    const double fps = info.fps.ToDouble();
    const double t = GetMaxTime();
    // Inclusive start, exclusive end -> ceil at the end boundary
    return static_cast<int64_t>(std::ceil(t * fps));
}
485
// Compute the first frame# based on the first clip position
// NOTE(review): the signature line (likely `int64_t Timeline::GetMinFrame()`) appears missing from this capture
    const double fps = info.fps.ToDouble();
    const double t = GetMinTime();
    // Inclusive start -> floor at the start boundary, then 1-index
    return static_cast<int64_t>(std::floor(t * fps)) + 1;
}
493
// Compute the start time of the first timeline clip
// NOTE(review): the signature line (likely `double Timeline::GetMinTime()`) appears missing from this capture
    // Return cached min_time variable (threadsafe; maintained by calculate_max_duration)
    return min_time;
}
499
// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
    // Determine type of reader
    ReaderBase* clip_reader = NULL;
    if (clip->Reader()->Name() == "FrameMapper")
    {
        // Get the existing reader
        clip_reader = (ReaderBase*) clip->Reader();

        // Update the mapping
        FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
        // NOTE(review): the ChangeMapping(...) call on clip_mapped_reader appears missing from this capture

    } else {

        // Create a new FrameMapper to wrap the current reader
        // NOTE(review): the `FrameMapper* mapper = new FrameMapper(...)` line appears missing from this capture;
        // the mapper is tracked in allocated_frame_mappers so Clear() can delete it later
        allocated_frame_mappers.insert(mapper);
        clip_reader = (ReaderBase*) mapper;
    }

    // Update clip reader
    clip->Reader(clip_reader);
}
525
// Apply the timeline's framerate and samplerate to all clips
// NOTE(review): the signature line (likely `void Timeline::ApplyMapperToClips()`) appears missing from this capture
{
    // Clear all cached frames
    // NOTE(review): the cache-clearing call appears missing from this capture

    // Loop through all clips
    for (auto clip : clips)
    {
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);
    }
}
539
540// Calculate time of a frame number, based on a framerate
541double Timeline::calculate_time(int64_t number, Fraction rate)
542{
543 // Get float version of fps fraction
544 double raw_fps = rate.ToFloat();
545
546 // Return the time (in seconds) of this frame
547 return double(number - 1) / raw_fps;
548}
549
// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct* options)
{
    // Debug output
    // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
        "Timeline::apply_effects",
        "frame->number", frame->number,
        "timeline_frame_number", timeline_frame_number,
        "layer", layer);

    // Find Effects at this position and layer
    for (auto effect : effects)
    {
        // Convert the effect's position/duration (seconds) to 1-based frame numbers
        const double fpsD = info.fps.ToDouble();
        int64_t effect_start_position = static_cast<int64_t>(std::llround(effect->Position() * fpsD)) + 1;
        int64_t effect_end_position = static_cast<int64_t>(std::llround((effect->Position() + effect->Duration()) * fpsD));

        // Effect is active at this frame, on this layer?
        bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

        // Clip is visible
        if (does_effect_intersect)
        {
            // Determine the frame needed for this clip (based on the position on the timeline)
            int64_t effect_start_frame = static_cast<int64_t>(std::llround(effect->Start() * fpsD)) + 1;
            int64_t effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

            if (!options->is_top_clip)
                continue; // skip effect, if overlapped/covered by another clip on same layer

            if (options->is_before_clip_keyframes != effect->info.apply_before_clip)
                continue; // skip effect, if this filter does not match

            // Debug output
            // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
                "Timeline::apply_effects (Process Effect)",
                "effect_frame_number", effect_frame_number,
                "does_effect_intersect", does_effect_intersect);

            // Apply the effect to this frame
            frame = effect->ProcessFrame(frame, effect_frame_number);
        }

    } // end effect loop

    // Return modified frame
    return frame;
}
598
// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> background_frame, Clip* clip, int64_t number, openshot::TimelineInfoStruct* options)
{
    std::shared_ptr<Frame> new_frame;

    // Init some basic properties about this frame
    int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

    try {
        // Debug output
        // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
            "Timeline::GetOrCreateFrame (from reader)",
            "number", number,
            "samples_in_frame", samples_in_frame);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        new_frame = std::shared_ptr<Frame>(clip->GetFrame(background_frame, number, options));

        // Return real frame
        return new_frame;

    } catch (const ReaderClosed & e) {
        // Reader was closed mid-request; fall through to the blank-frame path
    } catch (const OutOfBoundsFrame & e) {
        // Requested frame is outside the clip's range; fall through to the blank-frame path
    }

    // Debug output
    // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
        "Timeline::GetOrCreateFrame (create blank)",
        "number", number,
        "samples_in_frame", samples_in_frame);

    // Create blank frame
    // NOTE(review): new_frame is still empty (null) here — the caller (add_layer)
    // treats a null return as "skip this clip"; confirm no blank-frame creation was dropped by this capture
    return new_frame;
}
635
// Process a new layer of video or audio: composite one clip's frame (video + audio)
// onto the in-progress timeline frame `new_frame`.
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, bool is_top_clip, bool force_safe_composite, float max_volume)
{
    // Create timeline options (with details about this current frame request)
    TimelineInfoStruct options{};
    options.is_top_clip = is_top_clip;
    options.is_before_clip_keyframes = true;
    options.force_safe_composite = force_safe_composite;

    // Get the clip's frame, composited on top of the current timeline frame
    std::shared_ptr<Frame> source_frame;
    source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number, &options);

    // No frame found... so bail
    if (!source_frame)
        return;

    // Debug output
    // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
        "Timeline::add_layer",
        "new_frame->number", new_frame->number,
        "clip_frame_number", clip_frame_number);

    /* COPY AUDIO - with correct volume */
    if (source_clip->Reader()->info.has_audio) {
        // Debug output
        // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
            "Timeline::add_layer (Copy Audio)",
            "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio,
            "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(),
            "info.channels", info.channels,
            "clip_frame_number", clip_frame_number);

        // Only mix when channel counts match and the clip's audio is enabled at this frame
        if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
        {
            // Ensure timeline frame matches the source samples once per frame
            if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
                new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
            }

            // Apply transition-driven equal-power audio fades for clips covered by a Mask transition.
            // (Pair of gains — presumably previous-frame and current-frame; TODO confirm against ResolveTransitionAudioGains)
            const auto transition_audio_gains = ResolveTransitionAudioGains(source_clip, new_frame->number, is_top_clip);

            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
            {
                // Get volume from previous frame and this frame
                float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
                float volume = source_clip->volume.GetValue(clip_frame_number);
                previous_volume *= transition_audio_gains.first;
                volume *= transition_audio_gains.second;
                int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
                int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

                // Apply volume mixing strategy
                if (source_clip->mixing == VOLUME_MIX_AVERAGE && max_volume > 1.0) {
                    // Don't allow this clip to exceed 100% (divide volume equally between all overlapping clips with volume
                    previous_volume = previous_volume / max_volume;
                    volume = volume / max_volume;
                }
                else if (source_clip->mixing == VOLUME_MIX_REDUCE && max_volume > 1.0) {
                    // Reduce clip volume by a bit, hoping it will prevent exceeding 100% (but it is very possible it will)
                    previous_volume = previous_volume * 0.77;
                    volume = volume * 0.77;
                }

                // If channel filter enabled, check for correct channel (and skip non-matching channels)
                if (channel_filter != -1 && channel_filter != channel)
                    continue; // skip to next channel

                // If no volume on this frame or previous frame, do nothing
                if (previous_volume == 0.0 && volume == 0.0)
                    continue; // skip to next channel

                // If channel mapping disabled, just use the current channel
                if (channel_mapping == -1)
                    channel_mapping = channel;

                // Apply ramp to source frame (if needed)
                if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
                    source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

                // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, to
                // be sure to set the gain's correctly, so the sum does not exceed 1.0 (of audio distortion will happen).
                new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
            }
        }
        else
            // Debug output
            // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
                "Timeline::add_layer (No Audio Copied - Wrong # of Channels)",
                "source_clip->Reader()->info.has_audio",
                source_clip->Reader()->info.has_audio,
                "source_frame->GetAudioChannelsCount()",
                source_frame->GetAudioChannelsCount(),
                "info.channels", info.channels,
                "clip_frame_number", clip_frame_number);
    }

    // Debug output
    // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
        "Timeline::add_layer (Transform: Composite Image Layer: Completed)",
        "source_frame->number", source_frame->number,
        "new_frame->GetImage()->width()", new_frame->GetWidth(),
        "new_frame->GetImage()->height()", new_frame->GetHeight());
}
741
// Update the list of 'opened' clips: open clips that now intersect the requested
// frame, and close (and drop) clips that no longer do.
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
        "Timeline::update_open_clips (before)",
        "does_clip_intersect", does_clip_intersect,
        "closing_clips.size()", closing_clips.size(),
        "open_clips.size()", open_clips.size());

    // is clip already in list?
    bool clip_found = open_clips.count(clip);

    if (clip_found && !does_clip_intersect)
    {
        // Remove clip from 'opened' list, because it's closed now
        open_clips.erase(clip);

        // Close clip
        clip->Close();
    }
    else if (!clip_found && does_clip_intersect)
    {
        // Add clip to 'opened' list, because it's missing
        open_clips[clip] = clip;

        try {
            // Open the clip
            clip->Open();

        } catch (const InvalidFile & e) {
            // Clip file invalid/unreadable — it stays in open_clips but unopened
        }
    }

    // Debug output
    // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
        "Timeline::update_open_clips (after)",
        "does_clip_intersect", does_clip_intersect,
        "clip_found", clip_found,
        "closing_clips.size()", closing_clips.size(),
        "open_clips.size()", open_clips.size());
}
787
788// Calculate the max and min duration (in seconds) of the timeline, based on all the clips, and cache the value
789void Timeline::calculate_max_duration() {
790 double last_clip = 0.0;
791 double last_effect = 0.0;
792 double first_clip = std::numeric_limits<double>::max();
793 double first_effect = std::numeric_limits<double>::max();
794
795 // Find the last and first clip
796 if (!clips.empty()) {
797 // Find the clip with the maximum end frame
798 const auto max_clip = std::max_element(
799 clips.begin(), clips.end(), CompareClipEndFrames());
800 last_clip = (*max_clip)->Position() + (*max_clip)->Duration();
801
802 // Find the clip with the minimum start position (ignoring layer)
803 const auto min_clip = std::min_element(
804 clips.begin(), clips.end(), [](const openshot::Clip* lhs, const openshot::Clip* rhs) {
805 return lhs->Position() < rhs->Position();
806 });
807 first_clip = (*min_clip)->Position();
808 }
809
810 // Find the last and first effect
811 if (!effects.empty()) {
812 // Find the effect with the maximum end frame
813 const auto max_effect = std::max_element(
814 effects.begin(), effects.end(), CompareEffectEndFrames());
815 last_effect = (*max_effect)->Position() + (*max_effect)->Duration();
816
817 // Find the effect with the minimum start position
818 const auto min_effect = std::min_element(
819 effects.begin(), effects.end(), [](const openshot::EffectBase* lhs, const openshot::EffectBase* rhs) {
820 return lhs->Position() < rhs->Position();
821 });
822 first_effect = (*min_effect)->Position();
823 }
824
825 // Calculate the max and min time
826 max_time = std::max(last_clip, last_effect);
827 min_time = std::min(first_clip, first_effect);
828
829 // If no clips or effects exist, set min_time to 0
830 if (clips.empty() && effects.empty()) {
831 min_time = 0.0;
832 max_time = 0.0;
833 }
834}
835
// Sort clips by position on the timeline
void Timeline::sort_clips()
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Debug output
    // NOTE(review): the `ZmqLogger::Instance()->AppendDebugMethod(` opening appears missing from this capture
        "Timeline::SortClips",
        "clips.size()", clips.size());

    // sort clips (CompareClips ordering)
    clips.sort(CompareClips());

    // calculate max timeline duration (refresh cached min/max times)
    calculate_max_duration();
}
853
854// Sort effects by position on the timeline
855void Timeline::sort_effects()
856{
857 // Get lock (prevent getting frames while this happens)
858 const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);
859
860 // sort clips
861 effects.sort(CompareEffects());
862
863 // calculate max timeline duration
864 calculate_max_duration();
865}
866
// Clear all clips from timeline
// NOTE(review): the signature line (likely `void Timeline::Clear()`) appears missing from this capture
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Clear");

    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Close all open clips
    for (auto clip : clips)
    {
        // false = "does not intersect", which closes the clip if it was open
        update_open_clips(clip, false);

        // Delete clip object (if timeline allocated it)
        bool allocated = allocated_clips.count(clip);
        if (allocated) {
            delete clip;
        }
    }
    // Clear all clips
    clips.clear();
    allocated_clips.clear();

    // Close all effects
    for (auto effect : effects)
    {
        // Delete effect object (if timeline allocated it)
        bool allocated = allocated_effects.count(effect);
        if (allocated) {
            delete effect;
        }
    }
    // Clear all effects
    effects.clear();
    allocated_effects.clear();

    // Delete all FrameMappers; detach the wrapped reader first so the mapper's
    // Close()/delete does not touch a reader owned elsewhere
    for (auto mapper : allocated_frame_mappers)
    {
        mapper->Reader(NULL);
        mapper->Close();
        delete mapper;
    }
    allocated_frame_mappers.clear();
}
912
// Close the reader (and any resources it was consuming)
// NOTE(review): the signature line (likely `void Timeline::Close()`) appears missing from this capture
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close");

    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Close all open clips
    for (auto clip : clips)
    {
        // false = "does not intersect", which closes the clip if it was open
        update_open_clips(clip, false);
    }

    // Mark timeline as closed
    is_open = false;

    // Clear all cache (deep clear, including nested Readers)
    ClearAllCache(true);
}
934
// Open the reader (and start consuming resources)
// NOTE(review): the signature line (likely `void Timeline::Open()`) appears missing from this capture
{
    // Mark the timeline readable; individual clips are opened lazily per frame request
    is_open = true;
}
940
941// Compare 2 floating point numbers for equality
942bool Timeline::isEqual(double a, double b)
943{
944 return fabs(a - b) < 0.000001;
945}
946
947// Get an openshot::Frame object for a specific frame number of this reader.
// Strategy: clamp the request into range, try the shared frame cache twice (once
// lock-free, once under getFrameMutex), and otherwise compose the frame from all
// clips that intersect the requested timeline position. Frames past the timeline
// end are rendered blank and are neither looked up in nor added to the cache.
// NOTE(review): the `ZmqLogger::…AppendDebugMethod(` call lines are missing from
// this extracted listing (e.g. source lines 962, 980, 993) — the orphaned string
// argument lines below belong to those calls; confirm against the original file.
948std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
949{
950 // Adjust out of bounds frame number
951 if (requested_frame < 1)
952 requested_frame = 1;
953 const int64_t max_frame = GetMaxFrame();
954 const bool past_timeline_end = (max_frame > 0 && requested_frame > max_frame);
955
956 // Check cache
957 std::shared_ptr<Frame> frame;
958 if (!past_timeline_end)
959 frame = final_cache->GetFrame(requested_frame);
960 if (frame) {
961 // Debug output
963 "Timeline::GetFrame (Cached frame found)",
964 "requested_frame", requested_frame);
965
966 // Return cached frame
967 return frame;
968 }
969 else
970 {
971 // Prevent async calls to the following code
972 const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
973
974 // Check cache 2nd time (another thread may have produced it while we waited for the lock)
975 std::shared_ptr<Frame> frame;
976 if (!past_timeline_end)
977 frame = final_cache->GetFrame(requested_frame);
978 if (frame) {
979 // Debug output
981 "Timeline::GetFrame (Cached frame found on 2nd check)",
982 "requested_frame", requested_frame);
983
984 // Return cached frame
985 return frame;
986 } else {
987 // Get a list of clips that intersect with the requested section of timeline
988 // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
989 std::vector<Clip *> nearby_clips;
990 nearby_clips = find_intersecting_clips(requested_frame, 1, true);
991
992 // Debug output
994 "Timeline::GetFrame (processing frame)",
995 "requested_frame", requested_frame,
996 "omp_get_thread_num()", omp_get_thread_num());
997
998 // Init some basic properties about this frame
999 int samples_in_frame = Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels);
1000
1001 // Create blank frame (which will become the requested frame)
1002 std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(requested_frame, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
1003 new_frame->AddAudioSilence(samples_in_frame);
1004 new_frame->SampleRate(info.sample_rate);
1005 new_frame->ChannelsLayout(info.channel_layout);
1006
1007 // Debug output
1009 "Timeline::GetFrame (Adding solid color)",
1010 "requested_frame", requested_frame,
1011 "info.width", info.width,
1012 "info.height", info.height);
1013
1014 // Add Background Color to 1st layer (if animated or not black)
1015 if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
1016 (color.red.GetValue(requested_frame) != 0.0 || color.green.GetValue(requested_frame) != 0.0 ||
1017 color.blue.GetValue(requested_frame) != 0.0))
1018 new_frame->AddColor(preview_width, preview_height, color.GetColorHex(requested_frame));
1019
1020 // Debug output
1022 "Timeline::GetFrame (Loop through clips)",
1023 "requested_frame", requested_frame,
1024 "clips.size()", clips.size(),
1025 "nearby_clips.size()", nearby_clips.size());
1026
1027 // Precompute per-clip timing for this requested frame
1028 struct ClipInfo {
1029 Clip* clip;
1030 int64_t start_pos;
1031 int64_t end_pos;
1032 int64_t start_frame;
1033 int64_t frame_number;
1034 bool intersects;
1035 };
1036 std::vector<ClipInfo> clip_infos;
1037 clip_infos.reserve(nearby_clips.size());
1038 const double fpsD = info.fps.ToDouble();
1039
1040 for (auto clip : nearby_clips) {
1041 int64_t start_pos = static_cast<int64_t>(std::llround(clip->Position() * fpsD)) + 1;
1042 int64_t end_pos = static_cast<int64_t>(std::llround((clip->Position() + clip->Duration()) * fpsD));
1043 bool intersects = (start_pos <= requested_frame && end_pos >= requested_frame);
1044 int64_t start_frame = static_cast<int64_t>(std::llround(clip->Start() * fpsD)) + 1;
1045 int64_t frame_number = requested_frame - start_pos + start_frame;
1046 clip_infos.push_back({clip, start_pos, end_pos, start_frame, frame_number, intersects});
1047 }
1048
1049 // Determine top clip per layer (linear, no nested loop)
1050 std::unordered_map<int, int64_t> top_start_for_layer;
1051 std::unordered_map<int, Clip*> top_clip_for_layer;
1052 for (const auto& ci : clip_infos) {
1053 if (!ci.intersects) continue;
1054 const int layer = ci.clip->Layer();
1055 auto it = top_start_for_layer.find(layer);
1056 if (it == top_start_for_layer.end() || ci.start_pos > it->second) {
1057 top_start_for_layer[layer] = ci.start_pos; // strictly greater to match prior logic
1058 top_clip_for_layer[layer] = ci.clip;
1059 }
1060 }
1061
1062 // Compute max_volume across all overlapping clips once
1063 float max_volume_sum = 0.0f;
1064 for (const auto& ci : clip_infos) {
1065 if (!ci.intersects) continue;
1066 if (ci.clip->Reader() && ci.clip->Reader()->info.has_audio &&
1067 ci.clip->has_audio.GetInt(ci.frame_number) != 0) {
1068 max_volume_sum += static_cast<float>(ci.clip->volume.GetValue(ci.frame_number));
1069 }
1070 }
1071
1072 // Compose intersecting clips in a single pass
1073 const int safe_remaining = safe_edit_frames_remaining.load(std::memory_order_relaxed);
1074 const bool force_safe_composite = (safe_remaining > 0);
1075 if (force_safe_composite) {
1076 safe_edit_frames_remaining.fetch_sub(1, std::memory_order_relaxed);
1077 }
1078 for (const auto& ci : clip_infos) {
1079 // Debug output
1081 "Timeline::GetFrame (Does clip intersect)",
1082 "requested_frame", requested_frame,
1083 "clip->Position()", ci.clip->Position(),
1084 "clip->Duration()", ci.clip->Duration(),
1085 "does_clip_intersect", ci.intersects);
1086
1087 // Clip is visible
1088 if (ci.intersects) {
1089 // Is this the top clip on its layer?
1090 bool is_top_clip = false;
1091 const int layer = ci.clip->Layer();
1092 auto top_it = top_clip_for_layer.find(layer);
1093 if (top_it != top_clip_for_layer.end())
1094 is_top_clip = (top_it->second == ci.clip);
1095
1096 // Determine the frame needed for this clip (based on the position on the timeline)
1097 int64_t clip_frame_number = ci.frame_number;
1098
1099 // Debug output
1101 "Timeline::GetFrame (Calculate clip's frame #)",
1102 "clip->Position()", ci.clip->Position(),
1103 "clip->Start()", ci.clip->Start(),
1104 "info.fps.ToFloat()", info.fps.ToFloat(),
1105 "clip_frame_number", clip_frame_number);
1106
1107 // Add clip's frame as layer
1108 add_layer(new_frame, ci.clip, clip_frame_number, is_top_clip, force_safe_composite, max_volume_sum);
1109
1110 } else {
1111 // Debug output
1113 "Timeline::GetFrame (clip does not intersect)",
1114 "requested_frame", requested_frame,
1115 "does_clip_intersect", ci.intersects);
1116 }
1117
1118 } // end clip loop
1119
1120 // Debug output
1122 "Timeline::GetFrame (Add frame to cache)",
1123 "requested_frame", requested_frame,
1124 "info.width", info.width,
1125 "info.height", info.height);
1126
1127 // Set frame # on mapped frame
1128 new_frame->SetFrameNumber(requested_frame);
1129
1130 // Add final frame to cache (only for valid timeline range)
1131 if (!past_timeline_end)
1132 final_cache->Add(new_frame);
1133 // Return frame (or blank frame)
1134 return new_frame;
1135 }
1136 }
1137}
1138
1139
1140// Find intersecting clips (or non intersecting clips)
// Scans every clip on the timeline against the frame window
// [requested_frame, requested_frame + number_of_frames - 1]. When include=true
// the intersecting clips are returned; when include=false the non-intersecting
// clips are returned. As a side effect, update_open_clips() opens readers for
// intersecting clips and schedules the others for closing.
// NOTE(review): the `ZmqLogger::…AppendDebugMethod(` call line (source line 1164)
// is missing from this extracted listing — the orphaned string arguments below
// belong to it; confirm against the original file.
1141std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
1142{
1143 // Find matching clips
1144 std::vector<Clip*> matching_clips;
1145
1146 // Calculate time of frame
1147 const int64_t min_requested_frame = requested_frame;
1148 const int64_t max_requested_frame = requested_frame + (number_of_frames - 1);
1149
1150 // Find Clips at this time
1151 matching_clips.reserve(clips.size());
1152 const double fpsD = info.fps.ToDouble();
1153 for (auto clip : clips)
1154 {
1155 // Does clip intersect the current requested time
1156 int64_t clip_start_position = static_cast<int64_t>(std::llround(clip->Position() * fpsD)) + 1;
1157 int64_t clip_end_position = static_cast<int64_t>(std::llround((clip->Position() + clip->Duration()) * fpsD)) + 1;
1158
// Since min_requested_frame <= max_requested_frame, the ||-expressions below are
// equivalent to: clip_start_position <= max_requested_frame && clip_end_position >= min_requested_frame
1159 bool does_clip_intersect =
1160 (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
1161 (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);
1162
1163 // Debug output
1165 "Timeline::find_intersecting_clips (Is clip near or intersecting)",
1166 "requested_frame", requested_frame,
1167 "min_requested_frame", min_requested_frame,
1168 "max_requested_frame", max_requested_frame,
1169 "clip->Position()", clip->Position(),
1170 "does_clip_intersect", does_clip_intersect);
1171
1172 // Open (or schedule for closing) this clip, based on if it's intersecting or not
1173 update_open_clips(clip, does_clip_intersect);
1174
1175 // Clip is visible
1176 if (does_clip_intersect && include)
1177 // Add the intersecting clip
1178 matching_clips.push_back(clip);
1179
1180 else if (!does_clip_intersect && !include)
1181 // Add the non-intersecting clip
1182 matching_clips.push_back(clip);
1183
1184 } // end clip loop
1185
1186 // return list
1187 return matching_clips;
1188}
1189
1190// Set the cache object used by this reader
// NOTE(review): the signature line (source line 1191) is missing from this extracted
// listing — presumably `void Timeline::SetCache(CacheBase* new_cache) {`; confirm
// against the original file. Once a caller-supplied cache is installed, the timeline
// stops managing (deleting) the cache: managed_cache is set to false below.
1192 // Get lock (prevent getting frames while this happens)
1193 const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
1194
1195 // Destroy previous cache (if managed by timeline)
1196 if (managed_cache && final_cache) {
1197 delete final_cache;
1198 final_cache = NULL;
1199 managed_cache = false;
1200 }
1201
1202 // Set new cache (ownership stays with the caller)
1203 final_cache = new_cache;
1204}
1205
1206// Generate JSON string of this object
1207std::string Timeline::Json() const {
1208
1209 // Return formatted string
1210 return JsonValue().toStyledString();
1211}
1212
1213// Generate Json::Value for this object
1214Json::Value Timeline::JsonValue() const {
1215
1216 // Create root json object
1217 Json::Value root = ReaderBase::JsonValue(); // get parent properties
1218 root["type"] = "Timeline";
1219 root["viewport_scale"] = viewport_scale.JsonValue();
1220 root["viewport_x"] = viewport_x.JsonValue();
1221 root["viewport_y"] = viewport_y.JsonValue();
1222 root["color"] = color.JsonValue();
1223 root["path"] = path;
1224
1225 // Add array of clips
1226 root["clips"] = Json::Value(Json::arrayValue);
1227
1228 // Find Clips at this time
1229 for (const auto existing_clip : clips)
1230 {
1231 root["clips"].append(existing_clip->JsonValue());
1232 }
1233
1234 // Add array of effects
1235 root["effects"] = Json::Value(Json::arrayValue);
1236
1237 // loop through effects
1238 for (const auto existing_effect: effects)
1239 {
1240 root["effects"].append(existing_effect->JsonValue());
1241 }
1242
1243 // return JsonValue
1244 return root;
1245}
1246
1247// Load JSON string into this object
1248void Timeline::SetJson(const std::string value) {
1249
1250 // Get lock (prevent getting frames while this happens)
1251 const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
1252
1253 // Parse JSON string into JSON objects
1254 try
1255 {
1256 const Json::Value root = openshot::stringToJson(value);
1257 // Set all values that match
1258 SetJsonValue(root);
1259 }
1260 catch (const std::exception& e)
1261 {
1262 // Error parsing JSON (or missing keys)
1263 throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
1264 }
1265}
1266
1267// Load Json::Value into this object
// Replaces the entire timeline state (path, clips, effects, duration) with the
// contents of `root`. The timeline is closed first, rebuilt, and re-opened if it
// was open before. Callers must expect all existing Clip/EffectBase pointers
// obtained from this timeline to be replaced.
1268void Timeline::SetJsonValue(const Json::Value root) {
1269
1270 // Get lock (prevent getting frames while this happens)
1271 const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
1272
1273 // Close timeline before we do anything (this closes all clips)
1274 bool was_open = is_open;
1275 Close();
1276
1277 // Set parent data
// NOTE(review): source line 1278 is missing from this extracted listing — presumably
// `ReaderBase::SetJsonValue(root);`; confirm against the original file.
1279
1280 // Set data from Json (if key is found)
1281 if (!root["path"].isNull())
1282 path = root["path"].asString();
1283
1284 if (!root["clips"].isNull()) {
1285 // Clear existing clips
1286 clips.clear();
1287
1288 // loop through clips
1289 for (const Json::Value existing_clip : root["clips"]) {
1290 // Skip NULL nodes
1291 if (existing_clip.isNull()) {
1292 continue;
1293 }
1294
1295 // Create Clip
1296 Clip *c = new Clip();
1297
1298 // Keep track of allocated clip objects
1299 allocated_clips.insert(c);
1300
1301 // When a clip is attached to an object, it searches for the object
1302 // on it's parent timeline. Setting the parent timeline of the clip here
1303 // allows attaching it to an object when exporting the project (because)
1304 // the exporter script initializes the clip and it's effects
1305 // before setting its parent timeline.
1306 c->ParentTimeline(this);
1307
1308 // Load Json into Clip
1309 c->SetJsonValue(existing_clip);
1310
1311 // Add Clip to Timeline
1312 AddClip(c);
1313 }
1314 }
1315
1316 if (!root["effects"].isNull()) {
1317 // Clear existing effects
1318 effects.clear();
1319
1320 // loop through effects
1321 for (const Json::Value existing_effect :root["effects"]) {
1322 // Skip NULL nodes
1323 if (existing_effect.isNull()) {
1324 continue;
1325 }
1326
1327 // Create Effect
1328 EffectBase *e = NULL;
1329
1330 if (!existing_effect["type"].isNull()) {
1331 // Create instance of effect
1332 if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {
1333
1334 // Keep track of allocated effect objects
1335 allocated_effects.insert(e);
1336
1337 // Load Json into Effect
1338 e->SetJsonValue(existing_effect);
1339
1340 // Add Effect to Timeline
1341 AddEffect(e);
1342 }
1343 }
1344 }
1345 }
1346
1347 if (!root["duration"].isNull()) {
1348 // Update duration of timeline
1349 info.duration = root["duration"].asDouble();
// NOTE(review): source line 1350 is missing from this extracted listing; confirm
// against the original file.
1351 }
1352
1353 // Update preview settings
// NOTE(review): source lines 1354-1355 are missing from this extracted listing
// (presumably the preview_width/preview_height update); confirm against the original file.
1356
1357 // Resort (and recalculate min/max duration)
1358 sort_clips();
1359 sort_effects();
1360
1361 // Re-open if needed
1362 if (was_open)
1363 Open();
1364
1365 // Timeline content changed: notify cache clients to rescan active window.
1366 BumpCacheEpoch();
1367}
1368
1369// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
1370void Timeline::ApplyJsonDiff(std::string value) {
1371
1372 // Get lock (prevent getting frames while this happens)
1373 const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
1374
1375 // Parse JSON string into JSON objects
1376 try
1377 {
1378 const Json::Value root = openshot::stringToJson(value);
1379 // Process the JSON change array, loop through each item
1380 for (const Json::Value change : root) {
1381 std::string change_key = change["key"][(uint)0].asString();
1382
1383 // Process each type of change
1384 if (change_key == "clips")
1385 // Apply to CLIPS
1386 apply_json_to_clips(change);
1387
1388 else if (change_key == "effects")
1389 // Apply to EFFECTS
1390 apply_json_to_effects(change);
1391
1392 else
1393 // Apply to TIMELINE
1394 apply_json_to_timeline(change);
1395
1396 }
1397
1398 // Timeline content changed: notify cache clients to rescan active window.
1399 if (!root.empty()) {
1400 // After edits, force safe composition for a short window.
1401 safe_edit_frames_remaining.store(240, std::memory_order_relaxed);
1402 BumpCacheEpoch();
1403 }
1404 }
1405 catch (const std::exception& e)
1406 {
1407 // Error parsing JSON (or missing keys)
1408 throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
1409 }
1410}
1411
// Increment the cache epoch counter so cache clients know timeline content changed.
// Relaxed memory order is sufficient: the counter is a pure change signal and
// synchronizes no other data.
1412void Timeline::BumpCacheEpoch() {
1413 cache_epoch.fetch_add(1, std::memory_order_relaxed);
1414}
1415
1416// Apply JSON diff to clips
1417void Timeline::apply_json_to_clips(Json::Value change) {
1418
1419 // Get key and type of change
1420 std::string change_type = change["type"].asString();
1421 std::string clip_id = "";
1422 Clip *existing_clip = NULL;
1423
1424 // Find id of clip (if any)
1425 for (auto key_part : change["key"]) {
1426 // Get each change
1427 if (key_part.isObject()) {
1428 // Check for id
1429 if (!key_part["id"].isNull()) {
1430 // Set the id
1431 clip_id = key_part["id"].asString();
1432
1433 // Find matching clip in timeline (if any)
1434 for (auto c : clips)
1435 {
1436 if (c->Id() == clip_id) {
1437 existing_clip = c;
1438 break; // clip found, exit loop
1439 }
1440 }
1441 break; // id found, exit loop
1442 }
1443 }
1444 }
1445
1446 // Check for a more specific key (targetting this clip's effects)
1447 // For example: ["clips", {"id:123}, "effects", {"id":432}]
1448 if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
1449 {
1450 // This change is actually targetting a specific effect under a clip (and not the clip)
1451 Json::Value key_part = change["key"][3];
1452
1453 if (key_part.isObject()) {
1454 // Check for id
1455 if (!key_part["id"].isNull())
1456 {
1457 // Set the id
1458 std::string effect_id = key_part["id"].asString();
1459
1460 // Find matching effect in timeline (if any)
1461 std::list<EffectBase*> effect_list = existing_clip->Effects();
1462 for (auto e : effect_list)
1463 {
1464 if (e->Id() == effect_id) {
1465 // Apply the change to the effect directly
1466 apply_json_to_effects(change, e);
1467
1468 // Effect-only diffs must clear the owning clip cache.
1469 if (existing_clip->GetCache()) {
1470 existing_clip->GetCache()->Clear();
1471 }
1472
1473 // Calculate start and end frames that this impacts, and remove those frames from the cache
1474 int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1475 int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1476 final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1477
1478 return; // effect found, don't update clip
1479 }
1480 }
1481 }
1482 }
1483 }
1484
1485 // Determine type of change operation
1486 if (change_type == "insert") {
1487
1488 // Create clip
1489 Clip *clip = new Clip();
1490
1491 // Keep track of allocated clip objects
1492 allocated_clips.insert(clip);
1493
1494 // Set properties of clip from JSON
1495 clip->SetJsonValue(change["value"]);
1496
1497 // Add clip to timeline
1498 AddClip(clip);
1499
1500 // Calculate start and end frames that this impacts, and remove those frames from the cache
1501 int64_t new_starting_frame = (clip->Position() * info.fps.ToDouble()) + 1;
1502 int64_t new_ending_frame = ((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
1503 final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1504
1505 } else if (change_type == "update") {
1506
1507 // Update existing clip
1508 if (existing_clip) {
1509 // Calculate start and end frames prior to the update
1510 int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1511 int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1512
1513 // Update clip properties from JSON
1514 existing_clip->SetJsonValue(change["value"]);
1515
1516 // Calculate new start and end frames after the update
1517 int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1518 int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1519
1520 // Remove both the old and new ranges from the timeline cache
1521 final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1522 final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1523
1524 // Apply framemapper (or update existing framemapper)
1525 if (auto_map_clips) {
1526 apply_mapper_to_clip(existing_clip);
1527 }
1528 }
1529
1530 } else if (change_type == "delete") {
1531
1532 // Remove existing clip
1533 if (existing_clip) {
1534 // Remove clip from timeline
1535 RemoveClip(existing_clip);
1536
1537 // Calculate start and end frames that this impacts, and remove those frames from the cache
1538 int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1539 int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1540 final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1541 }
1542
1543 }
1544
1545 // Re-Sort Clips (since they likely changed)
1546 sort_clips();
1547}
1548
1549// Apply JSON diff to effects
1550void Timeline::apply_json_to_effects(Json::Value change) {
1551
1552 // Get key and type of change
1553 std::string change_type = change["type"].asString();
1554 EffectBase *existing_effect = NULL;
1555
1556 // Find id of an effect (if any)
1557 for (auto key_part : change["key"]) {
1558
1559 if (key_part.isObject()) {
1560 // Check for id
1561 if (!key_part["id"].isNull())
1562 {
1563 // Set the id
1564 std::string effect_id = key_part["id"].asString();
1565
1566 // Find matching effect in timeline (if any)
1567 for (auto e : effects)
1568 {
1569 if (e->Id() == effect_id) {
1570 existing_effect = e;
1571 break; // effect found, exit loop
1572 }
1573 }
1574 break; // id found, exit loop
1575 }
1576 }
1577 }
1578
1579 // Now that we found the effect, apply the change to it
1580 if (existing_effect || change_type == "insert") {
1581 // Apply change to effect
1582 apply_json_to_effects(change, existing_effect);
1583 }
1584}
1585
1586// Apply JSON diff to effects (if you already know which effect needs to be updated)
1587void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {
1588
1589 // Get key and type of change
1590 std::string change_type = change["type"].asString();
1591
1592 // Calculate start and end frames that this impacts, and remove those frames from the cache
1593 if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
1594 int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
1595 int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
1596 final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1597 }
1598
1599 // Determine type of change operation
1600 if (change_type == "insert") {
1601
1602 // Determine type of effect
1603 std::string effect_type = change["value"]["type"].asString();
1604
1605 // Create Effect
1606 EffectBase *e = NULL;
1607
1608 // Init the matching effect object
1609 if ( (e = EffectInfo().CreateEffect(effect_type)) ) {
1610
1611 // Keep track of allocated effect objects
1612 allocated_effects.insert(e);
1613
1614 // Load Json into Effect
1615 e->SetJsonValue(change["value"]);
1616
1617 // Add Effect to Timeline
1618 AddEffect(e);
1619 }
1620
1621 } else if (change_type == "update") {
1622
1623 // Update existing effect
1624 if (existing_effect) {
1625
1626 // Calculate start and end frames that this impacts, and remove those frames from the cache
1627 int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
1628 int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
1629 final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1630
1631 // Update effect properties from JSON
1632 existing_effect->SetJsonValue(change["value"]);
1633 }
1634
1635 } else if (change_type == "delete") {
1636
1637 // Remove existing effect
1638 if (existing_effect) {
1639
1640 // Calculate start and end frames that this impacts, and remove those frames from the cache
1641 int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
1642 int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
1643 final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1644
1645 // Remove effect from timeline
1646 RemoveEffect(existing_effect);
1647 }
1648
1649 }
1650
1651 // Re-Sort Effects (since they likely changed)
1652 sort_effects();
1653}
1654
1655// Apply JSON diff to timeline properties
// Handles insert/update (set a property) and delete (reset to its default) for
// timeline-level keys: color, viewport, duration, width/height, fps,
// display_ratio, pixel_ratio, sample_rate, channels, channel_layout. Unknown keys
// throw InvalidJSONKey. Any change except a pure duration adjustment clears the
// whole cache at the end.
// NOTE(review): source lines 1686, 1694 and 1699 are missing from this extracted
// listing (likely follow-up calls after the duration/width/height assignments);
// confirm against the original file.
1656void Timeline::apply_json_to_timeline(Json::Value change) {
1657 bool cache_dirty = true;
1658
1659 // Get key and type of change
1660 std::string change_type = change["type"].asString();
1661 std::string root_key = change["key"][(uint)0].asString();
1662 std::string sub_key = "";
1663 if (change["key"].size() >= 2)
1664 sub_key = change["key"][(uint)1].asString();
1665
1666 // Determine type of change operation
1667 if (change_type == "insert" || change_type == "update") {
1668
1669 // INSERT / UPDATE
1670 // Check for valid property
1671 if (root_key == "color")
1672 // Set color
1673 color.SetJsonValue(change["value"]);
1674 else if (root_key == "viewport_scale")
1675 // Set viewport scale
1676 viewport_scale.SetJsonValue(change["value"]);
1677 else if (root_key == "viewport_x")
1678 // Set viewport x offset
1679 viewport_x.SetJsonValue(change["value"]);
1680 else if (root_key == "viewport_y")
1681 // Set viewport y offset
1682 viewport_y.SetJsonValue(change["value"]);
1683 else if (root_key == "duration") {
1684 // Update duration of timeline
1685 info.duration = change["value"].asDouble();
1687
1688 // We don't want to clear cache for duration adjustments
1689 cache_dirty = false;
1690 }
1691 else if (root_key == "width") {
1692 // Set width
1693 info.width = change["value"].asInt();
1695 }
1696 else if (root_key == "height") {
1697 // Set height
1698 info.height = change["value"].asInt();
1700 }
1701 else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
1702 // Set fps fraction
1703 if (!change["value"]["num"].isNull())
1704 info.fps.num = change["value"]["num"].asInt();
1705 if (!change["value"]["den"].isNull())
1706 info.fps.den = change["value"]["den"].asInt();
1707 }
1708 else if (root_key == "fps" && sub_key == "num")
1709 // Set fps.num
1710 info.fps.num = change["value"].asInt();
1711 else if (root_key == "fps" && sub_key == "den")
1712 // Set fps.den
1713 info.fps.den = change["value"].asInt();
1714 else if (root_key == "display_ratio" && sub_key == "" && change["value"].isObject()) {
1715 // Set display_ratio fraction
1716 if (!change["value"]["num"].isNull())
1717 info.display_ratio.num = change["value"]["num"].asInt();
1718 if (!change["value"]["den"].isNull())
1719 info.display_ratio.den = change["value"]["den"].asInt();
1720 }
1721 else if (root_key == "display_ratio" && sub_key == "num")
1722 // Set display_ratio.num
1723 info.display_ratio.num = change["value"].asInt();
1724 else if (root_key == "display_ratio" && sub_key == "den")
1725 // Set display_ratio.den
1726 info.display_ratio.den = change["value"].asInt();
1727 else if (root_key == "pixel_ratio" && sub_key == "" && change["value"].isObject()) {
1728 // Set pixel_ratio fraction
1729 if (!change["value"]["num"].isNull())
1730 info.pixel_ratio.num = change["value"]["num"].asInt();
1731 if (!change["value"]["den"].isNull())
1732 info.pixel_ratio.den = change["value"]["den"].asInt();
1733 }
1734 else if (root_key == "pixel_ratio" && sub_key == "num")
1735 // Set pixel_ratio.num
1736 info.pixel_ratio.num = change["value"].asInt();
1737 else if (root_key == "pixel_ratio" && sub_key == "den")
1738 // Set pixel_ratio.den
1739 info.pixel_ratio.den = change["value"].asInt();
1740
1741 else if (root_key == "sample_rate")
1742 // Set sample rate
1743 info.sample_rate = change["value"].asInt();
1744 else if (root_key == "channels")
1745 // Set channels
1746 info.channels = change["value"].asInt();
1747 else if (root_key == "channel_layout")
1748 // Set channel layout
1749 info.channel_layout = (ChannelLayout) change["value"].asInt();
1750 else
1751 // Error parsing JSON (or missing keys)
1752 throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1753
1754
1755 } else if (change["type"].asString() == "delete") {
1756
1757 // DELETE / RESET
1758 // Reset the following properties (since we can't delete them)
1759 if (root_key == "color") {
1760 color = Color();
1761 color.red = Keyframe(0.0);
1762 color.green = Keyframe(0.0);
1763 color.blue = Keyframe(0.0);
1764 }
1765 else if (root_key == "viewport_scale")
1766 viewport_scale = Keyframe(1.0);
1767 else if (root_key == "viewport_x")
1768 viewport_x = Keyframe(0.0);
1769 else if (root_key == "viewport_y")
1770 viewport_y = Keyframe(0.0);
1771 else
1772 // Error parsing JSON (or missing keys)
1773 throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1774
1775 }
1776
1777 if (cache_dirty) {
1778 // Clear entire cache
1779 ClearAllCache();
1780 }
1781}
1782
1783// Clear all caches
// Clears the timeline's own frame cache, then each clip's cache and each clip
// reader's cache. With deep=true, FrameMapper readers also have their wrapped
// (nested) reader's cache cleared. Finally bumps the cache epoch.
// NOTE(review): the signature line (source line 1784) is missing from this
// extracted listing — presumably `void Timeline::ClearAllCache(bool deep) {`;
// confirm against the original file.
1785 // Get lock (prevent getting frames while this happens)
1786 const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);
1787
1788 // Clear primary cache
1789 if (final_cache) {
1790 final_cache->Clear();
1791 }
1792
1793 // Loop through all clips
1794 try {
1795 for (const auto clip : clips) {
1796 // Clear cache on clip and reader if present
1797 if (clip->Reader()) {
1798 if (auto rc = clip->Reader()->GetCache())
1799 rc->Clear();
1800
1801 // Clear nested Reader (if deep clear requested)
1802 if (deep && clip->Reader()->Name() == "FrameMapper") {
1803 FrameMapper *nested_reader = static_cast<FrameMapper *>(clip->Reader());
1804 if (nested_reader->Reader()) {
1805 if (auto nc = nested_reader->Reader()->GetCache())
1806 nc->Clear();
1807 }
1808 }
1809 }
1810
1811 // Clear clip cache
1812 if (auto cc = clip->GetCache())
1813 cc->Clear();
1814 }
1815 } catch (const ReaderClosed & e) {
// Best-effort: a closed reader while clearing is ignored on purpose
1816 // ...
1817 }
1818
1819 // Cache content changed: notify cache clients to rebuild their window baseline.
1820 BumpCacheEpoch();
1821}
1822
1823// Set Max Image Size (used for performance optimization). Convenience function for setting
1824// Settings::Instance()->MAX_WIDTH and Settings::Instance()->MAX_HEIGHT.
1825void Timeline::SetMaxSize(int width, int height) {
1826 // Maintain aspect ratio regardless of what size is passed in
1827 QSize display_ratio_size = QSize(info.width, info.height);
1828 QSize proposed_size = QSize(std::min(width, info.width), std::min(height, info.height));
1829
1830 // Scale QSize up to proposed size
1831 display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
1832
1833 // Update preview settings
1834 preview_width = display_ratio_size.width();
1835 preview_height = display_ratio_size.height();
1836}
1837
1838// Resolve equal-power audio gains from transition placement relative to the clip edges.
// Returns {gain at previous frame, gain at current frame} for `source_clip` at
// `timeline_frame_number`. The pair is {1.0f, 1.0f} (no fade) unless exactly one
// audio-fading Mask transition overlaps this frame on the clip's layer and at most
// two audible clips participate. Fades use sin/cos of t*pi/2 so that the two
// crossfading clips sum to constant power.
1839std::pair<float, float> Timeline::ResolveTransitionAudioGains(Clip* source_clip, int64_t timeline_frame_number, bool is_top_clip) const
1840{
1841 constexpr double half_pi = 1.57079632679489661923;
1842
1843 if (!source_clip)
1844 return {1.0f, 1.0f};
1845
1846 const double fpsD = info.fps.ToDouble();
1847 Mask* active_mask = nullptr;
1848 int64_t effect_start_position = 0;
1849 int64_t effect_end_position = 0;
1850
1851 // Find the single active transition on this layer that requested overlapping audio fades.
1852 for (auto effect : effects) {
1853 if (effect->Layer() != source_clip->Layer())
1854 continue;
1855
1856 auto* mask = dynamic_cast<Mask*>(effect);
1857 if (!mask || !mask->fade_audio_hint)
1858 continue;
1859
1860 const int64_t start_pos = static_cast<int64_t>(std::llround(effect->Position() * fpsD)) + 1;
1861 const int64_t end_pos = static_cast<int64_t>(std::llround((effect->Position() + effect->Duration()) * fpsD));
1862 if (start_pos > timeline_frame_number || end_pos < timeline_frame_number)
1863 continue;
1864
// More than one overlapping transition is ambiguous: bail out with unity gain.
1865 if (active_mask)
1866 return {1.0f, 1.0f};
1867
1868 active_mask = mask;
1869 effect_start_position = start_pos;
1870 effect_end_position = end_pos;
1871 }
1872
1873 if (!active_mask)
1874 return {1.0f, 1.0f};
1875
1876 struct AudibleClipInfo {
1877 Clip* clip;
1878 int64_t start_pos;
1879 int64_t end_pos;
1880 };
1881
1882 std::vector<AudibleClipInfo> audible_clips;
1883 audible_clips.reserve(2);
1884
1885 // Collect the audible clips covered by this transition on the current layer.
1886 for (auto clip : clips) {
1887 if (clip->Layer() != source_clip->Layer())
1888 continue;
1889 if (!clip->Reader() || !clip->Reader()->info.has_audio)
1890 continue;
1891
1892 const int64_t clip_start_pos = static_cast<int64_t>(std::llround(clip->Position() * fpsD)) + 1;
1893 const int64_t clip_end_pos = static_cast<int64_t>(std::llround((clip->Position() + clip->Duration()) * fpsD));
1894 if (clip_start_pos > timeline_frame_number || clip_end_pos < timeline_frame_number)
1895 continue;
1896
1897 const int64_t clip_start_frame = static_cast<int64_t>(std::llround(clip->Start() * fpsD)) + 1;
1898 const int64_t clip_frame_number = timeline_frame_number - clip_start_pos + clip_start_frame;
1899 if (clip->has_audio.GetInt(clip_frame_number) == 0)
1900 continue;
1901
1902 audible_clips.push_back({clip, clip_start_pos, clip_end_pos});
// Three or more audible clips is not a simple crossfade: bail out with unity gain.
1903 if (audible_clips.size() > 2)
1904 return {1.0f, 1.0f};
1905 }
1906
1907 if (audible_clips.empty())
1908 return {1.0f, 1.0f};
1909
1910 // Skip clips that are not actually participating in this transition audio decision.
1911 const auto source_it = std::find_if(
1912 audible_clips.begin(),
1913 audible_clips.end(),
1914 [source_clip](const AudibleClipInfo& info) {
1915 return info.clip == source_clip;
1916 });
1917 if (source_it == audible_clips.end())
1918 return {1.0f, 1.0f};
1919
1920 // Keep the current top/non-top clip routing intact when two clips overlap.
// The "top" clip is the one starting latest (ties broken by pointer order for determinism).
1921 if (audible_clips.size() == 2) {
1922 auto top_it = std::max_element(
1923 audible_clips.begin(),
1924 audible_clips.end(),
1925 [](const AudibleClipInfo& lhs, const AudibleClipInfo& rhs) {
1926 if (lhs.start_pos != rhs.start_pos)
1927 return lhs.start_pos < rhs.start_pos;
1928 return std::less<Clip*>()(lhs.clip, rhs.clip);
1929 });
1930 if ((is_top_clip && source_clip != top_it->clip) || (!is_top_clip && source_clip == top_it->clip))
1931 return {1.0f, 1.0f};
1932 }
1933
1934 // Infer fade direction from which transition edge is closer to this clip.
1935 const int64_t left_distance = std::llabs(effect_start_position - source_it->start_pos);
1936 const int64_t right_distance = std::llabs(effect_end_position - source_it->end_pos);
1937 const bool clip_fades_in = left_distance <= right_distance;
1938
1939 // Evaluate the current frame and previous frame so Timeline can preserve per-frame gain ramps.
1940 const auto compute_gain = [&](int64_t frame_number) -> float {
// Degenerate (zero or negative length) transitions get unity gain.
1941 if (effect_end_position <= effect_start_position)
1942 return 1.0f;
1943
1944 const double span = static_cast<double>(effect_end_position - effect_start_position);
1945 double t = static_cast<double>(frame_number - effect_start_position) / span;
1946 if (t < 0.0)
1947 t = 0.0;
1948 else if (t > 1.0)
1949 t = 1.0;
1950
// Equal-power curve: sin for fade-in, cos for fade-out (sin^2 + cos^2 == 1).
1951 return static_cast<float>(clip_fades_in ? std::sin(t * half_pi) : std::cos(t * half_pi));
1952 };
1953
1954 return {compute_gain(timeline_frame_number - 1), compute_gain(timeline_frame_number)};
1955}
Header file for CacheBase class.
Header file for CacheDisk class.
Header file for CacheMemory class.
Header file for CrashHandler class.
Header file for all Exception classes.
Header file for the FrameMapper class.
Header file for Mask class.
#define OPEN_MP_NUM_PROCESSORS
Header file for Timeline class.
All cache managers in libopenshot are based on this CacheBase class.
Definition CacheBase.h:35
virtual void Clear()=0
Clear the cache of all frames.
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
virtual void Add(std::shared_ptr< openshot::Frame > frame)=0
Add a Frame to the cache.
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Definition CacheBase.cpp:28
This class is a memory-based cache manager for Frame objects.
Definition CacheMemory.h:29
void Clear()
Clear the cache of all frames.
float Start() const
Get start position (in seconds) of clip (trim start of video)
Definition ClipBase.h:88
float Duration() const
Get the length of this clip (in seconds)
Definition ClipBase.h:90
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
This method is required for all derived classes of ClipBase, and returns a new openshot::Frame object...
std::string Id() const
Get the Id of this clip object.
Definition ClipBase.h:85
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition ClipBase.h:87
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition ClipBase.cpp:80
float Position() const
Get position on timeline (in seconds)
Definition ClipBase.h:86
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
Definition ClipBase.h:91
This class represents a clip (used to arrange readers on the timeline)
Definition Clip.h:89
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition Clip.h:182
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
Definition Clip.h:349
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition Clip.cpp:1034
openshot::CacheMemory * GetCache() override
Get the cache object (always return NULL for this reader)
Definition Clip.h:206
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
Definition Clip.h:353
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any)
Definition Clip.h:296
std::list< openshot::EffectBase * > Effects()
Return the list of effects on the timeline.
Definition Clip.h:245
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition Clip.h:333
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition Clip.h:350
void Reader(openshot::ReaderBase *new_reader)
Set the current reader.
Definition Clip.cpp:338
This class represents a color (used on the timeline and clips)
Definition Color.h:27
std::string GetColorHex(int64_t frame_number)
Get the HEX value of a color at a specific frame.
Definition Color.cpp:47
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition Color.h:32
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition Color.h:30
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition Color.h:31
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition Color.cpp:117
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition Color.cpp:86
static CrashHandler * Instance()
This abstract class is the base class, used by all effects in libopenshot.
Definition EffectBase.h:57
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
This class returns a listing of all effects supported by libopenshot.
Definition EffectInfo.h:29
EffectBase * CreateEffect(std::string effect_type)
Create an instance of an effect (factory style)
This class represents a fraction.
Definition Fraction.h:30
int num
Numerator for the fraction.
Definition Fraction.h:32
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition Fraction.cpp:35
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition Fraction.cpp:40
void Reduce()
Reduce this fraction (i.e. 640/480 = 4/3)
Definition Fraction.cpp:65
Fraction Reciprocal() const
Return the reciprocal as a Fraction.
Definition Fraction.cpp:78
int den
Denominator for the fraction.
Definition Fraction.h:33
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
ReaderBase * Reader()
Get the current reader.
void Close() override
Close the openshot::FrameMapper and internal reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition Frame.cpp:484
Exception for files that can not be found or opened.
Definition Exceptions.h:194
Exception for missing JSON Change key.
Definition Exceptions.h:269
Exception for invalid JSON.
Definition Exceptions.h:224
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition KeyFrame.h:53
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition KeyFrame.cpp:282
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition KeyFrame.cpp:372
double GetValue(int64_t index) const
Get the value at a specific index.
Definition KeyFrame.cpp:258
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition KeyFrame.cpp:339
int64_t GetCount() const
Get the number of points (i.e. # of points)
Definition KeyFrame.cpp:424
This class uses the image libraries to apply alpha (or transparency) masks to any frame....
Definition Mask.h:37
Exception for frames that are out of bounds.
Definition Exceptions.h:307
This abstract class is the base class, used by all readers in libopenshot.
Definition ReaderBase.h:76
openshot::ReaderInfo info
Information about the current media file.
Definition ReaderBase.h:88
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
Definition ReaderBase.h:79
virtual openshot::CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
openshot::ClipBase * clip
Pointer to the parent clip instance (if any)
Definition ReaderBase.h:80
Exception when a reader is closed, and a frame is requested.
Definition Exceptions.h:370
This class is contains settings used by libopenshot (and can be safely toggled at any point)
Definition Settings.h:26
std::string PATH_OPENSHOT_INSTALL
Definition Settings.h:114
static Settings * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition Settings.cpp:23
int preview_height
Optional preview height of timeline image. If your preview window is smaller than the timeline,...
int preview_width
Optional preview width of timeline image. If your preview window is smaller than the timeline,...
This class represents a timeline.
Definition Timeline.h:153
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
Definition Timeline.cpp:229
Json::Value JsonValue() const override
Generate Json::Value for this object.
openshot::Keyframe viewport_scale
Curve representing the scale of the viewport (0 to 100)
Definition Timeline.h:334
void ApplyJsonDiff(std::string value)
Apply a special formatted JSON object, which represents a change to the timeline (add,...
openshot::EffectBase * GetClipEffect(const std::string &id)
Look up a clip effect by ID.
Definition Timeline.cpp:441
void AddClip(openshot::Clip *clip)
Add an openshot::Clip to the timeline.
Definition Timeline.cpp:338
virtual ~Timeline()
Definition Timeline.cpp:212
std::list< openshot::EffectBase * > ClipEffects() const
Return the list of effects on all clips.
Definition Timeline.cpp:454
std::list< std::string > GetTrackedObjectsIds() const
Return the ID's of the tracked objects as a list of strings.
Definition Timeline.cpp:264
std::string Json() const override
Generate JSON string of this object.
int64_t GetMaxFrame()
Look up the end frame number of the latest element on the timeline.
Definition Timeline.cpp:479
double GetMinTime()
Look up the position/start time of the first timeline element.
Definition Timeline.cpp:495
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame) override
Definition Timeline.cpp:948
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
Definition Timeline.cpp:527
openshot::Color color
Background color of timeline canvas.
Definition Timeline.h:339
std::string GetTrackedObjectValues(std::string id, int64_t frame_number) const
Return the trackedObject's properties as a JSON string.
Definition Timeline.cpp:280
Timeline(int width, int height, openshot::Fraction fps, int sample_rate, int channels, openshot::ChannelLayout channel_layout)
Constructor for the timeline (which configures the default frame properties)
Definition Timeline.cpp:34
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its ID.
Definition Timeline.cpp:247
int64_t GetMinFrame()
Look up the start frame number of the first element on the timeline (first frame is 1)
Definition Timeline.cpp:487
openshot::EffectBase * GetEffect(const std::string &id)
Look up a timeline effect by ID.
Definition Timeline.cpp:430
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
Definition Timeline.cpp:418
void ClearAllCache(bool deep=false)
void AddEffect(openshot::EffectBase *effect)
Add an effect to the timeline.
Definition Timeline.cpp:364
void SetCache(openshot::CacheBase *new_cache)
void Clear()
Clear all clips, effects, and frame mappers from timeline (and free memory)
Definition Timeline.cpp:868
openshot::Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition Timeline.h:335
void RemoveClip(openshot::Clip *clip)
Remove an openshot::Clip from the timeline.
Definition Timeline.cpp:399
void SetMaxSize(int width, int height)
double GetMaxTime()
Look up the end time of the latest timeline element.
Definition Timeline.cpp:473
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the timeline.
Definition Timeline.cpp:380
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
Definition Timeline.cpp:551
void Open() override
Open the reader (and start consuming resources)
Definition Timeline.cpp:936
void SetJson(const std::string value) override
Load JSON string into this object.
openshot::Keyframe viewport_y
Curve representing the y coordinate for the viewport.
Definition Timeline.h:336
void Close() override
Close the timeline reader (and any resources it was consuming)
Definition Timeline.cpp:914
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition ZmqLogger.cpp:35
This namespace is the default namespace for all code in the openshot library.
Definition Compressor.h:29
@ PULLDOWN_NONE
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition FrameMapper.h:46
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround,...
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
Definition Enums.h:70
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition Enums.h:71
const Json::Value stringToJson(const std::string value)
Definition Json.cpp:16
This struct holds the information of a bounding-box.
float cy
y-coordinate of the bounding box center
float height
bounding box height
float cx
x-coordinate of the bounding box center
float width
bounding box width
float angle
bounding box rotation angle [degrees]
Like CompareClipEndFrames, but for effects.
Definition Timeline.h:80
This struct contains info about a media file, such as height, width, frames per second,...
Definition ReaderBase.h:39
float duration
Length of time (in seconds)
Definition ReaderBase.h:43
int width
The width of the video (in pixels)
Definition ReaderBase.h:46
int channels
The number of audio channels used in the audio stream.
Definition ReaderBase.h:61
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition ReaderBase.h:48
openshot::Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3)
Definition ReaderBase.h:51
int height
The height of the video (in pixels)
Definition ReaderBase.h:45
int64_t video_length
The number of frames in the video stream.
Definition ReaderBase.h:53
std::string acodec
The name of the audio codec used to encode / decode the video stream.
Definition ReaderBase.h:58
std::string vcodec
The name of the video codec used to encode / decode the video stream.
Definition ReaderBase.h:52
openshot::Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square)
Definition ReaderBase.h:50
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition ReaderBase.h:62
bool has_video
Determines if this file has a video stream.
Definition ReaderBase.h:40
bool has_audio
Determines if this file has an audio stream.
Definition ReaderBase.h:41
openshot::Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition ReaderBase.h:55
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition ReaderBase.h:60
This struct contains info about the current Timeline clip instance.
bool force_safe_composite
If true, avoid mutating cached clip images during composition.
bool is_before_clip_keyframes
Is this before clip keyframes are applied.
bool is_top_clip
Is clip on top (if overlapping another clip)