OpenShot Library | libopenshot 0.6.0
Loading...
Searching...
No Matches
Clip.cpp
Go to the documentation of this file.
1
9// Copyright (c) 2008-2019 OpenShot Studios, LLC
10//
11// SPDX-License-Identifier: LGPL-3.0-or-later
12
13#include "Clip.h"
14
15#include "AudioResampler.h"
16#include "Exceptions.h"
17#include "FFmpegReader.h"
18#include "FrameMapper.h"
19#include "QtImageReader.h"
20#include "ChunkReader.h"
21#include "DummyReader.h"
22#include "Timeline.h"
23#include "ZmqLogger.h"
24
25#include <algorithm>
26#include <cmath>
27#include <sstream>
28#include <QPainter>
29
30#ifdef USE_IMAGEMAGICK
31 #include "MagickUtilities.h"
32 #include "ImageReader.h"
33 #include "TextReader.h"
34#endif
35
36#include <Qt>
37
38using namespace openshot;
39
40namespace {
41 struct CompositeChoice { const char* name; CompositeType value; };
42 const CompositeChoice composite_choices[] = {
43 {"Normal", COMPOSITE_SOURCE_OVER},
44
45 // Darken group
46 {"Darken", COMPOSITE_DARKEN},
47 {"Multiply", COMPOSITE_MULTIPLY},
48 {"Color Burn", COMPOSITE_COLOR_BURN},
49
50 // Lighten group
51 {"Lighten", COMPOSITE_LIGHTEN},
52 {"Screen", COMPOSITE_SCREEN},
53 {"Color Dodge", COMPOSITE_COLOR_DODGE},
54 {"Add", COMPOSITE_PLUS},
55
56 // Contrast group
57 {"Overlay", COMPOSITE_OVERLAY},
58 {"Soft Light", COMPOSITE_SOFT_LIGHT},
59 {"Hard Light", COMPOSITE_HARD_LIGHT},
60
61 // Compare
62 {"Difference", COMPOSITE_DIFFERENCE},
63 {"Exclusion", COMPOSITE_EXCLUSION},
64 };
65 const int composite_choices_count = sizeof(composite_choices)/sizeof(CompositeChoice);
66}
67
// Init default settings for a clip
{
	// Init clip settings
	Position(0.0);        // Timeline position (seconds)
	Layer(0);             // Track / layer number
	Start(0.0);           // Trim start (seconds)
	ClipBase::End(0.0);   // 0.0 means "not set"; Open() defaults it to the reader duration
	// NOTE(review): this scraped view elides several default assignments here
	// (gravity/scale/display/mixing/composite etc.) — confirm against the repository.
	waveform = false;     // When true, render the audio waveform instead of video
	parentObjectId = "";  // No parent clip / tracked object by default

	// Init scale curves (1.0 == unscaled)
	scale_x = Keyframe(1.0);
	scale_y = Keyframe(1.0);

	// Init location curves (0.0 == no offset)
	location_x = Keyframe(0.0);
	location_y = Keyframe(0.0);

	// Init alpha (fully opaque)
	alpha = Keyframe(1.0);

	// Init time & volume (1.0 == normal speed / full volume)
	time = Keyframe(1.0);
	volume = Keyframe(1.0);

	// Init audio waveform color
	wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

	// Init shear and perspective curves (origin at frame center)
	shear_x = Keyframe(0.0);
	shear_y = Keyframe(0.0);
	origin_x = Keyframe(0.5);
	origin_y = Keyframe(0.5);

	// Init audio channel filter and mappings (-1 == no filtering)
	channel_filter = Keyframe(-1.0);

	// Init audio and video overrides
	// -1 = auto (follow reader), 0 = off, 1 = on (checked in GetOrCreateFrame)
	has_audio = Keyframe(-1.0);
	has_video = Keyframe(-1.0);

	// Initialize the attached object and attached clip as null pointers
	parentTrackedObject = nullptr;
	parentClipObject = NULL;

	// Init reader info struct
	// NOTE(review): elided call here in the scraped view — confirm upstream.
}
133
// Init reader info details
	// No-op when no reader is attached yet.
	if (reader) {
		// Init rotation (if any)
		// NOTE(review): a call is elided here in this scraped view —
		// presumably init_reader_rotation(); confirm against the repository.

		// Initialize info struct (mirror the reader's metadata onto the clip)
		info = reader->info;

		// Init cache
		// NOTE(review): another elided line here (cache setup) — confirm upstream.
	}
}
147
149 // Only apply metadata rotation if clip rotation has not been explicitly set.
150 if (rotation.GetCount() > 0 || !reader)
151 return;
152
153 const auto rotate_meta = reader->info.metadata.find("rotate");
154 if (rotate_meta == reader->info.metadata.end()) {
155 // Ensure rotation keyframes always start with a default 0° point.
156 rotation = Keyframe(0.0f);
157 return;
158 }
159
160 float rotate_angle = 0.0f;
161 try {
162 rotate_angle = strtof(rotate_meta->second.c_str(), nullptr);
163 } catch (const std::exception& e) {
164 return; // ignore invalid metadata
165 }
166
167 rotation = Keyframe(rotate_angle);
168
169 // Do not overwrite user-authored scale curves.
170 auto has_default_scale = [](const Keyframe& kf) {
171 return kf.GetCount() == 1 && fabs(kf.GetPoint(0).co.Y - 1.0) < 0.00001;
172 };
173 if (!has_default_scale(scale_x) || !has_default_scale(scale_y))
174 return;
175
176 // No need to adjust scaling when the metadata rotation is effectively zero.
177 if (fabs(rotate_angle) < 0.0001f)
178 return;
179
180 float w = static_cast<float>(reader->info.width);
181 float h = static_cast<float>(reader->info.height);
182 if (w <= 0.0f || h <= 0.0f)
183 return;
184
185 float rad = rotate_angle * static_cast<float>(M_PI) / 180.0f;
186
187 float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
188 float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));
189 if (new_width <= 0.0f || new_height <= 0.0f)
190 return;
191
192 float uniform_scale = std::min(w / new_width, h / new_height);
193
194 scale_x = Keyframe(uniform_scale);
195 scale_y = Keyframe(uniform_scale);
196}
197
// Default Constructor for a clip (no reader attached; caller must set one via Reader()).
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	// NOTE(review): the call is elided in this scraped view — presumably init_settings().
}
204
// Constructor with reader. The reader is NOT owned by this clip
// (allocated_reader stays NULL), so the destructor will not delete it.
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	// NOTE(review): elided call here in the scraped view — presumably init_settings().

	// Open and Close the reader (to set the duration of the clip)
	Open();
	Close();

	// Update duration and set parent
	if (reader) {
		ClipBase::End(reader->info.duration);
		reader->ParentClip(this);
		// Init reader info struct
		// NOTE(review): elided call here — presumably init_reader_settings().
	}
}
223
// Constructor with filepath: picks an appropriate reader from the file
// extension, falling back to image/video probing. Any reader created here
// IS owned by the clip (allocated_reader) and deleted in the destructor.
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	// NOTE(review): elided call here in the scraped view — presumably init_settings().

	// Get file extension (and convert to lower case)
	std::string ext = get_file_extension(path);
	std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

	// Determine if common video formats (or image sequences — a printf-style
	// "%" pattern in the path indicates a numbered sequence)
	if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
		ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || ext=="gif" || path.find("%") != std::string::npos)
	{
		try
		{
			// Open common video format (best effort; failures fall through to probing below)
			reader = new openshot::FFmpegReader(path);

		} catch(...) { }
	}
	if (ext=="osp")
	{
		try
		{
			// Open an OpenShot project file (a Timeline acts as the reader)
			reader = new openshot::Timeline(path, true);

		} catch(...) { }
	}


	// If no video found, try each reader
	if (!reader)
	{
		try
		{
			// Try an image reader
			reader = new openshot::QtImageReader(path);

		} catch(...) {
			try
			{
				// Try a video reader
				reader = new openshot::FFmpegReader(path);

			} catch(...) { }
		}
	}

	// Update duration and set parent
	if (reader) {
		ClipBase::End(reader->info.duration);
		reader->ParentClip(this);
		// This clip allocated the reader, so ~Clip() must delete it.
		allocated_reader = reader;
		// Init reader info struct
		// NOTE(review): elided call here — presumably init_reader_settings().
	}
}
283
284// Destructor
286{
287 // Delete the reader if clip created it
288 if (allocated_reader) {
289 delete allocated_reader;
290 allocated_reader = NULL;
291 reader = NULL;
292 }
293
294 // Close the resampler
295 if (resampler) {
296 delete resampler;
297 resampler = NULL;
298 }
299
300 // Close clip
301 Close();
302}
303
304// Attach clip to bounding box
305void Clip::AttachToObject(std::string object_id)
306{
307 // Search for the tracked object on the timeline
308 Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
309
310 if (parentTimeline) {
311 // Create a smart pointer to the tracked object from the timeline
312 std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
313 Clip* clipObject = parentTimeline->GetClip(object_id);
314
315 // Check for valid tracked object
316 if (trackedObject){
317 SetAttachedObject(trackedObject);
318 parentClipObject = NULL;
319 }
320 else if (clipObject) {
321 SetAttachedClip(clipObject);
322 parentTrackedObject = nullptr;
323 }
324 }
325}
326
327// Set the pointer to the trackedObject this clip is attached to
328void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
329 parentTrackedObject = trackedObject;
330}
331
332// Set the pointer to the clip this clip is attached to
333void Clip::SetAttachedClip(Clip* clipObject){
334 parentClipObject = clipObject;
335}
336
// Replace this clip's reader. Frees the previously allocated reader unless
// the new reader is a FrameMapper wrapping that same allocated reader.
void Clip::Reader(ReaderBase* new_reader)
{
	// Delete previously allocated reader (if not related to new reader)
	// FrameMappers that point to the same allocated reader are ignored
	bool is_same_reader = false;
	if (new_reader && allocated_reader) {
		if (new_reader->Name() == "FrameMapper") {
			// Determine if FrameMapper is pointing at the same allocated reader
			FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
			if (allocated_reader == clip_mapped_reader->Reader()) {
				is_same_reader = true;
			}
		}
	}
	// Clear existing allocated reader (if different)
	if (allocated_reader && !is_same_reader) {
		reader->Close();
		allocated_reader->Close();
		delete allocated_reader;
		reader = NULL;
		allocated_reader = NULL;
	}

	// set reader pointer (the new reader is NOT owned by this clip)
	reader = new_reader;

	// set parent
	if (reader) {
		reader->ParentClip(this);

		// Init reader info struct
		// NOTE(review): elided call here in this scraped view —
		// presumably init_reader_settings(); confirm against the repository.
	}
}
372
375{
376 if (reader)
377 return reader;
378 else
379 // Throw error if reader not initialized
380 throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
381}
382
383// Open the internal reader
385{
386 if (reader)
387 {
388 // Open the reader
389 reader->Open();
390 is_open = true;
391
392 // Copy Reader info to Clip
393 info = reader->info;
394
395 // Set some clip properties from the file reader
396 if (end == 0.0)
397 ClipBase::End(reader->info.duration);
398 }
399 else
400 // Throw error if reader not initialized
401 throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
402}
403
404// Close the internal reader
406{
407 if (is_open && reader) {
408 ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");
409
410 // Close the reader
411 reader->Close();
412 }
413
414 // Clear cache
415 final_cache.Clear();
416 is_open = false;
417}
418
419// Get end position of clip (trim end of video), which can be affected by the time curve.
420float Clip::End() const
421{
422 // if a time curve is present, use its length
423 if (time.GetCount() > 1)
424 {
425 // Determine the FPS fo this clip
426 float fps = 24.0;
427 if (reader)
428 // file reader
429 fps = reader->info.fps.ToFloat();
430 else
431 // Throw error if reader not initialized
432 throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
433
434 return float(time.GetLength()) / fps;
435 }
436 else
437 // just use the duration (as detected by the reader)
438 return end;
439}
440
// Override End() position
void Clip::End(float value) {
	// Delegate straight to the base-class setter (bypasses the
	// time-curve derivation performed by End() const).
	ClipBase::End(value);
}
445
// Set associated Timeline pointer
	// NOTE(review): the signature line is elided in this scraped view — confirm upstream.
	timeline = new_timeline;

	// Clear cache (it might have changed with the new timeline)
	final_cache.Clear();
}
453
454// Create an openshot::Frame object for a specific frame number of this reader.
455std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
456{
457 // Call override of GetFrame
458 return GetFrame(NULL, clip_frame_number, NULL);
459}
460
461// Create an openshot::Frame object for a specific frame number of this reader.
462// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
463std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
464{
465 // Call override of GetFrame
466 return GetFrame(background_frame, clip_frame_number, NULL);
467}
468
// Use an existing openshot::Frame object and draw this Clip's frame onto it.
// Builds (or fetches from cache) the fully-processed clip frame — time-mapped,
// waveform, effects, keyframe transforms — then composites it per `options`.
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

	if (reader)
	{
		// Get frame object
		std::shared_ptr<Frame> frame = NULL;

		// Check cache first — a hit skips all frame generation below.
		frame = final_cache.GetFrame(clip_frame_number);
		if (!frame) {
			// Generate clip frame
			frame = GetOrCreateFrame(clip_frame_number);

			// Get frame size and frame # (defaults to this clip's own frame,
			// but a provided background frame overrides both)
			int64_t timeline_frame_number = clip_frame_number;
			QSize timeline_size(frame->GetWidth(), frame->GetHeight());
			if (background_frame) {
				// If a background frame is provided, use it instead
				timeline_frame_number = background_frame->number;
				timeline_size.setWidth(background_frame->GetWidth());
				timeline_size.setHeight(background_frame->GetHeight());
			}

			// Get time mapped frame object (used to increase speed, change direction, etc...)
			apply_timemapping(frame);

			// Apply waveform image (if any)
			apply_waveform(frame, timeline_size);

			// Apply effects BEFORE applying keyframes (if any local or global effects are used)
			apply_effects(frame, timeline_frame_number, options, true);

			// Apply keyframe / transforms to current clip image
			apply_keyframes(frame, timeline_size);

			// Apply effects AFTER applying keyframes (if any local or global effects are used)
			apply_effects(frame, timeline_frame_number, options, false);

			// Add final frame to cache (before flattening into background_frame)
			final_cache.Add(frame);
		}

		// Remember whether the caller supplied a background before we may create one.
		const bool has_external_background = (background_frame != nullptr);

		// Timeline path.
		if (options) {
			if (!background_frame) {
				// Create a transparent background if missing.
				background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
														   "#00000000", frame->GetAudioSamplesCount(),
														   frame->GetAudioChannelsCount());
			}
			if (options->force_safe_composite) {
				// Edit mode: composite without mutating cached frame pixels.
				apply_background(frame, background_frame, false);
				return frame;
			}

			// Playback mode: keep original fast path.
			apply_background(frame, background_frame, true);
			return frame;
		}

		// No background: return the frame directly.
		if (!has_external_background) {
			return frame;
		}

		// External background: composite on a copy (protects the cached frame
		// from being mutated by the composite).
		auto output = std::make_shared<Frame>(*frame.get());
		apply_background(output, background_frame, true);
		return output;
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
551
552// Look up an effect by ID
554{
555 // Find the matching effect (if any)
556 for (const auto& effect : effects) {
557 if (effect->Id() == id) {
558 return effect;
559 }
560 }
561 return nullptr;
562}
563
564// Return the associated ParentClip (if any)
566 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
567 // Attach parent clip OR object to this clip
568 AttachToObject(parentObjectId);
569 }
570 return parentClipObject;
571}
572
573// Return the associated Parent Tracked Object (if any)
574std::shared_ptr<openshot::TrackedObjectBase> Clip::GetParentTrackedObject() {
575 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
576 // Attach parent clip OR object to this clip
577 AttachToObject(parentObjectId);
578 }
579 return parentTrackedObject;
580}
581
582// Get file extension
583std::string Clip::get_file_extension(std::string path)
584{
585 // Return last part of path safely (handle filenames without a dot)
586 const auto dot_pos = path.find_last_of('.');
587 if (dot_pos == std::string::npos || dot_pos + 1 >= path.size()) {
588 return std::string();
589 }
590
591 return path.substr(dot_pos + 1);
592}
593
// Adjust the audio and image of a time mapped frame.
// Gathers source audio samples spanning the remapped frame range, optionally
// resamples them to the target length, and writes them into `frame`.
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

	// Check for a valid time map curve (a single point means no remapping)
	if (time.GetLength() > 1)
	{
		// Serialize access: previous_location and resampler are shared state.
		const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

		int64_t clip_frame_number = frame->number;
		int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

		// create buffer
		juce::AudioBuffer<float> *source_samples = nullptr;

		// Get delta (difference from this frame to the next time mapped frame: Y value)
		double delta = time.GetDelta(clip_frame_number + 1);
		const bool prev_is_increasing = time.IsIncreasing(clip_frame_number);
		const bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

		// Determine length of source audio (in samples)
		// A delta of 1.0 == normal expected samples
		// A delta of 0.5 == 50% of normal expected samples
		// A delta of 2.0 == 200% of normal expected samples
		// NOTE(review): one argument line is elided in this scraped view
		// (presumably Reader()->info.sample_rate) — confirm against the repository.
		int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
															Reader()->info.channels);
		int source_sample_count = round(target_sample_count * fabs(delta));

		// Determine starting audio location
		AudioLocation location;
		if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2 || prev_is_increasing != is_increasing) {
			// No previous location OR gap detected OR direction changed
			location.frame = new_frame_number;
			location.sample_start = 0;

			// Create / Reset resampler
			// We don't want to interpolate between unrelated audio data
			if (resampler) {
				delete resampler;
				resampler = nullptr;
			}
			// Init resampler with # channels from Reader (should match the timeline)
			resampler = new AudioResampler(Reader()->info.channels);

			// Allocate buffer of silence to initialize some data inside the resampler
			// To prevent it from becoming input limited
			juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
			init_samples.clear();
			resampler->SetBuffer(&init_samples, 1.0);
			resampler->GetResampledBuffer();

		} else {
			// Use previous location (continue where the last frame left off)
			location = previous_location;
		}

		if (source_sample_count <= 0) {
			// Add silence and bail (we don't need any samples)
			frame->AddAudioSilence(target_sample_count);
			return;
		}

		// Allocate a new sample buffer for these delta frames
		source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
		source_samples->clear();

		// Walk source frames, accumulating samples until the buffer is full.
		int remaining_samples = source_sample_count;
		int source_pos = 0;
		while (remaining_samples > 0) {
			std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
			int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

			// Inform FrameMapper of the direction for THIS mapper frame
			if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
				fm->SetDirectionHint(is_increasing);
			}
			source_frame->SetAudioDirection(is_increasing);

			if (frame_sample_count == 0) {
				// No samples found in source frame (fill with silence)
				if (is_increasing) {
					location.frame++;
				} else {
					location.frame--;
				}
				location.sample_start = 0;
				break;
			}
			if (remaining_samples - frame_sample_count >= 0) {
				// Use all frame samples & increment location
				for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
					source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
				}
				if (is_increasing) {
					location.frame++;
				} else {
					location.frame--;
				}
				location.sample_start = 0;
				remaining_samples -= frame_sample_count;
				source_pos += frame_sample_count;

			} else {
				// Use just what is needed (and reverse samples)
				for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
					source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
				}
				location.sample_start += remaining_samples;
				// NOTE(review): remaining_samples is zeroed before being added to
				// source_pos, so source_pos is not advanced here. Harmless today
				// (the loop exits immediately), but it looks like an ordering slip.
				remaining_samples = 0;
				source_pos += remaining_samples;
			}

		}

		// Resize audio for current frame object + fill with silence
		// We are fixing to clobber this with actual audio data (possibly resampled)
		frame->AddAudioSilence(target_sample_count);

		if (source_sample_count != target_sample_count) {
			// Resample audio (if needed)
			double resample_ratio = double(source_sample_count) / double(target_sample_count);
			resampler->SetBuffer(source_samples, resample_ratio);

			// Resample the data
			juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

			// Fill the frame with resampled data
			for (int channel = 0; channel < Reader()->info.channels; channel++) {
				// Add new (slower) samples, to the frame object
				frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
			}
		} else {
			// Fill the frame (no resampling needed: counts already match)
			for (int channel = 0; channel < Reader()->info.channels; channel++) {
				// Add new (slower) samples, to the frame object
				frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
			}
		}

		// Clean up
		delete source_samples;

		// Set previous location (so the next frame can continue seamlessly)
		previous_location = location;
	}
}
746
747// Adjust frame number minimum value
748int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
749{
750 // Never return a frame number 0 or below
751 if (frame_number < 1)
752 return 1;
753 else
754 return frame_number;
755
756}
757
// Get or generate a blank frame.
// Fetches the requested frame from the reader (optionally remapped through the
// time curve), returns a private copy with the has_video/has_audio overrides
// applied, or synthesizes a silent black frame if the reader cannot supply one.
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
	try {
		// Init to requested frame
		int64_t clip_frame_number = adjust_frame_number_minimum(number);
		bool is_increasing = true;

		// Adjust for time-mapping (if any)
		if (enable_time && time.GetLength() > 1) {
			is_increasing = time.IsIncreasing(clip_frame_number + 1);
			const int64_t time_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
			if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
				// Inform FrameMapper which direction this mapper frame is being requested
				fm->SetDirectionHint(is_increasing);
			}
			clip_frame_number = time_frame_number;
		}

		// Debug output
		// NOTE(review): the logger call line is elided in this scraped view
		// (presumably ZmqLogger::Instance()->AppendDebugMethod(...)) — confirm upstream.
		"Clip::GetOrCreateFrame (from reader)",
		"number", number, "clip_frame_number", clip_frame_number);

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		auto reader_frame = reader->GetFrame(clip_frame_number);
		if (reader_frame) {
			// Override frame # (due to time-mapping might change it)
			reader_frame->number = number;
			reader_frame->SetAudioDirection(is_increasing);

			// Return real frame
			// Create a new copy of reader frame
			// This allows a clip to modify the pixels and audio of this frame without
			// changing the underlying reader's frame data
			auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
			if (has_video.GetInt(number) == 0) {
				// No video, so add transparent pixels
				reader_copy->AddColor(QColor(Qt::transparent));
			}
			if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
				// No audio, so include silence (also, mute audio if past end of reader)
				reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
			}
			return reader_copy;
		}

	} catch (const ReaderClosed & e) {
		// Best-effort: fall through and synthesize a blank frame below.
	} catch (const OutOfBoundsFrame & e) {
		// Best-effort: fall through and synthesize a blank frame below.
	}

	// Estimate # of samples needed for this frame
	int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

	// Debug output
	// NOTE(review): the logger call line is elided here as well — confirm upstream.
	"Clip::GetOrCreateFrame (create blank)",
	"number", number,
	"estimated_samples_in_frame", estimated_samples_in_frame);

	// Create blank frame (black video, silent audio, matching the reader's format)
	auto new_frame = std::make_shared<Frame>(
		number, reader->info.width, reader->info.height,
		"#000000", estimated_samples_in_frame, reader->info.channels);
	new_frame->SampleRate(reader->info.sample_rate);
	new_frame->ChannelsLayout(reader->info.channel_layout);
	new_frame->AddAudioSilence(estimated_samples_in_frame);
	return new_frame;
}
829
830// Generate JSON string of this object
831std::string Clip::Json() const {
832
833 // Return formatted string
834 return JsonValue().toStyledString();
835}
836
837// Get all properties for a specific frame
838std::string Clip::PropertiesJSON(int64_t requested_frame) const {
839
840 // Generate JSON properties list
841 Json::Value root;
842 root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
843 root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
844 root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
845 root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
846 root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
847 root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
848 root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
849 root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
850 root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
851 root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
852 root["composite"] = add_property_json("Composite", composite, "int", "", NULL, 0, composite_choices_count - 1, false, requested_frame);
853 root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
854 root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
855
856 // Add gravity choices (dropdown style)
857 root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
858 root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
859 root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
860 root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
861 root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
862 root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
863 root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
864 root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
865 root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));
866
867 // Add scale choices (dropdown style)
868 root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
869 root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
870 root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
871 root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));
872
873 // Add frame number display choices (dropdown style)
874 root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
875 root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
876 root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
877 root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));
878
879 // Add volume mixing choices (dropdown style)
880 root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
881 root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
882 root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));
883
884 // Add composite choices (dropdown style)
885 for (int i = 0; i < composite_choices_count; ++i)
886 root["composite"]["choices"].append(add_property_choice_json(composite_choices[i].name, composite_choices[i].value, composite));
887
888 // Add waveform choices (dropdown style)
889 root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
890 root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));
891
892 // Add the parentClipObject's properties
893 if (parentClipObject)
894 {
895 // Convert Clip's frame position to Timeline's frame position
896 long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
897 long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
898 double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
899
900 // Correct the parent Clip Object properties by the clip's reference system
901 float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
902 float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
903 float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
904 float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
905 float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
906 float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
907 float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
908
909 // Add the parent Clip Object properties to JSON
910 root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
911 root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
912 root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
913 root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
914 root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
915 root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
916 root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
917 }
918 else
919 {
920 // Add this own clip's properties to JSON
921 root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
922 root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
923 root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
924 root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
925 root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
926 root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
927 root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
928 }
929
930 // Keyframes
931 root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
932 root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
933 root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
934 root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
935 root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
936 root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
937 root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
938 root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
939 root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);
940
941 // Add enable audio/video choices (dropdown style)
942 root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
943 root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
944 root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
945 root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
946 root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
947 root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));
948
949 root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
950 root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
951 root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
952 root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);
953 root["wave_color"]["alpha"] = add_property_json("Alpha", wave_color.alpha.GetValue(requested_frame), "float", "", &wave_color.alpha, 0, 255, false, requested_frame);
954
955 // Return formatted string
956 return root.toStyledString();
957}
958
959// Generate Json::Value for this object
960Json::Value Clip::JsonValue() const {
961
962 // Create root json object
963 Json::Value root = ClipBase::JsonValue(); // get parent properties
964 root["parentObjectId"] = parentObjectId;
965 root["gravity"] = gravity;
966 root["scale"] = scale;
967 root["anchor"] = anchor;
968 root["display"] = display;
969 root["mixing"] = mixing;
970 root["composite"] = composite;
971 root["waveform"] = waveform;
972 root["scale_x"] = scale_x.JsonValue();
973 root["scale_y"] = scale_y.JsonValue();
974 root["location_x"] = location_x.JsonValue();
975 root["location_y"] = location_y.JsonValue();
976 root["alpha"] = alpha.JsonValue();
977 root["rotation"] = rotation.JsonValue();
978 root["time"] = time.JsonValue();
979 root["volume"] = volume.JsonValue();
980 root["wave_color"] = wave_color.JsonValue();
981 root["shear_x"] = shear_x.JsonValue();
982 root["shear_y"] = shear_y.JsonValue();
983 root["origin_x"] = origin_x.JsonValue();
984 root["origin_y"] = origin_y.JsonValue();
985 root["channel_filter"] = channel_filter.JsonValue();
986 root["channel_mapping"] = channel_mapping.JsonValue();
987 root["has_audio"] = has_audio.JsonValue();
988 root["has_video"] = has_video.JsonValue();
989 root["perspective_c1_x"] = perspective_c1_x.JsonValue();
990 root["perspective_c1_y"] = perspective_c1_y.JsonValue();
991 root["perspective_c2_x"] = perspective_c2_x.JsonValue();
992 root["perspective_c2_y"] = perspective_c2_y.JsonValue();
993 root["perspective_c3_x"] = perspective_c3_x.JsonValue();
994 root["perspective_c3_y"] = perspective_c3_y.JsonValue();
995 root["perspective_c4_x"] = perspective_c4_x.JsonValue();
996 root["perspective_c4_y"] = perspective_c4_y.JsonValue();
997
998 // Add array of effects
999 root["effects"] = Json::Value(Json::arrayValue);
1000
1001 // loop through effects
1002 for (auto existing_effect : effects)
1003 {
1004 root["effects"].append(existing_effect->JsonValue());
1005 }
1006
1007 if (reader)
1008 root["reader"] = reader->JsonValue();
1009 else
1010 root["reader"] = Json::Value(Json::objectValue);
1011
1012 // return JsonValue
1013 return root;
1014}
1015
1016// Load JSON string into this object
1017void Clip::SetJson(const std::string value) {
1018
1019 // Parse JSON string into JSON objects
1020 try
1021 {
1022 const Json::Value root = openshot::stringToJson(value);
1023 // Set all values that match
1024 SetJsonValue(root);
1025 }
1026 catch (const std::exception& e)
1027 {
1028 // Error parsing JSON (or missing keys)
1029 throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
1030 }
1031}
1032
1033// Load Json::Value into this object
1034void Clip::SetJsonValue(const Json::Value root) {
1035
1036 // Set parent data
1038
1039 // Set data from Json (if key is found)
1040 if (!root["parentObjectId"].isNull()){
1041 parentObjectId = root["parentObjectId"].asString();
1042 if (parentObjectId.size() > 0 && parentObjectId != ""){
1043 AttachToObject(parentObjectId);
1044 } else{
1045 parentTrackedObject = nullptr;
1046 parentClipObject = NULL;
1047 }
1048 }
1049 if (!root["gravity"].isNull())
1050 gravity = (GravityType) root["gravity"].asInt();
1051 if (!root["scale"].isNull())
1052 scale = (ScaleType) root["scale"].asInt();
1053 if (!root["anchor"].isNull())
1054 anchor = (AnchorType) root["anchor"].asInt();
1055 if (!root["display"].isNull())
1056 display = (FrameDisplayType) root["display"].asInt();
1057 if (!root["mixing"].isNull())
1058 mixing = (VolumeMixType) root["mixing"].asInt();
1059 if (!root["composite"].isNull())
1060 composite = (CompositeType) root["composite"].asInt();
1061 if (!root["waveform"].isNull())
1062 waveform = root["waveform"].asBool();
1063 if (!root["scale_x"].isNull())
1064 scale_x.SetJsonValue(root["scale_x"]);
1065 if (!root["scale_y"].isNull())
1066 scale_y.SetJsonValue(root["scale_y"]);
1067 if (!root["location_x"].isNull())
1068 location_x.SetJsonValue(root["location_x"]);
1069 if (!root["location_y"].isNull())
1070 location_y.SetJsonValue(root["location_y"]);
1071 if (!root["alpha"].isNull())
1072 alpha.SetJsonValue(root["alpha"]);
1073 if (!root["rotation"].isNull())
1074 rotation.SetJsonValue(root["rotation"]);
1075 if (!root["time"].isNull())
1076 time.SetJsonValue(root["time"]);
1077 if (!root["volume"].isNull())
1078 volume.SetJsonValue(root["volume"]);
1079 if (!root["wave_color"].isNull())
1080 wave_color.SetJsonValue(root["wave_color"]);
1081 if (!root["shear_x"].isNull())
1082 shear_x.SetJsonValue(root["shear_x"]);
1083 if (!root["shear_y"].isNull())
1084 shear_y.SetJsonValue(root["shear_y"]);
1085 if (!root["origin_x"].isNull())
1086 origin_x.SetJsonValue(root["origin_x"]);
1087 if (!root["origin_y"].isNull())
1088 origin_y.SetJsonValue(root["origin_y"]);
1089 if (!root["channel_filter"].isNull())
1090 channel_filter.SetJsonValue(root["channel_filter"]);
1091 if (!root["channel_mapping"].isNull())
1092 channel_mapping.SetJsonValue(root["channel_mapping"]);
1093 if (!root["has_audio"].isNull())
1094 has_audio.SetJsonValue(root["has_audio"]);
1095 if (!root["has_video"].isNull())
1096 has_video.SetJsonValue(root["has_video"]);
1097 if (!root["perspective_c1_x"].isNull())
1098 perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
1099 if (!root["perspective_c1_y"].isNull())
1100 perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
1101 if (!root["perspective_c2_x"].isNull())
1102 perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
1103 if (!root["perspective_c2_y"].isNull())
1104 perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
1105 if (!root["perspective_c3_x"].isNull())
1106 perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
1107 if (!root["perspective_c3_y"].isNull())
1108 perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
1109 if (!root["perspective_c4_x"].isNull())
1110 perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
1111 if (!root["perspective_c4_y"].isNull())
1112 perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
1113 if (!root["effects"].isNull()) {
1114
1115 // Clear existing effects
1116 effects.clear();
1117
1118 // loop through effects
1119 for (const auto existing_effect : root["effects"]) {
1120 // Skip NULL nodes
1121 if (existing_effect.isNull()) {
1122 continue;
1123 }
1124
1125 // Create Effect
1126 EffectBase *e = NULL;
1127 if (!existing_effect["type"].isNull()) {
1128
1129 // Create instance of effect
1130 if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
1131
1132 // Load Json into Effect
1133 e->SetJsonValue(existing_effect);
1134
1135 // Add Effect to Timeline
1136 AddEffect(e);
1137 }
1138 }
1139 }
1140 }
1141 if (!root["reader"].isNull()) // does Json contain a reader?
1142 {
1143 if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
1144 {
1145 // Close previous reader (if any)
1146 bool already_open = false;
1147 if (reader)
1148 {
1149 // Track if reader was open
1150 already_open = reader->IsOpen();
1151
1152 // Close and delete existing allocated reader (if any)
1153 Reader(NULL);
1154 }
1155
1156 // Create new reader (and load properties)
1157 std::string type = root["reader"]["type"].asString();
1158
1159 if (type == "FFmpegReader") {
1160
1161 // Create new reader
1162 reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
1163 reader->SetJsonValue(root["reader"]);
1164
1165 } else if (type == "QtImageReader") {
1166
1167 // Create new reader
1168 reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
1169 reader->SetJsonValue(root["reader"]);
1170
1171#ifdef USE_IMAGEMAGICK
1172 } else if (type == "ImageReader") {
1173
1174 // Create new reader
1175 reader = new ImageReader(root["reader"]["path"].asString(), false);
1176 reader->SetJsonValue(root["reader"]);
1177
1178 } else if (type == "TextReader") {
1179
1180 // Create new reader
1181 reader = new TextReader();
1182 reader->SetJsonValue(root["reader"]);
1183#endif
1184
1185 } else if (type == "ChunkReader") {
1186
1187 // Create new reader
1188 reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
1189 reader->SetJsonValue(root["reader"]);
1190
1191 } else if (type == "DummyReader") {
1192
1193 // Create new reader
1194 reader = new openshot::DummyReader();
1195 reader->SetJsonValue(root["reader"]);
1196
1197 } else if (type == "Timeline") {
1198
1199 // Create new reader (always load from file again)
1200 // This prevents FrameMappers from being loaded on accident
1201 reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
1202 }
1203
1204 // mark as managed reader and set parent
1205 if (reader) {
1206 reader->ParentClip(this);
1207 allocated_reader = reader;
1208 }
1209
1210 // Re-Open reader (if needed)
1211 if (already_open) {
1212 reader->Open();
1213 }
1214 }
1215 }
1216
1217 // Clear cache (it might have changed)
1218 final_cache.Clear();
1219}
1220
1221// Sort effects by order
1222void Clip::sort_effects()
1223{
1224 // sort clips
1225 effects.sort(CompareClipEffects());
1226}
1227
1228// Add an effect to the clip
1230{
1231 // Set parent clip pointer
1232 effect->ParentClip(this);
1233
1234 // Add effect to list
1235 effects.push_back(effect);
1236
1237 // Sort effects
1238 sort_effects();
1239
1240 // Get the parent timeline of this clip
1241 Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
1242
1243 if (parentTimeline)
1244 effect->ParentTimeline(parentTimeline);
1245
1246 #ifdef USE_OPENCV
1247 // Add Tracked Object to Timeline
1248 if (effect->info.has_tracked_object){
1249
1250 // Check if this clip has a parent timeline
1251 if (parentTimeline){
1252
1253 effect->ParentTimeline(parentTimeline);
1254
1255 // Iterate through effect's vector of Tracked Objects
1256 for (auto const& trackedObject : effect->trackedObjects){
1257
1258 // Cast the Tracked Object as TrackedObjectBBox
1259 std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
1260
1261 // Set the Tracked Object's parent clip to this
1262 trackedObjectBBox->ParentClip(this);
1263
1264 // Add the Tracked Object to the timeline
1265 parentTimeline->AddTrackedObject(trackedObjectBBox);
1266 }
1267 }
1268 }
1269 #endif
1270
1271 // Clear cache (it might have changed)
1272 final_cache.Clear();
1273}
1274
1275// Remove an effect from the clip
1277{
1278 effects.remove(effect);
1279
1280 // Clear cache (it might have changed)
1281 final_cache.Clear();
1282}
1283
1284// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
1285void Clip::apply_background(std::shared_ptr<openshot::Frame> frame,
1286 std::shared_ptr<openshot::Frame> background_frame,
1287 bool update_frame_image) {
1288 // Add background canvas
1289 std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
1290 QPainter painter(background_canvas.get());
1291
1292 // Composite a new layer onto the image
1293 painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));
1294 painter.drawImage(0, 0, *frame->GetImage());
1295 painter.end();
1296
1297 // Standalone clip requests update frame->image, but timeline composition
1298 // draws onto the timeline-owned background frame only.
1299 if (update_frame_image)
1300 frame->AddImage(background_canvas);
1301}
1302
1303// Apply effects to the source frame (if any)
1304void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
1305{
1306 for (auto effect : effects)
1307 {
1308 // Apply the effect to this frame
1309 if (effect->info.apply_before_clip && before_keyframes) {
1310 effect->ProcessFrame(frame, frame->number);
1311 } else if (!effect->info.apply_before_clip && !before_keyframes) {
1312 effect->ProcessFrame(frame, frame->number);
1313 }
1314 }
1315
1316 if (timeline != NULL && options != NULL) {
1317 // Apply global timeline effects (i.e. transitions & masks... if any)
1318 Timeline* timeline_instance = static_cast<Timeline*>(timeline);
1319 options->is_before_clip_keyframes = before_keyframes;
1320 timeline_instance->apply_effects(frame, timeline_frame_number, Layer(), options);
1321 }
1322}
1323
1324// Compare 2 floating point numbers for equality
1325bool Clip::isNear(double a, double b)
1326{
1327 return fabs(a - b) < 0.000001;
1328}
1329
1330// Apply keyframes to the source frame (if any)
1331void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
1332 // Skip out if video was disabled or only an audio frame (no visualisation in use)
1333 if (!frame->has_image_data) {
1334 // Skip the rest of the image processing for performance reasons
1335 return;
1336 }
1337
1338 // Get image from clip, and create transparent background image
1339 std::shared_ptr<QImage> source_image = frame->GetImage();
1340 std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
1341 timeline_size.height(),
1342 QImage::Format_RGBA8888_Premultiplied);
1343 background_canvas->fill(QColor(Qt::transparent));
1344
1345 // Get transform from clip's keyframes
1346 QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
1347
1348 // Load timeline's new frame image into a QPainter
1349 QPainter painter(background_canvas.get());
1350 painter.setRenderHint(QPainter::TextAntialiasing, true);
1351 if (!transform.isIdentity()) {
1352 painter.setRenderHint(QPainter::SmoothPixmapTransform, true);
1353 }
1354 // Apply transform (translate, rotate, scale)
1355 painter.setTransform(transform);
1356
1357 // Composite a new layer onto the image
1358 painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));
1359
1360 // Apply opacity via painter instead of per-pixel alpha manipulation
1361 const float alpha_value = alpha.GetValue(frame->number);
1362 if (alpha_value != 1.0f) {
1363 painter.setOpacity(alpha_value);
1364 painter.drawImage(0, 0, *source_image);
1365 // Reset so any subsequent drawing (e.g., overlays) isn’t faded
1366 painter.setOpacity(1.0);
1367 } else {
1368 painter.drawImage(0, 0, *source_image);
1369 }
1370
1371 if (timeline) {
1372 Timeline *t = static_cast<Timeline *>(timeline);
1373
1374 // Draw frame #'s on top of image (if needed)
1375 if (display != FRAME_DISPLAY_NONE) {
1376 std::stringstream frame_number_str;
1377 switch (display) {
1378 case (FRAME_DISPLAY_NONE):
1379 // This is only here to prevent unused-enum warnings
1380 break;
1381
1382 case (FRAME_DISPLAY_CLIP):
1383 frame_number_str << frame->number;
1384 break;
1385
1387 frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
1388 break;
1389
1390 case (FRAME_DISPLAY_BOTH):
1391 frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
1392 break;
1393 }
1394
1395 // Draw frame number on top of image
1396 painter.setPen(QColor("#ffffff"));
1397 painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
1398 }
1399 }
1400 painter.end();
1401
1402 // Add new QImage to frame
1403 frame->AddImage(background_canvas);
1404}
1405
1406// Apply apply_waveform image to the source frame (if any)
1407void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {
1408
1409 if (!Waveform()) {
1410 // Exit if no waveform is needed
1411 return;
1412 }
1413
1414 // Get image from clip
1415 std::shared_ptr<QImage> source_image = frame->GetImage();
1416
1417 // Debug output
1418 ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_waveform (Generate Waveform Image)",
1419 "frame->number", frame->number,
1420 "Waveform()", Waveform(),
1421 "width", timeline_size.width(),
1422 "height", timeline_size.height());
1423
1424 // Get the color of the waveform
1425 int red = wave_color.red.GetInt(frame->number);
1426 int green = wave_color.green.GetInt(frame->number);
1427 int blue = wave_color.blue.GetInt(frame->number);
1428 int alpha = wave_color.alpha.GetInt(frame->number);
1429
1430 // Generate Waveform Dynamically (the size of the timeline)
1431 source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue, alpha);
1432 frame->AddImage(source_image);
1433}
1434
1435// Scale a source size to a target size (given a specific scale-type)
1436QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
1437 switch (source_scale)
1438 {
1439 case (SCALE_FIT): {
1440 source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
1441 break;
1442 }
1443 case (SCALE_STRETCH): {
1444 source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
1445 break;
1446 }
1447 case (SCALE_CROP): {
1448 source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);;
1449 break;
1450 }
1451 }
1452
1453 return source_size;
1454}
1455
1456// Get QTransform from keyframes
1457QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
1458{
1459 // Get image from clip
1460 std::shared_ptr<QImage> source_image = frame->GetImage();
1461
1462 /* RESIZE SOURCE IMAGE - based on scale type */
1463 QSize source_size = scale_size(source_image->size(), scale, width, height);
1464
1465 // Initialize parent object's properties (Clip or Tracked Object)
1466 float parentObject_location_x = 0.0;
1467 float parentObject_location_y = 0.0;
1468 float parentObject_scale_x = 1.0;
1469 float parentObject_scale_y = 1.0;
1470 float parentObject_shear_x = 0.0;
1471 float parentObject_shear_y = 0.0;
1472 float parentObject_rotation = 0.0;
1473
1474 // Get the parentClipObject properties
1475 if (GetParentClip()){
1476 // Get the start trim position of the parent clip
1477 long parent_start_offset = parentClipObject->Start() * info.fps.ToDouble();
1478 long parent_frame_number = frame->number + parent_start_offset;
1479
1480 // Get parent object's properties (Clip)
1481 parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
1482 parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
1483 parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
1484 parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
1485 parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
1486 parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
1487 parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
1488 }
1489
1490 // Get the parentTrackedObject properties
1492 // Get the attached object's parent clip's properties
1493 Clip* parentClip = (Clip*) parentTrackedObject->ParentClip();
1494 if (parentClip)
1495 {
1496 // Get the start trim position of the parent clip
1497 long parent_start_offset = parentClip->Start() * info.fps.ToDouble();
1498 long parent_frame_number = frame->number + parent_start_offset;
1499
1500 // Access the parentTrackedObject's properties
1501 std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
1502
1503 // Get actual scaled parent size
1504 QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
1505 parentClip->scale, width, height);
1506
1507 // Get actual scaled tracked object size
1508 int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
1509 parentClip->scale_x.GetValue(parent_frame_number);
1510 int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
1511 parentClip->scale_y.GetValue(parent_frame_number);
1512
1513 // Scale the clip source_size based on the actual tracked object size
1514 source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);
1515
1516 // Update parentObject's properties based on the tracked object's properties and parent clip's scale
1517 parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
1518 parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
1519 parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);
1520 }
1521 }
1522
1523 /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
1524 float x = 0.0; // left
1525 float y = 0.0; // top
1526
1527 // Adjust size for scale x and scale y
1528 float sx = scale_x.GetValue(frame->number); // percentage X scale
1529 float sy = scale_y.GetValue(frame->number); // percentage Y scale
1530
1531 // Change clip's scale to parentObject's scale
1532 if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
1533 sx*= parentObject_scale_x;
1534 sy*= parentObject_scale_y;
1535 }
1536
1537 float scaled_source_width = source_size.width() * sx;
1538 float scaled_source_height = source_size.height() * sy;
1539
1540 switch (gravity)
1541 {
1542 case (GRAVITY_TOP_LEFT):
1543 // This is only here to prevent unused-enum warnings
1544 break;
1545 case (GRAVITY_TOP):
1546 x = (width - scaled_source_width) / 2.0; // center
1547 break;
1548 case (GRAVITY_TOP_RIGHT):
1549 x = width - scaled_source_width; // right
1550 break;
1551 case (GRAVITY_LEFT):
1552 y = (height - scaled_source_height) / 2.0; // center
1553 break;
1554 case (GRAVITY_CENTER):
1555 x = (width - scaled_source_width) / 2.0; // center
1556 y = (height - scaled_source_height) / 2.0; // center
1557 break;
1558 case (GRAVITY_RIGHT):
1559 x = width - scaled_source_width; // right
1560 y = (height - scaled_source_height) / 2.0; // center
1561 break;
1562 case (GRAVITY_BOTTOM_LEFT):
1563 y = (height - scaled_source_height); // bottom
1564 break;
1565 case (GRAVITY_BOTTOM):
1566 x = (width - scaled_source_width) / 2.0; // center
1567 y = (height - scaled_source_height); // bottom
1568 break;
1569 case (GRAVITY_BOTTOM_RIGHT):
1570 x = width - scaled_source_width; // right
1571 y = (height - scaled_source_height); // bottom
1572 break;
1573 }
1574
1575 // Debug output
1577 "Clip::get_transform (Gravity)",
1578 "frame->number", frame->number,
1579 "source_clip->gravity", gravity,
1580 "scaled_source_width", scaled_source_width,
1581 "scaled_source_height", scaled_source_height);
1582
1583 QTransform transform;
1584
1585 /* LOCATION, ROTATION, AND SCALE */
1586 float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1587 x += width * (location_x.GetValue(frame->number) + parentObject_location_x); // move in percentage of final width
1588 y += height * (location_y.GetValue(frame->number) + parentObject_location_y); // move in percentage of final height
1589 float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1590 float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1591 float origin_x_value = origin_x.GetValue(frame->number);
1592 float origin_y_value = origin_y.GetValue(frame->number);
1593
1594 // Transform source image (if needed)
1596 "Clip::get_transform (Build QTransform - if needed)",
1597 "frame->number", frame->number,
1598 "x", x, "y", y,
1599 "r", r,
1600 "sx", sx, "sy", sy);
1601
1602 if (!isNear(x, 0) || !isNear(y, 0)) {
1603 // TRANSLATE/MOVE CLIP
1604 transform.translate(x, y);
1605 }
1606 if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
1607 // ROTATE CLIP (around origin_x, origin_y)
1608 float origin_x_offset = (scaled_source_width * origin_x_value);
1609 float origin_y_offset = (scaled_source_height * origin_y_value);
1610 transform.translate(origin_x_offset, origin_y_offset);
1611 transform.rotate(r);
1612 transform.shear(shear_x_value, shear_y_value);
1613 transform.translate(-origin_x_offset,-origin_y_offset);
1614 }
1615 // SCALE CLIP (if needed)
1616 float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1617 float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1618 if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
1619 transform.scale(source_width_scale, source_height_scale);
1620 }
1621
1622 return transform;
1623}
1624
1625// Adjust frame number for Clip position and start (which can result in a different number)
1626int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1627
1628 // Get clip position from parent clip (if any)
1629 float position = 0.0;
1630 float start = 0.0;
1631 Clip *parent = static_cast<Clip *>(ParentClip());
1632 if (parent) {
1633 position = parent->Position();
1634 start = parent->Start();
1635 }
1636
1637 // Adjust start frame and position based on parent clip.
1638 // This ensures the same frame # is used by mapped readers and clips,
1639 // when calculating samples per frame.
1640 // Thus, this prevents gaps and mismatches in # of samples.
1641 int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1642 int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1643 int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1644
1645 return frame_number;
1646}
Header file for AudioResampler class.
Header file for ChunkReader class.
Header file for Clip class.
Header file for DummyReader class.
Header file for all Exception classes.
Header file for FFmpegReader class.
Header file for the FrameMapper class.
Header file for ImageReader class.
Header file for MagickUtilities (IM6/IM7 compatibility overlay)
Header file for QtImageReader class.
Header file for TextReader class.
Header file for Timeline class.
Header file for ZeroMQ-based Logger class.
This class is used to resample audio data for many sequential frames.
void SetBuffer(juce::AudioBuffer< float > *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
juce::AudioBuffer< float > * GetResampledBuffer()
Get the resampled audio buffer.
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Definition CacheBase.cpp:28
void Add(std::shared_ptr< openshot::Frame > frame)
Add a Frame to the cache.
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)
Get a frame from the cache.
void Clear()
Clear the cache of all frames.
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition ChunkReader.h:79
float Start() const
Get start position (in seconds) of clip (trim start of video)
Definition ClipBase.h:88
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition ClipBase.h:37
float Duration() const
Get the length of this clip (in seconds)
Definition ClipBase.h:90
virtual float End() const
Get end position (in seconds) of clip (trim end of video)
Definition ClipBase.h:89
std::string Id() const
Get the Id of this clip object.
Definition ClipBase.h:85
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition ClipBase.cpp:64
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
Definition ClipBase.cpp:132
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition ClipBase.h:87
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition ClipBase.cpp:80
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any)
Definition ClipBase.h:40
float Position() const
Get position on timeline (in seconds)
Definition ClipBase.h:86
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
Definition ClipBase.h:91
std::string id
ID Property for all derived Clip and Effect classes.
Definition ClipBase.h:34
float position
The position on the timeline where this clip should start playing.
Definition ClipBase.h:35
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition ClipBase.h:38
std::string previous_properties
This string contains the previous JSON properties.
Definition ClipBase.h:39
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
Definition ClipBase.cpp:96
This class represents a clip (used to arrange readers on the timeline)
Definition Clip.h:89
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
Definition Clip.cpp:328
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition Clip.h:318
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
Definition Clip.h:321
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition Clip.h:326
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition Clip.h:345
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition Clip.h:180
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition Clip.h:182
void Open() override
Open the internal reader.
Definition Clip.cpp:384
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
Definition Clip.h:325
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
Definition Clip.h:349
openshot::FrameDisplayType display
The format to display the frame number (if any)
Definition Clip.h:181
void init_reader_rotation()
Update default rotation from reader.
Definition Clip.cpp:148
Clip()
Default Constructor.
Definition Clip.cpp:199
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition Clip.h:339
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
Definition Clip.cpp:305
std::string Json() const override
Generate JSON string of this object.
Definition Clip.cpp:831
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
Definition Clip.cpp:553
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition Clip.cpp:1034
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
Definition Clip.h:322
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
Definition Clip.h:353
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition Clip.h:343
void init_reader_settings()
Init reader info details.
Definition Clip.cpp:135
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition Clip.h:340
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition Clip.cpp:960
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
Definition Clip.cpp:333
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any)
Definition Clip.h:296
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition Clip.h:346
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Definition Clip.h:332
openshot::Clip * GetParentClip()
Return the associated ParentClip (if any)
Definition Clip.cpp:565
bool Waveform()
Get the waveform property of this clip.
Definition Clip.h:314
openshot::CompositeType composite
How this clip is composited onto lower layers.
Definition Clip.h:183
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition Clip.h:178
AudioLocation previous_location
Previous time-mapped audio location.
Definition Clip.h:95
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition Clip.h:344
std::shared_ptr< openshot::TrackedObjectBase > GetParentTrackedObject()
Return the associated Parent Tracked Object (if any)
Definition Clip.cpp:574
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
Definition Clip.cpp:1229
void Close() override
Close the internal reader.
Definition Clip.cpp:405
virtual ~Clip()
Destructor.
Definition Clip.cpp:285
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition Clip.h:342
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition Clip.h:333
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition Clip.h:327
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition Clip.h:319
float End() const override
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
Definition Clip.cpp:420
std::shared_ptr< openshot::Frame > GetFrame(int64_t clip_frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
Definition Clip.cpp:455
openshot::ReaderBase * Reader()
Get the current reader.
Definition Clip.cpp:374
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
Definition Clip.cpp:1276
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition Clip.h:350
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
Definition Clip.h:354
std::string PropertiesJSON(int64_t requested_frame) const override
Definition Clip.cpp:838
openshot::Color wave_color
Curve representing the color of the audio wave form.
Definition Clip.h:336
void init_settings()
Init default settings for a clip.
Definition Clip.cpp:69
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition Clip.h:341
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition Clip.h:179
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
Definition Clip.h:320
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right))
Definition Clip.h:328
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
Definition Clip.h:92
void SetJson(const std::string value) override
Load JSON string into this object.
Definition Clip.cpp:1017
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom))
Definition Clip.h:329
This class represents a color (used on the timeline and clips)
Definition Color.h:27
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition Color.h:32
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition Color.h:30
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition Color.h:31
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition Color.cpp:117
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Definition Color.h:33
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition Color.cpp:86
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
Definition DummyReader.h:86
This abstract class is the base class, used by all effects in libopenshot.
Definition EffectBase.h:57
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
EffectInfoStruct info
Information about the current effect.
Definition EffectBase.h:110
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Object's by their indices (used by Effects that track objects on clips)
Definition EffectBase.h:107
This class returns a listing of all effects supported by libopenshot.
Definition EffectInfo.h:29
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition Fraction.cpp:35
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition Fraction.cpp:40
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
ReaderBase * Reader()
Get the current reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition Frame.cpp:484
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition ImageReader.h:56
Exception for invalid JSON.
Definition Exceptions.h:224
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition KeyFrame.h:53
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition KeyFrame.cpp:282
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition KeyFrame.cpp:372
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
Definition KeyFrame.cpp:399
int64_t GetLength() const
Definition KeyFrame.cpp:417
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
Definition KeyFrame.cpp:287
double GetValue(int64_t index) const
Get the value at a specific index.
Definition KeyFrame.cpp:258
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition KeyFrame.cpp:339
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
Definition KeyFrame.cpp:292
int64_t GetCount() const
Get the number of points (i.e. # of points)
Definition KeyFrame.cpp:424
Exception for frames that are out of bounds.
Definition Exceptions.h:307
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
This abstract class is the base class, used by all readers in libopenshot.
Definition ReaderBase.h:76
virtual bool IsOpen()=0
Determine if reader is open or closed.
virtual std::string Name()=0
Return the type name of the class.
openshot::ReaderInfo info
Information about the current media file.
Definition ReaderBase.h:88
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
virtual void Close()=0
Close the reader (and any resources it was consuming)
Exception when a reader is closed, and a frame is requested.
Definition Exceptions.h:370
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition TextReader.h:63
This class represents a timeline (used for building generic timeline implementations)
This class represents a timeline.
Definition Timeline.h:153
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
Definition Timeline.cpp:229
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
Definition Timeline.cpp:247
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
Definition Timeline.cpp:418
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
Definition Timeline.cpp:551
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition ZmqLogger.cpp:35
This namespace is the default namespace for all code in the openshot library.
Definition Compressor.h:29
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition Enums.h:45
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
Definition Enums.h:46
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
Definition ChunkReader.h:50
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition Enums.h:22
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
Definition Enums.h:23
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
Definition Enums.h:26
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
Definition Enums.h:25
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
Definition Enums.h:28
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
Definition Enums.h:29
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
Definition Enums.h:30
@ GRAVITY_TOP
Align clip to the top center of its parent.
Definition Enums.h:24
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Definition Enums.h:31
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
Definition Enums.h:27
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition Enums.h:36
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
Definition Enums.h:38
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
Definition Enums.h:39
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
Definition Enums.h:37
@ SCALE_NONE
Do not scale the clip.
Definition Enums.h:40
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition Enums.h:68
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
Definition Enums.h:70
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
Definition Enums.h:69
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition Enums.h:71
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
Definition Enums.h:52
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
Definition Enums.h:54
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
Definition Enums.h:55
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
Definition Enums.h:56
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Definition Enums.h:53
const Json::Value stringToJson(const std::string value)
Definition Json.cpp:16
CompositeType
This enumeration determines how clips are composited onto lower layers.
Definition Enums.h:75
@ COMPOSITE_LIGHTEN
Definition Enums.h:95
@ COMPOSITE_MULTIPLY
Definition Enums.h:91
@ COMPOSITE_EXCLUSION
Definition Enums.h:101
@ COMPOSITE_PLUS
Definition Enums.h:90
@ COMPOSITE_SOURCE_OVER
Definition Enums.h:76
@ COMPOSITE_DARKEN
Definition Enums.h:94
@ COMPOSITE_COLOR_DODGE
Definition Enums.h:96
@ COMPOSITE_SOFT_LIGHT
Definition Enums.h:99
@ COMPOSITE_DIFFERENCE
Definition Enums.h:100
@ COMPOSITE_HARD_LIGHT
Definition Enums.h:98
@ COMPOSITE_OVERLAY
Definition Enums.h:93
@ COMPOSITE_COLOR_BURN
Definition Enums.h:97
@ COMPOSITE_SCREEN
Definition Enums.h:92
This struct holds the associated video frame and starting sample # for an audio packet.
bool has_tracked_object
Determines if this effect track objects through the clip.
Definition EffectBase.h:45
float duration
Length of time (in seconds)
Definition ReaderBase.h:43
int width
The width of the video (in pixels)
Definition ReaderBase.h:46
int channels
The number of audio channels used in the audio stream.
Definition ReaderBase.h:61
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition ReaderBase.h:48
int height
The height of the video (in pixels)
Definition ReaderBase.h:45
int64_t video_length
The number of frames in the video stream.
Definition ReaderBase.h:53
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
Definition ReaderBase.h:65
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition ReaderBase.h:62
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition ReaderBase.h:60
This struct contains info about the current Timeline clip instance.
bool force_safe_composite
If true, avoid mutating cached clip images during composition.
bool is_before_clip_keyframes
Is this before clip keyframes are applied.