26#include <QRegularExpression>
27#include <unordered_map>
35 is_open(false), auto_map_clips(true), managed_cache(true),
path(
""), max_time(0.0), cache_epoch(0), safe_edit_frames_remaining(0)
81 info.width, info.height, info.fps, info.sample_rate,
82 info.channels, info.channel_layout) {}
86 is_open(false), auto_map_clips(true), managed_cache(true),
path(projectPath), max_time(0.0), cache_epoch(0), safe_edit_frames_remaining(0) {
105 QFileInfo filePath(QString::fromStdString(path));
106 if (!filePath.exists()) {
107 throw InvalidFile(
"Timeline project file could not be opened.", path);
113 if (!openshotPath.exists()) {
116 QDir openshotTransPath(openshotPath.filePath(
"transitions"));
117 if (!openshotTransPath.exists()) {
118 throw InvalidFile(
"PATH_OPENSHOT_INSTALL/transitions could not be found.", openshotTransPath.path().toStdString());
122 QString asset_name = filePath.baseName().left(30) +
"_assets";
123 QDir asset_folder(filePath.dir().filePath(asset_name));
124 if (!asset_folder.exists()) {
126 asset_folder.mkpath(
".");
130 QFile projectFile(QString::fromStdString(path));
131 projectFile.open(QFile::ReadOnly);
132 QString projectContents = QString::fromUtf8(projectFile.readAll());
135 if (convert_absolute_paths) {
139 QRegularExpression allPathsRegex(QStringLiteral(
"\"(image|path)\":.*?\"(.*?)\""));
140 std::vector<QRegularExpressionMatch> matchedPositions;
141 QRegularExpressionMatchIterator i = allPathsRegex.globalMatch(projectContents);
142 while (i.hasNext()) {
143 QRegularExpressionMatch match = i.next();
144 if (match.hasMatch()) {
146 matchedPositions.push_back(match);
151 std::vector<QRegularExpressionMatch>::reverse_iterator itr;
152 for (itr = matchedPositions.rbegin(); itr != matchedPositions.rend(); itr++) {
153 QRegularExpressionMatch match = *itr;
154 QString relativeKey = match.captured(1);
155 QString relativePath = match.captured(2);
156 QString absolutePath =
"";
159 if (relativePath.startsWith(
"@assets")) {
160 absolutePath = QFileInfo(asset_folder.absoluteFilePath(relativePath.replace(
"@assets",
"."))).canonicalFilePath();
161 }
else if (relativePath.startsWith(
"@transitions")) {
162 absolutePath = QFileInfo(openshotTransPath.absoluteFilePath(relativePath.replace(
"@transitions",
"."))).canonicalFilePath();
164 absolutePath = QFileInfo(filePath.absoluteDir().absoluteFilePath(relativePath)).canonicalFilePath();
168 if (!absolutePath.isEmpty()) {
169 projectContents.replace(match.capturedStart(0), match.capturedLength(0),
"\"" + relativeKey +
"\": \"" + absolutePath +
"\"");
173 matchedPositions.clear();
177 SetJson(projectContents.toStdString());
181 float calculated_duration = 0.0;
182 for (
auto clip : clips)
185 if (clip_last_frame > calculated_duration)
186 calculated_duration = clip_last_frame;
187 if (
clip->Reader() &&
clip->Reader()->info.has_audio)
189 if (
clip->Reader() &&
clip->Reader()->info.has_video)
222 if (managed_cache && final_cache) {
232 auto iterator = tracked_objects.find(trackedObject->Id());
234 if (iterator != tracked_objects.end()){
236 iterator->second = trackedObject;
240 tracked_objects[trackedObject->Id()] = trackedObject;
250 auto iterator = tracked_objects.find(
id);
252 if (iterator != tracked_objects.end()){
254 std::shared_ptr<openshot::TrackedObjectBase> trackedObject = iterator->second;
255 return trackedObject;
267 std::list<std::string> trackedObjects_ids;
270 for (
auto const& it: tracked_objects){
272 trackedObjects_ids.push_back(it.first);
275 return trackedObjects_ids;
283 Json::Value trackedObjectJson;
286 auto iterator = tracked_objects.find(
id);
288 if (iterator != tracked_objects.end())
291 std::shared_ptr<TrackedObjectBBox> trackedObject = std::static_pointer_cast<TrackedObjectBBox>(iterator->second);
294 if (trackedObject->ExactlyContains(frame_number)){
295 BBox box = trackedObject->GetBox(frame_number);
296 float x1 = box.
cx - (box.
width/2);
298 float x2 = box.
cx + (box.
width/2);
300 float rotation = box.
angle;
302 trackedObjectJson[
"x1"] = x1;
303 trackedObjectJson[
"y1"] = y1;
304 trackedObjectJson[
"x2"] = x2;
305 trackedObjectJson[
"y2"] = y2;
306 trackedObjectJson[
"rotation"] = rotation;
309 BBox box = trackedObject->BoxVec.begin()->second;
310 float x1 = box.
cx - (box.
width/2);
312 float x2 = box.
cx + (box.
width/2);
314 float rotation = box.
angle;
316 trackedObjectJson[
"x1"] = x1;
317 trackedObjectJson[
"y1"] = y1;
318 trackedObjectJson[
"x2"] = x2;
319 trackedObjectJson[
"y2"] = y2;
320 trackedObjectJson[
"rotation"] = rotation;
326 trackedObjectJson[
"x1"] = 0;
327 trackedObjectJson[
"y1"] = 0;
328 trackedObjectJson[
"x2"] = 0;
329 trackedObjectJson[
"y2"] = 0;
330 trackedObjectJson[
"rotation"] = 0;
333 return trackedObjectJson.toStyledString();
341 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
347 if (
clip->Reader() &&
clip->Reader()->GetCache())
348 clip->Reader()->GetCache()->Clear();
351 if (auto_map_clips) {
353 apply_mapper_to_clip(
clip);
357 clips.push_back(
clip);
367 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
373 effects.push_back(effect);
383 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
385 effects.remove(effect);
388 if (allocated_effects.count(effect)) {
389 allocated_effects.erase(effect);
402 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
407 if (allocated_clips.count(
clip)) {
408 allocated_clips.erase(
clip);
421 for (
const auto&
clip : clips) {
433 for (
const auto& effect : effects) {
434 if (effect->Id() == id) {
444 for (
const auto&
clip : clips) {
445 const auto e =
clip->GetEffect(
id);
457 std::list<EffectBase*> timelineEffectsList;
460 for (
const auto&
clip : clips) {
463 std::list<EffectBase*> clipEffectsList =
clip->Effects();
466 timelineEffectsList.insert(timelineEffectsList.end(), clipEffectsList.begin(), clipEffectsList.end());
469 return timelineEffectsList;
483 return static_cast<int64_t
>(std::ceil(t * fps));
491 return static_cast<int64_t
>(std::floor(t * fps)) + 1;
501void Timeline::apply_mapper_to_clip(
Clip* clip)
505 if (
clip->Reader()->Name() ==
"FrameMapper")
518 allocated_frame_mappers.insert(mapper);
523 clip->Reader(clip_reader);
533 for (
auto clip : clips)
536 apply_mapper_to_clip(
clip);
541double Timeline::calculate_time(int64_t number,
Fraction rate)
544 double raw_fps = rate.
ToFloat();
547 return double(number - 1) / raw_fps;
555 "Timeline::apply_effects",
556 "frame->number", frame->number,
557 "timeline_frame_number", timeline_frame_number,
561 for (
auto effect : effects)
565 int64_t effect_start_position =
static_cast<int64_t
>(std::llround(effect->Position() * fpsD)) + 1;
566 int64_t effect_end_position =
static_cast<int64_t
>(std::llround((effect->Position() + effect->Duration()) * fpsD));
568 bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);
571 if (does_effect_intersect)
574 int64_t effect_start_frame =
static_cast<int64_t
>(std::llround(effect->Start() * fpsD)) + 1;
575 int64_t effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
585 "Timeline::apply_effects (Process Effect)",
586 "effect_frame_number", effect_frame_number,
587 "does_effect_intersect", does_effect_intersect);
590 frame = effect->ProcessFrame(frame, effect_frame_number);
600std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> background_frame,
Clip* clip, int64_t number,
openshot::TimelineInfoStruct* options)
602 std::shared_ptr<Frame> new_frame;
610 "Timeline::GetOrCreateFrame (from reader)",
612 "samples_in_frame", samples_in_frame);
615 new_frame = std::shared_ptr<Frame>(
clip->
GetFrame(background_frame, number, options));
628 "Timeline::GetOrCreateFrame (create blank)",
630 "samples_in_frame", samples_in_frame);
637void Timeline::add_layer(std::shared_ptr<Frame> new_frame,
Clip* source_clip, int64_t clip_frame_number,
bool is_top_clip,
bool force_safe_composite,
float max_volume)
646 std::shared_ptr<Frame> source_frame;
647 source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number, &options);
655 "Timeline::add_layer",
656 "new_frame->number", new_frame->number,
657 "clip_frame_number", clip_frame_number);
660 if (source_clip->
Reader()->info.has_audio) {
663 "Timeline::add_layer (Copy Audio)",
664 "source_clip->Reader()->info.has_audio", source_clip->
Reader()->info.has_audio,
665 "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(),
667 "clip_frame_number", clip_frame_number);
672 if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
677 const auto transition_audio_gains = ResolveTransitionAudioGains(source_clip, new_frame->number, is_top_clip);
679 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
682 float previous_volume = source_clip->
volume.
GetValue(clip_frame_number - 1);
684 previous_volume *= transition_audio_gains.first;
685 volume *= transition_audio_gains.second;
692 previous_volume = previous_volume / max_volume;
693 volume = volume / max_volume;
697 previous_volume = previous_volume * 0.77;
698 volume = volume * 0.77;
702 if (channel_filter != -1 && channel_filter != channel)
706 if (previous_volume == 0.0 && volume == 0.0)
710 if (channel_mapping == -1)
711 channel_mapping = channel;
714 if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
715 source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
719 new_frame->AddAudio(
false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
725 "Timeline::add_layer (No Audio Copied - Wrong # of Channels)",
726 "source_clip->Reader()->info.has_audio",
727 source_clip->
Reader()->info.has_audio,
728 "source_frame->GetAudioChannelsCount()",
729 source_frame->GetAudioChannelsCount(),
731 "clip_frame_number", clip_frame_number);
736 "Timeline::add_layer (Transform: Composite Image Layer: Completed)",
737 "source_frame->number", source_frame->number,
738 "new_frame->GetImage()->width()", new_frame->GetWidth(),
739 "new_frame->GetImage()->height()", new_frame->GetHeight());
743void Timeline::update_open_clips(
Clip *clip,
bool does_clip_intersect)
746 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
749 "Timeline::update_open_clips (before)",
750 "does_clip_intersect", does_clip_intersect,
751 "closing_clips.size()", closing_clips.size(),
752 "open_clips.size()", open_clips.size());
755 bool clip_found = open_clips.count(
clip);
757 if (clip_found && !does_clip_intersect)
760 open_clips.erase(
clip);
765 else if (!clip_found && does_clip_intersect)
781 "Timeline::update_open_clips (after)",
782 "does_clip_intersect", does_clip_intersect,
783 "clip_found", clip_found,
784 "closing_clips.size()", closing_clips.size(),
785 "open_clips.size()", open_clips.size());
789void Timeline::calculate_max_duration() {
790 double last_clip = 0.0;
791 double last_effect = 0.0;
792 double first_clip = std::numeric_limits<double>::max();
793 double first_effect = std::numeric_limits<double>::max();
796 if (!clips.empty()) {
798 const auto max_clip = std::max_element(
800 last_clip = (*max_clip)->Position() + (*max_clip)->Duration();
803 const auto min_clip = std::min_element(
805 return lhs->Position() < rhs->Position();
807 first_clip = (*min_clip)->Position();
811 if (!effects.empty()) {
813 const auto max_effect = std::max_element(
815 last_effect = (*max_effect)->Position() + (*max_effect)->Duration();
818 const auto min_effect = std::min_element(
820 return lhs->Position() < rhs->Position();
822 first_effect = (*min_effect)->Position();
826 max_time = std::max(last_clip, last_effect);
827 min_time = std::min(first_clip, first_effect);
830 if (clips.empty() && effects.empty()) {
837void Timeline::sort_clips()
840 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
844 "Timeline::SortClips",
845 "clips.size()", clips.size());
851 calculate_max_duration();
855void Timeline::sort_effects()
858 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
864 calculate_max_duration();
873 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
876 for (
auto clip : clips)
878 update_open_clips(
clip,
false);
881 bool allocated = allocated_clips.count(
clip);
888 allocated_clips.clear();
891 for (
auto effect : effects)
894 bool allocated = allocated_effects.count(effect);
901 allocated_effects.clear();
904 for (
auto mapper : allocated_frame_mappers)
910 allocated_frame_mappers.clear();
919 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
922 for (
auto clip : clips)
925 update_open_clips(
clip,
false);
942bool Timeline::isEqual(
double a,
double b)
944 return fabs(a - b) < 0.000001;
951 if (requested_frame < 1)
954 const bool past_timeline_end = (max_frame > 0 && requested_frame > max_frame);
957 std::shared_ptr<Frame> frame;
958 if (!past_timeline_end)
959 frame = final_cache->
GetFrame(requested_frame);
963 "Timeline::GetFrame (Cached frame found)",
964 "requested_frame", requested_frame);
972 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
975 std::shared_ptr<Frame> frame;
976 if (!past_timeline_end)
977 frame = final_cache->
GetFrame(requested_frame);
981 "Timeline::GetFrame (Cached frame found on 2nd check)",
982 "requested_frame", requested_frame);
989 std::vector<Clip *> nearby_clips;
990 nearby_clips = find_intersecting_clips(requested_frame, 1,
true);
994 "Timeline::GetFrame (processing frame)",
995 "requested_frame", requested_frame,
996 "omp_get_thread_num()", omp_get_thread_num());
1003 new_frame->AddAudioSilence(samples_in_frame);
1009 "Timeline::GetFrame (Adding solid color)",
1010 "requested_frame", requested_frame,
1022 "Timeline::GetFrame (Loop through clips)",
1023 "requested_frame", requested_frame,
1024 "clips.size()", clips.size(),
1025 "nearby_clips.size()", nearby_clips.size());
1032 int64_t start_frame;
1033 int64_t frame_number;
1036 std::vector<ClipInfo> clip_infos;
1037 clip_infos.reserve(nearby_clips.size());
1040 for (
auto clip : nearby_clips) {
1041 int64_t start_pos =
static_cast<int64_t
>(std::llround(
clip->
Position() * fpsD)) + 1;
1043 bool intersects = (start_pos <= requested_frame && end_pos >= requested_frame);
1044 int64_t start_frame =
static_cast<int64_t
>(std::llround(
clip->
Start() * fpsD)) + 1;
1045 int64_t frame_number = requested_frame - start_pos + start_frame;
1046 clip_infos.push_back({
clip, start_pos, end_pos, start_frame, frame_number, intersects});
1050 std::unordered_map<int, int64_t> top_start_for_layer;
1051 std::unordered_map<int, Clip*> top_clip_for_layer;
1052 for (
const auto& ci : clip_infos) {
1053 if (!ci.intersects)
continue;
1054 const int layer = ci.clip->Layer();
1055 auto it = top_start_for_layer.find(layer);
1056 if (it == top_start_for_layer.end() || ci.start_pos > it->second) {
1057 top_start_for_layer[layer] = ci.start_pos;
1058 top_clip_for_layer[layer] = ci.clip;
1063 float max_volume_sum = 0.0f;
1064 for (
const auto& ci : clip_infos) {
1065 if (!ci.intersects)
continue;
1066 if (ci.clip->Reader() && ci.clip->Reader()->info.has_audio &&
1067 ci.clip->has_audio.GetInt(ci.frame_number) != 0) {
1068 max_volume_sum +=
static_cast<float>(ci.clip->volume.GetValue(ci.frame_number));
1073 const int safe_remaining = safe_edit_frames_remaining.load(std::memory_order_relaxed);
1074 const bool force_safe_composite = (safe_remaining > 0);
1075 if (force_safe_composite) {
1076 safe_edit_frames_remaining.fetch_sub(1, std::memory_order_relaxed);
1078 for (
const auto& ci : clip_infos) {
1081 "Timeline::GetFrame (Does clip intersect)",
1082 "requested_frame", requested_frame,
1083 "clip->Position()", ci.clip->Position(),
1084 "clip->Duration()", ci.clip->Duration(),
1085 "does_clip_intersect", ci.intersects);
1088 if (ci.intersects) {
1090 bool is_top_clip =
false;
1091 const int layer = ci.clip->Layer();
1092 auto top_it = top_clip_for_layer.find(layer);
1093 if (top_it != top_clip_for_layer.end())
1094 is_top_clip = (top_it->second == ci.clip);
1097 int64_t clip_frame_number = ci.frame_number;
1101 "Timeline::GetFrame (Calculate clip's frame #)",
1102 "clip->Position()", ci.clip->Position(),
1103 "clip->Start()", ci.clip->Start(),
1105 "clip_frame_number", clip_frame_number);
1108 add_layer(new_frame, ci.clip, clip_frame_number, is_top_clip, force_safe_composite, max_volume_sum);
1113 "Timeline::GetFrame (clip does not intersect)",
1114 "requested_frame", requested_frame,
1115 "does_clip_intersect", ci.intersects);
1122 "Timeline::GetFrame (Add frame to cache)",
1123 "requested_frame", requested_frame,
1128 new_frame->SetFrameNumber(requested_frame);
1131 if (!past_timeline_end)
1132 final_cache->
Add(new_frame);
1141std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame,
int number_of_frames,
bool include)
1144 std::vector<Clip*> matching_clips;
1147 const int64_t min_requested_frame = requested_frame;
1148 const int64_t max_requested_frame = requested_frame + (number_of_frames - 1);
1151 matching_clips.reserve(clips.size());
1153 for (
auto clip : clips)
1156 int64_t clip_start_position =
static_cast<int64_t
>(std::llround(
clip->
Position() * fpsD)) + 1;
1159 bool does_clip_intersect =
1160 (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
1161 (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);
1165 "Timeline::find_intersecting_clips (Is clip near or intersecting)",
1166 "requested_frame", requested_frame,
1167 "min_requested_frame", min_requested_frame,
1168 "max_requested_frame", max_requested_frame,
1170 "does_clip_intersect", does_clip_intersect);
1173 update_open_clips(
clip, does_clip_intersect);
1176 if (does_clip_intersect && include)
1178 matching_clips.push_back(
clip);
1180 else if (!does_clip_intersect && !include)
1182 matching_clips.push_back(
clip);
1187 return matching_clips;
1193 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1196 if (managed_cache && final_cache) {
1199 managed_cache =
false;
1203 final_cache = new_cache;
1218 root[
"type"] =
"Timeline";
1223 root[
"path"] = path;
1226 root[
"clips"] = Json::Value(Json::arrayValue);
1229 for (
const auto existing_clip : clips)
1231 root[
"clips"].append(existing_clip->JsonValue());
1235 root[
"effects"] = Json::Value(Json::arrayValue);
1238 for (
const auto existing_effect: effects)
1240 root[
"effects"].append(existing_effect->JsonValue());
1251 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1260 catch (
const std::exception& e)
1263 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
1271 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1274 bool was_open = is_open;
1281 if (!root[
"path"].isNull())
1282 path = root[
"path"].asString();
1284 if (!root[
"clips"].isNull()) {
1289 for (
const Json::Value existing_clip : root[
"clips"]) {
1291 if (existing_clip.isNull()) {
1299 allocated_clips.insert(c);
1316 if (!root[
"effects"].isNull()) {
1321 for (
const Json::Value existing_effect :root[
"effects"]) {
1323 if (existing_effect.isNull()) {
1330 if (!existing_effect[
"type"].isNull()) {
1332 if ( (e =
EffectInfo().CreateEffect(existing_effect[
"type"].asString())) ) {
1335 allocated_effects.insert(e);
1347 if (!root[
"duration"].isNull()) {
1373 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1380 for (
const Json::Value change : root) {
1381 std::string change_key = change[
"key"][(uint)0].asString();
1384 if (change_key ==
"clips")
1386 apply_json_to_clips(change);
1388 else if (change_key ==
"effects")
1390 apply_json_to_effects(change);
1394 apply_json_to_timeline(change);
1399 if (!root.empty()) {
1401 safe_edit_frames_remaining.store(240, std::memory_order_relaxed);
1405 catch (
const std::exception& e)
1408 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
1412void Timeline::BumpCacheEpoch() {
1413 cache_epoch.fetch_add(1, std::memory_order_relaxed);
1417void Timeline::apply_json_to_clips(Json::Value change) {
1420 std::string change_type = change[
"type"].asString();
1421 std::string clip_id =
"";
1422 Clip *existing_clip = NULL;
1425 for (
auto key_part : change[
"key"]) {
1427 if (key_part.isObject()) {
1429 if (!key_part[
"id"].isNull()) {
1431 clip_id = key_part[
"id"].asString();
1434 for (
auto c : clips)
1436 if (c->Id() == clip_id) {
1448 if (existing_clip && change[
"key"].size() == 4 && change[
"key"][2] ==
"effects")
1451 Json::Value key_part = change[
"key"][3];
1453 if (key_part.isObject()) {
1455 if (!key_part[
"id"].isNull())
1458 std::string effect_id = key_part[
"id"].asString();
1461 std::list<EffectBase*> effect_list = existing_clip->
Effects();
1462 for (
auto e : effect_list)
1464 if (e->Id() == effect_id) {
1466 apply_json_to_effects(change, e);
1476 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1486 if (change_type ==
"insert") {
1492 allocated_clips.insert(
clip);
1503 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1505 }
else if (change_type ==
"update") {
1508 if (existing_clip) {
1521 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1522 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1525 if (auto_map_clips) {
1526 apply_mapper_to_clip(existing_clip);
1530 }
else if (change_type ==
"delete") {
1533 if (existing_clip) {
1540 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1550void Timeline::apply_json_to_effects(Json::Value change) {
1553 std::string change_type = change[
"type"].asString();
1557 for (
auto key_part : change[
"key"]) {
1559 if (key_part.isObject()) {
1561 if (!key_part[
"id"].isNull())
1564 std::string effect_id = key_part[
"id"].asString();
1567 for (
auto e : effects)
1569 if (e->Id() == effect_id) {
1570 existing_effect = e;
1580 if (existing_effect || change_type ==
"insert") {
1582 apply_json_to_effects(change, existing_effect);
1587void Timeline::apply_json_to_effects(Json::Value change,
EffectBase* existing_effect) {
1590 std::string change_type = change[
"type"].asString();
1593 if (!change[
"value"].isArray() && !change[
"value"][
"position"].isNull()) {
1594 int64_t new_starting_frame = (change[
"value"][
"position"].asDouble() *
info.
fps.
ToDouble()) + 1;
1595 int64_t new_ending_frame = ((change[
"value"][
"position"].asDouble() + change[
"value"][
"end"].asDouble() - change[
"value"][
"start"].asDouble()) *
info.
fps.
ToDouble()) + 1;
1596 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1600 if (change_type ==
"insert") {
1603 std::string effect_type = change[
"value"][
"type"].asString();
1612 allocated_effects.insert(e);
1621 }
else if (change_type ==
"update") {
1624 if (existing_effect) {
1629 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1635 }
else if (change_type ==
"delete") {
1638 if (existing_effect) {
1643 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1656void Timeline::apply_json_to_timeline(Json::Value change) {
1657 bool cache_dirty =
true;
1660 std::string change_type = change[
"type"].asString();
1661 std::string root_key = change[
"key"][(uint)0].asString();
1662 std::string sub_key =
"";
1663 if (change[
"key"].size() >= 2)
1664 sub_key = change[
"key"][(uint)1].asString();
1667 if (change_type ==
"insert" || change_type ==
"update") {
1671 if (root_key ==
"color")
1674 else if (root_key ==
"viewport_scale")
1677 else if (root_key ==
"viewport_x")
1680 else if (root_key ==
"viewport_y")
1683 else if (root_key ==
"duration") {
1689 cache_dirty =
false;
1691 else if (root_key ==
"width") {
1696 else if (root_key ==
"height") {
1701 else if (root_key ==
"fps" && sub_key ==
"" && change[
"value"].isObject()) {
1703 if (!change[
"value"][
"num"].isNull())
1704 info.
fps.
num = change[
"value"][
"num"].asInt();
1705 if (!change[
"value"][
"den"].isNull())
1706 info.
fps.
den = change[
"value"][
"den"].asInt();
1708 else if (root_key ==
"fps" && sub_key ==
"num")
1711 else if (root_key ==
"fps" && sub_key ==
"den")
1714 else if (root_key ==
"display_ratio" && sub_key ==
"" && change[
"value"].isObject()) {
1716 if (!change[
"value"][
"num"].isNull())
1718 if (!change[
"value"][
"den"].isNull())
1721 else if (root_key ==
"display_ratio" && sub_key ==
"num")
1724 else if (root_key ==
"display_ratio" && sub_key ==
"den")
1727 else if (root_key ==
"pixel_ratio" && sub_key ==
"" && change[
"value"].isObject()) {
1729 if (!change[
"value"][
"num"].isNull())
1731 if (!change[
"value"][
"den"].isNull())
1734 else if (root_key ==
"pixel_ratio" && sub_key ==
"num")
1737 else if (root_key ==
"pixel_ratio" && sub_key ==
"den")
1741 else if (root_key ==
"sample_rate")
1744 else if (root_key ==
"channels")
1747 else if (root_key ==
"channel_layout")
1752 throw InvalidJSONKey(
"JSON change key is invalid", change.toStyledString());
1755 }
else if (change[
"type"].asString() ==
"delete") {
1759 if (root_key ==
"color") {
1765 else if (root_key ==
"viewport_scale")
1767 else if (root_key ==
"viewport_x")
1769 else if (root_key ==
"viewport_y")
1773 throw InvalidJSONKey(
"JSON change key is invalid", change.toStyledString());
1786 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
1790 final_cache->
Clear();
1795 for (
const auto clip : clips) {
1797 if (
clip->Reader()) {
1798 if (
auto rc =
clip->Reader()->GetCache())
1802 if (deep &&
clip->Reader()->Name() ==
"FrameMapper") {
1804 if (nested_reader->
Reader()) {
1812 if (
auto cc =
clip->GetCache())
1831 display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
1839std::pair<float, float> Timeline::ResolveTransitionAudioGains(
Clip* source_clip, int64_t timeline_frame_number,
bool is_top_clip)
const
1841 constexpr double half_pi = 1.57079632679489661923;
1844 return {1.0f, 1.0f};
1847 Mask* active_mask =
nullptr;
1848 int64_t effect_start_position = 0;
1849 int64_t effect_end_position = 0;
1852 for (
auto effect : effects) {
1853 if (effect->Layer() != source_clip->
Layer())
1856 auto* mask =
dynamic_cast<Mask*
>(effect);
1857 if (!mask || !mask->fade_audio_hint)
1860 const int64_t start_pos =
static_cast<int64_t
>(std::llround(effect->Position() * fpsD)) + 1;
1861 const int64_t end_pos =
static_cast<int64_t
>(std::llround((effect->Position() + effect->Duration()) * fpsD));
1862 if (start_pos > timeline_frame_number || end_pos < timeline_frame_number)
1866 return {1.0f, 1.0f};
1869 effect_start_position = start_pos;
1870 effect_end_position = end_pos;
1874 return {1.0f, 1.0f};
1876 struct AudibleClipInfo {
1882 std::vector<AudibleClipInfo> audible_clips;
1883 audible_clips.reserve(2);
1886 for (
auto clip : clips) {
1889 if (!
clip->Reader() || !
clip->Reader()->info.has_audio)
1892 const int64_t clip_start_pos =
static_cast<int64_t
>(std::llround(
clip->
Position() * fpsD)) + 1;
1894 if (clip_start_pos > timeline_frame_number || clip_end_pos < timeline_frame_number)
1897 const int64_t clip_start_frame =
static_cast<int64_t
>(std::llround(
clip->
Start() * fpsD)) + 1;
1898 const int64_t clip_frame_number = timeline_frame_number - clip_start_pos + clip_start_frame;
1899 if (
clip->has_audio.GetInt(clip_frame_number) == 0)
1902 audible_clips.push_back({
clip, clip_start_pos, clip_end_pos});
1903 if (audible_clips.size() > 2)
1904 return {1.0f, 1.0f};
1907 if (audible_clips.empty())
1908 return {1.0f, 1.0f};
1911 const auto source_it = std::find_if(
1912 audible_clips.begin(),
1913 audible_clips.end(),
1914 [source_clip](
const AudibleClipInfo&
info) {
1915 return info.clip == source_clip;
1917 if (source_it == audible_clips.end())
1918 return {1.0f, 1.0f};
1921 if (audible_clips.size() == 2) {
1922 auto top_it = std::max_element(
1923 audible_clips.begin(),
1924 audible_clips.end(),
1925 [](
const AudibleClipInfo& lhs,
const AudibleClipInfo& rhs) {
1926 if (lhs.start_pos != rhs.start_pos)
1927 return lhs.start_pos < rhs.start_pos;
1928 return std::less<Clip*>()(lhs.clip, rhs.clip);
1930 if ((is_top_clip && source_clip != top_it->clip) || (!is_top_clip && source_clip == top_it->clip))
1931 return {1.0f, 1.0f};
1935 const int64_t left_distance = std::llabs(effect_start_position - source_it->start_pos);
1936 const int64_t right_distance = std::llabs(effect_end_position - source_it->end_pos);
1937 const bool clip_fades_in = left_distance <= right_distance;
1940 const auto compute_gain = [&](int64_t frame_number) ->
float {
1941 if (effect_end_position <= effect_start_position)
1944 const double span =
static_cast<double>(effect_end_position - effect_start_position);
1945 double t =
static_cast<double>(frame_number - effect_start_position) / span;
1951 return static_cast<float>(clip_fades_in ? std::sin(t * half_pi) : std::cos(t * half_pi));
1954 return {compute_gain(timeline_frame_number - 1), compute_gain(timeline_frame_number)};
Header file for CacheBase class.
Header file for CacheDisk class.
Header file for CacheMemory class.
Header file for CrashHandler class.
Header file for all Exception classes.
Header file for the FrameMapper class.
Header file for Mask class.
#define OPEN_MP_NUM_PROCESSORS
Header file for Timeline class.
All cache managers in libopenshot are based on this CacheBase class.
virtual void Clear()=0
Clear the cache of all frames.
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
virtual void Add(std::shared_ptr< openshot::Frame > frame)=0
Add a Frame to the cache.
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
This class is a memory-based cache manager for Frame objects.
void Clear()
Clear the cache of all frames.
float Start() const
Get start position (in seconds) of clip (trim start of video)
float Duration() const
Get the length of this clip (in seconds)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
This method is required for all derived classes of ClipBase, and returns a new openshot::Frame object...
std::string Id() const
Get the Id of this clip object.
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
float Position() const
Get position on timeline (in seconds)
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
This class represents a clip (used to arrange readers on the timeline)
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::CacheMemory * GetCache() override
Get the cache object (always returns NULL for this reader)
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any)
std::list< openshot::EffectBase * > Effects()
Return the list of effects on the timeline.
openshot::Keyframe volume
Curve representing the volume (0 to 1)
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
void Reader(openshot::ReaderBase *new_reader)
Set the current reader.
This class represents a color (used on the timeline and clips)
std::string GetColorHex(int64_t frame_number)
Get the HEX value of a color at a specific frame.
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
openshot::Keyframe red
Curve representing the red value (0 - 255)
openshot::Keyframe green
Curve representing the green value (0 - 255)
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Json::Value JsonValue() const
Generate Json::Value for this object.
static CrashHandler * Instance()
This abstract class is the base class, used by all effects in libopenshot.
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
This class returns a listing of all effects supported by libopenshot.
EffectBase * CreateEffect(std::string effect_type)
Create an instance of an effect (factory style)
This class represents a fraction.
int num
Numerator for the fraction.
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
void Reduce()
Reduce this fraction (i.e. 640/480 = 4/3)
Fraction Reciprocal() const
Return the reciprocal as a Fraction.
int den
Denominator for the fraction.
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
ReaderBase * Reader()
Get the current reader.
void Close() override
Close the openshot::FrameMapper and internal reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Exception for files that can not be found or opened.
Exception for missing JSON Change key.
Exception for invalid JSON.
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
double GetValue(int64_t index) const
Get the value at a specific index.
Json::Value JsonValue() const
Generate Json::Value for this object.
int64_t GetCount() const
Get the number of points (i.e. # of points)
This class uses the image libraries to apply alpha (or transparency) masks to any frame....
Exception for frames that are out of bounds.
This abstract class is the base class, used by all readers in libopenshot.
openshot::ReaderInfo info
Information about the current media file.
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
virtual openshot::CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
openshot::ClipBase * clip
Pointer to the parent clip instance (if any)
Exception when a reader is closed, and a frame is requested.
This class contains settings used by libopenshot (and can be safely toggled at any point)
std::string PATH_OPENSHOT_INSTALL
static Settings * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
int preview_height
Optional preview height of timeline image. If your preview window is smaller than the timeline,...
int preview_width
Optional preview width of timeline image. If your preview window is smaller than the timeline,...
This class represents a timeline.
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
Json::Value JsonValue() const override
Generate Json::Value for this object.
openshot::Keyframe viewport_scale
Curve representing the scale of the viewport (0 to 100)
void ApplyJsonDiff(std::string value)
Apply a special formatted JSON object, which represents a change to the timeline (add,...
openshot::EffectBase * GetClipEffect(const std::string &id)
Look up a clip effect by ID.
void AddClip(openshot::Clip *clip)
Add an openshot::Clip to the timeline.
std::list< openshot::EffectBase * > ClipEffects() const
Return the list of effects on all clips.
std::list< std::string > GetTrackedObjectsIds() const
Return the ID's of the tracked objects as a list of strings.
std::string Json() const override
Generate JSON string of this object.
int64_t GetMaxFrame()
Look up the end frame number of the latest element on the timeline.
double GetMinTime()
Look up the position/start time of the first timeline element.
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame) override
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
openshot::Color color
Background color of timeline canvas.
std::string GetTrackedObjectValues(std::string id, int64_t frame_number) const
Return the trackedObject's properties as a JSON string.
Timeline(int width, int height, openshot::Fraction fps, int sample_rate, int channels, openshot::ChannelLayout channel_layout)
Constructor for the timeline (which configures the default frame properties)
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
int64_t GetMinFrame()
Look up the start frame number of the first element on the timeline (first frame is 1)
openshot::EffectBase * GetEffect(const std::string &id)
Look up a timeline effect by ID.
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
void ClearAllCache(bool deep=false)
void AddEffect(openshot::EffectBase *effect)
Add an effect to the timeline.
void SetCache(openshot::CacheBase *new_cache)
void Clear()
Clear all clips, effects, and frame mappers from timeline (and free memory)
openshot::Keyframe viewport_x
Curve representing the x coordinate for the viewport.
void RemoveClip(openshot::Clip *clip)
Remove an openshot::Clip from the timeline.
void SetMaxSize(int width, int height)
double GetMaxTime()
Look up the end time of the latest timeline element.
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the timeline.
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
void Open() override
Open the reader (and start consuming resources)
void SetJson(const std::string value) override
Load JSON string into this object.
openshot::Keyframe viewport_y
Curve representing the y coordinate for the viewport.
void Close() override
Close the timeline reader (and any resources it was consuming)
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
This namespace is the default namespace for all code in the openshot library.
@ PULLDOWN_NONE
Do not apply pull-down techniques, just repeat or skip entire frames.
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround,...
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
const Json::Value stringToJson(const std::string value)
This struct holds the information of a bounding-box.
float cy
y-coordinate of the bounding box center
float height
bounding box height
float cx
x-coordinate of the bounding box center
float width
bounding box width
float angle
bounding box rotation angle [degrees]
Like CompareClipEndFrames, but for effects.
This struct contains info about a media file, such as height, width, frames per second,...
float duration
Length of time (in seconds)
int width
The width of the video (in pixels)
int channels
The number of audio channels used in the audio stream.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
openshot::Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3)
int height
The height of the video (in pixels)
int64_t video_length
The number of frames in the video stream.
std::string acodec
The name of the audio codec used to encode / decode the video stream.
std::string vcodec
The name of the video codec used to encode / decode the video stream.
openshot::Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square)
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
bool has_video
Determines if this file has a video stream.
bool has_audio
Determines if this file has an audio stream.
openshot::Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
This struct contains info about the current Timeline clip instance.
bool force_safe_composite
If true, avoid mutating cached clip images during composition.
bool is_before_clip_keyframes
Is this before clip keyframes are applied.
bool is_top_clip
Is clip on top (if overlapping another clip)