// One entry of the composite-mode choice list: pairs a display label with
// its CompositeType enum value (consumed below when building the
// "composite" dropdown property in PropertiesJSON).
41 struct CompositeChoice {
const char* name;   // human-readable label for this composite mode
CompositeType value; };  // the openshot::CompositeType this choice selects
42 const CompositeChoice composite_choices[] = {
// Number of entries in composite_choices, via the classic
// sizeof(array)/sizeof(element) idiom (valid only because composite_choices
// is a true array in this translation unit, not a pointer).
65 const int composite_choices_count =
sizeof(composite_choices)/
sizeof(CompositeChoice);
102 wave_color =
Color((
unsigned char)0, (
unsigned char)123, (
unsigned char)255, (
unsigned char)255);
127 parentTrackedObject =
nullptr;
128 parentClipObject = NULL;
153 const auto rotate_meta = reader->
info.
metadata.find(
"rotate");
160 float rotate_angle = 0.0f;
162 rotate_angle = strtof(rotate_meta->second.c_str(),
nullptr);
163 }
catch (
const std::exception& e) {
170 auto has_default_scale = [](
const Keyframe& kf) {
171 return kf.GetCount() == 1 && fabs(kf.GetPoint(0).co.Y - 1.0) < 0.00001;
177 if (fabs(rotate_angle) < 0.0001f)
180 float w =
static_cast<float>(reader->
info.
width);
181 float h =
static_cast<float>(reader->
info.
height);
182 if (w <= 0.0f || h <= 0.0f)
185 float rad = rotate_angle *
static_cast<float>(M_PI) / 180.0f;
187 float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
188 float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));
189 if (new_width <= 0.0f || new_height <= 0.0f)
192 float uniform_scale = std::min(w / new_width, h / new_height);
199Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
206Clip::Clip(
ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
225Clip::Clip(std::string
path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
231 std::string ext = get_file_extension(
path);
232 std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
235 if (ext==
"avi" || ext==
"mov" || ext==
"mkv" || ext==
"mpg" || ext==
"mpeg" || ext==
"mp3" || ext==
"mp4" || ext==
"mts" ||
236 ext==
"ogg" || ext==
"wav" || ext==
"wmv" || ext==
"webm" || ext==
"vob" || ext==
"gif" ||
path.find(
"%") != std::string::npos)
278 allocated_reader = reader;
288 if (allocated_reader) {
289 delete allocated_reader;
290 allocated_reader = NULL;
310 if (parentTimeline) {
312 std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->
GetTrackedObject(object_id);
313 Clip* clipObject = parentTimeline->
GetClip(object_id);
318 parentClipObject = NULL;
320 else if (clipObject) {
322 parentTrackedObject =
nullptr;
329 parentTrackedObject = trackedObject;
334 parentClipObject = clipObject;
342 bool is_same_reader =
false;
343 if (new_reader && allocated_reader) {
344 if (new_reader->
Name() ==
"FrameMapper") {
347 if (allocated_reader == clip_mapped_reader->
Reader()) {
348 is_same_reader =
true;
353 if (allocated_reader && !is_same_reader) {
355 allocated_reader->
Close();
356 delete allocated_reader;
358 allocated_reader = NULL;
380 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
401 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
407 if (is_open && reader) {
432 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
458 return GetFrame(NULL, clip_frame_number, NULL);
463std::shared_ptr<Frame>
Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
466 return GetFrame(background_frame, clip_frame_number, NULL);
474 throw ReaderClosed(
"The Clip is closed. Call Open() before calling this method.");
479 std::shared_ptr<Frame> frame = NULL;
482 frame = final_cache.
GetFrame(clip_frame_number);
485 frame = GetOrCreateFrame(clip_frame_number);
488 int64_t timeline_frame_number = clip_frame_number;
489 QSize timeline_size(frame->GetWidth(), frame->GetHeight());
490 if (background_frame) {
492 timeline_frame_number = background_frame->number;
493 timeline_size.setWidth(background_frame->GetWidth());
494 timeline_size.setHeight(background_frame->GetHeight());
498 apply_timemapping(frame);
501 apply_waveform(frame, timeline_size);
504 apply_effects(frame, timeline_frame_number, options,
true);
507 apply_keyframes(frame, timeline_size);
510 apply_effects(frame, timeline_frame_number, options,
false);
513 final_cache.
Add(frame);
516 const bool has_external_background = (background_frame !=
nullptr);
520 if (!background_frame) {
522 background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
523 "#00000000", frame->GetAudioSamplesCount(),
524 frame->GetAudioChannelsCount());
528 apply_background(frame, background_frame,
false);
533 apply_background(frame, background_frame,
true);
538 if (!has_external_background) {
543 auto output = std::make_shared<Frame>(*frame.get());
544 apply_background(output, background_frame,
true);
549 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
556 for (
const auto& effect : effects) {
557 if (effect->Id() ==
id) {
566 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
570 return parentClipObject;
575 if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
579 return parentTrackedObject;
583std::string Clip::get_file_extension(std::string
path)
586 const auto dot_pos =
path.find_last_of(
'.');
587 if (dot_pos == std::string::npos || dot_pos + 1 >=
path.size()) {
588 return std::string();
591 return path.substr(dot_pos + 1);
595void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
600 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
605 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
607 int64_t clip_frame_number = frame->number;
608 int64_t new_frame_number = adjust_frame_number_minimum(
time.
GetLong(clip_frame_number));
625 int source_sample_count = round(target_sample_count * fabs(delta));
631 location.
frame = new_frame_number;
646 init_samples.clear();
647 resampler->
SetBuffer(&init_samples, 1.0);
655 if (source_sample_count <= 0) {
657 frame->AddAudioSilence(target_sample_count);
663 source_samples->clear();
666 int remaining_samples = source_sample_count;
668 while (remaining_samples > 0) {
669 std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.
frame,
false);
670 int frame_sample_count = source_frame->GetAudioSamplesCount() - location.
sample_start;
673 if (
auto *fm =
dynamic_cast<FrameMapper*
>(reader)) {
674 fm->SetDirectionHint(is_increasing);
676 source_frame->SetAudioDirection(is_increasing);
678 if (frame_sample_count == 0) {
688 if (remaining_samples - frame_sample_count >= 0) {
690 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
691 source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.
sample_start, frame_sample_count, 1.0f);
699 remaining_samples -= frame_sample_count;
700 source_pos += frame_sample_count;
704 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
705 source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.
sample_start, remaining_samples, 1.0f);
708 remaining_samples = 0;
709 source_pos += remaining_samples;
716 frame->AddAudioSilence(target_sample_count);
718 if (source_sample_count != target_sample_count) {
720 double resample_ratio = double(source_sample_count) / double(target_sample_count);
721 resampler->
SetBuffer(source_samples, resample_ratio);
729 frame->AddAudio(
true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
735 frame->AddAudio(
true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
740 delete source_samples;
// Clamp a frame number to the minimum valid (1-based) frame.
748int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
// Guard against requests before the first frame; the then/else branches are
// not visible in this excerpt — presumably returns 1 in that case and
// frame_number otherwise. TODO confirm against the full source.
751 if (frame_number < 1)
759std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number,
bool enable_time)
763 int64_t clip_frame_number = adjust_frame_number_minimum(number);
764 bool is_increasing =
true;
769 const int64_t time_frame_number = adjust_frame_number_minimum(
time.
GetLong(clip_frame_number));
770 if (
auto *fm =
dynamic_cast<FrameMapper*
>(reader)) {
772 fm->SetDirectionHint(is_increasing);
774 clip_frame_number = time_frame_number;
779 "Clip::GetOrCreateFrame (from reader)",
780 "number", number,
"clip_frame_number", clip_frame_number);
783 auto reader_frame = reader->
GetFrame(clip_frame_number);
786 reader_frame->number = number;
787 reader_frame->SetAudioDirection(is_increasing);
793 auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
796 reader_copy->AddColor(QColor(Qt::transparent));
800 reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
816 "Clip::GetOrCreateFrame (create blank)",
818 "estimated_samples_in_frame", estimated_samples_in_frame);
821 auto new_frame = std::make_shared<Frame>(
823 "#000000", estimated_samples_in_frame, reader->
info.
channels);
826 new_frame->AddAudioSilence(estimated_samples_in_frame);
842 root[
"id"] =
add_property_json(
"ID", 0.0,
"string",
Id(), NULL, -1, -1,
true, requested_frame);
843 root[
"position"] =
add_property_json(
"Position",
Position(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
845 root[
"start"] =
add_property_json(
"Start",
Start(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
846 root[
"end"] =
add_property_json(
"End",
End(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
847 root[
"duration"] =
add_property_json(
"Duration",
Duration(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
true, requested_frame);
852 root[
"composite"] =
add_property_json(
"Composite",
composite,
"int",
"", NULL, 0, composite_choices_count - 1,
false, requested_frame);
853 root[
"waveform"] =
add_property_json(
"Waveform", waveform,
"int",
"", NULL, 0, 1,
false, requested_frame);
854 root[
"parentObjectId"] =
add_property_json(
"Parent", 0.0,
"string", parentObjectId, NULL, -1, -1,
false, requested_frame);
885 for (
int i = 0; i < composite_choices_count; ++i)
893 if (parentClipObject)
898 double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
901 float parentObject_location_x = parentClipObject->
location_x.
GetValue(timeline_frame_number);
902 float parentObject_location_y = parentClipObject->
location_y.
GetValue(timeline_frame_number);
903 float parentObject_scale_x = parentClipObject->
scale_x.
GetValue(timeline_frame_number);
904 float parentObject_scale_y = parentClipObject->
scale_y.
GetValue(timeline_frame_number);
905 float parentObject_shear_x = parentClipObject->
shear_x.
GetValue(timeline_frame_number);
906 float parentObject_shear_y = parentClipObject->
shear_y.
GetValue(timeline_frame_number);
907 float parentObject_rotation = parentClipObject->
rotation.
GetValue(timeline_frame_number);
910 root[
"location_x"] =
add_property_json(
"Location X", parentObject_location_x,
"float",
"", &
location_x, -1.0, 1.0,
false, requested_frame);
911 root[
"location_y"] =
add_property_json(
"Location Y", parentObject_location_y,
"float",
"", &
location_y, -1.0, 1.0,
false, requested_frame);
912 root[
"scale_x"] =
add_property_json(
"Scale X", parentObject_scale_x,
"float",
"", &
scale_x, 0.0, 1.0,
false, requested_frame);
913 root[
"scale_y"] =
add_property_json(
"Scale Y", parentObject_scale_y,
"float",
"", &
scale_y, 0.0, 1.0,
false, requested_frame);
914 root[
"rotation"] =
add_property_json(
"Rotation", parentObject_rotation,
"float",
"", &
rotation, -360, 360,
false, requested_frame);
915 root[
"shear_x"] =
add_property_json(
"Shear X", parentObject_shear_x,
"float",
"", &
shear_x, -1.0, 1.0,
false, requested_frame);
916 root[
"shear_y"] =
add_property_json(
"Shear Y", parentObject_shear_y,
"float",
"", &
shear_y, -1.0, 1.0,
false, requested_frame);
956 return root.toStyledString();
964 root[
"parentObjectId"] = parentObjectId;
966 root[
"scale"] =
scale;
971 root[
"waveform"] = waveform;
999 root[
"effects"] = Json::Value(Json::arrayValue);
1002 for (
auto existing_effect : effects)
1004 root[
"effects"].append(existing_effect->JsonValue());
1010 root[
"reader"] = Json::Value(Json::objectValue);
1026 catch (
const std::exception& e)
1029 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
1040 if (!root[
"parentObjectId"].isNull()){
1041 parentObjectId = root[
"parentObjectId"].asString();
1042 if (parentObjectId.size() > 0 && parentObjectId !=
""){
1045 parentTrackedObject =
nullptr;
1046 parentClipObject = NULL;
1049 if (!root[
"gravity"].isNull())
1051 if (!root[
"scale"].isNull())
1053 if (!root[
"anchor"].isNull())
1055 if (!root[
"display"].isNull())
1057 if (!root[
"mixing"].isNull())
1059 if (!root[
"composite"].isNull())
1061 if (!root[
"waveform"].isNull())
1062 waveform = root[
"waveform"].asBool();
1063 if (!root[
"scale_x"].isNull())
1065 if (!root[
"scale_y"].isNull())
1067 if (!root[
"location_x"].isNull())
1069 if (!root[
"location_y"].isNull())
1071 if (!root[
"alpha"].isNull())
1073 if (!root[
"rotation"].isNull())
1075 if (!root[
"time"].isNull())
1077 if (!root[
"volume"].isNull())
1079 if (!root[
"wave_color"].isNull())
1081 if (!root[
"shear_x"].isNull())
1083 if (!root[
"shear_y"].isNull())
1085 if (!root[
"origin_x"].isNull())
1087 if (!root[
"origin_y"].isNull())
1089 if (!root[
"channel_filter"].isNull())
1091 if (!root[
"channel_mapping"].isNull())
1093 if (!root[
"has_audio"].isNull())
1095 if (!root[
"has_video"].isNull())
1097 if (!root[
"perspective_c1_x"].isNull())
1099 if (!root[
"perspective_c1_y"].isNull())
1101 if (!root[
"perspective_c2_x"].isNull())
1103 if (!root[
"perspective_c2_y"].isNull())
1105 if (!root[
"perspective_c3_x"].isNull())
1107 if (!root[
"perspective_c3_y"].isNull())
1109 if (!root[
"perspective_c4_x"].isNull())
1111 if (!root[
"perspective_c4_y"].isNull())
1113 if (!root[
"effects"].isNull()) {
1119 for (
const auto existing_effect : root[
"effects"]) {
1121 if (existing_effect.isNull()) {
1127 if (!existing_effect[
"type"].isNull()) {
1130 if ( (e =
EffectInfo().CreateEffect(existing_effect[
"type"].asString()))) {
1141 if (!root[
"reader"].isNull())
1143 if (!root[
"reader"][
"type"].isNull())
1146 bool already_open =
false;
1150 already_open = reader->
IsOpen();
1157 std::string type = root[
"reader"][
"type"].asString();
1159 if (type ==
"FFmpegReader") {
1165 }
else if (type ==
"QtImageReader") {
1171#ifdef USE_IMAGEMAGICK
1172 }
else if (type ==
"ImageReader") {
1175 reader =
new ImageReader(root[
"reader"][
"path"].asString(),
false);
1178 }
else if (type ==
"TextReader") {
1185 }
else if (type ==
"ChunkReader") {
1191 }
else if (type ==
"DummyReader") {
1197 }
else if (type ==
"Timeline") {
1207 allocated_reader = reader;
1218 final_cache.
Clear();
1222void Clip::sort_effects()
1235 effects.push_back(effect);
1251 if (parentTimeline){
1259 std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
1262 trackedObjectBBox->ParentClip(
this);
1272 final_cache.
Clear();
1278 effects.remove(effect);
1281 final_cache.
Clear();
1285void Clip::apply_background(std::shared_ptr<openshot::Frame> frame,
1286 std::shared_ptr<openshot::Frame> background_frame,
1287 bool update_frame_image) {
1289 std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
1290 QPainter painter(background_canvas.get());
1293 painter.setCompositionMode(
static_cast<QPainter::CompositionMode
>(
composite));
1294 painter.drawImage(0, 0, *frame->GetImage());
1299 if (update_frame_image)
1300 frame->AddImage(background_canvas);
1304void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number,
TimelineInfoStruct* options,
bool before_keyframes)
1306 for (
auto effect : effects)
1309 if (effect->info.apply_before_clip && before_keyframes) {
1310 effect->ProcessFrame(frame, frame->number);
1311 }
else if (!effect->info.apply_before_clip && !before_keyframes) {
1312 effect->ProcessFrame(frame, frame->number);
1316 if (
timeline != NULL && options != NULL) {
1325bool Clip::isNear(
double a,
double b)
1327 return fabs(a - b) < 0.000001;
1331void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
1333 if (!frame->has_image_data) {
1339 std::shared_ptr<QImage> source_image = frame->GetImage();
1340 std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
1341 timeline_size.height(),
1342 QImage::Format_RGBA8888_Premultiplied);
1343 background_canvas->fill(QColor(Qt::transparent));
1346 QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
1349 QPainter painter(background_canvas.get());
1350 painter.setRenderHint(QPainter::TextAntialiasing,
true);
1351 if (!transform.isIdentity()) {
1352 painter.setRenderHint(QPainter::SmoothPixmapTransform,
true);
1355 painter.setTransform(transform);
1358 painter.setCompositionMode(
static_cast<QPainter::CompositionMode
>(
composite));
1362 if (alpha_value != 1.0f) {
1363 painter.setOpacity(alpha_value);
1364 painter.drawImage(0, 0, *source_image);
1366 painter.setOpacity(1.0);
1368 painter.drawImage(0, 0, *source_image);
1376 std::stringstream frame_number_str;
1383 frame_number_str << frame->number;
1396 painter.setPen(QColor(
"#ffffff"));
1397 painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
1403 frame->AddImage(background_canvas);
1407void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {
1415 std::shared_ptr<QImage> source_image = frame->GetImage();
1419 "frame->number", frame->number,
1421 "width", timeline_size.width(),
1422 "height", timeline_size.height());
1431 source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue,
alpha);
1432 frame->AddImage(source_image);
1436QSize Clip::scale_size(QSize source_size,
ScaleType source_scale,
int target_width,
int target_height) {
1437 switch (source_scale)
1440 source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
1444 source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
1448 source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);;
1457QTransform Clip::get_transform(std::shared_ptr<Frame> frame,
int width,
int height)
1460 std::shared_ptr<QImage> source_image = frame->GetImage();
1463 QSize source_size = scale_size(source_image->size(),
scale, width, height);
1466 float parentObject_location_x = 0.0;
1467 float parentObject_location_y = 0.0;
1468 float parentObject_scale_x = 1.0;
1469 float parentObject_scale_y = 1.0;
1470 float parentObject_shear_x = 0.0;
1471 float parentObject_shear_y = 0.0;
1472 float parentObject_rotation = 0.0;
1478 long parent_frame_number = frame->number + parent_start_offset;
1481 parentObject_location_x = parentClipObject->
location_x.
GetValue(parent_frame_number);
1482 parentObject_location_y = parentClipObject->
location_y.
GetValue(parent_frame_number);
1483 parentObject_scale_x = parentClipObject->
scale_x.
GetValue(parent_frame_number);
1484 parentObject_scale_y = parentClipObject->
scale_y.
GetValue(parent_frame_number);
1485 parentObject_shear_x = parentClipObject->
shear_x.
GetValue(parent_frame_number);
1486 parentObject_shear_y = parentClipObject->
shear_y.
GetValue(parent_frame_number);
1487 parentObject_rotation = parentClipObject->
rotation.
GetValue(parent_frame_number);
1493 Clip* parentClip = (
Clip*) parentTrackedObject->ParentClip();
1498 long parent_frame_number = frame->number + parent_start_offset;
1501 std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
1505 parentClip->
scale, width, height);
1508 int trackedWidth = trackedObjectProperties[
"w"] * trackedObjectProperties[
"sx"] * parent_size.width() *
1510 int trackedHeight = trackedObjectProperties[
"h"] * trackedObjectProperties[
"sy"] * parent_size.height() *
1514 source_size = scale_size(source_size,
scale, trackedWidth, trackedHeight);
1517 parentObject_location_x = parentClip->
location_x.
GetValue(parent_frame_number) + ((trackedObjectProperties[
"cx"] - 0.5) * parentClip->
scale_x.
GetValue(parent_frame_number));
1518 parentObject_location_y = parentClip->
location_y.
GetValue(parent_frame_number) + ((trackedObjectProperties[
"cy"] - 0.5) * parentClip->
scale_y.
GetValue(parent_frame_number));
1519 parentObject_rotation = trackedObjectProperties[
"r"] + parentClip->
rotation.
GetValue(parent_frame_number);
1532 if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
1533 sx*= parentObject_scale_x;
1534 sy*= parentObject_scale_y;
1537 float scaled_source_width = source_size.width() * sx;
1538 float scaled_source_height = source_size.height() * sy;
1546 x = (width - scaled_source_width) / 2.0;
1549 x = width - scaled_source_width;
1552 y = (height - scaled_source_height) / 2.0;
1555 x = (width - scaled_source_width) / 2.0;
1556 y = (height - scaled_source_height) / 2.0;
1559 x = width - scaled_source_width;
1560 y = (height - scaled_source_height) / 2.0;
1563 y = (height - scaled_source_height);
1566 x = (width - scaled_source_width) / 2.0;
1567 y = (height - scaled_source_height);
1570 x = width - scaled_source_width;
1571 y = (height - scaled_source_height);
1577 "Clip::get_transform (Gravity)",
1578 "frame->number", frame->number,
1579 "source_clip->gravity",
gravity,
1580 "scaled_source_width", scaled_source_width,
1581 "scaled_source_height", scaled_source_height);
1583 QTransform transform;
1589 float shear_x_value =
shear_x.
GetValue(frame->number) + parentObject_shear_x;
1590 float shear_y_value =
shear_y.
GetValue(frame->number) + parentObject_shear_y;
1596 "Clip::get_transform (Build QTransform - if needed)",
1597 "frame->number", frame->number,
1600 "sx", sx,
"sy", sy);
1602 if (!isNear(x, 0) || !isNear(y, 0)) {
1604 transform.translate(x, y);
1606 if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
1608 float origin_x_offset = (scaled_source_width * origin_x_value);
1609 float origin_y_offset = (scaled_source_height * origin_y_value);
1610 transform.translate(origin_x_offset, origin_y_offset);
1611 transform.rotate(r);
1612 transform.shear(shear_x_value, shear_y_value);
1613 transform.translate(-origin_x_offset,-origin_y_offset);
1616 float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1617 float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1618 if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
1619 transform.scale(source_width_scale, source_height_scale);
1626int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1643 int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1645 return frame_number;
Header file for AudioResampler class.
Header file for ChunkReader class.
Header file for Clip class.
Header file for DummyReader class.
Header file for all Exception classes.
Header file for FFmpegReader class.
Header file for the FrameMapper class.
Header file for ImageReader class.
Header file for MagickUtilities (IM6/IM7 compatibility overlay)
Header file for QtImageReader class.
Header file for TextReader class.
Header file for Timeline class.
Header file for ZeroMQ-based Logger class.
This class is used to resample audio data for many sequential frames.
void SetBuffer(juce::AudioBuffer< float > *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
juce::AudioBuffer< float > * GetResampledBuffer()
Get the resampled audio buffer.
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
void Add(std::shared_ptr< openshot::Frame > frame)
Add a Frame to the cache.
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)
Get a frame from the cache.
void Clear()
Clear the cache of all frames.
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
float Start() const
Get start position (in seconds) of clip (trim start of video)
float start
The position in seconds to start playing (used to trim the beginning of a clip)
float Duration() const
Get the length of this clip (in seconds)
virtual float End() const
Get end position (in seconds) of clip (trim end of video)
std::string Id() const
Get the Id of this clip object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any)
float Position() const
Get position on timeline (in seconds)
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
std::string id
ID Property for all derived Clip and Effect classes.
float position
The position on the timeline where this clip should start playing.
float end
The position in seconds to end playing (used to trim the ending of a clip)
std::string previous_properties
This string contains the previous JSON properties.
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
This class represents a clip (used to arrange readers on the timeline)
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
void Open() override
Open the internal reader.
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
openshot::FrameDisplayType display
The format to display the frame number (if any)
void init_reader_rotation()
Update default rotation from reader.
Clip()
Default Constructor.
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
std::string Json() const override
Generate JSON string of this object.
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
void init_reader_settings()
Init reader info details.
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Json::Value JsonValue() const override
Generate Json::Value for this object.
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any)
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
openshot::Clip * GetParentClip()
Return the associated ParentClip (if any)
bool Waveform()
Get the waveform property of this clip.
openshot::CompositeType composite
How this clip is composited onto lower layers.
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
AudioLocation previous_location
Previous time-mapped audio location.
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
std::shared_ptr< openshot::TrackedObjectBase > GetParentTrackedObject()
Return the associated Parent Tracked Object (if any)
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
void Close() override
Close the internal reader.
virtual ~Clip()
Destructor.
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
openshot::Keyframe volume
Curve representing the volume (0 to 1)
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
float End() const override
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
std::shared_ptr< openshot::Frame > GetFrame(int64_t clip_frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
openshot::ReaderBase * Reader()
Get the current reader.
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
std::string PropertiesJSON(int64_t requested_frame) const override
openshot::Color wave_color
Curve representing the color of the audio wave form.
void init_settings()
Init default settings for a clip.
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right))
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
void SetJson(const std::string value) override
Load JSON string into this object.
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom))
This class represents a color (used on the timeline and clips)
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
openshot::Keyframe red
Curve representing the red value (0 - 255)
openshot::Keyframe green
Curve representing the green value (0 - 255)
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Json::Value JsonValue() const
Generate Json::Value for this object.
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
This abstract class is the base class, used by all effects in libopenshot.
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
EffectInfoStruct info
Information about the current effect.
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Object's by their indices (used by Effects that track objects on clips)
This class returns a listing of all effects supported by libopenshot.
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
ReaderBase * Reader()
Get the current reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Exception for invalid JSON.
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
int64_t GetLength() const
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
double GetValue(int64_t index) const
Get the value at a specific index.
Json::Value JsonValue() const
Generate Json::Value for this object.
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
int64_t GetCount() const
Get the number of points (i.e. # of points)
Exception for frames that are out of bounds.
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
This abstract class is the base class, used by all readers in libopenshot.
virtual bool IsOpen()=0
Determine if reader is open or closed.
virtual std::string Name()=0
Return the type name of the class.
openshot::ReaderInfo info
Information about the current media file.
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
virtual void Close()=0
Close the reader (and any resources it was consuming)
Exception when a reader is closed, and a frame is requested.
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
This class represents a timeline (used for building generic timeline implementations)
This class represents a timeline.
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
This namespace is the default namespace for all code in the openshot library.
AnchorType
This enumeration determines what parent a clip should be aligned to.
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
GravityType
This enumeration determines how clips are aligned to their parent container.
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
@ GRAVITY_TOP
Align clip to the top center of its parent.
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
@ SCALE_NONE
Do not scale the clip.
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
@ FRAME_DISPLAY_NONE
Do not display the frame number.
const Json::Value stringToJson(const std::string value)
CompositeType
This enumeration determines how clips are composited onto lower layers.
This struct holds the associated video frame and starting sample # for an audio packet.
bool has_tracked_object
Determines if this effect tracks objects through the clip.
float duration
Length of time (in seconds)
int width
The width of the video (in pixels)
int channels
The number of audio channels used in the audio stream.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
int height
The height of the video (in pixels)
int64_t video_length
The number of frames in the video stream.
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
This struct contains info about the current Timeline clip instance.
bool force_safe_composite
If true, avoid mutating cached clip images during composition.
bool is_before_clip_keyframes
Is this before clip keyframes are applied.