63 *out << std::fixed << std::setprecision(2) << std::boolalpha;
64 *out <<
"----------------------------" << std::endl;
65 *out <<
"----- Effect Information -----" << std::endl;
66 *out <<
"----------------------------" << std::endl;
67 *out <<
"--> Name: " <<
info.
name << std::endl;
72 *out <<
"--> Order: " << order << std::endl;
73 *out <<
"----------------------------" << std::endl;
82 else if (color_value > 255)
108 root[
"order"] =
Order();
113 root[
"mask_reader"] = mask_reader->
JsonValue();
115 root[
"mask_reader"] = Json::objectValue;
131 catch (
const std::exception& e)
134 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
146 std::list<EffectBase*> effects = parentTimeline->
ClipEffects();
151 for (
auto const& effect : effects){
153 if ((effect->info.parent_effect_id == this->Id()) && (effect->Id() != this->
Id()))
154 effect->SetJsonValue(root);
162 my_root[
"id"] = this->
Id();
170 if (my_root[
"start"].isNull() && !my_root[
"mask_start"].isNull())
171 my_root[
"start"] = my_root[
"mask_start"];
172 if (my_root[
"end"].isNull() && !my_root[
"mask_end"].isNull())
173 my_root[
"end"] = my_root[
"mask_end"];
179 if (!my_root[
"order"].isNull())
180 Order(my_root[
"order"].asInt());
182 if (!my_root[
"apply_before_clip"].isNull())
185 if (!my_root[
"mask_invert"].isNull())
187 if (!my_root[
"mask_time_mode"].isNull()) {
188 const int time_mode = my_root[
"mask_time_mode"].asInt();
192 if (!my_root[
"mask_loop_mode"].isNull()) {
193 const int loop_mode = my_root[
"mask_loop_mode"].asInt();
200 const Json::Value mask_reader_json =
201 !my_root[
"mask_reader"].isNull() ? my_root[
"mask_reader"] : my_root[
"reader"];
203 if (!mask_reader_json.isNull()) {
204 if (!mask_reader_json[
"type"].isNull()) {
206 }
else if (mask_reader_json.isObject() && mask_reader_json.empty()) {
211 if (!my_root[
"parent_effect_id"].isNull()){
239 root[
"id"] =
add_property_json(
"ID", 0.0,
"string",
Id(), NULL, -1, -1,
true, requested_frame);
240 root[
"position"] =
add_property_json(
"Position",
Position(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
242 root[
"start"] =
add_property_json(
"Start",
Start(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
243 root[
"end"] =
add_property_json(
"End",
End(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
244 root[
"duration"] =
add_property_json(
"Duration",
Duration(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
true, requested_frame);
269 root[
"mask_reader"] =
add_property_json(
"Mask: Source", 0.0,
"reader", mask_reader->
Json(), NULL, 0, 1,
false, requested_frame);
271 root[
"mask_reader"] =
add_property_json(
"Mask: Source", 0.0,
"reader",
"{}", NULL, 0, 1,
false, requested_frame);
278 if (reader_json[
"type"].isNull())
282 const std::string type = reader_json[
"type"].asString();
284 if (type ==
"FFmpegReader") {
285 reader =
new FFmpegReader(reader_json[
"path"].asString());
291 }
else if (type ==
"QtImageReader") {
294 }
else if (type ==
"ChunkReader") {
295 reader =
new ChunkReader(reader_json[
"path"].asString(),
296 static_cast<ChunkVersion>(reader_json[
"chunk_version"].asInt()));
298#ifdef USE_IMAGEMAGICK
299 }
else if (type ==
"ImageReader") {
300 reader =
new ImageReader(reader_json[
"path"].asString());
309 if (mask_reader == new_reader)
313 mask_reader->
Close();
317 mask_reader = new_reader;
318 cached_single_mask_image.reset();
319 cached_single_mask_width = 0;
320 cached_single_mask_height = 0;
361 int64_t requested_index = std::max(int64_t(0), frame_number - 1);
364 if (host_fps > 0.0) {
365 const int64_t start_offset =
static_cast<int64_t
>(std::llround(std::max(0.0f,
Start()) * host_fps));
366 requested_index = std::max(int64_t(0), requested_index - start_offset);
369 int64_t mapped_index = requested_index;
375 if (host_fps > 0.0 && source_fps > 0.0) {
376 const double seconds =
static_cast<double>(requested_index) / host_fps;
377 mapped_index =
static_cast<int64_t
>(std::llround(seconds * source_fps));
385 const double start_sec = std::min<double>(std::max(0.0f,
Start()), source_duration);
386 const double end_sec = std::min<double>(std::max(0.0f,
End()), source_duration);
388 const int64_t range_start = std::max(int64_t(1),
static_cast<int64_t
>(std::llround(start_sec * source_fps)) + 1);
389 int64_t range_end = (end_sec > 0.0)
390 ?
static_cast<int64_t
>(std::llround(end_sec * source_fps)) + 1
393 range_end = std::min(range_end, source_len);
394 if (range_end < range_start)
395 range_end = range_start;
397 const int64_t range_len = std::max(int64_t(1), range_end - range_start + 1);
398 int64_t range_index = mapped_index;
402 range_index = mapped_index % range_len;
406 const int64_t cycle_len = (range_len * 2) - 2;
407 int64_t phase = mapped_index % cycle_len;
408 if (phase >= range_len)
409 phase = cycle_len - phase;
417 if (mapped_index < 0)
419 else if (mapped_index >= range_len)
420 range_index = range_len - 1;
422 range_index = mapped_index;
426 int64_t mapped_frame = range_start + range_index;
428 mapped_frame = std::min(std::max(int64_t(1), mapped_frame), source_len);
429 return std::max(int64_t(1), mapped_frame);
432std::shared_ptr<QImage> EffectBase::GetMaskImage(std::shared_ptr<QImage> target_image, int64_t frame_number) {
433 if (!mask_reader || !target_image || target_image->isNull())
436 std::shared_ptr<QImage> source_mask;
437 bool used_cached_scaled =
false;
438 #pragma omp critical (open_effect_mask_reader)
441 if (!mask_reader->
IsOpen())
445 cached_single_mask_image &&
446 cached_single_mask_width == target_image->width() &&
447 cached_single_mask_height == target_image->height()) {
448 source_mask = cached_single_mask_image;
449 used_cached_scaled =
true;
453 auto source_frame = mask_reader->
GetFrame(mapped_frame);
454 if (source_frame && source_frame->GetImage() && !source_frame->GetImage()->isNull())
455 source_mask = std::make_shared<QImage>(*source_frame->GetImage());
457 }
catch (
const std::exception& e) {
459 std::string(
"EffectBase::GetMaskImage unable to read mask frame: ") + e.what());
464 if (!source_mask || source_mask->isNull())
467 if (used_cached_scaled)
470 auto scaled_mask = std::make_shared<QImage>(
472 target_image->width(), target_image->height(),
473 Qt::IgnoreAspectRatio, Qt::SmoothTransformation));
475 cached_single_mask_image = scaled_mask;
476 cached_single_mask_width = target_image->width();
477 cached_single_mask_height = target_image->height();
482void EffectBase::BlendWithMask(std::shared_ptr<QImage> original_image, std::shared_ptr<QImage> effected_image,
483 std::shared_ptr<QImage> mask_image)
const {
484 if (!original_image || !effected_image || !mask_image)
486 if (original_image->size() != effected_image->size() || effected_image->size() != mask_image->size())
489 unsigned char* original_pixels =
reinterpret_cast<unsigned char*
>(original_image->bits());
490 unsigned char* effected_pixels =
reinterpret_cast<unsigned char*
>(effected_image->bits());
491 unsigned char* mask_pixels =
reinterpret_cast<unsigned char*
>(mask_image->bits());
492 const int pixel_count = effected_image->width() * effected_image->height();
494 #pragma omp parallel for schedule(static)
495 for (
int i = 0; i < pixel_count; ++i) {
496 const int idx = i * 4;
497 int gray = qGray(mask_pixels[idx], mask_pixels[idx + 1], mask_pixels[idx + 2]);
500 const float factor =
static_cast<float>(gray) / 255.0f;
501 const float inverse = 1.0f - factor;
503 effected_pixels[idx] =
static_cast<unsigned char>(
504 (original_pixels[idx] * inverse) + (effected_pixels[idx] * factor));
505 effected_pixels[idx + 1] =
static_cast<unsigned char>(
506 (original_pixels[idx + 1] * inverse) + (effected_pixels[idx + 1] * factor));
507 effected_pixels[idx + 2] =
static_cast<unsigned char>(
508 (original_pixels[idx + 2] * inverse) + (effected_pixels[idx + 2] * factor));
509 effected_pixels[idx + 3] =
static_cast<unsigned char>(
510 (original_pixels[idx + 3] * inverse) + (effected_pixels[idx + 3] * factor));
517 return GetFrame(frame, frame_number);
521 return GetFrame(frame, frame_number);
523 auto pre_image = frame->GetImage();
524 if (!pre_image || pre_image->isNull())
525 return GetFrame(frame, frame_number);
527 const auto original_image = std::make_shared<QImage>(pre_image->copy());
528 auto output_frame =
GetFrame(frame, frame_number);
531 auto effected_image = output_frame->GetImage();
532 if (!effected_image || effected_image->isNull() ||
533 effected_image->size() != original_image->size())
536 auto mask_image = GetMaskImage(effected_image, frame_number);
537 if (!mask_image || mask_image->isNull())
543 BlendWithMask(original_image, effected_image, mask_image);
571 if (parentEffectPtr){
577 EffectJSON[
"id"] = this->
Id();
Header file for ChunkReader class.
Header file for Clip class.
Header file for EffectBase class.
Header file for all Exception classes.
Header file for FFmpegReader class.
Header file for ImageReader class.
Header file for QtImageReader class.
Header file for ReaderBase class.
Header file for Timeline class.
Header file for ZeroMQ-based Logger class.
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
This abstract class is the base class, used by all clips in libopenshot.
float Start() const
Get start position (in seconds) of clip (trim start of video)
float Duration() const
Get the length of this clip (in seconds)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
This method is required for all derived classes of ClipBase, and returns a new openshot::Frame object...
virtual float End() const
Get end position (in seconds) of clip (trim end of video)
std::string Id() const
Get the Id of this clip object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
float Position() const
Get position on timeline (in seconds)
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
This class represents a clip (used to arrange readers on the timeline)
This abstract class is the base class, used by all effects in libopenshot.
ReaderBase * CreateReaderFromJson(const Json::Value &reader_json) const
Create a reader instance from reader JSON.
int mask_loop_mode
Behavior when mask range reaches the end.
virtual bool UseCustomMaskBlend(int64_t frame_number) const
Optional override for effects that need custom mask behavior.
virtual void SetJson(const std::string value)
Load JSON string into this object.
ReaderBase * MaskReader()
Get the common mask reader.
EffectBase * parentEffect
Parent effect (which properties will set this effect properties)
Json::Value JsonInfo() const
Generate JSON object of meta data / info.
virtual bool HandlesMaskInternally() const
Optional override for effects that apply mask processing inside GetFrame().
bool mask_invert
Invert grayscale mask values before blending.
std::shared_ptr< openshot::Frame > ProcessFrame(std::shared_ptr< openshot::Frame > frame, int64_t frame_number)
Apply effect processing with common mask support (if enabled).
std::string ParentClipId() const
Return the ID of this effect's parent clip.
void SetParentEffect(std::string parentEffect_id)
Set the parent effect from which this properties will be set to.
virtual Json::Value JsonValue() const
Generate Json::Value for this object.
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
int constrain(int color_value)
Constrain a color value from 0 to 255.
virtual std::string Json() const
Generate JSON string of this object.
double ResolveMaskHostFps()
Determine host FPS used to convert timeline frames to mask source FPS.
Json::Value BasePropertiesJSON(int64_t requested_frame) const
Generate JSON object of base properties (recommended to be used by all effects)
int mask_time_mode
How effect frames map to mask source frames.
virtual void ApplyCustomMaskBlend(std::shared_ptr< QImage > original_image, std::shared_ptr< QImage > effected_image, std::shared_ptr< QImage > mask_image, int64_t frame_number) const
Optional override for effects with custom mask implementation.
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
int Order() const
Get the order that this effect should be executed.
double ResolveMaskSourceDuration() const
Determine mask source duration in seconds.
openshot::ClipBase * clip
Pointer to the parent clip instance (if any)
EffectInfoStruct info
Information about the current effect.
int64_t MapMaskFrameNumber(int64_t frame_number)
Convert an effect frame number to a mask source frame number.
void DisplayInfo(std::ostream *out=&std::cout)
Display effect information in the standard output stream (stdout)
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
int num
Numerator for the fraction.
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
int den
Denominator for the fraction.
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Exception for invalid JSON.
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
This abstract class is the base class, used by all readers in libopenshot.
virtual bool IsOpen()=0
Determine if reader is open or closed.
openshot::ReaderInfo info
Information about the current media file.
virtual std::string Json() const =0
Generate JSON string of this object.
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
virtual void Close()=0
Close the reader (and any resources it was consuming)
This class represents a timeline.
openshot::EffectBase * GetClipEffect(const std::string &id)
Look up a clip effect by ID.
std::list< openshot::EffectBase * > ClipEffects() const
Return the list of effects on all clips.
void Log(std::string message)
Log message to all subscribers of this logger (if any)
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
This namespace is the default namespace for all code in the openshot library.
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
const Json::Value stringToJson(const std::string value)
bool has_video
Determines if this effect manipulates the image of a frame.
bool apply_before_clip
Apply effect before we evaluate the clip's keyframes.
std::string parent_effect_id
Id of the parent effect (if there is one)
bool has_audio
Determines if this effect manipulates the audio of a frame.
std::string class_name
The class name of the effect.
std::string name
The name of the effect.
std::string description
The description of this effect and what it does.
bool has_tracked_object
Determines if this effect tracks objects through the clip.
bool has_single_image
Determines if this file only contains a single image.
float duration
Length of time (in seconds)
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
int64_t video_length
The number of frames in the video stream.
bool has_audio
Determines if this file has an audio stream.
int audio_stream_index
The index of the audio stream.