OpenShot Library | libopenshot  0.1.1
Timeline.cpp
1 /**
2  * @file
3  * @brief Source file for Timeline class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @section LICENSE
7  *
8  * Copyright (c) 2008-2014 OpenShot Studios, LLC
9  * <http://www.openshotstudios.com/>. This file is part of
10  * OpenShot Library (libopenshot), an open-source project dedicated to
11  * delivering high quality video editing and animation solutions to the
12  * world. For more information visit <http://www.openshot.org/>.
13  *
14  * OpenShot Library (libopenshot) is free software: you can redistribute it
15  * and/or modify it under the terms of the GNU Lesser General Public License
16  * as published by the Free Software Foundation, either version 3 of the
17  * License, or (at your option) any later version.
18  *
19  * OpenShot Library (libopenshot) is distributed in the hope that it will be
20  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU Lesser General Public License for more details.
23  *
24  * You should have received a copy of the GNU Lesser General Public License
25  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
26  */
27 
28 #include "../include/Timeline.h"
29 
30 using namespace openshot;
31 
32 // Default Constructor for the timeline (which sets the canvas width and height)
33 Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
34  is_open(false), auto_map_clips(true)
35 {
36  // Init viewport size (curve based, because it can be animated)
37  viewport_scale = Keyframe(100.0);
38  viewport_x = Keyframe(0.0);
39  viewport_y = Keyframe(0.0);
40 
41  // Init background color
42  color.red = Keyframe(0.0);
43  color.green = Keyframe(0.0);
44  color.blue = Keyframe(0.0);
45 
46  // Init FileInfo struct (clear all values)
47  info.width = width;
48  info.height = height;
49  info.fps = fps;
50  info.sample_rate = sample_rate;
51  info.channels = channels;
52  info.channel_layout = channel_layout;
53  info.video_timebase = fps.Reciprocal();
54  info.duration = 60 * 30; // 30 minute default duration
55  info.has_audio = true;
56  info.has_video = true;
57  info.video_length = info.fps.ToFloat() * info.duration;
58 
59  // Init cache
60  final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 4, info.width, info.height, info.sample_rate, info.channels);
61 }
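// Illustrative usage sketch (not part of the original file; assumes libopenshot's
// Fraction class and LAYOUT_STEREO channel layout constant):
//
//   Timeline t(1280, 720, Fraction(24, 1), 44100, 2, LAYOUT_STEREO);
//   t.Open();                                  // mark the timeline as readable
//   tr1::shared_ptr<Frame> f = t.GetFrame(1);  // blank black frame until clips are added
//   t.Close();                                 // closes all open clips and clears the cache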
62 
63 // Add an openshot::Clip to the timeline
64 void Timeline::AddClip(Clip* clip)
65 {
66  // All clips should be converted to the frame rate of this timeline
67  if (auto_map_clips)
68  // Apply framemapper (or update existing framemapper)
69  apply_mapper_to_clip(clip);
70 
71  // Add clip to list
72  clips.push_back(clip);
73 
74  // Sort clips
75  sort_clips();
76 }
77 
78 // Add an effect to the timeline
79 void Timeline::AddEffect(EffectBase* effect)
80 {
81  // Add effect to list
82  effects.push_back(effect);
83 
84  // Sort effects
85  sort_effects();
86 }
87 
88 // Remove an effect from the timeline
89 void Timeline::RemoveEffect(EffectBase* effect)
90 {
91  effects.remove(effect);
92 }
93 
94 // Remove an openshot::Clip from the timeline
95 void Timeline::RemoveClip(Clip* clip)
96 {
97  clips.remove(clip);
98 }
99 
100 // Apply a FrameMapper to a clip which matches the settings of this timeline
101 void Timeline::apply_mapper_to_clip(Clip* clip)
102 {
103  // Determine type of reader
104  ReaderBase* clip_reader = NULL;
105  if (clip->Reader()->Name() == "FrameMapper")
106  {
107  // Get the existing reader
108  clip_reader = (ReaderBase*) clip->Reader();
109 
110  } else {
111 
112  // Create a new FrameMapper to wrap the current reader
113  clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
114  }
115 
116  // Update the mapping
117  FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
118  clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
119 
120  // Update clip reader
121  clip->Reader(clip_reader);
122 }
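// Note: once apply_mapper_to_clip() returns, clip->Reader() is always a FrameMapper
// whose output matches this timeline's fps, sample rate, channel count, and channel
// layout, so frames pulled from different clips line up when mixed and composited.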
123 
124 // Apply the timeline's framerate and samplerate to all clips
125 void Timeline::ApplyMapperToClips()
126 {
127  // Clear all cached frames
128  final_cache.Clear();
129 
130  // Loop through all clips
131  list<Clip*>::iterator clip_itr;
132  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
133  {
134  // Get clip object from the iterator
135  Clip *clip = (*clip_itr);
136 
137  // Apply framemapper (or update existing framemapper)
138  apply_mapper_to_clip(clip);
139  }
140 }
141 
142 // Calculate time of a frame number, based on a framerate
143 float Timeline::calculate_time(long int number, Fraction rate)
144 {
145  // Get float version of fps fraction
146  float raw_fps = rate.ToFloat();
147 
148  // Return the time (in seconds) of this frame
149  return float(number - 1) / raw_fps;
150 }
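// Worked example: on a 24/1 fps timeline, calculate_time(1, fps) == 0.0,
// calculate_time(13, fps) == 0.5, and calculate_time(25, fps) == 1.0;
// in general, frame N begins at (N - 1) / 24.0 seconds.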
151 
152 // Apply effects to the source frame (if any)
153 tr1::shared_ptr<Frame> Timeline::apply_effects(tr1::shared_ptr<Frame> frame, long int timeline_frame_number, int layer)
154 {
155  // Calculate time of frame
156  float requested_time = calculate_time(timeline_frame_number, info.fps);
157 
158  // Debug output
159  AppendDebugMethod("Timeline::apply_effects", "requested_time", requested_time, "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);
160 
161  // Find Effects at this position and layer
162  list<EffectBase*>::iterator effect_itr;
163  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
164  {
165  // Get effect object from the iterator
166  EffectBase *effect = (*effect_itr);
167 
168  // Does the effect intersect the current requested time
169  float effect_duration = effect->End() - effect->Start();
170  bool does_effect_intersect = (effect->Position() <= requested_time && effect->Position() + effect_duration >= requested_time && effect->Layer() == layer);
171 
172  // Debug output
173  AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "requested_time", requested_time, "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "effect_duration", effect_duration);
174 
175  // Effect is visible
176  if (does_effect_intersect)
177  {
178  // Determine the frame needed for this effect (based on its position on the timeline)
179  float time_diff = (requested_time - effect->Position()) + effect->Start();
180  int effect_frame_number = round(time_diff * info.fps.ToFloat()) + 1;
181 
182  // Debug output
183  AppendDebugMethod("Timeline::apply_effects (Process Effect)", "time_diff", time_diff, "effect_frame_number", effect_frame_number, "effect_duration", effect_duration, "does_effect_intersect", does_effect_intersect, "", -1, "", -1);
184 
185  // Apply the effect to this frame
186  frame = effect->GetFrame(frame, effect_frame_number);
187  }
188 
189  } // end effect loop
190 
191  // Return modified frame
192  return frame;
193 }
194 
195 // Get or generate a blank frame
196 tr1::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, long int number)
197 {
198  tr1::shared_ptr<Frame> new_frame;
199 
200  // Init some basic properties about this frame
201  int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);
202 
203  try {
204  // Attempt to get a frame (but this could fail if a reader has just been closed)
205  //new_frame = tr1::shared_ptr<Frame>(clip->GetFrame(number));
206  new_frame = tr1::shared_ptr<Frame>(clip->GetFrame(number));
207 
208  // Return real frame
209  return new_frame;
210 
211  } catch (const ReaderClosed & e) {
212  // ...
213  } catch (const TooManySeeks & e) {
214  // ...
215  } catch (const OutOfBoundsFrame & e) {
216  // ...
217  }
218 
219  // Create blank frame
220  new_frame = tr1::shared_ptr<Frame>(new Frame(number, info.width, info.height, "#000000", samples_in_frame, info.channels));
221  new_frame->SampleRate(info.sample_rate);
222  new_frame->ChannelsLayout(info.channel_layout);
223  return new_frame;
224 }
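// Note: the three reader exceptions above are swallowed deliberately; callers of
// GetOrCreateFrame() always receive a usable frame, falling back to the silent
// black frame (timeline dimensions, correct sample count) constructed here.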
225 
226 // Process a new layer of video or audio
227 void Timeline::add_layer(tr1::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
228 {
229  // Get the clip's frame & image
230  tr1::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
231 
232  // No frame found... so bail
233  if (!source_frame)
234  return;
235 
236  // Debug output
237  AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);
238 
239  /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
240  if (source_clip->Waveform())
241  {
242  // Debug output
243  AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
244 
245  // Get the color of the waveform
246  int red = source_clip->wave_color.red.GetInt(clip_frame_number);
247  int green = source_clip->wave_color.green.GetInt(clip_frame_number);
248  int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
249  int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);
250 
251  // Generate Waveform Dynamically (the size of the timeline)
252  tr1::shared_ptr<QImage> source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue, alpha);
253  source_frame->AddImage(tr1::shared_ptr<QImage>(source_image));
254  }
255 
256  /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
257  * effects on the top clip. */
258  if (is_top_clip)
259  source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
260 
261  // Declare an image to hold the source frame's image
262  tr1::shared_ptr<QImage> source_image;
263 
264  /* COPY AUDIO - with correct volume */
265  if (source_clip->Reader()->info.has_audio) {
266 
267  // Debug output
268  AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
269 
270  if (source_frame->GetAudioChannelsCount() == info.channels)
271  for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
272  {
273  float initial_volume = 1.0f;
274  float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
275  float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)
276 
277  // If no ramp needed, set initial volume = clip's volume
278  if (isEqual(previous_volume, volume))
279  initial_volume = volume;
280 
281  // Apply ramp to source frame (if needed)
282  if (!isEqual(previous_volume, volume))
283  source_frame->ApplyGainRamp(channel, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
284 
285  // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
286  // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
287  // number of samples returned is variable... and does not match the number expected.
288  // This is a crude solution at best. =)
289  if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
290  // Force timeline frame to match the source frame
291  new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
292 
293  // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, so
294  // be sure to set the gains correctly, so the sum does not exceed 1.0 (or audio distortion will happen).
295  new_frame->AddAudio(false, channel, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);
296 
297  }
298  else
299  // Debug output
300  AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
301 
302  }
303 
304  // Bail out if this is an audio-only frame
305  if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
306  // Skip the rest of the image processing for performance reasons
307  return;
308 
309  // Debug output
310  AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
311 
312  // Get actual frame image data
313  source_image = source_frame->GetImage();
314 
315  // Get some basic image properties
316  int source_width = source_image->width();
317  int source_height = source_image->height();
318 
319  /* ALPHA & OPACITY */
320  if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
321  {
322  float alpha = source_clip->alpha.GetValue(clip_frame_number);
323 
324  // Get source image's pixels
325  unsigned char *pixels = (unsigned char *) source_image->bits();
326 
327  // Loop through pixels
328  for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
329  {
330  // Get the alpha value from the pixel
331  int A = pixels[byte_index + 3];
332 
333  // Apply the clip's alpha to the pixel's alpha channel
334  pixels[byte_index + 3] = A * alpha;
335  }
336 
337  // Debug output
338  AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
339  }
340 
341  /* RESIZE SOURCE IMAGE - based on scale type */
342  switch (source_clip->scale)
343  {
344  case (SCALE_FIT):
345  // keep aspect ratio
346  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::KeepAspectRatio, Qt::SmoothTransformation)));
347  source_width = source_image->width();
348  source_height = source_image->height();
349 
350  // Debug output
351  AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
352  break;
353 
354  case (SCALE_STRETCH):
355  // ignore aspect ratio
356  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation)));
357  source_width = source_image->width();
358  source_height = source_image->height();
359 
360  // Debug output
361  AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
362  break;
363 
364  case (SCALE_CROP):
365  QSize width_size(info.width, round(info.width / (float(source_width) / float(source_height))));
366  QSize height_size(round(info.height / (float(source_height) / float(source_width))), info.height);
367 
368  // respect aspect ratio
369  if (width_size.width() >= info.width && width_size.height() >= info.height)
370  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(width_size.width(), width_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
371  else
372  source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(height_size.width(), height_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation))); // height is larger, so resize to it
373  source_width = source_image->width();
374  source_height = source_image->height();
375 
376  // Debug output
377  AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
378  break;
379  }
380 
381  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
382  float x = 0.0; // left
383  float y = 0.0; // top
384 
385  // Adjust size for scale x and scale y
386  float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
387  float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
388  float scaled_source_width = source_width * sx;
389  float scaled_source_height = source_height * sy;
390 
391  switch (source_clip->gravity)
392  {
393  case (GRAVITY_TOP):
394  x = (info.width - scaled_source_width) / 2.0; // center
395  break;
396  case (GRAVITY_TOP_RIGHT):
397  x = info.width - scaled_source_width; // right
398  break;
399  case (GRAVITY_LEFT):
400  y = (info.height - scaled_source_height) / 2.0; // center
401  break;
402  case (GRAVITY_CENTER):
403  x = (info.width - scaled_source_width) / 2.0; // center
404  y = (info.height - scaled_source_height) / 2.0; // center
405  break;
406  case (GRAVITY_RIGHT):
407  x = info.width - scaled_source_width; // right
408  y = (info.height - scaled_source_height) / 2.0; // center
409  break;
410  case (GRAVITY_BOTTOM_LEFT):
411  y = (info.height - scaled_source_height); // bottom
412  break;
413  case (GRAVITY_BOTTOM):
414  x = (info.width - scaled_source_width) / 2.0; // center
415  y = (info.height - scaled_source_height); // bottom
416  break;
417  case (GRAVITY_BOTTOM_RIGHT):
418  x = info.width - scaled_source_width; // right
419  y = (info.height - scaled_source_height); // bottom
420  break;
421  }
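// Worked example (assuming a 1920x1080 timeline, a 1280x720 source, and
// scale_x == scale_y == 1.0): GRAVITY_CENTER yields x = (1920 - 1280) / 2 = 320
// and y = (1080 - 720) / 2 = 180; GRAVITY_BOTTOM_RIGHT yields x = 640, y = 360.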
422 
423  // Debug output
424  AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "source_width", source_width, "info.height", info.height, "source_height", source_height);
425 
426  /* LOCATION, ROTATION, AND SCALE */
427  float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
428  x += (info.width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
429  y += (info.height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
430  bool is_x_animated = source_clip->location_x.Points.size() > 1;
431  bool is_y_animated = source_clip->location_y.Points.size() > 1;
432 
433  int offset_x = -1;
434  int offset_y = -1;
435  bool transformed = false;
436  QTransform transform;
437  if ((!isEqual(x, 0) || !isEqual(y, 0)) && (isEqual(r, 0) && isEqual(sx, 1) && isEqual(sy, 1) && !is_x_animated && !is_y_animated))
438  {
439  // SIMPLE OFFSET
440  AppendDebugMethod("Timeline::add_layer (Transform: SIMPLE)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
441 
442  // Only X and Y differ, and no animation is being used, so a simple translation is enough (faster)
443  transformed = true;
444 
445  // Set QTransform
446  transform.translate(x, y);
447 
448  } else if (!isEqual(r, 0) || !isEqual(x, 0) || !isEqual(y, 0) || !isEqual(sx, 1) || !isEqual(sy, 1))
449  {
450  // COMPLEX DISTORTION
451  AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
452 
453  // Use the QTransform object, which can be very CPU intensive
454  transformed = true;
455 
456  // Set QTransform
457  if (!isEqual(r, 0)) {
458  // ROTATE CLIP
459  float origin_x = x + (source_width / 2.0);
460  float origin_y = y + (source_height / 2.0);
461  transform.translate(origin_x, origin_y);
462  transform.rotate(r);
463  transform.translate(-origin_x,-origin_y);
464  }
465 
466  // Set QTransform
467  if (!isEqual(x, 0) || !isEqual(y, 0)) {
468  // TRANSLATE/MOVE CLIP
469  transform.translate(x, y);
470  }
471 
472  if (!isEqual(sx, 1) || !isEqual(sy, 1)) {
473  // SCALE CLIP
474  transform.scale(sx, sy);
475  }
476 
477  // Debug output
478  AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX: Completed ScaleRotateTranslateDistortion)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
479  }
480 
481  // Debug output
482  AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
483 
484  /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
485  tr1::shared_ptr<QImage> new_image = new_frame->GetImage();
486 
487  // Load timeline's new frame image into a QPainter
488  QPainter painter(new_image.get());
489  painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
490 
491  // Apply transform (translate, rotate, scale)... if any
492  if (transformed)
493  painter.setTransform(transform);
494 
495  // Composite a new layer onto the image
496  painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
497  painter.drawImage(0, 0, *source_image);
498  painter.end();
499 
500  // Debug output
501  AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
502 }
503 
504 // Update the list of 'opened' clips
505 void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
506 {
507  AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);
508 
509  // is clip already in list?
510  bool clip_found = open_clips.count(clip);
511 
512  if (clip_found && !does_clip_intersect)
513  {
514  // Remove clip from 'opened' list, because it's closed now
515  open_clips.erase(clip);
516 
517  // Close clip
518  clip->Close();
519  }
520  else if (!clip_found && does_clip_intersect)
521  {
522  // Add clip to 'opened' list, because it's missing
523  open_clips[clip] = clip;
524 
525  // Set debug mode (if needed)
526  if (debug)
527  // Also set each Clip's reader to debug mode
528  clip->Reader()->debug = true;
529 
530  // Open the clip
531  clip->Open();
532  }
533 
534  // Debug output
535  AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
536 }
537 
538 // Sort clips by position on the timeline
539 void Timeline::sort_clips()
540 {
541  // Debug output
542  AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);
543 
544  // sort clips
545  clips.sort(CompareClips());
546 }
547 
548 // Sort effects by position on the timeline
549 void Timeline::sort_effects()
550 {
551  // sort effects
552  effects.sort(CompareEffects());
553 }
554 
555 // Close the reader (and any resources it was consuming)
556 void Timeline::Close()
557 {
558  // Close all open clips
559  list<Clip*>::iterator clip_itr;
560  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
561  {
562  // Get clip object from the iterator
563  Clip *clip = (*clip_itr);
564 
565  // Open or Close this clip, based on if it's intersecting or not
566  update_open_clips(clip, false);
567  }
568 
569  // Mark timeline as closed
570  is_open = false;
571 
572  // Clear cache
573  final_cache.Clear();
574 }
575 
576 // Open the reader (and start consuming resources)
577 void Timeline::Open()
578 {
579  is_open = true;
580 }
581 
582 // Compare 2 floating point numbers for equality
583 bool Timeline::isEqual(double a, double b)
584 {
585  return fabs(a - b) < 0.000001;
586 }
587 
588 // Get an openshot::Frame object for a specific frame number of this reader.
589 tr1::shared_ptr<Frame> Timeline::GetFrame(long int requested_frame) throw(ReaderClosed, OutOfBoundsFrame)
590 {
591  // Check for open reader (or throw exception)
592  if (!is_open)
593  throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");
594 
595  // Adjust out of bounds frame number
596  if (requested_frame < 1)
597  requested_frame = 1;
598 
599  // Check cache
600  tr1::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
601  if (frame) {
602  // Debug output
603  AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);
604 
605  // Return cached frame
606  return frame;
607  }
608  else
609  {
610  // Create a scoped lock, allowing only a single thread to run the following code at one time
611  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
612 
613  // Check cache again (due to locking)
614  frame = final_cache.GetFrame(requested_frame);
615  if (frame) {
616  // Debug output
617  AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);
618 
619  // Return cached frame
620  return frame;
621  }
622 
623  // Minimum number of frames to process (for performance reasons)
624  int minimum_frames = OPEN_MP_NUM_PROCESSORS;
625 
626  // Get a list of clips that intersect with the requested section of timeline
627  // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
628  vector<Clip*> nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
629 
630  omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
631  // Allow nested OpenMP sections
632  omp_set_nested(true);
633 
634  // Debug output
635  AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);
636 
637  // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
638  // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
639  for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
640  {
641  // Calculate time of timeline frame
642  float requested_time = calculate_time(frame_number, info.fps);
643  // Loop through clips
644  for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
645  {
646  // Get clip object from the iterator
647  Clip *clip = nearby_clips[clip_index];
648  bool does_clip_intersect = (clip->Position() <= requested_time && clip->Position() + clip->Duration() >= requested_time);
649  if (does_clip_intersect)
650  {
651  // Get clip frame #
652  float time_diff = (requested_time - clip->Position()) + clip->Start();
653  int clip_frame_number = round(time_diff * info.fps.ToFloat()) + 1;
654  // Cache clip object
655  clip->GetFrame(clip_frame_number);
656  }
657  }
658  }
659 
660  #pragma omp parallel
661  {
662  // Loop through all requested frames
663  #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
664  for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
665  {
666  // Debug output
667  AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);
668 
669  // Init some basic properties about this frame
670  int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);
671 
672  // Create blank frame (which will become the requested frame)
673  tr1::shared_ptr<Frame> new_frame(tr1::shared_ptr<Frame>(new Frame(frame_number, info.width, info.height, "#000000", samples_in_frame, info.channels)));
674  new_frame->SampleRate(info.sample_rate);
675  new_frame->ChannelsLayout(info.channel_layout);
676 
677  // Debug output
678  AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);
679 
680  // Add Background Color to 1st layer (if animated or not black)
681  if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
682  (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
683  new_frame->AddColor(info.width, info.height, color.GetColorHex(frame_number));
684 
685  // Calculate time of frame
686  float requested_time = calculate_time(frame_number, info.fps);
687 
688  // Debug output
689  AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "requested_time", requested_time, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1);
690 
691  // Find Clips near this time
692  for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
693  {
694  // Get clip object from the iterator
695  Clip *clip = nearby_clips[clip_index];
696 
697  // Does clip intersect the current requested time
698  bool does_clip_intersect = (clip->Position() <= requested_time && clip->Position() + clip->Duration() >= requested_time);
699 
700  // Debug output
701  AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "requested_time", requested_time, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1);
702 
703  // Clip is visible
704  if (does_clip_intersect)
705  {
706  // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
707  bool is_top_clip = true;
708  for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
709  {
710  Clip *nearby_clip = nearby_clips[top_clip_index];
711  if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
712  nearby_clip->Position() <= requested_time && nearby_clip->Position() + nearby_clip->Duration() >= requested_time &&
713  nearby_clip->Position() > clip->Position()) {
714  is_top_clip = false;
715  break;
716  }
717  }
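// e.g. if clips A (Position 0.0s) and B (Position 2.0s) share a layer and both
// cover t = 2.5s, A is not the "top" clip (B was positioned later), so only B's
// frame has the layer's effects applied inside add_layer().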
718 
719  // Determine the frame needed for this clip (based on the position on the timeline)
720  float time_diff = (requested_time - clip->Position()) + clip->Start();
721  int clip_frame_number = round(time_diff * info.fps.ToFloat()) + 1;
722 
723  // Debug output
724  AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "time_diff", time_diff, "requested_time", requested_time, "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
725 
726  // Add clip's frame as layer
727  add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip);
728 
729  } else
730  // Debug output
731  AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "requested_time", requested_time, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1);
732 
733  } // end clip loop
734 
735  // Debug output
736  AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);
737 
738  // Add final frame to cache
739  final_cache.Add(frame_number, new_frame);
740 
741  } // end frame loop
742  } // end parallel
743 
744  // Debug output
745  AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);
746 
747  // Return frame (or blank frame)
748  return final_cache.GetFrame(requested_frame);
749  }
750 }
751 
752 
753 // Find intersecting (or non-intersecting) clips
754 vector<Clip*> Timeline::find_intersecting_clips(long int requested_frame, int number_of_frames, bool include)
755 {
756  // Find matching clips
757  vector<Clip*> matching_clips;
758 
759  // Calculate time of frame
760  float min_requested_time = calculate_time(requested_frame, info.fps);
761  float max_requested_time = calculate_time(requested_frame + (number_of_frames - 1), info.fps);
762 
763  // Re-Sort Clips (since they likely changed)
764  sort_clips();
765 
766  // Find Clips at this time
767  list<Clip*>::iterator clip_itr;
768  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
769  {
770  // Get clip object from the iterator
771  Clip *clip = (*clip_itr);
772 
773  // Does clip intersect the current requested time
774  float clip_duration = clip->End() - clip->Start();
775  bool does_clip_intersect = (clip->Position() <= min_requested_time && clip->Position() + clip_duration >= min_requested_time) ||
776  (clip->Position() > min_requested_time && clip->Position() <= max_requested_time);
777 
778  // Debug output
779  AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_time", min_requested_time, "max_requested_time", max_requested_time, "clip->Position()", clip->Position(), "clip_duration", clip_duration, "does_clip_intersect", does_clip_intersect);
780 
781  // Open (or schedule for closing) this clip, based on if it's intersecting or not
782  #pragma omp critical (reader_lock)
783  update_open_clips(clip, does_clip_intersect);
784 
785 
786  // Clip is visible
787  if (does_clip_intersect && include)
788  // Add the intersecting clip
789  matching_clips.push_back(clip);
790 
791  else if (!does_clip_intersect && !include)
792  // Add the non-intersecting clip
793  matching_clips.push_back(clip);
794 
795  } // end clip loop
796 
797  // return list
798  return matching_clips;
799 }
800 
801 // Generate JSON string of this object
802 string Timeline::Json() {
803 
804  // Return formatted string
805  return JsonValue().toStyledString();
806 }
807 
808 // Generate Json::JsonValue for this object
809 Json::Value Timeline::JsonValue() {
810 
811  // Create root json object
812  Json::Value root = ReaderBase::JsonValue(); // get parent properties
813  root["type"] = "Timeline";
814  root["viewport_scale"] = viewport_scale.JsonValue();
815  root["viewport_x"] = viewport_x.JsonValue();
816  root["viewport_y"] = viewport_y.JsonValue();
817  root["color"] = color.JsonValue();
818 
819  // Add array of clips
820  root["clips"] = Json::Value(Json::arrayValue);
821 
822  // loop through all clips
823  list<Clip*>::iterator clip_itr;
824  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
825  {
826  // Get clip object from the iterator
827  Clip *existing_clip = (*clip_itr);
828  root["clips"].append(existing_clip->JsonValue());
829  }
830 
831  // Add array of effects
832  root["effects"] = Json::Value(Json::arrayValue);
833 
834  // loop through effects
835  list<EffectBase*>::iterator effect_itr;
836  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
837  {
838  // Get effect object from the iterator
839  EffectBase *existing_effect = (*effect_itr);
840  root["effects"].append(existing_effect->JsonValue());
841  }
842 
843  // return JsonValue
844  return root;
845 }
846 
847 // Load JSON string into this object
848 void Timeline::SetJson(string value) throw(InvalidJSON) {
849 
850  // Parse JSON string into JSON objects
851  Json::Value root;
852  Json::Reader reader;
853  bool success = reader.parse( value, root );
854  if (!success)
855  // Raise exception
856  throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
857 
858  try
859  {
860  // Set all values that match
861  SetJsonValue(root);
862  }
863  catch (const exception& e)
864  {
865  // Error parsing JSON (or missing keys)
866  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
867  }
868 }
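// Example input (a minimal sketch of the accepted shape; keys handled by
// ReaderBase::SetJsonValue, such as "width" and "height", may also appear):
//   {
//     "fps": { "num": 24, "den": 1 },
//     "clips":   [ /* openshot::Clip JSON objects */ ],
//     "effects": [ /* e.g. { "type": "ChromaKey", ... } */ ]
//   }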
869 
870 // Load Json::JsonValue into this object
871 void Timeline::SetJsonValue(Json::Value root) throw(InvalidFile, ReaderClosed) {
872 
873  // Close timeline before we do anything (this also removes all open and closing clips)
874  Close();
875 
876  // Set parent data
877  ReaderBase::SetJsonValue(root);
878 
879  if (!root["clips"].isNull()) {
880  // Clear existing clips
881  clips.clear();
882 
883  // loop through clips
884  for (int x = 0; x < root["clips"].size(); x++) {
885  // Get each clip
886  Json::Value existing_clip = root["clips"][x];
887 
888  // Create Clip
889  Clip *c = new Clip();
890 
891  // Load Json into Clip
892  c->SetJsonValue(existing_clip);
893 
894  // Add Clip to Timeline
895  AddClip(c);
896  }
897  }
898 
899  if (!root["effects"].isNull()) {
900  // Clear existing effects
901  effects.clear();
902 
903  // loop through effects
904  for (int x = 0; x < root["effects"].size(); x++) {
905  // Get each effect
906  Json::Value existing_effect = root["effects"][x];
907 
908  // Create Effect
909  EffectBase *e = NULL;
910 
911  if (!existing_effect["type"].isNull())
912  // Init the matching effect object
913  if (existing_effect["type"].asString() == "ChromaKey")
914  e = new ChromaKey();
915 
916  else if (existing_effect["type"].asString() == "Deinterlace")
917  e = new Deinterlace();
918 
919  else if (existing_effect["type"].asString() == "Mask")
920  e = new Mask();
921 
922  else if (existing_effect["type"].asString() == "Negate")
923  e = new Negate();
924 
925  // Load Json into Effect (guard against unrecognized effect types, which leave e NULL)
926  if (e) e->SetJsonValue(existing_effect);
927 
928  // Add Effect to Timeline
929  if (e) AddEffect(e);
930  }
931  }
932 }
933 
934 // Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
935 void Timeline::ApplyJsonDiff(string value) throw(InvalidJSON, InvalidJSONKey, ReaderClosed) {
936 
937  // Clear internal cache (since things are about to change)
938  final_cache.Clear();
939 
940  // Parse JSON string into JSON objects
941  Json::Value root;
942  Json::Reader reader;
943  bool success = reader.parse( value, root );
944  if (!success || !root.isArray())
945  // Raise exception
946  throw InvalidJSON("JSON could not be parsed (or is invalid).", "");
947 
948  try
949  {
950  // Process the JSON change array, loop through each item
951  for (int x = 0; x < root.size(); x++) {
952  // Get each change
953  Json::Value change = root[x];
954  string root_key = change["key"][(uint)0].asString();
955 
956  // Process each type of change
957  if (root_key == "clips")
958  // Apply to CLIPS
959  apply_json_to_clips(change);
960 
961  else if (root_key == "effects")
962  // Apply to EFFECTS
963  apply_json_to_effects(change);
964 
965  else
966  // Apply to TIMELINE
967  apply_json_to_timeline(change);
968 
969  }
970  }
971  catch (const exception& e)
972  {
973  // Error parsing JSON (or missing keys)
974  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
975  }
976 
977  // Adjust cache (in case something changed)
978  final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 4, info.width, info.height, info.sample_rate, info.channels);
979 }
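// Example diff (illustrative; "C1", "E1", and the "position" property are
// hypothetical values): update one clip by id, then delete one effect by id.
//   [
//     { "type": "update", "key": ["clips",   {"id": "C1"}], "value": { "position": 5.0 } },
//     { "type": "delete", "key": ["effects", {"id": "E1"}], "value": {} }
//   ]
// The key/type/value structure matches apply_json_to_clips() and
// apply_json_to_effects() below.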
980 
981 // Apply JSON diff to clips
982 void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {
983 
984  // Get key and type of change
985  string change_type = change["type"].asString();
986  string clip_id = "";
987  Clip *existing_clip = NULL;
988 
989  // Find id of clip (if any)
990  for (int x = 0; x < change["key"].size(); x++) {
991  // Get each change
992  Json::Value key_part = change["key"][x];
993 
994  if (key_part.isObject()) {
995  // Check for id
996  if (!key_part["id"].isNull()) {
997  // Set the id
998  clip_id = key_part["id"].asString();
999 
1000  // Find matching clip in timeline (if any)
1001  list<Clip*>::iterator clip_itr;
1002  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1003  {
1004  // Get clip object from the iterator
1005  Clip *c = (*clip_itr);
1006  if (c->Id() == clip_id) {
1007  existing_clip = c;
1008  break; // clip found, exit loop
1009  }
1010  }
1011  break; // id found, exit loop
1012  }
1013  }
1014  }
1015 
1016  // Check for a more specific key (targeting this clip's effects)
1017  // For example: ["clips", {"id":123}, "effects", {"id":432}]
1018  if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
1019  {
1020  // This change is actually targeting a specific effect under a clip (and not the clip)
1021  EffectBase *existing_effect = NULL;
1022  Json::Value key_part = change["key"][3];
1023 
1024  if (key_part.isObject()) {
1025  // Check for id
1026  if (!key_part["id"].isNull())
1027  {
1028  // Set the id
1029  string effect_id = key_part["id"].asString();
1030 
1031  // Find matching effect in timeline (if any)
1032  list<EffectBase*>::iterator effect_itr;
1033  for (effect_itr=existing_clip->Effects().begin(); effect_itr != existing_clip->Effects().end(); ++effect_itr)
1034  {
1035  // Get effect object from the iterator
1036  EffectBase *e = (*effect_itr);
1037  if (e->Id() == effect_id) {
1038  existing_effect = e;
1039 
1040  // Apply the change to the effect directly
1041  apply_json_to_effects(change, existing_effect);
1042  return; // effect found, don't update clip
1043  }
1044  }
1045  }
1046  }
1047  }
1048 
1049  // Determine type of change operation
1050  if (change_type == "insert") {
1051 
1052  // Create new clip
1053  Clip *clip = new Clip();
1054  clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
1055  AddClip(clip); // Add clip to timeline
1056 
1057  } else if (change_type == "update") {
1058 
1059  // Update existing clip
1060  if (existing_clip)
1061  existing_clip->SetJsonValue(change["value"]); // Update clip properties from JSON
1062 
1063  } else if (change_type == "delete") {
1064 
1065  // Remove existing clip
1066  if (existing_clip)
1067  RemoveClip(existing_clip); // Remove clip from timeline
1068 
1069  }
1070 
1071 }
1072 
1073 // Apply JSON diff to effects
1074 void Timeline::apply_json_to_effects(Json::Value change) throw(InvalidJSONKey) {
1075 
1076  // Get key and type of change
1077  string change_type = change["type"].asString();
1078  EffectBase *existing_effect = NULL;
1079 
1080  // Find id of an effect (if any)
1081  for (int x = 0; x < change["key"].size(); x++) {
1082  // Get each change
1083  Json::Value key_part = change["key"][x];
1084 
1085  if (key_part.isObject()) {
1086  // Check for id
1087  if (!key_part["id"].isNull())
1088  {
1089  // Set the id
1090  string effect_id = key_part["id"].asString();
1091 
1092  // Find matching effect in timeline (if any)
1093  list<EffectBase*>::iterator effect_itr;
1094  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1095  {
1096  // Get effect object from the iterator
1097  EffectBase *e = (*effect_itr);
1098  if (e->Id() == effect_id) {
1099  existing_effect = e;
1100  break; // effect found, exit loop
1101  }
1102  }
1103  break; // id found, exit loop
1104  }
1105  }
1106  }
1107 
1108  // Now that we found the effect, apply the change to it
1109  if (existing_effect || change_type == "insert")
1110  // Apply change to effect
1111  apply_json_to_effects(change, existing_effect);
1112 }
1113 
1114 // Apply JSON diff to effects (if you already know which effect needs to be updated)
1115 void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) throw(InvalidJSONKey) {
1116 
1117  // Get key and type of change
1118  string change_type = change["type"].asString();
1119 
1120  // Determine type of change operation
1121  if (change_type == "insert") {
1122 
1123  // Determine type of effect
1124  string effect_type = change["value"]["type"].asString();
1125 
1126  // Create Effect
1127  EffectBase *e = NULL;
1128 
1129  // Init the matching effect object
1130  if (effect_type == "Blur")
1131  e = new Blur();
1132 
1133  else if (effect_type == "Brightness")
1134  e = new Brightness();
1135 
1136  else if (effect_type == "ChromaKey")
1137  e = new ChromaKey();
1138 
1139  else if (effect_type == "Deinterlace")
1140  e = new Deinterlace();
1141 
1142  else if (effect_type == "Mask")
1143  e = new Mask();
1144 
1145  else if (effect_type == "Negate")
1146  e = new Negate();
1147 
1148  else if (effect_type == "Saturation")
1149  e = new Saturation();
1150 
1151  // Load Json into Effect (guard against unrecognized effect types, which leave e NULL)
1152  if (e) e->SetJsonValue(change["value"]);
1153 
1154  // Add Effect to Timeline
1155  if (e) AddEffect(e);
1156 
1157  } else if (change_type == "update") {
1158 
1159  // Update existing effect
1160  if (existing_effect)
1161  existing_effect->SetJsonValue(change["value"]); // Update effect properties from JSON
1162 
1163  } else if (change_type == "delete") {
1164 
1165  // Remove existing effect
1166  if (existing_effect)
1167  RemoveEffect(existing_effect); // Remove effect from timeline
1168 
1169  }
1170 }
1171 
1172 // Apply JSON diff to timeline properties
1173 void Timeline::apply_json_to_timeline(Json::Value change) throw(InvalidJSONKey) {
1174 
1175  // Get key and type of change
1176  string change_type = change["type"].asString();
1177  string root_key = change["key"][(uint)0].asString();
1178  string sub_key = "";
1179  if (change["key"].size() >= 2)
1180  sub_key = change["key"][(uint)1].asString();
1181 
1182  // Determine type of change operation
1183  if (change_type == "insert" || change_type == "update") {
1184 
1185  // INSERT / UPDATE
1186  // Check for valid property
1187  if (root_key == "color")
1188  // Set color
1189  color.SetJsonValue(change["value"]);
1190  else if (root_key == "viewport_scale")
1191  // Set viewport scale
1192  viewport_scale.SetJsonValue(change["value"]);
1193  else if (root_key == "viewport_x")
1194  // Set viewport x offset
1195  viewport_x.SetJsonValue(change["value"]);
1196  else if (root_key == "viewport_y")
1197  // Set viewport y offset
1198  viewport_y.SetJsonValue(change["value"]);
1199  else if (root_key == "duration") { /* Ignore duration changes for now */ }
1200 
1201  else if (root_key == "width")
1202  // Set width
1203  info.width = change["value"].asInt();
1204  else if (root_key == "height")
1205  // Set height
1206  info.height = change["value"].asInt();
1207  else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
1208  // Set fps fraction
1209  if (!change["value"]["num"].isNull())
1210  info.fps.num = change["value"]["num"].asInt();
1211  if (!change["value"]["den"].isNull())
1212  info.fps.den = change["value"]["den"].asInt();
1213  }
1214  else if (root_key == "fps" && sub_key == "num")
1215  // Set fps.num
1216  info.fps.num = change["value"].asInt();
1217  else if (root_key == "fps" && sub_key == "den")
1218  // Set fps.den
1219  info.fps.den = change["value"].asInt();
1220  else if (root_key == "sample_rate")
1221  // Set sample rate
1222  info.sample_rate = change["value"].asInt();
1223  else if (root_key == "channels")
1224  // Set channels
1225  info.channels = change["value"].asInt();
1226  else if (root_key == "channel_layout")
1227  // Set channel layout
1228  info.channel_layout = (ChannelLayout) change["value"].asInt();
1229 
1230  else
1231 
1232  // Error parsing JSON (or missing keys)
1233  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1234 
1235 
1236  } else if (change["type"].asString() == "delete") {
1237 
1238  // DELETE / RESET
1239  // Reset the following properties (since we can't delete them)
1240  if (root_key == "color") {
1241  color = Color();
1242  color.red = Keyframe(0.0);
1243  color.green = Keyframe(0.0);
1244  color.blue = Keyframe(0.0);
1245  }
1246  else if (root_key == "viewport_scale")
1247  viewport_scale = Keyframe(1.0);
1248  else if (root_key == "viewport_x")
1249  viewport_x = Keyframe(0.0);
1250  else if (root_key == "viewport_y")
1251  viewport_y = Keyframe(0.0);
1252  else
1253  // Error parsing JSON (or missing keys)
1254  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1255 
1256  }
1257 
1258 }
1259 
1260 
1261 
1262 