mirror of https://github.com/Zulko/moviepy.git synced 2021-07-27 01:17:47 +03:00

Add docstrings linting with 'pydocstyle' using 'flake8' (#1483)

* Checkpoint

* Continue refactoring

* Docstrings linting completed

* Fix error in setup

* Fix script docstring

* Update setup docstring

* Remove noqas from fallback functions in FXs

* Apply suggestions from code review

* Fix inconsistencies in arguments

* Remove indentation from examples in function

* Minor inconsistencies
This commit is contained in:
Álvaro Mondéjar
2021-01-22 22:34:21 +01:00
committed by GitHub
parent 143618cf5f
commit 736abcc2fe
83 changed files with 584 additions and 650 deletions

View File

@@ -6,14 +6,13 @@ serious historical study here: https://www.youtube.com/watch?v=zvCvOC2VwDc
Here is what we do:
0- Get the video of a dancing knight, and a (Creative Commons) audio music file.
1- load the audio file and automatically find the tempo
2- load the video and automatically find a segment that loops well
3- extract this segment, slow it down so that it matches the audio tempo,
and make it loop forever.
4- Symmetrize this segment so that we will get two knights instead of one
5- Add a title screen and some credits, write to a file.
0. Get the video of a dancing knight, and a (Creative Commons) audio music file.
1. Load the audio file and automatically find the tempo.
2. Load the video and automatically find a segment that loops well
3. Extract this segment, slow it down so that it matches the audio tempo, and make
it loop forever.
4. Symmetrize this segment so that we will get two knights instead of one
5. Add a title screen and some credits, write to a file.
This example has been originally edited in an IPython Notebook, which makes it
easy to preview and fine-tune each part of the editing.

View File

@@ -8,7 +8,7 @@ w, h = moviesize = (720, 380)
duration = 1
def f(t, size, a=np.pi / 3, thickness=20):
def f(t, size, a=np.pi / 3, thickness=20): # noqa D103
w, h = size
v = thickness * np.array([np.cos(a), np.sin(a)])[::-1]
center = [int(t * w / duration), h / 2]

View File

@@ -19,7 +19,7 @@ cvc = CompositeVideoClip([txtClip.with_position("center")], size=screensize)
rotMatrix = lambda a: np.array([[np.cos(a), np.sin(a)], [-np.sin(a), np.cos(a)]])
def vortex(screenpos, i, nletters):
def vortex(screenpos, i, nletters): # noqa D103
d = lambda t: 1.0 / (0.3 + t ** 8) # damping
a = i * np.pi / nletters # angle of the movement
v = rotMatrix(a).dot([-1, 0])
@@ -28,19 +28,19 @@ def vortex(screenpos, i, nletters):
return lambda t: screenpos + 400 * d(t) * rotMatrix(0.5 * d(t) * a).dot(v)
def cascade(screenpos, i, nletters):
def cascade(screenpos, i, nletters): # noqa D103
v = np.array([0, -1])
d = lambda t: 1 if t < 0 else abs(np.sinc(t) / (1 + t ** 4))
return lambda t: screenpos + v * 400 * d(t - 0.15 * i)
def arrive(screenpos, i, nletters):
def arrive(screenpos, i, nletters): # noqa D103
v = np.array([-1, 0])
d = lambda t: max(0, 3 - 3 * t)
return lambda t: screenpos - 400 * v * d(t - 0.2 * i)
def vortexout(screenpos, i, nletters):
def vortexout(screenpos, i, nletters): # noqa D103
d = lambda t: max(0, t) # damping
a = i * np.pi / nletters # angle of the movement
v = rotMatrix(a).dot([-1, 0])
@@ -59,7 +59,7 @@ letters = find_objects(cvc) # a list of ImageClips
# WE ANIMATE THE LETTERS
def moveLetters(letters, funcpos):
def moveLetters(letters, funcpos): # noqa D103
return [
letter.set_pos(funcpos(letter.screenpos, i, len(letters)))
for i, letter in enumerate(letters)

View File

@@ -1,4 +1,4 @@
""" requires scikit-image installed (for vfx.painting) """
"""Requires scikit-image installed (for ``vfx.painting``)."""
from moviepy import *

View File

@@ -1,4 +1,4 @@
""" A simple test script on how to put a soundtrack to a movie """
"""A simple test script on how to put a soundtrack to a movie."""
from moviepy import *

View File

@@ -3,7 +3,6 @@ Description of the video:
Mimic of Star Wars' opening title. A text with a (false)
perspective effect goes towards the end of space, on a
background made of stars. Slight fading effect on the text.
"""
import numpy as np
@@ -74,7 +73,7 @@ moving_txt.mask = moving_txt.mask.image_transform(fl)
def trapzWarp(pic, cx, cy, is_mask=False):
""" Complicated function (will be latex packaged as a fx) """
"""Complicated function (will be latex packaged as a fx)."""
Y, X = pic.shape[:2]
src = np.array([[0, 0], [X, 0], [X, Y], [0, Y]])
dst = np.array([[cx * X, cy * Y], [(1 - cx) * X, cy * Y], [X, Y], [0, Y]])
@@ -124,8 +123,7 @@ final.with_duration(8).write_videofile("starworms.avi", fps=5)
def annotate(clip, txt, txt_color="white", bg_color=(0, 0, 255)):
""" Writes a text at the bottom of the clip. """
"""Writes a text at the bottom of the clip."""
txtclip = TextClip(txt, font_size=20, font="Ubuntu-bold", color=txt_color)
txtclip = txtclip.on_color(
@@ -137,11 +135,11 @@ def annotate(clip, txt, txt_color="white", bg_color=(0, 0, 255)):
return cvc.with_duration(clip.duration)
def resizeCenter(clip):
def resizeCenter(clip): # noqa D103
return clip.resize(height=h).set_pos("center")
def composeCenter(clip):
def composeCenter(clip): # noqa D103
return CompositeVideoClip([clip.set_pos("center")], size=moviesize)

View File

@@ -1,15 +1,14 @@
"""Parses url below to extract latest image magick version (major version 6.9),
to feed it into CI system. Not the best way for reproducible builds, but it's
preferred for now over storing imagemagick installer into the GIT repository.
"""
import re
from urllib import request
url = "https://legacy.imagemagick.org/script/index.php"
"""This little script parses url above to extract latest image magick version
(major version 6.9), to feed it into CI system. Not the best way for reproducible
builds, but it's preferred for now over storing imagemagick installer into the
git repository
"""
response = request.urlopen(url)
html = response.read().decode(r"utf-8")
r = re.compile(r"6\.9\.[0-9]+-[0-9]+")

View File

@@ -1,7 +1,5 @@
"""
This module implements the central object of MoviePy, the Clip, and
all the methods that are common to the two subclasses of Clip, VideoClip
and AudioClip.
"""Implements the central object of MoviePy, the Clip, and all the methods that
are common to the two subclasses of Clip, VideoClip and AudioClip.
"""
import copy as _copy
@@ -20,27 +18,22 @@ from moviepy.decorators import (
class Clip:
"""
Base class of all clips (VideoClips and AudioClips).
"""Base class of all clips (VideoClips and AudioClips).
Attributes
-----------
----------
start:
start : float
When the clip is included in a composition, time of the
composition at which the clip starts playing (in seconds).
end:
end : float
When the clip is included in a composition, time of the
composition at which the clip stops playing (in seconds).
duration:
duration : float
Duration of the clip (in seconds). Some clips are infinite, in
this case their duration will be ``None``.
"""
# prefix for all temporary video and audio files.
@@ -60,16 +53,13 @@ class Clip:
self.memoized_frame = None
def copy(self):
"""
Allows the usage of ``.copy()`` in clips as chained methods invocation.
"""
"""Allows the usage of ``.copy()`` in clips as chained methods invocation."""
return _copy.copy(self)
@convert_parameter_to_seconds(["t"])
def get_frame(self, t):
"""
Gets a numpy array representing the RGB picture of the clip at time t
or (mono or stereo) value for a sound clip
"""Gets a numpy array representing the RGB picture of the clip at time ``t``
or (mono or stereo) value for a sound clip.
"""
# Coming soon: smart error handling for debugging at this point
if self.memoize:
@@ -91,22 +81,22 @@ class Clip:
(through function ``func``) of the frames of the current clip.
Parameters
-----------
----------
func
func : function
A function with signature (gf,t -> frame) where ``gf`` will
represent the current clip's ``get_frame`` method,
i.e. ``gf`` is a function (t->image). Parameter `t` is a time
in seconds, `frame` is a picture (=Numpy array) which will be
returned by the transformed clip (see examples below).
apply_to
apply_to : {"mask", "audio", ["mask", "audio"]}, optional
Can be either ``'mask'``, or ``'audio'``, or
``['mask','audio']``.
Specifies if the filter should also be applied to the
audio or the mask of the clip, if any.
keep_duration
keep_duration : bool, optional
Set to True if the transformation does not change the
``duration`` of the clip.
@@ -151,17 +141,17 @@ class Clip:
time `time_func(t)`.
Parameters
-----------
----------
time_func:
A function ``t -> new_t``
time_func : function
A function ``t -> new_t``.
apply_to:
apply_to : {"mask", "audio", ["mask", "audio"]}, optional
Can be either 'mask', or 'audio', or ['mask','audio'].
Specifies if the filter ``transform`` should also be applied to the
audio or the mask of the clip, if any.
keep_duration:
keep_duration : bool, optional
``False`` (default) if the transformation modifies the
``duration`` of the clip.
@@ -185,10 +175,7 @@ class Clip:
)
def fx(self, func, *args, **kwargs):
"""
Returns the result of ``func(self, *args, **kwargs)``.
for instance
"""Returns the result of ``func(self, *args, **kwargs)``, for instance
>>> new_clip = clip.fx(resize, 0.2, method="bilinear")
@@ -204,7 +191,6 @@ class Clip:
>>> # Is equivalent, but clearer than
>>> mirrorx(resize(multiply_volume(clip, 0.5), 0.3))
"""
return func(self, *args, **kwargs)
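
A minimal sketch of the chained ``fx`` call documented above; ``myvideo.mp4`` is a placeholder file name and the star import follows the style of the other examples in this commit:

from moviepy import *

clip = VideoFileClip("myvideo.mp4")  # hypothetical input file
# chain three effects instead of nesting the calls
new_clip = (clip.fx(afx.multiply_volume, 0.5)
                .fx(vfx.resize, 0.3)
                .fx(vfx.mirror_x))
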
@apply_to_mask
@@ -212,8 +198,7 @@ class Clip:
@convert_parameter_to_seconds(["t"])
@outplace
def with_start(self, t, change_end=True):
"""
Returns a copy of the clip, with the ``start`` attribute set
"""Returns a copy of the clip, with the ``start`` attribute set
to ``t``, which can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
@@ -229,7 +214,6 @@ class Clip:
These changes are also applied to the ``audio`` and ``mask``
clips of the current clip, if they exist.
"""
self.start = t
if (self.duration is not None) and change_end:
self.end = t + self.duration
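
A hedged sketch of ``with_start`` in a composition; sizes, colors and the output name are arbitrary placeholders:

from moviepy import *

background = ColorClip((640, 360), color=(0, 0, 0)).with_duration(10)
overlay = (ColorClip((120, 120), color=(255, 0, 0))
           .with_duration(4)
           .with_start(3)              # change_end=True, so the overlay ends at t=7s
           .with_position("center"))
comp = CompositeVideoClip([background, overlay])
comp.write_videofile("composition.mp4", fps=24)
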
@@ -241,11 +225,9 @@ class Clip:
@convert_parameter_to_seconds(["t"])
@outplace
def with_end(self, t):
"""
Returns a copy of the clip, with the ``end`` attribute set to
``t``, which can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
Also sets the duration of the mask and audio, if any,
"""Returns a copy of the clip, with the ``end`` attribute set to ``t``, which
can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as
a string: '01:03:05.35'. Also sets the duration of the mask and audio, if any,
of the returned clip.
"""
self.end = t
@@ -262,12 +244,11 @@ class Clip:
@convert_parameter_to_seconds(["t"])
@outplace
def with_duration(self, duration, change_end=True):
"""
Returns a copy of the clip, with the ``duration`` attribute
set to ``t``, which can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
Also sets the duration of the mask and audio, if any, of the
returned clip.
"""Returns a copy of the clip, with the ``duration`` attribute set to ``t``,
which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec),
or as a string: '01:03:05.35'. Also sets the duration of the mask and audio,
if any, of the returned clip.
If change_end is False, the start attribute of the clip will
be modified in function of the duration and the preset end
of the clip.
@@ -283,8 +264,7 @@ class Clip:
@outplace
def with_make_frame(self, make_frame):
"""
Sets a ``make_frame`` attribute for the clip. Useful for setting
"""Sets a ``make_frame`` attribute for the clip. Useful for setting
arbitrary/complicated videoclips.
"""
self.make_frame = make_frame
@@ -292,10 +272,11 @@ class Clip:
def with_fps(self, fps, change_duration=False):
"""Returns a copy of the clip with a new default fps for functions like
write_videofile, iterframe, etc.
If ``change_duration=True``, then the video speed will change to match the
new fps (conserving all frames 1:1). For example, if the fps is
halved in this mode, the duration will be doubled."""
halved in this mode, the duration will be doubled.
"""
if change_duration:
from moviepy.video.fx.multiply_speed import multiply_speed
@@ -308,25 +289,22 @@ class Clip:
@outplace
def with_is_mask(self, is_mask):
""" Says wheter the clip is a mask or not (is_mask is a boolean)"""
"""Says wheter the clip is a mask or not (is_mask is a boolean)."""
self.is_mask = is_mask
@outplace
def with_memoize(self, memoize):
""" Sets wheter the clip should keep the last frame read in memory """
"""Sets wheter the clip should keep the last frame read in memory."""
self.memoize = memoize
@convert_parameter_to_seconds(["t"])
def is_playing(self, t):
"""If ``t`` is a time, returns true if t is between the start and the end
of the clip. t can be expressed in seconds (15.35), in (min, sec), in
(hour, min, sec), or as a string: '01:03:05.35'. If t is a numpy array,
returns False if none of the t is in the clip, else returns a vector
[b_1, b_2, b_3...] where b_i is true iff t_i is in the clip.
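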
"""
If t is a time, returns true if t is between the start and
the end of the clip. t can be expressed in seconds (15.35),
in (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'.
If t is a numpy array, returns False if none of the t is in
theclip, else returns a vector [b_1, b_2, b_3...] where b_i
is true iff tti is in the clip.
"""
if isinstance(t, np.ndarray):
# is the whole list of t outside the clip ?
tmin, tmax = t.min(), t.max()
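
For illustration, a small sketch of ``is_playing`` with a scalar time and with a numpy array (clip parameters are arbitrary):

import numpy as np
from moviepy import ColorClip

clip = ColorClip((64, 64), color=(0, 0, 0)).with_duration(5).with_start(2)  # plays from t=2s to t=7s
clip.is_playing(3)                           # True
clip.is_playing(np.array([0.5, 3.0, 9.0]))   # boolean vector; only 3.0 falls inside the clip
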
@@ -351,27 +329,25 @@ class Clip:
@apply_to_mask
@apply_to_audio
def subclip(self, start_time=0, end_time=None):
"""
Returns a clip playing the content of the current clip
between times ``start_time`` and ``end_time``, which can be expressed
in seconds (15.35), in (min, sec), in (hour, min, sec), or as a
string: '01:03:05.35'.
If ``end_time`` is not provided, it is assumed to be the duration
of the clip (potentially infinite).
If ``end_time`` is a negative value, it is reset to
``clip.duration + end_time. ``. For instance: ::
"""Returns a clip playing the content of the current clip between times
``start_time`` and ``end_time``, which can be expressed in seconds (15.35),
in (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'.
If ``end_time`` is not provided, it is assumed to be the duration of the clip
(potentially infinite).
If ``end_time`` is negative, it is reset to ``clip.duration + end_time``.
For instance::
>>> # cut the last two seconds of the clip:
>>> new_clip = clip.subclip(0,-2)
If ``end_time`` is provided or if the clip has a duration attribute,
the duration of the returned clip is set automatically.
If ``end_time`` is provided or if the clip has a duration attribute, the
duration of the returned clip is set automatically.
The ``mask`` and ``audio`` of the resulting subclip will be
subclips of ``mask`` and ``audio`` the original clip, if
they exist.
The ``mask`` and ``audio`` of the resulting subclip will be subclips of ``mask``
and ``audio`` the original clip, if they exist.
"""
if start_time < 0:
# Make this more Python-like, a negative value means to move
# backward from the end of the clip
@@ -425,7 +401,6 @@ class Clip:
The resulting clip's ``audio`` and ``mask`` will also be cutout
if they exist.
"""
new_clip = self.time_transform(
lambda t: t + (t >= start_time) * (end_time - start_time),
apply_to=["audio", "mask"],
@@ -454,7 +429,7 @@ class Clip:
Use dtype="uint8" when using the pictures to write video, images...
Examples
---------
--------
>>> # prints the maximum of red that is contained
>>> # on the first line of each frame of the clip.
@@ -478,10 +453,7 @@ class Clip:
yield frame
def close(self):
"""
Release any resources that are in use.
"""
"""Release any resources that are in use."""
# Implementation note for subclasses:
#
# * Memory-based resources can be left to the garbage-collector.

View File

@@ -1,12 +1,10 @@
"""
Imports everything that you need from the MoviePy submodules so that every thing
"""Imports everything that you need from the MoviePy submodules so that every thing
can be directly imported like `from moviepy import VideoFileClip`.
In particular it loads all effects from the video.fx and audio.fx folders
and turns them into VideoClip and AudioClip methods, so that instead of
``clip.fx(vfx.resize, 2)`` or ``vfx.resize(clip, 2)``
you can write ``clip.resize(2)``.
"""
import inspect

View File

@@ -23,7 +23,7 @@ class AudioClip(Clip):
sound back into the bounds at conversion time, without much impact).
Parameters
-----------
----------
make_frame
A function `t-> frame at time t`. The frame does not mean much
@@ -38,7 +38,7 @@ class AudioClip(Clip):
Number of channels (one or two for mono or stereo).
Examples
---------
--------
>>> # Plays the note A in mono (a sine wave of frequency 440 Hz)
>>> import numpy as np
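
The truncated example above can be completed along these lines (the duration and the output name are placeholders):

import numpy as np
from moviepy import AudioClip

# 440 Hz sine wave; make_frame receives a time (or an array of times) in seconds
make_frame = lambda t: np.sin(440 * 2 * np.pi * t)
note_a = AudioClip(make_frame, duration=2, fps=44100)
note_a.write_audiofile("note_a.wav")
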
@@ -113,7 +113,7 @@ class AudioClip(Clip):
or written in a wav file. See ``AudioClip.preview``.
Parameters
------------
----------
fps
Frame rate of the sound for the conversion.
@@ -158,6 +158,7 @@ class AudioClip(Clip):
return snd_array
def max_volume(self, stereo=False, chunksize=50000, logger=None):
"""Returns the maximum volume level of the clip."""
# max volume separated by channels if ``stereo`` and not mono
stereo = stereo and self.nchannels > 1
@@ -187,7 +188,7 @@ class AudioClip(Clip):
Parameters
-----------
----------
filename
Name of the output file, as a string or a path-like object.
@@ -259,7 +260,7 @@ class AudioArrayClip(AudioClip):
An audio clip made from a sound array.
Parameters
-----------
----------
array
A Numpy array representing the sound, of size Nx1 for mono,
@@ -279,9 +280,9 @@ class AudioArrayClip(AudioClip):
self.duration = 1.0 * len(array) / fps
def make_frame(t):
"""complicated, but must be able to handle the case where t
is a list of the form sin(t)"""
"""Complicated, but must be able to handle the case where t
is a list of the form sin(t).
"""
if isinstance(t, np.ndarray):
array_inds = np.round(self.fps * t).astype(int)
in_array = (array_inds >= 0) & (array_inds < len(self.array))
@@ -305,14 +306,13 @@ class CompositeAudioClip(AudioClip):
An audio clip made by putting together several audio clips.
Parameters
------------
----------
clips
List of audio clips, which may start playing at different times or
together, depends on their ``start`` attributes. If all have their
``duration`` attribute set, the duration of the composite clip is
computed automatically.
"""
def __init__(self, clips):
@@ -336,13 +336,16 @@ class CompositeAudioClip(AudioClip):
@property
def starts(self):
"""Returns starting times for all clips in the composition."""
return (clip.start for clip in self.clips)
@property
def ends(self):
"""Returns ending times for all clips in the composition."""
return (clip.end for clip in self.clips)
def make_frame(self, t):
"""Renders a frame for the composition for the time ``t``."""
played_parts = [clip.is_playing(t) for clip in self.clips]
sounds = [

View File

@@ -6,7 +6,8 @@ from moviepy.decorators import audio_video_fx
@audio_video_fx
def audio_fadein(clip, duration):
"""Return an audio (or video) clip that is first mute, then the
sound arrives progressively over ``duration`` seconds."""
sound arrives progressively over ``duration`` seconds.
"""
def fading(get_frame, t):
frame = get_frame(t)

View File

@@ -7,7 +7,8 @@ from moviepy.decorators import audio_video_fx, requires_duration
@requires_duration
def audio_fadeout(clip, duration):
"""Return a sound clip where the sound fades out progressively
over ``duration`` seconds at the end of the clip."""
over ``duration`` seconds at the end of the clip.
"""
def fading(get_frame, t):
frame = get_frame(t)
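
A hedged sketch applying both audio fades; thanks to the ``audio_video_fx`` decorator the same call also works on a video clip (file names are placeholders):

from moviepy import *

music = AudioFileClip("music.ogg")
faded = music.fx(afx.audio_fadein, 3).fx(afx.audio_fadeout, 5)

video = VideoFileClip("myvideo.mp4").fx(afx.audio_fadein, 3)  # only the audio track is modified
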

View File

@@ -10,7 +10,7 @@ def audio_loop(clip, n_loops=None, duration=None):
`n_loops` times, or during `duration` seconds.
Examples
========
--------
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4')

View File

@@ -10,13 +10,12 @@ def audio_normalize(clip):
so that the maximum volume is at 0db, the maximum achievable volume.
Examples
========
--------
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4').fx(afx.audio_normalize)
"""
max_volume = clip.max_volume()
if max_volume == 0:
# Nothing to normalize.

View File

@@ -3,14 +3,13 @@ from moviepy.decorators import audio_video_fx
@audio_video_fx
def multiply_stereo_volume(clip, left=1, right=1):
"""
For a stereo audioclip, this function enables to change the volume
"""For a stereo audioclip, this function enables to change the volume
of the left and right channel separately (with the factors `left`
and `right`). Makes a stereo audio clip in which the volume of left
and right is controllable.
Examples
========
--------
>>> from moviepy import AudioFileClip
>>> music = AudioFileClip('music.ogg')
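
Continuing the truncated example, a sketch of per-channel volume control (the audio file is a placeholder and is assumed to be stereo):

from moviepy import *

music = AudioFileClip("music.ogg")
right_only = music.fx(afx.multiply_stereo_volume, left=0, right=1)   # mute the left channel
louder_left = music.fx(afx.multiply_stereo_volume, left=2, right=1)  # double the left channel
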

View File

@@ -10,7 +10,7 @@ def multiply_volume(clip, factor):
so you can just write ``clip.multiply_volume(2)``
Examples
---------
--------
>>> from moviepy import AudioFileClip
>>> music = AudioFileClip('music.ogg')

View File

@@ -4,7 +4,6 @@ from moviepy.decorators import convert_path_to_string
class AudioFileClip(AudioClip):
"""
An audio clip read from a sound file, or an array.
The whole file is not loaded in memory. Instead, only a portion is
@@ -13,7 +12,7 @@ class AudioFileClip(AudioClip):
backward and forward.
Parameters
------------
----------
filename
Either a soundfile name (of any extension supported by ffmpeg)
@@ -27,7 +26,7 @@ class AudioFileClip(AudioClip):
Attributes
------------
----------
nbytes
Number of bits per frame of the original audio file.
@@ -46,7 +45,7 @@ class AudioFileClip(AudioClip):
will not be cleaned up until the process ends.
Examples
----------
--------
>>> snd = AudioFileClip("song.wav")
>>> snd.close()
@@ -77,7 +76,7 @@ class AudioFileClip(AudioClip):
self.nchannels = self.reader.nchannels
def close(self):
""" Close the internal reader. """
"""Close the internal reader."""
if self.reader:
self.reader.close()
self.reader = None

View File

@@ -1,3 +1 @@
"""
Class and methods to read, write, preview audiofiles.
"""
"""Class and methods to read, write, preview audiofiles."""

View File

@@ -12,7 +12,7 @@ class FFMPEG_AudioWriter:
A class to write an AudioClip into an audio file.
Parameters
------------
----------
filename
Name of any video or audio file, like ``video.mp4`` or ``sound.wav`` etc.
@@ -89,6 +89,7 @@ class FFMPEG_AudioWriter:
self.proc = sp.Popen(cmd, **popen_params)
def write_frames(self, frames_array):
"""TODO: add documentation"""
try:
self.proc.stdin.write(frames_array.tobytes())
except IOError as err:
@@ -141,6 +142,7 @@ class FFMPEG_AudioWriter:
raise IOError(error)
def close(self):
"""Closes the writer, terminating the subprocess if is still alive."""
if hasattr(self, "proc") and self.proc:
self.proc.stdin.close()
self.proc.stdin = None
@@ -181,7 +183,6 @@ def ffmpeg_audiowrite(
A function that wraps the FFMPEG_AudioWriter to write an AudioClip
to a file.
"""
if write_logfile:
logfile = open(filename + ".log", "w+")
else:

View File

@@ -18,7 +18,7 @@ def preview(
Plays the sound clip with pygame.
Parameters
-----------
----------
fps
Frame rate of the sound. 44100 gives top quality, but may cause
@@ -41,7 +41,6 @@ def preview(
video and audio during ``VideoClip.preview()``.
"""
pg.mixer.quit()
pg.mixer.init(fps, -8 * nbytes, clip.nchannels, 1024)

View File

@@ -15,7 +15,7 @@ class FFMPEG_AudioReader:
raw data.
Parameters
------------
----------
filename
Name of any video or audio file, like ``video.mp4`` or
@@ -71,8 +71,7 @@ class FFMPEG_AudioReader:
self.buffer_around(1)
def initialize(self, start_time=0):
""" Opens the file, creates the pipe. """
"""Opens the file, creates the pipe."""
self.close() # if any
if start_time != 0:
@@ -121,11 +120,13 @@ class FFMPEG_AudioReader:
self.pos = np.round(self.fps * start_time)
def skip_chunk(self, chunksize):
"""TODO: add documentation"""
_ = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
self.proc.stdout.flush()
self.pos = self.pos + chunksize
def read_chunk(self, chunksize):
"""TODO: add documentation"""
# chunksize is not being autoconverted from float to int
chunksize = int(round(chunksize))
s = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
@@ -164,6 +165,7 @@ class FFMPEG_AudioReader:
self.pos = pos
def get_frame(self, tt):
"""TODO: add documentation"""
if isinstance(tt, np.ndarray):
# lazy implementation, but should not cause problems in
# 99.99 % of the cases
@@ -229,7 +231,6 @@ class FFMPEG_AudioReader:
Fills the buffer with frames, centered on ``frame_number``
if possible
"""
# start-frame for the buffer
new_bufferstart = max(0, frame_number - self.buffersize // 2)
@@ -251,6 +252,7 @@ class FFMPEG_AudioReader:
self.buffer_startframe = new_bufferstart
def close(self):
"""Closes the reader, terminating the subprocess if is still alive."""
if self.proc:
if self.proc.poll() is None:
self.proc.terminate()

View File

@@ -1,3 +1 @@
"""
Tools to better process/edit/cut audio.
"""
"""Tools to better processing and edition of audio."""

View File

@@ -21,6 +21,7 @@ IMAGEMAGICK_BINARY = os.getenv("IMAGEMAGICK_BINARY", "auto-detect")
def try_cmd(cmd):
"""TODO: add documentation"""
try:
popen_params = cross_platform_popen_params(
{"stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
@@ -95,6 +96,7 @@ else:
def check():
"""TODO: add documentation"""
if try_cmd([FFMPEG_BINARY])[0]:
print(f"MoviePy: ffmpeg successfully found in '{FFMPEG_BINARY}'.")
else:

View File

@@ -1,6 +1,4 @@
"""
all decorators used in moviepy go there
"""
"""Decorators used by moviepy."""
import os
import decorator
@@ -10,7 +8,7 @@ from moviepy.tools import convert_to_seconds
@decorator.decorator
def outplace(func, clip, *args, **kwargs):
""" Applies func(clip.copy(), *args, **kwargs) and returns clip.copy()"""
"""Applies ``func(clip.copy(), *args, **kwargs)`` and returns ``clip.copy()``."""
new_clip = clip.copy()
func(new_clip, *args, **kwargs)
return new_clip
@@ -18,7 +16,7 @@ def outplace(func, clip, *args, **kwargs):
@decorator.decorator
def convert_masks_to_RGB(func, clip, *args, **kwargs):
""" If the clip is a mask, convert it to RGB before running the function """
"""If the clip is a mask, convert it to RGB before running the function."""
if clip.is_mask:
clip = clip.to_RGB()
return func(clip, *args, **kwargs)
@@ -26,9 +24,9 @@ def convert_masks_to_RGB(func, clip, *args, **kwargs):
@decorator.decorator
def apply_to_mask(func, clip, *args, **kwargs):
"""This decorator will apply the same function func to the mask of
the clip created with func"""
"""Applies the same function ``func`` to the mask of the clip created with
``func``.
"""
new_clip = func(clip, *args, **kwargs)
if getattr(new_clip, "mask", None):
new_clip.mask = func(new_clip.mask, *args, **kwargs)
@@ -37,9 +35,7 @@ def apply_to_mask(func, clip, *args, **kwargs):
@decorator.decorator
def apply_to_audio(func, clip, *args, **kwargs):
"""This decorator will apply the function func to the audio of
the clip created with func"""
"""Applies the function ``func`` to the audio of the clip created with ``func``."""
new_clip = func(clip, *args, **kwargs)
if getattr(new_clip, "audio", None):
new_clip.audio = func(new_clip.audio, *args, **kwargs)
@@ -48,8 +44,7 @@ def apply_to_audio(func, clip, *args, **kwargs):
@decorator.decorator
def requires_duration(func, clip, *args, **kwargs):
""" Raise an error if the clip has no duration."""
"""Raises an error if the clip has no duration."""
if clip.duration is None:
raise ValueError("Attribute 'duration' not set")
else:
@@ -58,8 +53,7 @@ def requires_duration(func, clip, *args, **kwargs):
@decorator.decorator
def requires_fps(func, clip, *args, **kwargs):
""" Raise an error if the clip has no fps."""
"""Raises an error if the clip has no fps."""
if not hasattr(clip, "fps") or clip.fps is None:
raise ValueError("Attribute 'fps' not set")
else:
@@ -68,13 +62,12 @@ def requires_fps(func, clip, *args, **kwargs):
@decorator.decorator
def audio_video_fx(func, clip, *args, **kwargs):
"""Use an audio function on a video/audio clip
"""Use an audio function on a video/audio clip.
This decorator indicates that the function ``func`` (audioclip -> audioclip)
can also be used on a video clip, in which case it returns a
videoclip with unmodified video and modified audio.
"""
if hasattr(clip, "audio"):
new_clip = clip.copy()
if clip.audio is not None:
@@ -85,7 +78,7 @@ def audio_video_fx(func, clip, *args, **kwargs):
def preprocess_args(fun, varnames):
""" Applies fun to variables in varnames before launching the function """
"""Applies fun to variables in varnames before launching the function."""
def wrapper(func, *args, **kwargs):
func_code = func.__code__
@@ -105,18 +98,18 @@ def preprocess_args(fun, varnames):
def convert_parameter_to_seconds(varnames):
"""Converts the specified variables to seconds"""
"""Converts the specified variables to seconds."""
return preprocess_args(convert_to_seconds, varnames)
def convert_path_to_string(varnames):
"""Converts the specified variables to a path string"""
"""Converts the specified variables to a path string."""
return preprocess_args(os.fspath, varnames)
@decorator.decorator
def add_mask_if_none(func, clip, *args, **kwargs):
""" Add a mask to the clip if there is none. """
"""Add a mask to the clip if there is none."""
if clip.mask is None:
clip = clip.add_mask()
return func(clip, *args, **kwargs)
@@ -124,7 +117,7 @@ def add_mask_if_none(func, clip, *args, **kwargs):
@decorator.decorator
def use_clip_fps_by_default(func, clip, *args, **kwargs):
""" Will use clip.fps if no fps=... is provided in **kwargs """
"""Will use ``clip.fps`` if no ``fps=...`` is provided in **kwargs**."""
def find_fps(fps):
if fps is not None:

View File

@@ -1,6 +1,6 @@
"""
This file is meant to make it easy to load the features of
MoviePy that you will use for live editing by simply typing:
Module meant to make it easy to load the features of MoviePy that you will use
for live editing by simply typing:
>>> from moviepy.editor import *
@@ -22,7 +22,7 @@ try:
except ImportError:
def sliders(*args, **kwargs):
"""NOT AVAILABLE: sliders requires matplotlib installed"""
"""NOT AVAILABLE: sliders requires matplotlib installed."""
raise ImportError("sliders requires matplotlib installed")
@@ -44,11 +44,11 @@ try:
except ImportError:
def preview(self, *args, **kwargs):
"""NOT AVAILABLE: clip.preview requires Pygame installed"""
"""NOT AVAILABLE: clip.preview requires Pygame installed."""
raise ImportError("clip.preview requires Pygame installed")
def show(self, *args, **kwargs):
"""NOT AVAILABLE: clip.show requires Pygame installed"""
"""NOT AVAILABLE: clip.show requires Pygame installed."""
raise ImportError("clip.show requires Pygame installed")
@@ -60,7 +60,7 @@ try:
except ImportError:
def preview(self, *args, **kwargs):
""" NOT AVAILABLE: clip.preview requires Pygame installed"""
"""NOT AVAILABLE: clip.preview requires Pygame installed."""
raise ImportError("clip.preview requires Pygame installed")

View File

@@ -1,6 +1,4 @@
"""
Misc. useful functions that can be used at many places in the program.
"""
"""Misc. useful functions that can be used at many places in the program."""
import os
import subprocess as sp
import warnings
@@ -53,7 +51,7 @@ def convert_to_seconds(time):
If the type of `time` is not valid,
it's returned as is.
Here are the accepted formats::
Here are the accepted formats:
>>> convert_to_seconds(15.4) # seconds
15.4
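
The other accepted formats, cut off by the hunk above, behave as in this sketch; the values follow from the (min, sec) and (hour, min, sec) arithmetic:

from moviepy.tools import convert_to_seconds

convert_to_seconds((1, 21.5))     # 81.5    -> (min, sec)
convert_to_seconds((1, 1, 2))     # 3662    -> (hour, min, sec)
convert_to_seconds("01:01:33.5")  # 3693.5  -> 'HH:MM:SS.ms' string
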
@@ -88,7 +86,7 @@ def deprecated_version_of(func, old_name):
function.
Returns
========
-------
deprecated_func
A function that does the same thing as `func`, but with a docstring
@@ -96,7 +94,7 @@ def deprecated_version_of(func, old_name):
deprecated and that you should use `func` instead.
Examples
=========
--------
>>> # The badly named method 'to_file' is replaced by 'write_file'
>>> class Clip:
@@ -105,7 +103,6 @@ def deprecated_version_of(func, old_name):
>>>
>>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file')
"""
# Detect new name of func
new_name = func.__name__
@@ -146,6 +143,14 @@ for ext in ["jpg", "jpeg", "png", "bmp", "tiff"]:
def find_extension(codec):
"""Returns the correspondent file extension for a codec.
Parameters
----------
codec : str
Video or audio codec name.
"""
if codec in extensions_dict:
# codec is already the extension
return codec

View File

@@ -11,6 +11,25 @@ CLIP_TYPES = {
def close_all_clips(objects="globals", types=("audio", "video", "image")):
"""Closes all clips in a context.
Follows different strategies retrieving the namespace from which the clips
to close will be retrieved depending on the ``objects`` argument, and filtering
by type of clips depending on the ``types`` argument.
Parameters
----------
objects : str or dict, optional
- If is a string an the value is ``"globals"``, will close all the clips
contained by the ``globals()`` namespace.
- If is a dictionary, the values of the dictionary could be clips to close,
useful if you want to use ``locals()``.
types : Iterable, optional
Set of types of clips to close, being "audio", "video" or "image" the supported
values.
"""
if objects == "globals":
objects = globals()
if hasattr(objects, "values"):
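
A hedged usage sketch of ``close_all_clips`` with ``locals()``; the import path is an assumption, since the diff view omits file names:

from moviepy import VideoFileClip
from moviepy.utils import close_all_clips  # assumed module path

def make_thumbnail():
    clip = VideoFileClip("myvideo.mp4")     # hypothetical file
    clip.save_frame("thumbnail.png", t=1)
    close_all_clips(objects=locals())       # releases the reader opened above
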

View File

@@ -1,6 +1,5 @@
"""
This module implements VideoClip (base class for video clips) and its
main subclasses:
"""Implements VideoClip (base class for video clips) and its main subclasses:
- Animated clips: VideofileClip, ImageSequenceClip
- Static image clips: ImageClip, ColorClip, TextClip,
"""
@@ -45,12 +44,11 @@ from moviepy.video.tools.drawing import blit
class VideoClip(Clip):
"""Base class for video clips.
See ``VideoFileClip``, ``ImageClip`` etc. for more user-friendly
classes.
See ``VideoFileClip``, ``ImageClip`` etc. for more user-friendly classes.
Parameters
-----------
----------
is_mask
`True` if the clip is going to be used as a mask.
@@ -114,20 +112,24 @@ class VideoClip(Clip):
@property
def w(self):
"""Returns the width of the video."""
return self.size[0]
@property
def h(self):
"""Returns the height of the video."""
return self.size[1]
@property
def aspect_ratio(self):
"""Returns the aspect ratio of the video."""
return self.w / float(self.h)
@property
@requires_duration
@requires_fps
def n_frames(self):
"""Returns the number of frames of the video."""
return int(self.duration * self.fps)
def __copy__(self):
@@ -168,9 +170,7 @@ class VideoClip(Clip):
If ``with_mask`` is ``True`` the mask is saved in
the alpha layer of the picture (only works with PNGs).
"""
im = self.get_frame(t)
if with_mask and self.mask is not None:
mask = 255 * self.mask.get_frame(t)
@@ -209,7 +209,7 @@ class VideoClip(Clip):
"""Write the clip to a videofile.
Parameters
-----------
----------
filename
Name of the video file to write in, as a string or a path-like object.
@@ -229,30 +229,19 @@ class VideoClip(Clip):
Some examples of codecs are:
``'libx264'`` (default codec for file extension ``.mp4``)
makes well-compressed videos (quality tunable using 'bitrate').
``'mpeg4'`` (other codec for extension ``.mp4``) can be an alternative
to ``'libx264'``, and produces higher quality videos by default.
``'rawvideo'`` (use file extension ``.avi``) will produce
a video of perfect quality, of possibly very huge size.
``png`` (use file extension ``.avi``) will produce a video
of perfect quality, of smaller size than with ``rawvideo``.
``'libvorbis'`` (use file extension ``.ogv``) is a nice video
format, which is completely free/ open source. However not
everyone has the codecs installed by default on their machine.
``'libvpx'`` (use file extension ``.webm``) is tiny a video
format well indicated for web videos (with HTML5). Open source.
- ``'libx264'`` (default codec for file extension ``.mp4``)
makes well-compressed videos (quality tunable using 'bitrate').
- ``'mpeg4'`` (other codec for extension ``.mp4``) can be an alternative
to ``'libx264'``, and produces higher quality videos by default.
- ``'rawvideo'`` (use file extension ``.avi``) will produce
a video of perfect quality, of possibly very huge size.
- ``png`` (use file extension ``.avi``) will produce a video
of perfect quality, of smaller size than with ``rawvideo``.
- ``'libvorbis'`` (use file extension ``.ogv``) is a nice video
format, which is completely free/ open source. However not
everyone has the codecs installed by default on their machine.
- ``'libvpx'`` (use file extension ``.webm``) is a tiny video
format well suited for web videos (with HTML5). Open source.
audio
Either ``True``, ``False``, or a file name.
@@ -313,7 +302,7 @@ class VideoClip(Clip):
Pixel format for the output video file.
Examples
========
--------
>>> from moviepy import VideoFileClip
>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
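
Completing the example, a sketch showing how the codec choices listed above map to calls (output names are placeholders):

from moviepy import VideoFileClip

clip = VideoFileClip("myvideo.mp4").subclip(100, 120)
clip.write_videofile("excerpt.mp4")                   # codec defaults to 'libx264' for .mp4
clip.write_videofile("excerpt.webm", codec="libvpx")  # small, HTML5-friendly output
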
@@ -405,7 +394,7 @@ class VideoClip(Clip):
"""Writes the videoclip to a sequence of image files.
Parameters
-----------
----------
name_format
A filename specifying the numerotation format and extension
@@ -426,13 +415,13 @@ class VideoClip(Clip):
Returns
--------
-------
names_list
A list of all the files generated.
Notes
------
-----
The resulting image sequence can be read using e.g. the class
``ImageSequenceClip``.
@@ -476,7 +465,7 @@ class VideoClip(Clip):
or ffmpeg.
Parameters
-----------
----------
filename
Name of the resulting gif file, as a string or a path-like object.
@@ -585,7 +574,7 @@ class VideoClip(Clip):
(in seconds).
Examples
---------
--------
>>> # The scene between times t=3s and t=6s in ``clip`` will be
>>> # be played twice slower in ``new_clip``
@@ -606,9 +595,8 @@ class VideoClip(Clip):
# IMAGE FILTERS
def image_transform(self, image_func, apply_to=None):
"""
Modifies the images of a clip by replacing the frame
`get_frame(t)` by another frame, `image_func(get_frame(t))`
"""Modifies the images of a clip by replacing the frame `get_frame(t)` by
another frame, `image_func(get_frame(t))`.
"""
apply_to = apply_to or []
return self.transform(lambda get_frame, t: image_func(get_frame(t)), apply_to)
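
A minimal sketch of ``image_transform`` with a per-frame function (the input file is a placeholder):

from moviepy import VideoFileClip

clip = VideoFileClip("myvideo.mp4")
# swap the red and green channels of every frame
swapped = clip.image_transform(lambda frame: frame[:, :, [1, 0, 2]])
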
@@ -617,6 +605,7 @@ class VideoClip(Clip):
# C O M P O S I T I N G
def fill_array(self, pre_array, shape=(0, 0)):
"""TODO: needs documentation."""
pre_shape = pre_array.shape
dx = shape[0] - pre_shape[0]
dy = shape[1] - pre_shape[1]
@@ -634,8 +623,7 @@ class VideoClip(Clip):
return post_array
def blit_on(self, picture, t):
"""
Returns the result of the blit of the clip's frame at time `t`
"""Returns the result of the blit of the clip's frame at time `t`
on the given `picture`, the position of the clip being given
by the clip's ``pos`` attribute. Meant for compositing.
"""
@@ -730,7 +718,7 @@ class VideoClip(Clip):
clips.
Parameters
-----------
----------
size
Size (width, height) in pixels of the final clip.
@@ -745,7 +733,6 @@ class VideoClip(Clip):
col_opacity
Parameter in 0..1 indicating the opacity of the colored
background.
"""
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
@@ -801,7 +788,8 @@ class VideoClip(Clip):
"""Set the clip's mask.
Returns a copy of the VideoClip with the mask attribute set to
``mask``, which must be a greyscale (values in 0-1) VideoClip"""
``mask``, which must be a greyscale (values in 0-1) VideoClip.
"""
assert mask is None or mask.is_mask
self.mask = mask
@@ -827,7 +815,7 @@ class VideoClip(Clip):
of several types.
Examples
----------
--------
>>> clip.with_position((45,150)) # x=45, y=150
>>>
@@ -853,7 +841,8 @@ class VideoClip(Clip):
"""Set the clip's layer in compositions. Clips with a greater ``layer``
attribute will be displayed on top of others.
Note: Only has effect when the clip is used in a CompositeVideoClip."""
Note: Only has effect when the clip is used in a CompositeVideoClip.
"""
self.layer = layer
# --------------------------------------------------------------
@@ -899,7 +888,6 @@ class VideoClip(Clip):
"""Remove the clip's audio.
Return a copy of the clip with audio set to None.
"""
self.audio = None
@@ -908,7 +896,6 @@ class VideoClip(Clip):
"""Transform the clip's audio.
Return a new clip whose audio has been transformed by ``fun``.
"""
self.audio = self.audio.fx(fun, *args, **kwargs)
@@ -919,7 +906,7 @@ class DataVideoClip(VideoClip):
of successive datasets
Parameters
-----------
----------
data
A list of datasets, each dataset being used for one frame of the clip
@@ -928,9 +915,6 @@ class DataVideoClip(VideoClip):
fps
Number of frames per second in the animation
Examples
---------
"""
def __init__(self, data, data_to_frame, fps, is_mask=False, has_constant_size=True):
@@ -965,15 +949,14 @@ class UpdatedVideoClip(VideoClip):
>>> return world.to_frame()
Parameters
-----------
----------
world
An object with the following attributes:
- world.clip_t : the clip's time corresponding to the
world's state
- world.update() : update the world's state, (including
increasing world.clip_t of one time step)
- world.to_frame() : renders a frame depending on the world's state
- world.clip_t: the clip's time corresponding to the world's state.
- world.update() : update the world's state, (including increasing
world.clip_t of one time step).
- world.to_frame() : renders a frame depending on the world's state.
is_mask
True if the clip is a WxH mask with values in 0-1
@@ -1013,13 +996,13 @@ class ImageClip(VideoClip):
display the given picture at all times.
Examples
---------
--------
>>> clip = ImageClip("myHouse.jpeg")
>>> clip = ImageClip( someArray ) # a Numpy array represent
Parameters
-----------
----------
img
Any picture file (png, tiff, jpeg, etc.) as a string or a path-like object,
@@ -1033,7 +1016,7 @@ class ImageClip(VideoClip):
of the picture (if it exists) to be used as a mask.
Attributes
-----------
----------
img
Array representing the image of the clip.
@@ -1128,7 +1111,7 @@ class ColorClip(ImageClip):
"""An ImageClip showing just one color.
Parameters
-----------
----------
size
Size (width, height) in pixels of the clip.
@@ -1171,7 +1154,7 @@ class TextClip(ImageClip):
Requires ImageMagick.
Parameters
-----------
----------
text
A string of the text to write. Can be replaced by argument
@@ -1226,7 +1209,6 @@ class TextClip(ImageClip):
transparent
``True`` (default) if you want to take into account the
transparency in the image.
"""
@convert_path_to_string("filename")
@@ -1335,8 +1317,8 @@ class TextClip(ImageClip):
@staticmethod
def list(arg):
"""Returns a list of all valid entries for the ``font`` or ``color`` argument of
``TextClip``"""
``TextClip``.
"""
popen_params = cross_platform_popen_params(
{"stdout": sp.PIPE, "stderr": sp.DEVNULL, "stdin": sp.DEVNULL}
)
@@ -1365,7 +1347,6 @@ class TextClip(ImageClip):
>>> # Find all the available fonts which contain "Courier"
>>> print(TextClip.search('Courier', 'font'))
"""
string = string.lower()
names_list = TextClip.list(arg)
@@ -1373,6 +1354,8 @@ class TextClip(ImageClip):
class BitmapClip(VideoClip):
"""Clip made of color bitmaps. Mainly designed for testing purposes."""
DEFAULT_COLOR_DICT = {
"R": (255, 0, 0),
"G": (0, 255, 0),
@@ -1390,23 +1373,23 @@ class BitmapClip(VideoClip):
def __init__(
self, bitmap_frames, *, fps=None, duration=None, color_dict=None, is_mask=False
):
"""
Creates a VideoClip object from a bitmap representation. Primarily used
"""Creates a VideoClip object from a bitmap representation. Primarily used
in the test suite.
Parameters
-----------
----------
bitmap_frames
A list of frames. Each frame is a list of strings. Each string
represents a row of colors. Each color represents an (r, g, b) tuple.
Example input (2 frames, 5x3 pixel size):
[["RRRRR",
"RRBRR",
"RRBRR"],
["RGGGR",
"RGGGR",
"RGGGR"]]
Example input (2 frames, 5x3 pixel size)::
[["RRRRR",
"RRBRR",
"RRBRR"],
["RGGGR",
"RGGGR",
"RGGGR"]]
fps
The number of frames per second to display the clip at. `duration` will
@@ -1423,23 +1406,23 @@ class BitmapClip(VideoClip):
correspond to the letters used in ``bitmap_frames``.
eg ``{"A": (50, 150, 150)}``.
Defaults to
::
{
"R": (255, 0, 0),
"G": (0, 255, 0),
"B": (0, 0, 255),
"O": (0, 0, 0), # "O" represents black
"W": (255, 255, 255),
"A": (89, 225, 62), # "A", "C", "D", "E", "F" represent arbitrary colors
"C": (113, 157, 108),
"D": (215, 182, 143),
"E": (57, 26, 252),
}
Defaults to::
{
"R": (255, 0, 0),
"G": (0, 255, 0),
"B": (0, 0, 255),
"O": (0, 0, 0), # "O" represents black
"W": (255, 255, 255),
# "A", "C", "D", "E", "F" represent arbitrary colors
"A": (89, 225, 62),
"C": (113, 157, 108),
"D": (215, 182, 143),
"E": (57, 26, 252),
}
is_mask
Set to ``True`` if the clip is going to be used as a mask.
"""
assert fps is not None or duration is not None
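
A sketch of the bitmap representation described above, assuming the class lives in ``moviepy.video.VideoClip`` as this file's module docstring suggests:

from moviepy.video.VideoClip import BitmapClip

frames = [
    ["RRRRR",
     "RRBRR",
     "RRBRR"],
    ["RGGGR",
     "RGGGR",
     "RGGGR"],
]
clip = BitmapClip(frames, fps=1)  # 2 frames of 5x3 pixels, one frame per second
clip.to_bitmap()                  # should round-trip back to the same list of strings
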
@@ -1469,8 +1452,7 @@ class BitmapClip(VideoClip):
self.fps = fps
def to_bitmap(self, color_dict=None):
"""
Returns a valid bitmap list that represents each frame of the clip.
"""Returns a valid bitmap list that represents each frame of the clip.
If `color_dict` is not specified, then it will use the same `color_dict`
that was used to create the clip.
"""

View File

@@ -6,7 +6,6 @@ from moviepy.video.VideoClip import ColorClip, VideoClip
class CompositeVideoClip(VideoClip):
"""
A VideoClip made of other videoclips displayed together. This is the
base class for most compositions.
@@ -116,7 +115,6 @@ class CompositeVideoClip(VideoClip):
def make_frame(self, t):
"""The clips playing at time `t` are blitted over one another."""
frame = self.bg.get_frame(t).astype("uint8")
im = Image.fromarray(frame)
@@ -132,10 +130,12 @@ class CompositeVideoClip(VideoClip):
def playing_clips(self, t=0):
"""Returns a list of the clips in the composite clips that are
actually playing at the given time `t`."""
actually playing at the given time `t`.
"""
return [clip for clip in self.clips if clip.is_playing(t)]
def close(self):
"""Closes the instance, releasing all the resources."""
if self.created_bg and self.bg:
# Only close the background clip if it was locally created.
# Otherwise, it remains the job of whoever created it.

View File

@@ -10,7 +10,7 @@ from moviepy.video.VideoClip import ColorClip, VideoClip
def concatenate_videoclips(
clips, method="chain", transition=None, bg_color=None, is_mask=False, padding=0
):
"""Concatenates several video clips
"""Concatenates several video clips.
Returns a video clip made by clip by concatenating several video clips.
(Concatenated means that they will be played one after another).
@@ -25,19 +25,17 @@ def concatenate_videoclips(
If you have clips of different size and you want to write directly the
result of the concatenation to a file, use the method "compose" instead.
- method="compose", if the clips do not have the same
resolution, the final resolution will be such that no clip has
to be resized.
As a consequence the final clip has the height of the highest
clip and the width of the widest clip of the list. All the
clips with smaller dimensions will appear centered. The border
will be transparent if mask=True, else it will be of the
color specified by ``bg_color``.
- method="compose", if the clips do not have the same resolution, the final
resolution will be such that no clip has to be resized.
As a consequence the final clip has the height of the highest clip and the
width of the widest clip of the list. All the clips with smaller dimensions
will appear centered. The border will be transparent if mask=True, else it
will be of the color specified by ``bg_color``.
The clip with the highest FPS will be the FPS of the result clip.
Parameters
-----------
----------
clips
A list of video clips which must all have their ``duration``
attributes set.
@@ -58,7 +56,6 @@ def concatenate_videoclips(
`compose`.
"""
if transition is not None:
clip_transition_pairs = [[v, transition] for v in clips[:-1]]
clips = reduce(lambda x, y: x + y, clip_transition_pairs) + [clips[-1]]
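
A hedged sketch contrasting the two methods (input files are placeholders and may have different sizes):

from moviepy import VideoFileClip, concatenate_videoclips

clip1 = VideoFileClip("part1.mp4")
clip2 = VideoFileClip("part2.mp4")
# "chain" assumes equal sizes; "compose" centers smaller clips on a common canvas
final = concatenate_videoclips([clip1, clip2], method="compose")
final.write_videofile("joined.mp4")
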

View File

@@ -14,7 +14,6 @@ def on_color(clip, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
:param pos: the position of the clip in the final clip.
:param col_opacity: should the added zones be transparent ?
"""
if size is None:
size = clip.size
if pos is None:

View File

@@ -1,5 +0,0 @@
"""
This module provides classes that make positioning easy
"""
# class ClipPosition:

View File

@@ -1,6 +1,5 @@
"""
Here is the current catalogue. These are meant
to be used with clip.fx. There are available as transfx.crossfadein etc.
"""Here is the current catalogue. These are meant to be used with ``clip.fx``
There are available as ``transfx.crossfadein`` etc.
"""
from moviepy.decorators import add_mask_if_none, requires_duration
@@ -42,7 +41,7 @@ def slide_in(clip, duration, side):
and if the clip has the same size as the whole composition.
Parameters
===========
----------
clip
A video clip.
@@ -55,7 +54,7 @@ def slide_in(clip, duration, side):
'top' | 'bottom' | 'left' | 'right'
Examples
=========
--------
>>> from moviepy import *
>>> clips = [... make a list of clips]
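
The elided example above could be completed along these lines (clip files are hypothetical and assumed to share the same size):

from moviepy import *

clips = [VideoFileClip(f) for f in ("a.mp4", "b.mp4", "c.mp4")]
slided = [
    CompositeVideoClip([clip.fx(transfx.slide_in, 1, "left")])
    for clip in clips
]
final = concatenate_videoclips(slided, padding=-1)  # negative padding overlaps the transitions
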
@@ -84,7 +83,7 @@ def slide_out(clip, duration, side):
and if the clip has the same size as the whole composition.
Parameters
===========
----------
clip
A video clip.
@@ -97,7 +96,7 @@ def slide_out(clip, duration, side):
'top' | 'bottom' | 'left' | 'right'
Examples
=========
--------
>>> from moviepy import *
>>> clips = [... make a list of clips]
@@ -107,7 +106,6 @@ def slide_out(clip, duration, side):
>>> final_clip = concatenate_videoclips( slided_clips, padding=-1)
"""
w, h = clip.size
ts = clip.duration - duration # start time of the effect.
pos_dict = {

View File

@@ -1,5 +1,6 @@
def f_accel_decel(t, old_duration, new_duration, abruptness=1.0, soonness=1.0):
"""
"""Acceleration and deceleration function.
abruptness
negative abruptness (>-1): speed up down up
zero abruptness : no effect
@@ -9,7 +10,6 @@ def f_accel_decel(t, old_duration, new_duration, abruptness=1.0, soonness=1.0):
for positive abruptness, determines how soon the
speedup occurs (0<soonness < inf)
"""
a = 1.0 + abruptness
def _f(t):
@@ -25,7 +25,7 @@ def f_accel_decel(t, old_duration, new_duration, abruptness=1.0, soonness=1.0):
def accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0):
"""
"""Accelerates and decelerates a clip, useful for GIF making.
new_duration
If None, will be that of the current clip.
@@ -39,7 +39,6 @@ def accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0):
for positive abruptness, determines how soon the
speedup occurs (0<soonness < inf)
"""
if new_duration is None:
new_duration = clip.duration
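
For illustration, a sketch of ``accel_decel`` applied through ``clip.fx`` (file name and durations are placeholders):

from moviepy import *

clip = VideoFileClip("myvideo.mp4")
# replay the same content in 5 seconds, easing into and out of the speed change
warped = clip.fx(vfx.accel_decel, new_duration=5, abruptness=1.0, soonness=1.0)
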

View File

@@ -6,7 +6,8 @@ def blackwhite(clip, RGB=None, preserve_luminosity=True):
Parameter RGB allows to set weights for the different color
channels.
If RGB is 'CRT_phosphor' a special set of values is used.
preserve_luminosity maintains the sum of RGB to 1."""
preserve_luminosity maintains the sum of RGB to 1.
"""
if RGB is None:
RGB = [1, 1, 1]

View File

@@ -38,7 +38,6 @@ def crop(
>>> crop(clip, x_center=300, width=400, y1=100, y2=600)
"""
if width and x1 is not None:
x2 = x1 + width
elif width and x2 is not None:

View File

@@ -3,9 +3,7 @@ from moviepy.decorators import apply_to_mask
@apply_to_mask
def even_size(clip):
"""
Crops the clip to make dimensions even.
"""
"""Crops the clip to make dimensions even."""
w, h = clip.size
w_even = w % 2 == 0
h_even = h % 2 == 0

View File

@@ -2,14 +2,13 @@ import numpy as np
def fadein(clip, duration, initial_color=None):
"""
Makes the clip progressively appear from some color (black by default),
"""Makes the clip progressively appear from some color (black by default),
over ``duration`` seconds at the beginning of the clip. Can be used for
masks too, where the initial color must be a number between 0 and 1.
For cross-fading (progressive appearance or disappearance of a clip
over another clip, see ``transfx.crossfadein``
"""
if initial_color is None:
initial_color = 0 if clip.is_mask else [0, 0, 0]
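
A sketch combining ``fadein`` and ``fadeout`` (durations, colors and the file name are placeholders):

from moviepy import *

clip = VideoFileClip("myvideo.mp4")
styled = clip.fx(vfx.fadein, 2).fx(vfx.fadeout, 2)                 # fade from and to black
white_in = clip.fx(vfx.fadein, 2, initial_color=[255, 255, 255])   # fade from white instead
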

View File

@@ -5,14 +5,13 @@ from moviepy.decorators import requires_duration
@requires_duration
def fadeout(clip, duration, final_color=None):
"""
Makes the clip progressively fade to some color (black by default),
over ``duration`` seconds at the end of the clip. Can be used for
masks too, where the final color must be a number between 0 and 1.
For cross-fading (progressive appearance or disappearance of a clip
over another clip, see ``transfx.crossfadeout``
"""
"""Makes the clip progressively fade to some color (black by default),
over ``duration`` seconds at the end of the clip. Can be used for masks too,
where the final color must be a number between 0 and 1.
For cross-fading (progressive appearance or disappearance of a clip over another
clip, see ``transfx.crossfadeout``
"""
if final_color is None:
final_color = 0 if clip.is_mask else [0, 0, 0]

View File

@@ -13,7 +13,6 @@ def freeze(clip, t=0, freeze_duration=None, total_duration=None, padding_end=0):
the clip and the freeze (i.e. the duration of the freeze is
automatically computed). One of them must be provided.
"""
if t == "end":
t = clip.duration - padding_end - 1 / clip.fps

View File

@@ -9,7 +9,7 @@ def freeze_region(clip, t=0, region=None, outside_region=None, mask=None):
`outside_region`, or `mask`.
Parameters
-----------
----------
t
Time at which to freeze the freezed region.
@@ -28,7 +28,6 @@ def freeze_region(clip, t=0, region=None, outside_region=None, mask=None):
indicate the freezed region in the final picture.
"""
if region is not None:
x1, y1, x2, y2 = region

View File

@@ -1,5 +1,5 @@
def gamma_corr(clip, gamma):
""" Gamma-correction of a video clip """
"""Gamma-correction of a video clip."""
def filter(im):
corrected = 255 * (1.0 * im / 255) ** gamma

View File

@@ -14,16 +14,14 @@ except Exception:
def headblur(clip, fx, fy, radius, intensity=None):
"""
Returns a filter that will blur a moving part (a head ?) of
the frames. The position of the blur at time t is
defined by (fx(t), fy(t)), the radius of the blurring
by ``radius`` and the intensity of the blurring by ``intensity``.
Requires OpenCV for the circling and the blurring.
Automatically deals with the case where part of the image goes
offscreen.
"""
"""Returns a filter that will blur a moving part (a head ?) of the frames.
The position of the blur at time t is defined by (fx(t), fy(t)), the radius
of the blurring by ``radius`` and the intensity of the blurring by ``intensity``.
Requires OpenCV for the circling and the blurring. Automatically deals with the
case where part of the image goes offscreen.
"""
if intensity is None:
intensity = int(2 * radius / 3)
@@ -53,6 +51,10 @@ if not headblur_possible:
doc = headblur.__doc__
def headblur(clip, fx, fy, r_zone, r_blur=None):
"""Fallback headblur FX function, used if OpenCV is not installed.
This docstring will be replaced at runtime.
"""
raise IOError("fx painting needs opencv")
headblur.__doc__ = doc

View File

@@ -8,7 +8,7 @@ def loop(clip, n=None, duration=None):
Ideal for clips coming from gifs.
Parameters
------------
----------
n
Number of times the clip should be played. If `None` the
the clip will loop indefinitely (i.e. with no set duration).

View File

@@ -1,5 +1,5 @@
def lum_contrast(clip, lum=0, contrast=0, contrast_threshold=127):
""" luminosity-contrast correction of a clip """
"""Luminosity-contrast correction of a clip."""
def image_filter(im):
im = 1.0 * im # float conversion

View File

@@ -29,7 +29,6 @@ def margin(
this value to 0 yields transparent margins.
"""
if (opacity != 1.0) and (clip.mask is None) and not (clip.is_mask):
clip = clip.add_mask()

View File

@@ -5,10 +5,10 @@ from moviepy.video.VideoClip import ImageClip
def mask_and(clip, other_clip):
"""Returns the logical 'and' (min) between two masks.
other_clip can be a mask clip or a picture (np.array).
``other_clip`` can be a mask clip or a picture (np.array).
The result has the duration of 'clip' (if it has any)
"""
# To ensure that 'or' of two ImageClips will be an ImageClip.
if isinstance(other_clip, ImageClip):
other_clip = other_clip.img

View File

@@ -5,10 +5,10 @@ from moviepy.video.VideoClip import ImageClip
def mask_or(clip, other_clip):
"""Returns the logical 'or' (max) between two masks.
other_clip can be a mask clip or a picture (np.array).
The result has the duration of 'clip' (if it has any)
"""
``other_clip`` can be a mask clip or a picture (np.array).
The result has the duration of 'clip' (if it has any).
"""
# To ensure that 'or' of two ImageClips will be an ImageClip.
if isinstance(other_clip, ImageClip):
other_clip = other_clip.img

View File

@@ -1,3 +1,3 @@
def mirror_x(clip, apply_to="mask"):
""" flips the clip horizontally (and its mask too, by default) """
"""Flips the clip horizontally (and its mask too, by default)."""
return clip.image_transform(lambda img: img[:, ::-1], apply_to=apply_to)

View File

@@ -1,3 +1,3 @@
def mirror_y(clip, apply_to="mask"):
""" flips the clip vertically (and its mask too, by default) """
"""Flips the clip vertically (and its mask too, by default)."""
return clip.image_transform(lambda img: img[::-1], apply_to=apply_to)

View File

@@ -1,12 +1,10 @@
def multiply_speed(clip, factor=None, final_duration=None):
"""
Returns a clip playing the current clip but at a speed multiplied
by ``factor``. Instead of factor one can indicate the desired
``final_duration`` of the clip, and the factor will be automatically
computed.
The same effect is applied to the clip's audio and mask if any.
"""
"""Returns a clip playing the current clip but at a speed multiplied by ``factor``.
Instead of factor one can indicate the desired ``final_duration`` of the clip, and
the factor will be automatically computed. The same effect is applied to the clip's
audio and mask if any.
"""
if final_duration:
factor = 1.0 * clip.duration / final_duration
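
Both call styles, sketched with an illustrative input file:

from moviepy import VideoFileClip
from moviepy.video.fx.multiply_speed import multiply_speed

clip = VideoFileClip("my_video.mp4")
twice_as_fast = clip.fx(multiply_speed, factor=2)
exactly_ten_seconds = clip.fx(multiply_speed, final_duration=10)  # factor computed for you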

View File

@@ -14,7 +14,7 @@ except Exception:
def to_painting(image, saturation=1.4, black=0.006):
""" transforms any photo into some kind of painting """
"""Transforms any photo into some kind of painting."""
edges = sobel(image.mean(axis=2))
darkening = black * (255 * np.dstack(3 * [edges]))
painting = saturation * image - darkening
@@ -37,6 +37,11 @@ if not painting_possible:
doc = painting.__doc__
def painting(clip, saturation=None, black=None):
"""Fallback painting FX function, used if scikit-image and scipy are not
installed.
This docstring will be replaced at runtime.
"""
raise IOError("fx painting needs scikit-image or scipy")
painting.__doc__ = doc

View File

@@ -87,8 +87,8 @@ def _get_resizer():
- ``resizer``: Function used to resize images in ``resize`` FX function.
- ``origin``: Library used to resize.
- ``error_msgs``: If none of the libraries is available, shows the user why
this feature is not available and how to fix it in several error messages
which are formatted in the error displayed, if resizing is not possible.
this feature is not available and how to fix it in several error messages
which are formatted in the error displayed, if resizing is not possible.
"""
error_messages = []
@@ -116,36 +116,33 @@ if _resizer_data["resizer"] is not None:
def resize(clip, new_size=None, height=None, width=None, apply_to_mask=True):
"""
Returns a video clip that is a resized version of the clip.
"""Returns a video clip that is a resized version of the clip.
Parameters
------------
----------
new_size:
new_size : tuple or float or function, optional
Can be either
- ``(width,height)`` in pixels or a float representing
- A scaling factor, like 0.5
- ``(width, height)`` in pixels or a float representing
- A scaling factor, like ``0.5``.
- A function of time returning one of these.
width:
width of the new clip in pixel. The height is then computed so
width : int, optional
Width of the new clip in pixels. The height is then computed so
that the width/height ratio is conserved.
height:
height of the new clip in pixel. The width is then computed so
height : int, optional
Height of the new clip in pixels. The width is then computed so
that the width/height ratio is conserved.
Examples
----------
--------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
>>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if new_size is not None:
@@ -240,6 +237,10 @@ if resizer is None:
doc = resize.__doc__
def resize(clip, new_size=None, height=None, width=None):
"""Fallback resize FX function, if OpenCV, Scipy and PIL are not installed.
This docstring will be replaced at runtime.
"""
fix_tips = "- " + "\n- ".join(_resizer_data["error_msgs"])
raise ImportError(f"fx resize needs OpenCV or Scipy or PIL\n{fix_tips}")

View File

@@ -48,7 +48,7 @@ def rotate(
>>> new_clip = clip.add_mask().rotate(72)
Parameters
===========
----------
clip : VideoClip
A video clip.

View File

@@ -5,7 +5,7 @@ def scroll(
Scrolls horizontally or vertically a clip, e.g. to make end credits
Parameters
-----------
----------
w, h
The width and height of the final clip. Default to clip.w and clip.h

View File

@@ -3,7 +3,8 @@ import numpy as np
def supersample(clip, d, n_frames):
"""Replaces each frame at time t by the mean of `n_frames` equally spaced frames
taken in the interval [t-d, t+d]. This results in motion blur."""
taken in the interval [t-d, t+d]. This results in motion blur.
"""
def filter(get_frame, t):
timings = np.linspace(t - d, t + d, n_frames)
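
A quick sketch of ``supersample`` used for motion blur (parameters are illustrative):

from moviepy import VideoFileClip
from moviepy.video.fx.supersample import supersample

clip = VideoFileClip("my_video.mp4")
# Average 5 frames taken in [t - 0.04, t + 0.04] around each time t.
motion_blurred = clip.fx(supersample, d=0.04, n_frames=5)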

View File

@@ -7,16 +7,14 @@ from moviepy.video.VideoClip import VideoClip
class ImageSequenceClip(VideoClip):
"""
A VideoClip made from a series of images.
"""A VideoClip made from a series of images.
Parameters
-----------
----------
sequence
Can be one of these:
- The name of a folder (containing only pictures). The pictures
will be considered in alphanumerical order.
- A list of names of image files. In this case you can choose to
@@ -36,14 +34,6 @@ class ImageSequenceClip(VideoClip):
is_mask
Will this sequence of pictures be used as an animated mask.
Notes
------
If your sequence is made of image files, the only image kept in
"""
def __init__(
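
A minimal sketch of the class in use, assuming a folder of same-sized images:

from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

# "frames/" is a hypothetical folder; its pictures are read in alphanumerical order.
clip = ImageSequenceClip("frames/", fps=24)
clip.write_videofile("from_frames.mp4")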

View File

@@ -5,9 +5,7 @@ from moviepy.video.VideoClip import VideoClip
class VideoFileClip(VideoClip):
"""
A video clip originating from a movie file. For instance: ::
>>> clip = VideoFileClip("myHolidays.mp4")
@@ -17,7 +15,7 @@ class VideoFileClip(VideoClip):
Parameters
------------
----------
filename:
The name of the video file, as a string or a path-like object.
@@ -58,7 +56,7 @@ class VideoFileClip(VideoClip):
Attributes
-----------
----------
filename:
Name of the original video file.
@@ -148,9 +146,11 @@ class VideoFileClip(VideoClip):
)
def __deepcopy__(self, memo):
"""AudioFileClip can't be deeply copied because the locked Thread
"""Implements ``copy.deepcopy(clip)`` behaviour as ``copy.copy(clip)``.
VideoFileClip class instances can't be deeply copied because the locked Thread
of ``proc`` isn't pickleable. Without this override, calls to
``copy.deepcopy(clip)`` will raise next ``TypeError``:
``copy.deepcopy(clip)`` would raise a ``TypeError``:
```
TypeError: cannot pickle '_thread.lock' object
@@ -159,7 +159,7 @@ class VideoFileClip(VideoClip):
return self.__copy__()
def close(self):
""" Close the internal reader. """
"""Close the internal reader."""
if self.reader:
self.reader.close()
self.reader = None
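
The override above makes ``copy.deepcopy`` behave like a shallow copy; a short sketch of that together with ``close()`` via the context manager (file taken from the repository's test media):

import copy

from moviepy import VideoFileClip

with VideoFileClip("media/big_buck_bunny_432_433.webm") as clip:
    shallow_like = copy.deepcopy(clip)  # no TypeError: falls back to __copy__()
    print(clip.duration, clip.fps)
# Leaving the block calls close(), releasing the internal ffmpeg reader.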

View File

@@ -1,3 +1 @@
"""
Classes and methods for reading, writing and previewing video files.
"""
"""Classes and methods for reading, writing and previewing video files."""

View File

@@ -1,22 +1,12 @@
"""
This module implements all the functions to communicate with other Python
modules (PIL, matplotlib, mayavi, etc.)
"""Implements all the functions to communicate with other Python modules (PIL,
matplotlib, mayavi, etc.)
"""
import numpy as np
def PIL_to_npimage(im):
"""Transforms a PIL/Pillow image into a numpy RGB(A) image.
Actually all this does is return numpy.array(im)."""
return np.array(im)
# w,h = im.size
# d = (4 if im.mode=="RGBA" else 3)
# return +np.frombuffer(im.tobytes(), dtype='uint8').reshape((h,w,d))
def mplfig_to_npimage(fig):
""" Converts a matplotlib figure to a RGB frame after updating the canvas"""
"""Converts a matplotlib figure to a RGB frame after updating the canvas."""
# only the Agg backend now supports the tostring_rgb function
from matplotlib.backends.backend_agg import FigureCanvasAgg

View File

@@ -1,6 +1,4 @@
"""
Utilities to get a file from the internet
"""
"""Utilities to get a file from the internet."""
import os
@@ -11,9 +9,10 @@ from moviepy.tools import subprocess_call
def download_webfile(url, filename, overwrite=False):
"""Small utility to download the file at 'url' under name 'filename'.
If url is a youtube video ID like z410eauCnH it will download the video
using youtube-dl (install youtube-dl first !).
If the filename already exists and overwrite=False, nothing will happen.
- If url is a youtube video ID like z410eauCnH it will download the video
using youtube-dl. Requires youtube-dl (pip install youtube-dl).
- If the filename already exists and overwrite=False, nothing will happen.
"""
if os.path.exists(filename) and not overwrite:
return
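
A tiny sketch of the helper (module path and URL are assumptions based on the context above):

from moviepy.video.io.downloader import download_webfile

# Plain HTTP(S) download; an existing target file is kept unless overwrite=True.
download_webfile("https://example.com/small_video.mp4", "small_video.mp4")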

View File

@@ -1,8 +1,4 @@
"""
This module implements all the functions to read a video or a picture
using ffmpeg. It is quite ugly, as there are many pitfalls to avoid
"""
"""Implements all the functions to read a video or a picture using ffmpeg."""
import os
import re
import subprocess as sp
@@ -15,6 +11,8 @@ from moviepy.tools import convert_to_seconds, cross_platform_popen_params
class FFMPEG_VideoReader:
"""Class for video byte-level reading with ffmpeg."""
def __init__(
self,
filename,
@@ -74,10 +72,10 @@ class FFMPEG_VideoReader:
def initialize(self, start_time=0):
"""
Opens the file, creates the pipe.
Sets self.pos to the appropriate value (1 if start_time == 0 because
it pre-reads the first frame)
"""
Sets self.pos to the appropriate value (1 if start_time == 0 because
it pre-reads the first frame).
"""
self.close(delete_lastread=False) # if any
if start_time != 0:
@@ -201,7 +199,6 @@ class FFMPEG_VideoReader:
This function tries to avoid fetching arbitrary frames
whenever possible, by moving between adjacent frames.
"""
# + 1 so that it represents the frame position that it will be
# after the frame is read. This makes the later comparisons easier.
pos = self.get_frame_number(t) + 1
@@ -233,6 +230,7 @@ class FFMPEG_VideoReader:
return int(self.fps * t + 0.00001)
def close(self, delete_lastread=True):
"""Closes the reader terminating the process, if is still open."""
if self.proc:
if self.proc.poll() is None:
self.proc.terminate()
@@ -257,7 +255,7 @@ def ffmpeg_read_image(filename, with_mask=True, pixel_format=None):
Use ImageClip instead to make clips out of image files.
Parameters
-----------
----------
filename
Name of the image file. Can be of any format supported by ffmpeg.
@@ -352,7 +350,7 @@ class FFmpegInfosParser:
def parse(self):
"""Parses the information returned by FFmpeg in stderr executing their binary
for a file with ``-i`` option and returns a dictionary with all data needed
by moviepy.
by MoviePy.
"""
result = {
"video_found": False,
@@ -630,9 +628,11 @@ class FFmpegInfosParser:
return (global_data, stream_data)
def parse_fps(self, line):
"""Parses number of FPS from a line of the ``ffmpeg -i`` command output."""
return float(re.search(r" (\d+.?\d*) fps", line).group(1))
def parse_tbr(self, line):
"""Parses number of TBS from a line of the ``ffmpeg -i`` command output."""
s_tbr = re.search(r" (\d+.?\d*k?) tbr", line).group(1)
# Sometimes comes as e.g. 12k. We need to replace that with 12000.

View File

@@ -1,4 +1,4 @@
""" Misc. bindings to ffmpeg and ImageMagick."""
"""Miscellaneous bindings to ffmpeg."""
import os
@@ -12,7 +12,8 @@ def ffmpeg_extract_subclip(
inputfile, start_time, end_time, outputfile=None, logger="bar"
):
"""Makes a new video file playing video file ``inputfile`` between
the times ``start_time`` and ``end_time``."""
the times ``start_time`` and ``end_time``.
"""
name, ext = os.path.splitext(inputfile)
if not outputfile:
T1, T2 = [int(1000 * t) for t in [start_time, end_time]]
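
A one-line sketch of this binding (file names and times are illustrative):

from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip

# Cut out the 10 s - 20 s segment; the output name is derived automatically if omitted.
ffmpeg_extract_subclip("input.mp4", 10, 20, outputfile="input_10_20.mp4")
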
@@ -49,7 +50,8 @@ def ffmpeg_merge_video_audio(
logger="bar",
):
"""Merges video file ``videofile`` and audio file ``audiofile`` into one
movie file ``outputfile``."""
movie file ``outputfile``.
"""
cmd = [
FFMPEG_BINARY,
"-y",
@@ -69,7 +71,7 @@ def ffmpeg_merge_video_audio(
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_extract_audio(inputfile, outputfile, bitrate=3000, fps=44100, logger="bar"):
""" Extract the sound from a video file and save it in ``outputfile`` """
"""Extract the sound from a video file and save it in ``outputfile``."""
cmd = [
FFMPEG_BINARY,
"-y",
@@ -86,8 +88,9 @@ def ffmpeg_extract_audio(inputfile, outputfile, bitrate=3000, fps=44100, logger=
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_resize(inputfile, outputfile, size, logger="bar"):
"""resizes ``inputfile`` to new size ``size`` and write the result
in file ``outputfile``."""
"""Resizes ``inputfile`` to new size ``size`` and write the result
in file ``outputfile``.
"""
cmd = [
FFMPEG_BINARY,
"-i",
@@ -108,7 +111,7 @@ def ffmpeg_stabilize_video(
Stabilizes ``filename`` and write the result to ``output``.
Parameters
-----------
----------
inputfile
The name of the shaky video

View File

@@ -19,7 +19,7 @@ class FFMPEG_VideoWriter:
choice of formats.
Parameters
-----------
----------
filename
Any filename like 'video.mp4' etc. but if you want to avoid
@@ -135,7 +135,7 @@ class FFMPEG_VideoWriter:
self.proc = sp.Popen(cmd, **popen_params)
def write_frame(self, img_array):
""" Writes one frame in the file."""
"""Writes one frame in the file."""
try:
self.proc.stdin.write(img_array.tobytes())
except IOError as err:
@@ -193,6 +193,7 @@ class FFMPEG_VideoWriter:
raise IOError(error)
def close(self):
"""Closes the writer, terminating the subprocess if is still alive."""
if self.proc:
self.proc.stdin.close()
if self.proc.stderr is not None:
@@ -267,9 +268,7 @@ def ffmpeg_write_video(
def ffmpeg_write_image(filename, image, logfile=False, pixel_format=None):
"""Writes an image (HxWx3 or HxWx4 numpy array) to a file, using
ffmpeg."""
"""Writes an image (HxWx3 or HxWx4 numpy array) to a file, using ffmpeg."""
if image.dtype != "uint8":
image = image.astype("uint8")
if not pixel_format:
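
A small sketch of ``ffmpeg_write_image`` with a synthetic frame:

import numpy as np

from moviepy.video.io.ffmpeg_writer import ffmpeg_write_image

# A 100x200 solid red frame (HxWx3, uint8), written to a PNG through ffmpeg.
red_frame = np.tile(np.array([255, 0, 0], dtype="uint8"), (100, 200, 1))
ffmpeg_write_image("red.png", red_frame)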

View File

@@ -157,7 +157,7 @@ def write_gif(
Parameters
-----------
----------
filename
Name of the resulting gif file.
@@ -198,7 +198,6 @@ def write_gif(
>>> myClip.multiply_speed(0.5).write_gif('myClip.gif')
"""
#
# We use processes chained with pipes.
#
@@ -352,15 +351,7 @@ def write_gif(
def write_gif_with_image_io(
clip, filename, fps=None, opt=0, loop=0, colors=None, logger="bar"
):
"""
Writes the gif with the Python library ImageIO (calls FreeImage).
Parameters
-----------
opt
"""
"""Writes the gif with the Python library ImageIO (calls FreeImage)."""
if colors is None:
colors = 256
logger = proglog.default_bar_logger(logger)
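
A short sketch of the ImageIO-based writer (requires the ``imageio`` package; file names are illustrative):

from moviepy import VideoFileClip
from moviepy.video.io.gif_writers import write_gif_with_image_io

clip = VideoFileClip("media/big_buck_bunny_432_433.webm").subclip(0, 2)
write_gif_with_image_io(clip, "bunny.gif", fps=10)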

View File

@@ -1,6 +1,5 @@
"""
This module implements ipython_display
A function to embed images/videos/audio in the IPython Notebook
"""Implements ``ipython_display``, a function to embed images/videos/audio in the
IPython Notebook.
"""
# Notes:
@@ -23,7 +22,7 @@ try:
ipython_available = True
class HTML2(HTML):
class HTML2(HTML): # noqa D101
def __add__(self, other):
return HTML2(self.data + other.data)
@@ -64,18 +63,17 @@ def html_embed(
based on the extension of ``filename``, but this can fail.
rd_kwargs
keyword arguments for the rendering, like {'fps':15, 'bitrate':'50k'}
Keyword arguments for the rendering, like ``{'fps':15, 'bitrate':'50k'}``
**html_kwargs
Allow you to give some options, like width=260, autoplay=True,
loop=1 etc.
html_kwargs
Allow you to give some options, like ``width=260``, ``autoplay=True``,
``loop=1`` etc.
Examples
=========
--------
TODO Create example based on ipython_display examples
"""
if rd_kwargs is None:
rd_kwargs = {}
@@ -174,7 +172,8 @@ def ipython_display(
center=True,
**html_kwargs,
):
"""
"""Displays clip content in an IPython Notebook.
clip
Either the name of a file, or a clip to preview. The clip will
actually be written to a file and embedded as if a filename was
@@ -195,7 +194,7 @@ def ipython_display(
fps
Enables to specify an fps, as required for clips whose fps is unknown.
**kwargs:
kwargs
Allow you to give some options, like width=260, etc. When editing
looping gifs, a good choice is loop=1, autoplay=1.
@@ -204,7 +203,7 @@ def ipython_display(
Important: The media will be physically embedded in the notebook.
Examples
=========
--------
>>> from moviepy.editor import *
>>> # later ...
@@ -217,7 +216,6 @@ def ipython_display(
>>> clip.save_frame("first_frame.jpeg")
>>> ipython_display("first_frame.jpeg")
"""
if not ipython_available:
raise ImportError("Only works inside an IPython Notebook")

View File

@@ -14,7 +14,7 @@ pg.display.set_caption("MoviePy")
def imdisplay(imarray, screen=None):
"""Splashes the given image array on the given pygame screen """
"""Splashes the given image array on the given pygame screen."""
a = pg.surfarray.make_surface(imarray.swapaxes(0, 1))
if screen is None:
screen = pg.display.set_mode(imarray.shape[:2][::-1])
@@ -28,7 +28,7 @@ def show(clip, t=0, with_mask=True, interactive=False):
Splashes the frame of clip corresponding to time ``t``.
Parameters
------------
----------
t
Time in seconds of the frame to display.
@@ -38,7 +38,6 @@ def show(clip, t=0, with_mask=True, interactive=False):
without the mask.
"""
if isinstance(t, tuple):
t = convert_to_seconds(*t)
@@ -83,7 +82,7 @@ def preview(
reducing the ``fps``.
Parameters
------------
----------
fps
Number of frames per second in the displayed video.
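
Both helpers sketched quickly (they open pygame windows, so they only make sense in an interactive session):

from moviepy import VideoFileClip
from moviepy.video.io.preview import preview, show

clip = VideoFileClip("media/big_buck_bunny_432_433.webm")
show(clip, t=1.5)        # splash the frame at t = 1.5 s
preview(clip, fps=15)    # play the clip at a reduced frame rate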

View File

@@ -3,19 +3,19 @@ from matplotlib.widgets import Slider
def sliders(func, sliders_properties, wait_for_validation=False):
"""A light GUI to manually explore and tune the outputs of
a function.
slider_properties is a list of dicts (arguments for Slider)
"""A light GUI to manually explore and tune the outputs of a function.
def volume(x,y,z):
return x*y*z
``sliders_properties`` is a list of dicts (arguments for Slider)::
def volume(x,y,z):
return x*y*z
intervals = [ { 'label' : 'width', 'valmin': 1 , 'valmax': 5 },
{ 'label' : 'height', 'valmin': 1 , 'valmax': 5 },
{ 'label' : 'depth', 'valmin': 1 , 'valmax': 5 } ]
inputExplorer(volume, intervals)
intervals = [ { 'label' : 'width', 'valmin': 1 , 'valmax': 5 },
{ 'label' : 'height', 'valmin': 1 , 'valmax': 5 },
{ 'label' : 'depth', 'valmin': 1 , 'valmax': 5 } ]
sliders(volume, intervals)
"""
n_vars = len(sliders_properties)
slider_width = 1.0 / n_vars

View File

@@ -1,7 +1,5 @@
"""
This module contains different functions to make end and opening
credits, even though it is difficult to fill everyone needs in this
matter.
"""Contains different functions to make end and opening credits, even though it is
difficult to fill everyone's needs in this matter.
"""
from moviepy.decorators import convert_path_to_string
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
@@ -10,6 +8,71 @@ from moviepy.video.VideoClip import ImageClip, TextClip
class CreditsClip(TextClip):
"""Credits clip.
Parameters
----------
creditfile
A string or path like object pointing to a text file
whose content must be as follows: ::
# This is a comment
# The next line says : leave 4 blank lines
.blank 4
..Executive Story Editor
MARCEL DURAND
..Associate Producers
MARTIN MARCEL
DIDIER MARTIN
..Music Supervisor
JEAN DIDIER
width
Total width of the credits text in pixels
gap
Horizontal gap in pixels between the jobs and the names
color
Color of the text. See ``TextClip.list('color')``
for a list of acceptable names.
font
Name of the font to use. See ``TextClip.list('font')`` for
the list of fonts you can use on your computer.
font_size
Size of font to use
stroke_color
Color of the stroke (=contour line) of the text. If ``None``,
there will be no stroke.
stroke_width
Width of the stroke, in pixels. Can be a float, like 1.5.
bg_color
Color of the background. If ``None``, the background will be transparent.
Returns
-------
image
An ImageClip instance that looks like this and can be scrolled
to make some credits: ::
Executive Story Editor MARCEL DURAND
Associate Producers MARTIN MARCEL
DIDIER MARTIN
Music Supervisor JEAN DIDIER
"""
@convert_path_to_string("creditfile")
def __init__(
self,
@@ -24,72 +87,6 @@ class CreditsClip(TextClip):
bg_color=None,
gap=0,
):
"""
Parameters
-----------
creditfile
A string or path like object pointing to a text file
whose content must be as follows: ::
# This is a comment
# The next line says : leave 4 blank lines
.blank 4
..Executive Story Editor
MARCEL DURAND
..Associate Producers
MARTIN MARCEL
DIDIER MARTIN
..Music Supervisor
JEAN DIDIER
width
Total width of the credits text in pixels
gap
Horizontal gap in pixels between the jobs and the names
color
Color of the text. See ``TextClip.list('color')``
for a list of acceptable names.
font
Name of the font to use. See ``TextClip.list('font')`` for
the list of fonts you can use on your computer.
font_size
Size of font to use
stroke_color
Color of the stroke (=contour line) of the text. If ``None``,
there will be no stroke.
stroke_width
Width of the stroke, in pixels. Can be a float, like 1.5.
bg_color
Color of the background. If ``None``, the background will
be transparent
Returns
---------
image
An ImageClip instance that looks like this and can be scrolled
to make some credits:
Executive Story Editor MARCEL DURAND
Associate Producers MARTIN MARCEL
DIDIER MARTIN
Music Supervisor JEAN DIDIER
"""
# Parse the .txt file
texts = []
one_line = True
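
A sketch of the class in use, combined with the ``scroll`` FX to obtain rolling credits (the credit file, sizes and speed are illustrative, ``y_speed`` is assumed from the ``scroll`` signature, and text rendering needs ImageMagick):

from moviepy.video.fx.scroll import scroll
from moviepy.video.tools.credits import CreditsClip

credits = CreditsClip("credits.txt", width=600, font_size=30, color="white")
rolling_credits = scroll(credits.with_duration(20), w=720, h=380, y_speed=30)
rolling_credits.write_videofile("credits.mp4", fps=24)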

View File

@@ -1,5 +1,4 @@
""" This module contains everything that can help automatize
the cuts in MoviePy """
"""Contains everything that can help automatize the cuts in MoviePy."""
from collections import defaultdict
@@ -10,7 +9,7 @@ from moviepy.decorators import use_clip_fps_by_default
@use_clip_fps_by_default
def find_video_period(clip, fps=None, start_time=0.3):
""" Finds the period of a video based on frames correlation """
"""Finds the period of a video based on frames correlation."""
def frame(t):
return clip.get_frame(t).flatten()
@@ -25,7 +24,7 @@ class FramesMatch:
"""
Parameters
-----------
----------
start_time
Starting time
@@ -72,28 +71,31 @@ class FramesMatch:
class FramesMatches(list):
"""TODO: needs documentation"""
def __init__(self, lst):
list.__init__(self, sorted(lst, key=lambda e: e.max_distance))
def best(self, n=1, percent=None):
"""TODO: needs documentation"""
if percent is not None:
n = len(self) * percent / 100
return self[0] if n == 1 else FramesMatches(self[:n])
def filter(self, condition):
"""
Returns a FramesMatches object obtained by filtering out the FramesMatch
"""Returns a FramesMatches object obtained by filtering out the FramesMatch
which do not satisfy the condition ``condition``. ``condition``
is a function (FrameMatch -> bool).
Examples
---------
--------
>>> # Only keep the matches corresponding to (> 1 second) sequences.
>>> new_matches = matches.filter( lambda match: match.time_span > 1)
"""
return FramesMatches(filter(condition, self))
def save(self, filename):
"""TODO: needs documentation"""
np.savetxt(
filename,
np.array([np.array(list(e)) for e in self]),
@@ -104,6 +106,9 @@ class FramesMatches(list):
@staticmethod
def load(filename):
"""Loads a FramesMatches object from a file.
Examples
--------
>>> matching_frames = FramesMatches.load("somefile")
"""
arr = np.loadtxt(filename)
@@ -122,7 +127,7 @@ class FramesMatches(list):
This is a well-optimized routine and quite fast.
Examples
---------
--------
We find all matching frames in a given video and turn the best match with
a duration of 1.5s or more into a GIF:
@@ -136,7 +141,7 @@ class FramesMatches(list):
>>> clip.subclip(best.start_time, best.end_time).write_gif("foo.gif")
Parameters
-----------
----------
clip
A MoviePy video clip, possibly transformed/resized
@@ -151,7 +156,6 @@ class FramesMatches(list):
Frames per second (default will be clip.fps)
"""
N_pixels = clip.w * clip.h * 3
def dot_product(F1, F2):
@@ -225,6 +229,8 @@ class FramesMatches(list):
):
"""
Parameters
----------
match_threshold
The smaller, the better-looping the gifs are.
@@ -236,7 +242,6 @@ class FramesMatches(list):
If None, then it is chosen equal to match_threshold
"""
if nomatch_threshold is None:
nomatch_threshold = match_threshold
@@ -290,6 +295,7 @@ class FramesMatches(list):
return FramesMatches(result)
def write_gifs(self, clip, gif_dir):
"""TODO: needs documentation"""
for (start, end, _, _) in self:
name = "%s/%08d_%08d.gif" % (gif_dir, 100 * start, 100 * end)
clip.subclip(start, end).write_gif(name)
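
A compact sketch of the ``FramesMatches`` workflow (the GIF file and output folder are illustrative, and the folder is assumed to exist):

from moviepy import VideoFileClip
from moviepy.video.tools.cuts import FramesMatches

clip = VideoFileClip("some_animation.gif")
matches = FramesMatches.from_clip(clip, 10, 3)  # distance threshold, max duration (s)
# Keep only well-looping segments longer than 1 s and write each of them as a GIF.
matches.filter(lambda match: match.time_span > 1).write_gifs(clip, "looping_gifs")
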
@@ -304,14 +310,14 @@ def detect_scenes(
Note that for large clips this may take some time
Returns
--------
-------
cuts, luminosities
cuts is a series of cuts [(0,t1), (t1,t2),...(...,tf)]
luminosities are the luminosities computed for each
frame of the clip.
Parameters
-----------
----------
clip
A video clip. Can be None if a list of luminosities is

View File

@@ -1,5 +1,4 @@
"""
This module deals with making images (np arrays). It provides drawing
"""Deals with making images (np arrays). It provides drawing
methods that are difficult to do with the existing Python libraries.
"""
@@ -42,7 +41,7 @@ def color_gradient(
Parameters
------------
----------
size
Size (width, height) in pixels of the final picture/array.
@@ -80,7 +79,7 @@ def color_gradient(
this creates a blurry disc of radius d(p1,p2).
Returns
--------
-------
image
An Numpy array of dimensions (W,H,ncolors) of type float
@@ -88,12 +87,11 @@ def color_gradient(
Examples
---------
--------
>>> grad = color_gradient(blabla).astype('uint8')
"""
# np-arrayize and change x,y coordinates to y,x
w, h = size
@@ -179,7 +177,7 @@ def color_split(
respectively.
Parameters
-----------
----------
x: (int)
If provided, the image is splitted horizontally in x, the left
@@ -206,7 +204,7 @@ def color_split(
Examples
---------
--------
>>> size = [200,200]
>>> # an image with all pixels with x<50 =0, the others =1
@@ -216,7 +214,6 @@ def color_split(
>>> # An image split along an arbitrary line (see below)
>>> color_split(size, p1=[20,50], p2=[25,70] color_1=0, color_2=1)
"""
if gradient_width or ((x is None) and (y is None)):
if p2 is not None:
vector = np.array(p2) - np.array(p1)

View File

@@ -1,5 +1,4 @@
"""
Classes for easy interpolation of trajectories and Curves.
"""Classes for easy interpolation of trajectories and curves.
Requires Scipy installed.
"""
@@ -7,7 +6,7 @@ import numpy as np
class Interpolator:
""" Poorman's linear interpolator, doesn't require Scipy. """
"""Poorman's linear interpolator, doesn't require Scipy."""
def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None):
@@ -21,10 +20,13 @@ class Interpolator:
self.tmin, self.tmax = min(tt), max(tt)
def __call__(self, t):
"""TODO: needs documentation"""
return np.interp(t, self.tt, self.ss, self.left, self.right)
class Trajectory:
"""TODO: needs documentation"""
def __init__(self, tt, xx, yy):
self.tt = 1.0 * np.array(tt)
@@ -33,32 +35,40 @@ class Trajectory:
self.update_interpolators()
def __call__(self, t):
"""TODO: needs documentation"""
return np.array([self.xi(t), self.yi(t)])
def addx(self, x):
"""TODO: needs documentation"""
return Trajectory(self.tt, self.xx + x, self.yy)
def addy(self, y):
"""TODO: needs documentation"""
return Trajectory(self.tt, self.xx, self.yy + y)
def update_interpolators(self):
"""TODO: needs documentation"""
self.xi = Interpolator(self.tt, self.xx)
self.yi = Interpolator(self.tt, self.yy)
def txy(self, tms=False):
"""TODO: needs documentation"""
return zip((1000 if tms else 1) * self.tt, self.xx, self.yy)
def to_file(self, filename):
"""TODO: needs documentation"""
np.savetxt(filename, np.array(self.txy(tms=True)), fmt="%d", delimiter="\t")
@staticmethod
def from_file(filename):
"""TODO: needs documentation"""
arr = np.loadtxt(filename, delimiter="\t")
tt, xx, yy = arr.T
return Trajectory(1.0 * tt / 1000, xx, yy)
@staticmethod
def save_list(trajs, filename):
"""TODO: needs documentation"""
N = len(trajs)
arr = np.hstack([np.array(list(t.txy(tms=True))) for t in trajs])
np.savetxt(
@@ -71,6 +81,7 @@ class Trajectory:
@staticmethod
def load_list(filename):
"""TODO: needs documentation"""
arr = np.loadtxt(filename, delimiter="\t").T
Nlines = arr.shape[0]
return [
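
Since most of these methods are still flagged "TODO: needs documentation", here is a small illustrative sketch of the ``Trajectory`` helpers:

from moviepy.video.tools.interpolators import Trajectory

# At t=0 s the object is at (10, 50), at t=2 s at (110, 50); positions in between are interpolated.
traj = Trajectory(tt=[0, 1, 2], xx=[10, 60, 110], yy=[50, 50, 50])
print(traj(1.5))           # interpolated (x, y) position at t = 1.5 s
shifted = traj.addx(100)   # same trajectory, shifted 100 pixels to the right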

View File

@@ -5,15 +5,12 @@ from moviepy.video.VideoClip import ImageClip
def find_objects(clip, size_threshold=500, preview=False):
"""
Returns a list of ImageClips representing each a separate object on
"""Returns a list of ImageClips representing each a separate object on
the screen.
size_threshold : all objects found with size < size_threshold will be
considered false positives and will be removed
"""
image = clip.get_frame(0)
if not clip.mask:
clip = clip.add_mask()

View File

@@ -1,4 +1,4 @@
""" Experimental module for subtitles support. """
"""Experimental module for subtitles support."""
import re
@@ -17,7 +17,7 @@ class SubtitlesClip(VideoClip):
needed.
Parameters
==========
----------
subtitles
Either the name of a file as a string or path-like object, or a list
@@ -28,7 +28,7 @@ class SubtitlesClip(VideoClip):
https://docs.python.org/3.8/library/codecs.html#standard-encodings)
Examples
=========
--------
>>> from moviepy.video.tools.subtitles import SubtitlesClip
>>> from moviepy.video.io.VideoFileClip import VideoFileClip
@@ -75,7 +75,8 @@ class SubtitlesClip(VideoClip):
def add_textclip_if_none(t):
"""Will generate a textclip if it hasn't been generated asked
to generate it yet. If there is no subtitle to show at t, return
false."""
false.
"""
sub = [
((text_start, text_end), text)
for ((text_start, text_end), text) in self.textclips.keys()
@@ -110,7 +111,8 @@ class SubtitlesClip(VideoClip):
def in_subclip(self, start_time=None, end_time=None):
"""Returns a sequence of [(t1,t2), text] covering all the given subclip
from start_time to end_time. The first and last times will be cropped so as
to be exactly start_time and end_time if possible."""
to be exactly start_time and end_time if possible.
"""
def is_in_subclip(t1, t2):
try:
@@ -146,11 +148,13 @@ class SubtitlesClip(VideoClip):
return "\n\n".join(to_srt(sub) for sub in self.subtitles)
def match_expr(self, expr):
"""Matchs a regular expression against the subtitles of the clip."""
return SubtitlesClip(
[sub for sub in self.subtitles if re.findall(expr, sub[1]) != []]
)
def write_srt(self, filename):
"""Writes an ``.srt`` file with the content of the clip."""
with open(filename, "w+") as file:
file.write(str(self))
@@ -164,7 +168,6 @@ def file_to_subtitles(filename, encoding=None):
Only works for '.srt' format for the moment.
"""
times_texts = []
current_times = None
current_text = ""

View File

@@ -1,11 +1,10 @@
"""
This module contains different functions for tracking objects in videos,
manually or automatically. The tracking functions return results under
the form: ``( txy, (fx,fy) )`` where txy is of the form [(ti, xi, yi)...]
and (fx(t),fy(t)) give the position of the track for all times t (if the
time t is out of the time bounds of the tracking time interval
fx and fy return the position of the object at the start or at the end
of the tracking time interval).
Contains different functions for tracking objects in videos, manually or automatically.
The tracking functions return results under the form: ``( txy, (fx,fy) )`` where txy
is of the form [(ti, xi, yi)...] and (fx(t),fy(t)) give the position of the track for
all times t (if the time t is out of the time bounds of the tracking time interval fx
and fy return the position of the object at the start or at the end of the tracking time
interval).
"""
import numpy as np
@@ -44,7 +43,7 @@ def manual_tracking(clip, t1=None, t2=None, fps=None, n_objects=1, savefile=None
form (ti, [(xi1,yi1), (xi2,yi2), ...] )
Parameters
-------------
----------
t1,t2:
times during which to track (defaults are start and
@@ -61,7 +60,7 @@ def manual_tracking(clip, t1=None, t2=None, fps=None, n_objects=1, savefile=None
it easier to edit and re-use later.
Examples
---------
--------
>>> from moviepy import VideoFileClip
>>> from moviepy.video.tools.tracking import manual_tracking
@@ -76,7 +75,6 @@ def manual_tracking(clip, t1=None, t2=None, fps=None, n_objects=1, savefile=None
>>> # If ever you only have one object being tracked, recover it with
>>> traj, = Trajectory.load_list('track.text')
"""
import pygame as pg
screen = pg.display.set_mode(clip.size)
@@ -136,11 +134,10 @@ def manual_tracking(clip, t1=None, t2=None, fps=None, n_objects=1, savefile=None
def findAround(pic, pat, xy=None, r=None):
"""
find image pattern ``pat`` in ``pic[x +/- r, y +/- r]``.
if xy is none, consider the whole picture.
"""
"""Find a image pattern ``pat`` in ``pic[x +/- r, y +/- r]``.
If xy is none, consider the whole picture.
"""
if xy and r:
h, w = pat.shape[:2]
x, y = xy
@@ -152,19 +149,18 @@ def findAround(pic, pat, xy=None, r=None):
def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None):
"""
Tracks a given pattern (small image array) in a video clip.
Returns [(x1,y1),(x2,y2)...] where xi,yi are
the coordinates of the pattern in the clip on frame i.
To select the frames you can either specify a list of times with ``tt``
or select a frame rate with ``fps``.
This algorithm assumes that the pattern's aspect does not vary much
and that the distance between two occurences of the pattern in
two consecutive frames is smaller than ``radius`` (if you set ``radius``
to -1 the pattern will be searched in the whole screen at each frame).
You can also provide the original position of the pattern with xy0.
"""
"""Tracks a given pattern (small image array) in a video clip.
Returns [(x1,y1),(x2,y2)...] where xi,yi are the coordinates of the pattern in the
clip on frame i. To select the frames you can either specify a list of times with
``tt`` or select a frame rate with ``fps``.
This algorithm assumes that the pattern's aspect does not vary much and that the
distance between two occurrences of the pattern in two consecutive frames is smaller
than ``radius`` (if you set ``radius`` to -1 the pattern will be searched in the
whole screen at each frame). You can also provide the original position of the
pattern with xy0.
"""
if not autotracking_possible:
raise IOError(
"Sorry, autotrack requires OpenCV for the moment. "

View File

@@ -5,13 +5,34 @@ extend-ignore =
E203,
W503,
# allow lambda expressions
E731
E731,
# don't require docstrings for public modules
D100,
# don't require docstrings for public packages
D104,
# don't require docstrings for magic methods
D105,
# don't require summary and description in docstrings
D205,
# allow first line of docstrings not ending in period (too limited)
D400,
# allow first line of docstrings not being imperative (too intrusive)
D401,
# allow blank lines between section headers and their content in docstrings
D412,
# allow composed `__all__` statements
RST902
per-file-ignores =
# allow imports not placed at the top of the file
# allow 'from moviepy import *' in editor.py
moviepy/editor.py: E402, F403, F405
# the version file doesn't need module level docstring
moviepy/version.py: D100
# tests don't require docstrings (although they are recommended)
tests/*.py: D103
# allow 'from moviepy import *' in examples
examples/*.py: F403, F405
docstring-convention = numpy
# Complexity should be decreased before uncomment:
#max-complexity = 10

View File

@@ -1,9 +1,5 @@
#!/usr/bin/env python
"""
This file will first try to import setuptools,
then reach for the embedded ez_setup.py file (or the ez_setup package),
and fail with a message if neither are successful.
"""
"""MoviePy setup script."""
import sys
from codecs import open
@@ -108,8 +104,10 @@ test_reqs = [
lint_reqs = [
"black>=20.8b1",
"flake8>3.7.0,<4.0.0",
"flake8-implicit-str-concat==0.2.0",
"flake8-absolute-import>=1.0",
"flake8-docstrings>=1.5.0",
"flake8-rst-docstrings>=0.0.14",
"flake8-implicit-str-concat==0.2.0",
"isort>=5.7.0",
"pre-commit>=2.9.3",
]

View File

@@ -81,7 +81,7 @@ def test_concatenate_audioclips_CompositeAudioClip():
- Duration is the sum of their durations.
- Ends are the accumulated sum of their durations.
- Starts are the accumulated sum of their durations, but first start is 0
and lastest is ignored.
and the last one is ignored.
- Channels are the max channels of their clips.
"""
frequencies = [440, 880, 1760]

View File

@@ -90,9 +90,7 @@ def test_PR_529():
def test_PR_610():
"""
Test that the max fps of the video clips is used for the composite video clip
"""
"""Test that the max fps of video clips is used for the composite video clip."""
clip1 = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
clip2 = ColorClip((640, 480), color=(0, 255, 0)).with_duration(1)
clip1.fps = 24
@@ -102,9 +100,7 @@ def test_PR_610():
def test_PR_1137_video():
"""
Test support for path-like objects as arguments for VideoFileClip.
"""
"""Test support for path-like objects as arguments for VideoFileClip."""
with VideoFileClip(Path("media/big_buck_bunny_432_433.webm")).subclip(
0.2, 0.4
) as video:
@@ -113,25 +109,19 @@ def test_PR_1137_video():
def test_PR_1137_audio():
"""
Test support for path-like objects as arguments for AudioFileClip.
"""
"""Test support for path-like objects as arguments for AudioFileClip."""
with AudioFileClip(Path("media/crunching.mp3")) as audio:
audio.write_audiofile(Path(TMP_DIR) / "pathlike.mp3")
assert isinstance(audio.filename, str)
def test_PR_1137_image():
"""
Test support for path-like objects as arguments for ImageClip.
"""
"""Test support for path-like objects as arguments for ImageClip."""
ImageClip(Path("media/vacation_2017.jpg")).close()
def test_PR_1137_subtitles():
"""
Test support for path-like objects as arguments for SubtitlesClip.
"""
"""Test support for path-like objects as arguments for SubtitlesClip."""
def make_textclip(txt):
return TextClip(

View File

@@ -34,7 +34,7 @@ def test_check_codec():
def test_write_frame_errors():
"""Checks error cases return helpful messages"""
"""Checks error cases return helpful messages."""
clip = VideoFileClip("media/big_buck_bunny_432_433.webm")
location = os.path.join(TMP_DIR, "unlogged-write.mp4")
with pytest.raises(IOError) as e:
@@ -51,8 +51,9 @@ def test_write_frame_errors():
def test_write_frame_errors_with_redirected_logs():
"""Checks error cases return helpful messages even when logs redirected
See https://github.com/Zulko/moviepy/issues/877"""
"""Checks error cases return helpful messages even when logs redirected.
See https://github.com/Zulko/moviepy/issues/877
"""
clip = VideoFileClip("media/big_buck_bunny_432_433.webm")
location = os.path.join(TMP_DIR, "logged-write.mp4")
with pytest.raises(IOError) as e:

View File

@@ -277,7 +277,8 @@ def test_ffmpeg_parse_infos_chapters():
def test_sequential_frame_pos():
"""test_video.mp4 contains 5 frames at 1 fps.
Each frame is 1x1 pixels and the sequence is Red, Green, Blue, Black, White.
The rgb values are not pure due to compression."""
The rgb values are not pure due to compression.
"""
reader = FFMPEG_VideoReader("media/test_video.mp4")
assert reader.pos == 1
@@ -344,8 +345,6 @@ def test_large_skip_frame_pos():
def test_large_small_skip_equal():
"""Get the 241st frame of the file in 4 different ways:
Reading every frame, Reading every 24th frame, Jumping straight there"""
sequential_reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")
small_skip_reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")
large_skip_reader = FFMPEG_VideoReader("media/big_buck_bunny_0_30.webm")

View File

@@ -17,15 +17,13 @@ from tests.test_helper import TMP_DIR
def test_failure_to_release_file():
"""This isn't really a test, because it is expected to fail.
It demonstrates that there *is* a problem with not releasing resources when
running on Windows.
"""Expected to fail. It demonstrates that there *is* a problem with not
releasing resources when running on Windows.
The real issue was that, as of moviepy 0.2.3.2, there was no way around it.
See test_resourcerelease.py to see how the close() methods provide a solution.
"""
# Get the name of a temporary file we can use.
local_video_filename = os.path.join(
TMP_DIR, "test_release_of_file_%s.mp4" % int(time.time())

View File

@@ -44,9 +44,7 @@ def test_credits():
def test_detect_scenes():
"""
Test that a cut is detected between concatenated red and green clips
"""
"""Test that a cut is detected between concatenated red and green clips."""
red = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
green = ColorClip((640, 480), color=(0, 200, 0)).with_duration(1)
video = concatenate_videoclips([red, green])