Closed ifishing closed 9 years ago
Good catch :+1:
this seems to happen since PR #10; I will fix that
This is now fixed according to npm test
.
I will release a new version with the latest performance improvements made in January by @cibernox – test performance has moved from (325ms)
to (210ms)
:-)
I have a better bezier function based on yours. I actually did a small prototype that's very fast.
Can you provide a PR? thanks Le 19 juin 2015 1:15 PM, "thednp" notifications@github.com a écrit :
I have a better bezier function based on yours. I actually did a small prototype that's very fast.
— Reply to this email directly or view it on GitHub https://github.com/gre/bezier-easing/issues/11#issuecomment-113479577.
Here it is, have a look:
// Cubic-bezier easing factory (this-based proposal). Returns a function
// aX -> y that approximates y(x) on the curve through (0,0), (mX1,mY1),
// (mX2,mY2), (1,1). Because the helpers are stored on `this`, this version
// must be invoked with `new` — in strict mode a plain call leaves `this`
// undefined and throws.
var bezier = function(mX1, mY1, mX2, mY2) {
  // These values are established by empiricism with tests (tradeoff: performance VS precision)
  var ni = 4,                    // NEWTON_ITERATIONS
      nms = 0.001,               // NEWTON_MIN_SLOPE
      sp = 0.0000001,            // SUBDIVISION_PRECISION
      smi = 10,                  // SUBDIVISION_MAX_ITERATIONS
      ksts = 11,                 // k Spline Table Size
      ksss = 1.0 / (ksts - 1.0), // k Sample Step Size
      // FIX: `'Float32Array' in window` throws a ReferenceError outside
      // browsers (e.g. under node, where the benchmarks in this thread run);
      // feature-detect the global constructor instead.
      f32as = typeof Float32Array !== 'undefined', // float32ArraySupported
      msv = f32as ? new Float32Array(ksts) : new Array(ksts), // m Sample Values
      _p = false;                // sample table precomputed yet?
  this.A = function(aA1, aA2) { return 1.0 - 3.0 * aA2 + 3.0 * aA1; },
  this.B = function(aA1, aA2) { return 3.0 * aA2 - 6.0 * aA1; },
  this.C = function(aA1) { return 3.0 * aA1; },
  // Returns x(t) given t, x1, and x2, or y(t) given t, y1, and y2.
  this.cB = function(aT, aA1, aA2) { // calc Bezier
    return ((this.A(aA1, aA2)*aT + this.B(aA1, aA2))*aT + this.C(aA1))*aT;
  },
  // Returns dx/dt given t, x1, and x2, or dy/dt given t, y1, and y2.
  this.gS = function (aT, aA1, aA2) { // getSlope
    return 3.0 * this.A(aA1, aA2)*aT*aT + 2.0 * this.B(aA1, aA2) * aT + this.C(aA1);
  },
  // Bisection fallback used where the slope is too flat for Newton-Raphson.
  this.bS = function(a, aA, aB, mX1, mX2) { // binary Subdivide
    var x, t, i = 0;
    do {
      t = aA + (aB - aA) / 2.0;
      x = this.cB(t, mX1, mX2) - a;
      if (x > 0.0) {
        aB = t;
      } else {
        aA = t;
      }
    } while (Math.abs(x) > sp && ++i < smi);
    return t;
  },
  // Refine the initial guess agt for t(x = aX) with a few Newton-Raphson steps.
  this.nri = function (aX, agt) { // newton Raphson Iterate
    for (var i = 0; i < ni; ++i) {
      var cs = this.gS(agt, mX1, mX2);
      if (cs === 0.0) return agt; // flat slope: cannot improve, stop here
      var x = this.cB(agt, mX1, mX2) - aX;
      agt -= x / cs;
    }
    return agt;
  },
  // Precompute x samples at evenly spaced t values to seed the t(x) search.
  this.csv = function () { // calc Sample Values
    for (var i = 0; i < ksts; ++i) {
      msv[i] = this.cB(i * ksss, mX1, mX2);
    }
  },
  // Given x in [0,1], find t such that x(t) == x (Newton when the slope is
  // steep enough, bisection when it is flat).
  this.gx = function (aX) { // get t for X
    var is = 0.0, cs = 1, ls = ksts - 1;
    // Walk the sample table to bracket aX.
    for (; cs != ls && msv[cs] <= aX; ++cs) {
      is += ksss;
    }
    --cs;
    // Interpolate to provide an initial guess for t
    var dist = (aX - msv[cs]) / (msv[cs+1] - msv[cs]),
        gt = is + dist * ksss,
        ins = this.gS(gt, mX1, mX2);
    if (ins >= nms) {
      return this.nri(aX, gt);
    } else if (ins === 0.0) {
      return gt;
    } else {
      return this.bS(aX, is, is + ksss, mX1, mX2);
    }
  },
  this.b = function (mX1, mY1, mX2, mY2) {
    var self = this; // keep `this` reachable inside the returned closure
    return function(aX) { // process bezier
      // Lazily precompute the sample table on the first call.
      if (!_p) self.pc();
      if (mX1 === mY1 && mX2 === mY2) return aX; // linear
      // Exact endpoints avoid needless Newton/subdivision work.
      if (aX === 0) return 0;
      if (aX === 1) return 1;
      return self.cB(self.gx(aX), mY1, mY2);
    };
  },
  // One-time precompute; skipped entirely for the linear (identity) case.
  this.pc = function() {
    _p = true;
    if (mX1 != mY1 || mX2 != mY2)
      this.csv();
  };
  return this.b(mX1, mY1, mX2, mY2);
};
is that faster than the current code? apart from storing things in this
and using a different scoping, it doesn't diverge much.
BTW storing things in this
when you just use bezier
as a function is probably not what you expect, right?
The best would be to compare using npm test
on this current project
This code performs better than your code on large number of elements animation. I don't know the npm stuff :dancers:
I don't understand why you put the functions in 'this' and why you don't access the functions directly instead of using self? Le 19 juin 2015 17:24, "thednp" notifications@github.com a écrit :
This code performs better than your code on large number of elements animation. I don't know the npm stuff [image: :dancers:]
— Reply to this email directly or view it on GitHub https://github.com/gre/bezier-easing/issues/11#issuecomment-113547493.
I've been benchmarking the 2 codes using node.js.
I have trouble understanding why this is happening, but your code makes b=bezier()
instantiation about 4-5 times faster
BUT
make the b(percentage)
calls about 11-12 times slower.
Your version
BezierEasing: instanciations x 585 ops/sec ±5.37% (80 runs sampled)
BezierEasing: calls x 850 ops/sec ±1.36% (96 runs sampled)
BezierEasing: instanciations & calls x 576 ops/sec ±4.16% (52 runs sampled)
lib version
BezierEasing: instanciations x 121 ops/sec ±5.11% (70 runs sampled)
BezierEasing: calls x 9856 ops/sec ±0.74% (100 runs sampled)
BezierEasing: instanciations & calls x 592 ops/sec ±8.79% (50 runs sampled)
the code that runs these benchmark is in benchmark.js
This is an optim of your code BTW:
// Closure-based cubic-bezier easing factory (optimized version: no `this`,
// plain lexical scoping). Returns a function aX -> y for the curve through
// (0,0), (mX1,mY1), (mX2,mY2), (1,1).
// FIX: the original was a bare anonymous function expression, which is a
// SyntaxError as a standalone statement — bind it to a name so it is runnable.
var bezierEasing = function(mX1, mY1, mX2, mY2) {
  // These values are established by empiricism with tests (tradeoff: performance VS precision)
  var ni = 4,                    // NEWTON_ITERATIONS
      nms = 0.001,               // NEWTON_MIN_SLOPE
      sp = 0.0000001,            // SUBDIVISION_PRECISION
      smi = 10,                  // SUBDIVISION_MAX_ITERATIONS
      ksts = 11,                 // k Spline Table Size
      ksss = 1.0 / (ksts - 1.0), // k Sample Step Size
      // FIX: `'Float32Array' in window` throws a ReferenceError outside
      // browsers (e.g. under node); feature-detect the constructor instead.
      f32as = typeof Float32Array !== 'undefined', // float32ArraySupported
      msv = f32as ? new Float32Array(ksts) : new Array(ksts), // m Sample Values
      _p = false;                // sample table precomputed yet?
  var A = function(aA1, aA2) { return 1.0 - 3.0 * aA2 + 3.0 * aA1; }
  var B = function(aA1, aA2) { return 3.0 * aA2 - 6.0 * aA1; }
  var C = function(aA1) { return 3.0 * aA1; }
  // Returns x(t) given t, x1, and x2, or y(t) given t, y1, and y2.
  var cB = function(aT, aA1, aA2) { // calc Bezier
    return ((A(aA1, aA2)*aT + B(aA1, aA2))*aT + C(aA1))*aT;
  }
  // Returns dx/dt given t, x1, and x2, or dy/dt given t, y1, and y2.
  var gS = function (aT, aA1, aA2) { // getSlope
    return 3.0 * A(aA1, aA2)*aT*aT + 2.0 * B(aA1, aA2) * aT + C(aA1);
  }
  // Bisection fallback used where the slope is too flat for Newton-Raphson.
  var bS = function(a, aA, aB, mX1, mX2) { // binary Subdivide
    var x, t, i = 0;
    do {
      t = aA + (aB - aA) / 2.0;
      x = cB(t, mX1, mX2) - a;
      if (x > 0.0) {
        aB = t;
      } else {
        aA = t;
      }
    } while (Math.abs(x) > sp && ++i < smi);
    return t;
  }
  // Refine the initial guess agt for t(x = aX) with a few Newton-Raphson steps.
  var nri = function (aX, agt) { // newton Raphson Iterate
    for (var i = 0; i < ni; ++i) {
      var cs = gS(agt, mX1, mX2);
      if (cs === 0.0) return agt; // flat slope: cannot improve, stop here
      var x = cB(agt, mX1, mX2) - aX;
      agt -= x / cs;
    }
    return agt;
  }
  // Precompute x samples at evenly spaced t values to seed the t(x) search.
  var csv = function () { // calc Sample Values
    for (var i = 0; i < ksts; ++i) {
      msv[i] = cB(i * ksss, mX1, mX2);
    }
  }
  // Given x in [0,1], find t such that x(t) == x (Newton when the slope is
  // steep enough, bisection when it is flat).
  var gx = function (aX) { // get t for X
    var is = 0.0, cs = 1, ls = ksts - 1;
    // Walk the sample table to bracket aX.
    for (; cs != ls && msv[cs] <= aX; ++cs) {
      is += ksss;
    }
    --cs;
    // Interpolate to provide an initial guess for t
    var dist = (aX - msv[cs]) / (msv[cs+1] - msv[cs]),
        gt = is + dist * ksss,
        ins = gS(gt, mX1, mX2);
    if (ins >= nms) {
      return nri(aX, gt);
    } else if (ins === 0.0) {
      return gt;
    } else {
      return bS(aX, is, is + ksss, mX1, mX2);
    }
  }
  var b = function (mX1, mY1, mX2, mY2) {
    // FIX: removed unused `var self = this;` — a leftover from the
    // this-based version; nothing below references it.
    return function(aX) { // process bezier
      // Lazily precompute the sample table on the first call.
      if (!_p) pc();
      if (mX1 === mY1 && mX2 === mY2) return aX; // linear
      // Exact endpoints avoid needless Newton/subdivision work.
      if (aX === 0) return 0;
      if (aX === 1) return 1;
      return cB(gx(aX), mY1, mY2);
    };
  }
  // One-time precompute; skipped entirely for the linear (identity) case.
  var pc = function() {
    _p = true;
    if (mX1 != mY1 || mX2 != mY2)
      csv();
  };
  return b(mX1, mY1, mX2, mY2);
};
no need to use this
in your version. this is about 10 times faster according to node.
BezierEasing: instanciations x 3,331 ops/sec ±1.11% (97 runs sampled)
BezierEasing: calls x 10,425 ops/sec ±1.08% (95 runs sampled)
BezierEasing: instanciations & calls x 1,877 ops/sec ±1.47% (91 runs sampled)
Your optimisation is still mysterious to me — have you figured out which difference produces a significant performance bottleneck? I don't think renaming all the variables helps at all in this; I've actually tried to minify/mangle my code, no change ;)
without a concrete example showing the issue and the performance enhancement I can't really adopt a new version.
Also new enhancements should not remove features (check for arguments, toString , toCSS) unless those are real bottlenecks
I am using it for my kute.js, I need BEST performance for my next version :)
I think the npm or any other make no sense unless you test it on the battleground 1000+ elements to animate :) also this here is not for your next version, is for you to have an idea about what I'm about to say.
So the purpose of my optimizations is to eliminate idle times with requestAnimationFrame animations and thus eliminate any unnecessary code, even features. The best way to do a test is the timeline in Chrome console with large number of animations, npm tests or jsperf are simply not realistic because animations are strictly in bed with render time.
I especially don't like this functions nesting but I guess it's just the way it works and cannot be changed.
// Inner factory quoted from the optimised version above: it relies on state
// captured in the enclosing closure (_p, pc, cB, gx), so it is not runnable
// on its own — shown here only to illustrate the nesting being discussed.
var b = function (mX1, mY1, mX2, mY2) {
// NOTE(review): `self` is never used below — leftover from the this-based version.
var self = this;
return function(aX) { // process bezier
// Lazily precompute the sample table on the first call.
if (!_p) pc();
if (mX1 === mY1 && mX2 === mY2) return aX; // linear
// Exact endpoints skip the Newton/subdivision search entirely.
if (aX === 0) return 0;
if (aX === 1) return 1;
// Solve t for the given x, then evaluate y(t).
return cB(gx(aX), mY1, mY2);
};
}
I will let you know more if that would be the case.
I just tested your optimized version and it is the same as mine in FPS value, so even if your bezier function performs 10x faster, in real life it's the same as mine.
Are you sure bezier-easing is the bottleneck? Can you share a demo somewhere?
Thanks
Here is a demo I've made for scroll using Robert Penner's easing functions http://codepen.io/thednp/pen/yNzLBQ
The demo right now is using the quadratic easing
// Penner-style easeInOutQuad: accelerates on [0, 0.5), decelerates on [0.5, 1].
// FIX: the original assigned to an undeclared identifier, creating an implicit
// global (and throwing a ReferenceError in strict mode) — declare it with var.
var quadraticInOut = function(k) { if ( ( k *= 2 ) < 1 ) return 0.5 * k * k; return - 0.5 * ( --k * ( k - 2 ) - 1 ); };
It's painfully obvious this completes the easing function a few thousand times faster than your code, even with my optimizations, but it's not as accurate as yours.
I am still researching and testing, haven't found anything perfect yet, I guess I have to make a hybrid with best of both two worlds.
Have you tried to use the Profiler / Timeline to see what takes times?
I highly suspect that DOM updates are going to be much, much slower than bezier-easing, especially if you have called bezier(a,b,c,d)
once (you should instantiate it once, right?).
It's just that, believe me when I say:
My point is, I know you want to make an improved future version, if we can keep in touch and share some best practices or keep track of what's coming. I want to make sure I talk to you before you release it, so I starred your projects, I want to be FIRST to know when we have a better code. I don't want to release my next KUTE.js version without an excellent bezier.
I also care you have an excellent script, just saying.
I believe you :) I'll try my best to bring better performance.
I've written a small experiment for me to easily reproduce the issue:
var BezierEasing = require(".");
// Builds a black, overflow-hidden container of n stacked rows (each w px wide
// and lh px tall) under document.body, and returns the row elements so the
// caller can animate them.
function create (n, w, lh) {
  var rows = [];
  var box = document.createElement("div");
  document.body.appendChild(box);
  box.style.width = w + "px";
  box.style.height = (n * lh) + "px";
  box.style.overflow = "hidden";
  box.style.background = "black";
  box.style.margin = "auto";
  var i = 0;
  while (i < n) {
    var row = document.createElement("div");
    row.style.width = w + "px";
    row.style.height = lh + "px";
    row.style.background = "red";
    box.appendChild(row);
    rows.push(row);
    ++i;
  }
  return rows;
}
// Demo driver: pushes a per-element, phase-shifted cosine through the easing
// every frame and writes the result out as a translateX percentage.
var easing = BezierEasing(0.2, 0.3, 1.0, 0.2);
var els = create(400, 400, 1);
requestAnimationFrame(function loop (t) {
// Re-schedule first so one slow iteration cannot stall the loop.
requestAnimationFrame(loop);
for (var i=0; i<els.length; ++i) {
// (1 + cos(...)) / 2 maps the oscillation into [0, 1] before easing;
// the i * 100 offset staggers the phase per element.
var percent = easing((1 + Math.cos((t + i * 100) / (1000 + 200 * Math.sin(t / 1000)))) / 2);
els[i].style.transform = "translateX("+(percent * 100)+"%)";
}
});
I'll try to locate the issue with current code
I think you can achieve something like a new script with slightly better performance and same or better accuracy.
This is what benchmark.js does ;-) this one is complementary
EDIT: oops sorry I misunderstood your comment ;)
Le 19 juin 2015 8:12 PM, "thednp" notifications@github.com a écrit :
I think you can achieve something like a new script with slightly better performance and same or better accuracy.
— Reply to this email directly or view it on GitHub https://github.com/gre/bezier-easing/issues/11#issuecomment-113594880.
Just a statement on my progress: I've just made 2 versions:
A first version where the animation of each line do not use BezierEasing
A second version where the animation uses BezierEasing (600 calls every requested animation frame)
And using the FPS meter. there are no visible difference (both ~30 FPS on my machine / under chrome). So I still can't reproduce that we can observe such difference.
Are you sure that, in your code, you are reusing the BezierEasing(_,_,_,_)
and not recreating it every time? (because constructing is not that optimized, even more so in your version – but not the function call, according to benchmarks)
The profiler says that on my example, the BezierEasing.f function takes 3.68% of total time:
which is pretty great regarding that I only trigger a single style update for each bezierEasing call.
Optimising more might be tough and may be premature optimisation.
In my code is part of the KUTE object.
KUTE = {
Tween : {/some specific stuff}
Easing : { bezier: function() //here is it }
}
The function is initialized once and executed everytime needed. I can do some code testing with your new script, once it's testable :)
BTW, the demo WITH bezier feels suuperb :) How many elements animate?
600 elements
So I tested your demo... why do I feel it's going at a variable speed?
yeah there seem to be some lag/pause recurrently. This is not because of JavaScript but because of repainting (if you see in timeline)
this is extremely hard to benchmark the library precisely. Optimising the current library is a good challenge. I don't find anything to improve right now.
Removing this part helps a bit but ONLY at instantiation time.
I can't find how to improve the current f
bezier lookup function.
I would take a different approach:
var Bezier = Bezier || (function(){
//this does the initialization, accessing other objects and functions
})();
Bezier.methods = {
theBezierValues: {mx1,mx2,my1,my2} //object
theBezierFunction1 : function(mx1, mx2,my1,my2){
//do some processing
},
theBezierFunction2 : function(a,b){
//do some more processing
}
}
Well?
I think this is going too far in premature optimisation. nesting is very fast, it is a feature of the language, compilers like V8 know how to optimize that well and compile to assembly. This means more memory (context, stack) but this has probably no significant impact on CPU. IMHO the important part is the algorithm not the code style. It currently adopts a functional programming style, less object style.
That said, I might considering a major refactoring of the code (mainly for simplification / normalisation purpose) but that would imply breaking a bit the API (not constructing a function, but an object with methods). I think as you recommended I need to use more prototype (you meant prototype, not methods, right?), less dynamic change.
In traditional proto JS style, this could be written like this:
function Bezier (a, b, c, d) {
this.a = a; ...same for b, c, d
}
Bezier.prototype = {
...
};
I'm not sure if I want to go that way yet.
To not annoy @ifishing more, I'm going to close this issue (the discussion has diverged from the initially raised issue, which is fixed).
I still can't reproduce the performance gain of your version, and the benchmark doesn't show it either. If you find anything / the reason for the perf gain, feel free to open a new issue. If you want to contribute to bezier-easing directly, I've written some "contributing" notes in the README of the project.
Thanks for your interest in the project :) and have fun with your tween library :)
In line 51, the variables mX1 and mX2 are not declared; it will throw an exception when the function is called.