PATCH against rev 755 add support for yl2x, yl2xp1 (text/plain, 2331 bytes)
Comments
Comment #0 by witold.baryluk+d — 2010-01-28T12:19:56Z
import std.stdio;
import std.math;
double iter(double x) {
static immutable a = log(4.0);
return x*a;
}
void main() {
writefln("%s", iter(5.0));
}
/usr/include/d/dmd2-posix/phobos/import/std/math.d(1415): Error: cannot evaluate yl2x(x,0xb.17217f7d1cf79acp-4L) at compile time
aaaa.d(6): Error: cannot evaluate log(4L) at compile time
aaaa.d(6): Error: cannot evaluate log(4L) at compile time
This also means that the DMD compiler currently will not perform constant folding on a.
Comment #1 by witold.baryluk+d — 2010-01-28T12:29:09Z
The same problem occurs with the exp function.
/usr/include/d/dmd2-posix/phobos/import/std/math.d(895): Error: cannot evaluate exp2(0xb.8aa3b295c17f0bcp-3L * x) at compile time
/usr/include/d/dmd2-posix/phobos/import/std/math.d(903): Error: cannot evaluate exp(cast(real)x) at compile time
aaaa.d(6): Error: cannot evaluate exp(4F) at compile time
aaaa.d(6): Error: cannot evaluate exp(4F) at compile time
I know this can be solved by using CTFE-able functions, but sqrt, sin, and cos, for example, already work correctly. I really don't want to hand-write obscure numerical constants and then not know where they came from :)
Related to bug 1475.
Comment #2 by dsimcha — 2010-01-28T12:46:12Z
A patch has just been submitted fairly recently for if(__ctfe). Once that's released, people (myself included) will start submitting patches to make compile-time versions of most of std.math. Most of the runtime implementations use assembly language somewhere for efficiency, or call the C standard lib implementations. We'll have to write more naive compile-time versions of these functions.
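For illustration, here is a minimal sketch of the if (__ctfe) dispatch pattern being described (the wrapper name myLog is hypothetical, and it assumes a naive CTFE-able implementation such as the ctfe_log posted in comment #4 below is in scope):

import std.math : log;

real myLog(real x)
{
    if (__ctfe)
        return ctfe_log(x); // naive, portable series implementation, usable at compile time
    else
        return log(x);      // existing runtime path (asm intrinsic / C library)
}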
Comment #3 by clugdbug — 2010-01-28T13:01:46Z
(In reply to comment #2)
> A patch has just been submitted fairly recently for if(__ctfe). Once that's
> released, people (myself included) will start submitting patches to make
> compile-time versions of most of std.math. Most of the runtime implementations
> use assembly language somewhere for efficiency, or call the C standard lib
> implementations. We'll have to write more naive compile-time versions of these
> functions.
if(__ctfe) is in the next release. yl2x() probably should have built-in support, though. Since it's an intrinsic, if (__ctfe) won't work for it.
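For reference, the error message at the top shows why: under version(INLINE_YL2X), std.math's log is essentially a thin wrapper over the intrinsic, roughly like the sketch below (not the verbatim Phobos source):

import std.math : yl2x, LN2;

real logViaIntrinsic(real x)
{
    // yl2x(x, y) computes y * log2(x); with y = LN2 this yields the natural log
    return yl2x(x, LN2);
}

Since yl2x has no D body to fall back to, an if (__ctfe) branch would have to go into log() itself, or the compiler would need to evaluate yl2x directly during CTFE.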
Comment #4 by witold.baryluk+d — 2010-01-28T15:55:35Z
So I release this as public domain. I wrote this code as a workaround for the lack of log and exp. It appears to be accurate to 16 decimal digits over the whole real line.
Unfortunately, the results can differ slightly from the values returned by std.math.{log,exp}. Does anybody want to review this code, or know of better methods?
/** Calculate the natural logarithm of x.
 *
 * Performs reduction of large values to the (1..3) interval, using log(x * 3^n) = log(x) + n*log(3).
 *
 * Then uses a truncated Taylor series in the variable y = (x-1)/(x+1) for x > 1.
 *
 * For values x < 1, calculates -log(1/x).
 */
double ctfe_log(double x_) {
if (x_ == 0.0) {
return -double.infinity;
}
if (x_ < 0.0) {
return double.nan;
}
if (x_ == double.infinity) {
return double.infinity;
}
if (x_ == 1.0) {
return 0.0;
}
if (!(x_ == x_)) { // nan
return double.nan;
}
real x = x_;
if (x > 1.0) {
uint m = 0;
// reduce to (1 .. 3) interval
while (x > 3.0) {
x = x / 3.0;
m = m + 1;
}
real y = (x-1.0)/(x+1.0);
real y2 = y*y;
/* Evaluate Horner's scheme on polynomial
* log(x) = log((1+y) / (1-y)) = 2 y (1 + y^2/3 + y^4/5 + y^6/7 + ... y^70/71)
*/
real temp = 0.0;
for (int i = 71; i >= 3; i -= 2) {
temp += 1.0/cast(real)i;
temp *= y2;
}
temp += 1.0;
y = 2.0*y*temp;
if (m) {
return y + m*ctfe_log(3.0);
} else {
return y;
}
} else {
return -ctfe_log(1.0/x);
}
}
/** Compute the exponential function of x.
 *
 * Uses a truncated Taylor series expansion of the exp function.
 *
 * Performs reduction for |x| > 2, of the form exp(x * 2^m) = exp(x)^(2^m).
 */
double ctfe_exp(double x_) {
if (x_ == 0.0) {
return 1.0;
}
if (x_ >= 710.0) { // includes +infinity
return double.infinity;
}
if (x_ <= -746.0) { // includes -infinity
return 0.0;
}
if (!(x_ == x_)) { // nan
return double.nan;
}
real x = x_;
int m = 0;
// reduce to (-2 .. 2) interval
if (x > 0.0) {
while (x > 2.0) {
x = x / 2.0;
m = m + 1;
}
} else {
while (x < -2.0) {
x = x / 2.0;
m = m - 1;
}
}
real temp = 1.0;
real term = 1.0;
for (int i = 1; i <= 25; i++) {
term *= x/cast(real)i;
temp += term;
}
if (m) {
// undo the reduction: exp(x * 2^|m|) == exp(x)^(2^|m|), via repeated squaring
if (m > 0) {
for (int i = 0; i < m; i++) {
temp = temp*temp;
}
} else {
for (int i = 0; i < -m; i++) {
temp = temp*temp;
}
}
return temp;
} else {
return temp;
}
}
int tests(double[] xs, int min, int max) {
assert(min <= max);
int r = 0;
double c = 1.0;
if (min < 0) {
for (int i = 0; i < -min; i++) {
c = c / 2.0;
}
}
if (min > 0) {
for (int i = 0; i < -min; i++) {
c = c * 2.0;
}
}
for (int i = min; i <= max; i++) {
foreach (x0; xs) {
auto x = c * x0;
if ( (ctfe_exp(ctfe_log(x)) - x) / x < 1.0e-16 ) {
// ok
} else {
r = r + 1;
}
}
}
return r;
}
enum c = tests(
[0.1, 0.1001, 0.11, 0.2, 0.24, 0.3, 0.341, 0.387123, 0.4,
0.5, 0.55, 0.6, 0.7, 0.732, 0.8, 0.88, 0.9, 0.98, 0.9991, 0.999992],
-300, 300);
static assert(c == 0);
static assert(ctfe_exp(0.0) == 1.0);
static assert(ctfe_exp(-1000.0) == 0.0);
static assert(ctfe_exp(1000.0) == double.infinity);
static assert(ctfe_log(0.0) == -double.infinity);
Comment #5 by witold.baryluk+d — 2010-01-31T08:03:28Z
There was a small but important error in the test function: c was not multiplied correctly. Also, the -300..300 range was too big and compilation took very long.
int tests(double[] xs, int min, int max) {
assert(min <= max);
int r = 0;
double c = 1.0;
if (min < 0) {
for (int i = 0; i < -min; i++) {
c = c / 2.0;
}
}
if (min > 0) {
for (int i = 0; i < min; i++) {
c = c * 2.0;
}
}
for (int i = min; i <= max; i++) {
foreach (x0; xs) {
auto x = c * x0;
if ( (ctfe_exp(ctfe_log(x)) - x) / x < 1.0e-16 ) {
;
} else {
r = r + 1;
}
}
c = c * 2.0;
}
return r;
}
unittest {
enum c = tests(
[0.1, 0.1001, 0.11, 0.2, 0.24, 0.3, 0.341, 0.387123, 0.4,
0.5, 0.55, 0.6, 0.7, 0.732, 0.8, 0.88, 0.9, 0.98, 0.9991, 0.999992],
-30, 30);
static assert(c == 0);
}
static assert(ctfe_exp(0.0) == 1.0);
static assert(ctfe_exp(-1000.0) == 0.0);
static assert(ctfe_exp(1000.0) == double.infinity);
static assert(ctfe_log(0.0) == -double.infinity);
Comment #6 by bearophile_hugs — 2010-05-12T03:12:43Z
For a generic compile-time function meant to work with all D floating-point types, it is better to use real everywhere:
/** Calculate the natural logarithm of x.
 *
 * Performs reduction of large values to the (1..3) interval, using
 * log(x * 3^n) = log(x) + n*log(3).
 *
 * Then uses a truncated Taylor series in the variable y = (x-1)/(x+1) for x > 1.
 *
 * For values x < 1, calculates -log(1/x).
 */
real ctfe_log(real x_) {
if (x_ == 0.0) {
return -real.infinity;
}
if (x_ < 0.0) {
return real.nan;
}
if (x_ == real.infinity) {
return real.infinity;
}
if (x_ == 1.0) {
return 0.0;
}
if (!(x_ == x_)) { // nan
return real.nan;
}
real x = x_;
if (x > 1.0) {
uint m = 0;
// reduce to (1 .. 3) interval
while (x > 3.0) {
x = x / 3.0;
m = m + 1;
}
real y = (x-1.0)/(x+1.0);
real y2 = y*y;
/* Evaluate Horner's scheme on polynomial
 * log(x) = log((1+y) / (1-y)) = 2 y (1 + y^2/3 + y^4/5 + y^6/7 + ... y^70/71)
 */
real temp = 0.0;
for (int i = 71; i >= 3; i -= 2) {
temp += 1.0/cast(real)i;
temp *= y2;
}
temp += 1.0;
y = 2.0*y*temp;
if (m) {
return y + m*ctfe_log(3.0);
} else {
return y;
}
} else {
return -ctfe_log(1.0/x);
}
}
/** Compute the exponential function of x.
 *
 * Uses a truncated Taylor series expansion of the exp function.
 *
 * Performs reduction for |x| > 2, of the form exp(x * 2^m) = exp(x)^(2^m).
 */
real ctfe_exp(real x_) {
if (x_ == 0.0) {
return 1.0;
}
if (x_ >= 710.0) { // includes +infinity
return real.infinity;
}
if (x_ <= -746.0) { // includes -infinity
return 0.0;
}
if (!(x_ == x_)) { // nan
return real.nan;
}
real x = x_;
int m = 0;
// reduce to (-2 .. 2) interval
if (x > 0.0) {
while (x > 2.0) {
x = x / 2.0;
m = m + 1;
}
} else {
while (x < -2.0) {
x = x / 2.0;
m = m - 1;
}
}
real temp = 1.0;
real term = 1.0;
for (int i = 1; i <= 25; i++) {
term *= x/cast(real)i;
temp += term;
}
if (m) {
// undo the reduction: exp(x * 2^|m|) == exp(x)^(2^|m|), via repeated squaring
if (m > 0) {
for (int i = 0; i < m; i++) {
temp = temp*temp;
}
} else {
for (int i = 0; i < -m; i++) {
temp = temp*temp;
}
}
return temp;
} else {
return temp;
}
}
Comment #7 by bearophile_hugs — 2010-05-12T04:10:38Z
See bug 4177 for a blocker of this.
Comment #8 by s.d.hammett — 2010-11-15T05:27:01Z
Created attachment 811
PATCH against rev 755 add support for yl2x, yl2xp1
Implements yl2x and yl2xp1 as builtins for DMC and Visual Studio.
Needs an implementation for GCC.
Comment #9 by bugzilla — 2010-12-05T22:31:08Z
I'm going to defer this until there's a reasonably portable implementation of the specifics of yl2x and yl2xp1. Probably one can be built out of the C standard library math functions.
Failing that, we'll need an inline asm version for GCC, and a 64-bit inline version.
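For what it's worth, a hedged sketch of what such a portable fallback could look like, assuming C99's log2l/log1pl as declared in core.stdc.math and the identities yl2x(x, y) == y * log2(x) and yl2xp1(x, y) == y * log2(x + 1):

import core.stdc.math : log2l, log1pl;
import std.math : LOG2E; // log2(e) == 1/ln(2)

real yl2x_portable(real x, real y)
{
    return y * log2l(x);
}

real yl2xp1_portable(real x, real y)
{
    // log2(x + 1) == log1p(x) * log2(e); log1p keeps precision for small x
    return y * log1pl(x) * LOG2E;
}

Note that this only solves portability at runtime; the C library calls still can't be evaluated during CTFE.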
Comment #10 by code — 2013-01-15T16:07:47Z
What's the state of this?
Having log/exp functions at compile time would be very useful, e.g. to pregenerate scientific tables.
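As a concrete illustration of that use case, a minimal sketch assuming a CTFE-able log such as the ctfe_log posted above:

real[256] makeLogTable()
{
    real[256] tab;
    tab[0] = -real.infinity;
    foreach (i; 1 .. 256)
        tab[i] = ctfe_log(cast(real) i); // each entry computed during compilation
    return tab;
}

static immutable logTable = makeLogTable(); // initializer is evaluated at compile time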
Comment #11 by ibuclaw — 2014-01-07T14:56:54Z
(In reply to comment #10)
> What's the state of this?
> Having log/exp functions at compile time would be very useful, e.g. to
> pregenerate scientific tables.
std.math now has pure generic implementations.
One of the main problems holding CTFE support back is that there is no straightforward way to do operations such as isNaN, isInfinity, signbit, and frexp, which require access to (and, for some, manipulation of) the bits of floating-point types.
GDC already has the ability to convert float <-> array in place, but it's not in use because CTFE doesn't allow it.
That said, DMD has a problem that GDC doesn't: it uses the inline asm (IASM) implementations of the std.math functions, which can't be evaluated at compile time.
Comment #12 by code — 2014-01-07T19:47:17Z
(In reply to comment #11)
> (In reply to comment #10)
> > What's the state of this?
> > Having log/exp functions at compile time would be very useful, e.g. to
> > pregenerate scientific tables.
>
> std.math now has pure generic implementations.
>
> One of the main problems holding CTFE support back is that there is no
> straightforward way to do math operations such as isNaN, isInfinity, signbit,
> frexp which require access (and for some, manipulation) of the bits in float
> types.
We had the same issue with hashOf in druntime, and now there is a huge machinery to compute the exponent and mantissa.
Could we allow reading specific floating-point properties through intrinsics at compile time?
Something like exponent(float), signbit(float), mantissa(float)?
> Saying that, DMD has a problem that GDC doesn't - it uses the IASM
> implementations of std.math functions that can't be evaluated at compile time.
How do you treat std.math.log at runtime: as an intrinsic, or does it run the same code? Is there a performance penalty?
Comment #13 by ibuclaw — 2014-01-08T04:31:41Z
(In reply to comment #12)
> (In reply to comment #11)
> > (In reply to comment #10)
> > > What's the state of this?
> > > Having log/exp functions at compile time would be very useful, e.g. to
> > > pregenerate scientific tables.
> >
> > std.math now has pure generic implementations.
> >
> > One of the main problems holding CTFE support back is that there is no
> > straightforward way to do math operations such as isNaN, isInfinity, signbit,
> > frexp which require access (and for some, manipulation) of the bits in float
> > types.
>
> We had the same issue with hashOf in druntime and now there is a huge machinery
> to compute exponent and mantissa.
> Could we allow to read specific floating point values through intrinsics at
> compile time?
> Something like exponent(float), signbit(float), mantissa(float)?
>
> > Saying that, DMD has a problem that GDC doesn't - it uses the IASM
> > implementations of std.math functions that can't be evaluated at compile time.
>
> How do you treat std.math.log at runtime as intrinsic or does it run the same
> code? Is there a performance penalty?
We don't implement yl2x in GDC, so it uses the !INLINE_YL2X implementation. :)
If you instead meant, e.g., std.math.sin, this is the process (approximately):
User code (import std.math)
-> Compiler registers real sin() as BUILT_IN_FRONTEND
User code calls std.math.sin
-> CTFE sees function is BUILTINsin and calls eval_builtin()
-> eval_builtin generates call to BUILT_IN_SINL and then calls fold_builtin()
-> fold_builtin validates input parameters and calls mpfr_sin()
-> Returns MPFR evaluated value to fold_builtin as GCC tree.
-> Returns GCC tree to eval_builtin which converts it to RealExp value.
Compiler unable to fold builtin
-> Returns CallExp to C math lib function sin()
It's a long-winded way, and ideally I'd like to skip passing through the middle-end and call MPFR directly. :)
Comment #14 by ibuclaw — 2014-01-08T04:37:37Z
(In reply to comment #13)
> (In reply to comment #12)
> > (In reply to comment #11)
> > > (In reply to comment #10)
> > > > What's the state of this?
> > > > Having log/exp functions at compile time would be very useful, e.g. to
> > > > pregenerate scientific tables.
> > >
> > > std.math now has pure generic implementations.
> > >
> > > One of the main problems holding CTFE support back is that there is no
> > > straightforward way to do math operations such as isNaN, isInfinity, signbit,
> > > frexp which require access (and for some, manipulation) of the bits in float
> > > types.
> >
> > We had the same issue with hashOf in druntime and now there is a huge machinery
> > to compute exponent and mantissa.
I honestly don't know what to say about that. I've not tested it, and I anticipate it simply won't work in GDC.
> > Could we allow to read specific floating point values through intrinsics at
> > compile time?
> > Something like exponent(float), signbit(float), mantissa(float)?
> >
>
Something like this was suggested and discarded IIRC. I'm not sure how I would be able to do this from within GCC's infrastructure - or at least return meaningful / usable data in the case of exponent and mantissa bits. :)
Comment #15 by code — 2014-01-08T17:07:23Z
(In reply to comment #13)
> We don't implement yl2x in GDC, so it uses the !INLINE_YL2X implementation. :)
At runtime? I'm quite surprised to hear that; isn't the performance a bummer compared to the x87 instruction? On other platforms this might not be an argument, though.
(In reply to comment #14)
> > > Something like exponent(float), signbit(float), mantissa(float)?
>
> Something like this was suggested and discarded IIRC. I'm not sure how I would
> be able to do this from within GCC's infrastructure - or at least return
> meaningful / usable data in the case of exponent and mantissa bits. :)
Well, the parse(Float)(Float f) function only uses normal CTFE floating-point computations to determine these values.
https://github.com/D-Programming-Language/druntime/blob/a977a65ac7df366c002033c43e11cd05925a25fb/src/core/internal/convert.d#L91
It's quite complex, so you can see how desperate we are to get CT hashing.
Although we might disable CT hashing for floats if this gets out of hand.
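For illustration of the basic idea, here is a much-simplified sketch (not the druntime code linked above): the binary exponent of a finite positive value can be recovered using only ordinary arithmetic, which CTFE can handle:

int ctfeExponent(real x)
{
    assert(x > 0 && x < real.infinity);
    int e = 0;
    while (x >= 2.0) { x /= 2.0; ++e; } // scale down until x < 2
    while (x < 1.0)  { x *= 2.0; --e; } // scale up until x >= 1
    return e; // the original value was in [2^e, 2^(e+1))
}

static assert(ctfeExponent(8.0) == 3);
static assert(ctfeExponent(0.25) == -2);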
Comment #16 by ibuclaw — 2014-01-09T03:04:44Z
(In reply to comment #15)
> (In reply to comment #13)
>
> > We don't implement yl2x in GDC, so it uses the !INLINE_YL2X implementation. :)
>
> At runtime? I'm quite surprised to hear that; isn't the performance a bummer
> compared to the x87 instruction? On other platforms this might not be an argument,
> though.
>
I guess so. But I don't give x86 any favours over other targets in the glue. (I kicked out all x86-specific codegen last year :)
Comment #17 by hsteoh — 2014-06-27T05:39:15Z
I've been trying to get more of std.math usable in CTFE; unfortunately, the only obvious fixes I can find are isInfinity and isNaN (and I'm not 100% sure they are correct):
https://github.com/quickfur/phobos/tree/ctfe_nan
Any hope of getting floating point intrinsics into DMDFE? It would be *really* nice to have that.
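For reference, a minimal sketch of the kind of arithmetic-only checks meant here (not necessarily what the branch above does); they avoid bit access entirely and so work under CTFE:

bool ctfeIsNaN(real x)
{
    return x != x; // NaN is the only value that compares unequal to itself
}

bool ctfeIsInfinity(real x)
{
    return x < -real.max || x > real.max; // only +-infinity lie outside [-real.max, real.max]
}

static assert(ctfeIsNaN(real.nan));
static assert(!ctfeIsNaN(1.5));
static assert(ctfeIsInfinity(-real.infinity));
static assert(!ctfeIsInfinity(real.nan));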