Julia: nextfloat(::BigFloat, n) does not exist

Created on 6 Mar 2019 · 54 comments · Source: JuliaLang/julia

For most floating point types, nextfloat(x, n) and prevfloat(x, n) are defined to be the nth next or nth previous floating point value. BigFloats support neither.

Labels: good first issue, help wanted

Most helpful comment

Great! Use the library. This is a small part of a large undertaking otherwise. It's best to solve it simply.

All 54 comments

The most basic approach to implementing this would be to just iterate the single-argument nextfloat/prevfloat call n times. For extra credit one could try to do something more efficient, but I'm not sure it's worth it. An intermediate improvement would be to avoid allocating intermediate BigFloat values by using the mutating MPFR operations.

Without handling any special cases (there may be none):

nextfloat(x::BigFloat, n::Integer) = x + n*eps(x)
prevfloat(x::BigFloat, n::Integer) = x - n*eps(x)

The above matches iterating the single-argument function in my few tests.

I would worry about crossing an eps-size boundary. From larger eps to smaller eps the increment could end up being too big. In the other direction I think it could also end up being too small.
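For instance, in Float64 the eps-based formula already disagrees with prevfloat at the power-of-two boundary at 1.0 (the same concern carries over to BigFloat):

julia> prevfloat(1.0), 1.0 - eps(1.0)
(0.9999999999999999, 0.9999999999999998)

julia> prevfloat(1.0) == 1.0 - eps(1.0)
false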

That is correct.

function Base.prevfloat(x::BigFloat, n::Int)
    signbit(n) && return nextfloat(x, abs(n))
    result = x
    for i=1:n
        result = prevfloat(result)
    end
    return result
end

function Base.nextfloat(x::BigFloat, n::Int)
    signbit(n) && return prevfloat(x, abs(n))
    result = x
    for i=1:n
        result = nextfloat(result)
    end
    return result
end

The simplest option is probably to call mpfr_nextabove/mpfr_nextbelow n times.

See also #10040 and #19948.

for reference (from Base/mpfr.jl)

function nextfloat(x::BigFloat)
    z = BigFloat()
    ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, MPFRRoundingMode),
          z, x, ROUNDING_MODE[])
    ccall((:mpfr_nextabove, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    return z
end

function prevfloat(x::BigFloat)
    z = BigFloat()
    ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, MPFRRoundingMode),
          z, x, ROUNDING_MODE[])
    ccall((:mpfr_nextbelow, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    return z
end

We could have MPFR.nextfloat! and define nextfloat(x::BigFloat) = nextfloat!(BigFloat(x)) (or however we create a copy of a BigFloat).

That seems like a good idea. I would be in favour of exposing more of the mutating MPFR API, but clearly document that they should only be used on values that are created within the scope of the function.
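For instance, that caveat could live right in a docstring on the mutating method (a sketch only; the final wording belongs in the PR):

"""
    MPFR.nextfloat!(x::BigFloat)

Advance `x` in place to the next representable `BigFloat` and return it.
Because this mutates its argument, call it only on values created within the
enclosing function; mutating a `BigFloat` that is shared with callers changes
it for every binding that refers to it.
"""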

This duplicates a BigFloat

function Base.deepcopy(x::BigFloat)
    z = BigFloat()
    # final 0 sets rounding to nearest
    ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, 0)
    return z
end
julia> a = BigFloat(1); b = deepcopy(a);
julia> a == b, a === b
(true, false)

but it forms the duplicate using the global BigFloat precision even when x was created at a different precision, so how about this?

function Base.deepcopy(x::BigFloat)
    setprecision(BigFloat, x.prec) do
        z = BigFloat()
        # final 0 sets rounding to nearest
        ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, 0)
        return z
    end
end

function nextfloat!(x::BigFloat)
    z = deepcopy(x)
    ccall((:mpfr_nextabove, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    return z
end

function prevfloat!(x::BigFloat)
    z = deepcopy(x)
    ccall((:mpfr_nextbelow, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    return z
end

Base.nextfloat(x::BigFloat) = nextfloat!(x)
Base.prevfloat(x::BigFloat) = prevfloat!(x)

function Base.nextfloat(x::BigFloat, n::Int)
    n === 0 && return deepcopy(x)
    signbit(n) && return prevfloat(x, abs(n))
    z = nextfloat(x)
    n -= 1
    for i=1:n
       ccall((:mpfr_nextabove, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    end
    return z
end

function Base.prevfloat(x::BigFloat, n::Int)
    n === 0 && return deepcopy(x)
    signbit(n) && return nextfloat(x, abs(n))
    z = prevfloat(x)
    n -= 1
    for i=1:n
       ccall((:mpfr_nextbelow, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    end
    return z
end

My thought was nextfloat! and prevfloat! modify BigFloat values in-place instead of copying.

function BigFloat(x::BigFloat, r::MPFRRoundingMode=ROUNDING_MODE[];
                            precision::Integer=DEFAULT_PRECISION[],
                            new::Bool=false)
     if new || precision != MPFR.precision(x)
         z = BigFloat(;precision=precision)
         ccall((:mpfr_set, :libmpfr), Int32, 
                             (Ref{BigFloat}, Ref{BigFloat}, MPFRRoundingMode), z, x, r)
     else
         z = x
     end 
     return z 
end 

function nextfloat!(x::BigFloat)
    ccall((:mpfr_nextabove, :libmpfr), Int32, (Ref{BigFloat},), x) != 0
    return x
end

function prevfloat!(x::BigFloat)
    ccall((:mpfr_nextbelow, :libmpfr), Int32, (Ref{BigFloat},), x) != 0
    return x
end


function nextfloat!(x::BigFloat, n::Int)
    n === 0 && return x
    signbit(n) && return prevfloat!(x, abs(n))
    for i=1:n
       ccall((:mpfr_nextabove, :libmpfr), Int32, (Ref{BigFloat},), x) != 0
    end
    return x
end

function prevfloat!(x::BigFloat, n::Int)
    n === 0 && return x
    signbit(n) && return nextfloat!(x, abs(n))
    for i=1:n
       ccall((:mpfr_nextbelow, :libmpfr), Int32, (Ref{BigFloat},), x) != 0
    end
    return x
end

nextfloat(x::BigFloat) = nextfloat!(BigFloat(x, new=true))
prevfloat(x::BigFloat) = prevfloat!(BigFloat(x, new=true))

function nextfloat(x::BigFloat, n::Int)
    n === 0 && return BigFloat(x, new=true)
    signbit(n) && return prevfloat(x, abs(n))
    z = nextfloat(x)
    n -= 1
    for i=1:n
       ccall((:mpfr_nextabove, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    end
    return z
end

function prevfloat(x::BigFloat, n::Int)
    n === 0 && return BigFloat(x, new=true)
    signbit(n) && return nextfloat(x, abs(n))
    z = prevfloat(x)
    n -= 1
    for i=1:n
       ccall((:mpfr_nextbelow, :libmpfr), Int32, (Ref{BigFloat},), z) != 0
    end
    return z
end

#=
function librarybased_internal_deepcopy(x::BigFloat)
     z = BigFloat(; precision = precision(x))
     ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, 0)
     return z
end
=#

Instead of using the MPFR library, can't we solve this with a native implementation? There is already a GSoC project, Native Bignums, for reimplementing GMP and MPFR in Julia itself.
So using the MPFR interface now might lead to redundant work. What are your views on this? @JeffreySarnoff @simonbyrne

It would be outstanding for that project to reimplement both GMP Ints and MPFR Floats in Julia.
The code above is there to minimize the work required for someone to complete this as a PR.
The Summer of Code project would benefit from a clean way to check their implementation of nextfloat and prevfloat, should that happen. On balance, I see little effort overlap.

I am interested in solving this issue. So, for now, should I try to write native code or use the MPFR library as you mentioned above?

Great! Use the library. This is a small part of a large undertaking otherwise. It's best to solve it simply.

I tried the following code in the Julia REPL. According to these results, deepcopy() makes a copy with the same precision as x.

julia> x = BigFloat(1, 32) # BigFloat of precision 32
1.0

julia> x.prec # Precision of x is 32
32

julia> deepcopy(x).prec # deepcopy of x has precision 32
32

julia> BigFloat(1).prec # Default precision is 256
256

Yes, it does. The implementation above that is specialized for BigFloat also preserves the precision, and it allocates less and runs faster:

using BenchmarkTools
# hide the `$`-interpolation annotations that @btime needs to time this accurately
macro bench(f,x)
    esc(:($f($Ref(x)[])))
end

x = BigFloat(1.0, 108)

@btime @bench(current_deepcopy, x);    # the current deepcopy (via deepcopy_internal)
  176.458 ns (5 allocations: 464 bytes)

@btime @bench(proposed_deepcopy, x);   # the specialized copy sketched above
  130.293 ns (4 allocations: 112 bytes)

You are not required to use the specialized implementation; do you have a reason to avoid it?

No. While experimenting, I found this and thought that specialized implementation might be redundant. But I had not benchmarked the code. I will definitely include the specialized implementation now.

julia> x = BigFloat(1, 100)
1.0
julia> x.prec
100
julia> nextfloat(x).prec
256

The precision of nextfloat(x) is not the same as x. Same is the case for prevfloat(). Is this by design or is it a bug?

The precision should be maintained. The source code above does this. Please use it when developing your PR.

Do we encourage extending deepcopy?

Note that you no longer need setprecision; instead you can simply do:

z = BigFloat(;precision=precision(x))
ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, 0)

In this specific instance, where the PR is into Base, I don't see this as an external extension of deepcopy. Taking advantage of this type-specific speedup and memory tightening inside Base seems reasonable. (Thanks for the code snippet; I'll edit the code above to use it.)

According to the docs for deepcopy(), it is not recommended to override deepcopy directly (and we also cannot do so without changing Base); instead one should extend deepcopy_internal(). Currently, deepcopy_internal() is implemented, but its performance is not even close to the customized deepcopy() implementation.

julia> @btime Base.deepcopy_internal(x, IdDict())
  88.254 ns (4 allocations: 464 bytes)

julia> @btime customized_deepcopy(x)
  30.218 ns (2 allocations: 96 bytes) # these results are obtained without using setprecision()

I should also mention that deepcopy_internal() is only used in deepcopy.jl (generic implementations), mpfr.jl (for BigFloat), and gmp.jl (for BigInt).
I am in favor of making the necessary changes in Base and allowing deepcopy() to be overridden, as I don't see any harm in this. What are your views on this? @JeffreySarnoff

I guess defining deepcopy directly instead of deepcopy_internal allows us to skip allocating an IdDict (for preserving reference topology). Maybe we could fix that more generally by only allocating the IdDict when it is first needed. Otherwise we need to be certain the deepcopy and deepcopy_internal methods do exactly the same thing; they should share core.
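One way to share a core along those lines would be to specialize deepcopy_internal itself with the fast copy and still record the result in the IdDict (a sketch only, not a settled design):

function Base.deepcopy_internal(x::BigFloat, stackdict::IdDict)
    # reuse an already-copied object so reference structure is preserved
    haskey(stackdict, x) && return stackdict[x]
    z = BigFloat(; precision = precision(x))
    ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, 0)
    return stackdict[x] = z
end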

Or maybe the BigFloat constructor should always make a copy. That's non-breaking since it currently makes a copy sometimes. convert(BigFloat, ::BigFloat) still avoids copying so there shouldn't be too big a performance impact (?).

Since they have to share a core, it's goof-proofier to define deepcopy_internal. More substantively, having some type be the exception is unhelpful to a language that works at having no barriers to its fluid design.

Do all specializations of deepcopy_internal keep a dictionary of already-seen entities? Is this (that all deepcopy_internal functions memoize) something that is required by the design of deepcopy? Would everything work just as well if deepcopy_internal(::BigFloat) did not keep this dict? If so, let's time both ways, with repetitive values and with less repetitive values.

Should I wait for the final solution for deepcopy or move forward by using deepcopy_internal?

I tried simulating the issue with the given functions and I got the following results:

function memoized_deepcopy(x::BigFloat, n::Int)
       for i=1:n
            deepcopy(x) == x # current implementation of deepcopy, which has memoization built in
       end
end

function proposed_deepcopy(x::BigFloat)
      z = BigFloat(;precision=x.prec)
      ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, 0)
      return z
end

function non_memoized_deepcopy(x::BigFloat, n::Int)
       for i=1:n
            proposed_deepcopy(x) == x
       end
end

julia> using BenchmarkTools

julia> @btime memoized_deepcopy(BigFloat(1), 100)
  9.839 μs (402 allocations: 46.98 KiB)

julia> @btime non_memoized_deepcopy(BigFloat(1), 100)
  3.152 μs (202 allocations: 11.05 KiB)

julia> @btime memoized_deepcopy(BigFloat(1), 10000000)
  1.262 s (40000002 allocations: 4.47 GiB)

julia> @btime non_memoized_deepcopy(BigFloat(1), 10000000)
  361.788 ms (20000002 allocations: 1.04 GiB)

@JeffreySarnoff I think it is to deal with cases like:

julia> x = fill(zero(BigFloat), 100);

julia> x[1].d == x[2].d
true

julia> y = deepcopy(x);

julia> y[1].d == y[2].d
true

By those timings, the non-memoized version is noticeably snappier. Apparently, using memoization when deepcopying BigFloats carries both a time cost (roughly 3x) and a memory cost (roughly 4x).

Is it really that much of a drag?

I would suggest avoiding using deepcopy at all if possible.

That is a prickly :cactus: thing, Simon.

So, let's use our own unattached function name for the BigFloat duplication logic you have sketched.

function duplicate(x::BigFloat)
    z = BigFloat(; precision = precision(x))
    ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, Int32), z, x, 0)
    return z
end

Should we also introduce a duplicate(x::BigInt) function, since a similar speedup can be achieved there too? (A sketch of the idea follows the list below.)

We would need to include these points in the documentation.

  • use duplicate() instead of deepcopy() in case of BigFloat
  • and documentation for duplicate()
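For illustration, the BigInt analogue might look like this (a sketch assuming GMP's __gmpz_set entry point, which gmp.jl already calls; note the thread later moves away from a separate duplicate name):

function duplicate(x::BigInt)
    z = BigInt()
    # copy the value of x into the freshly initialized z
    ccall((:__gmpz_set, :libgmp), Cvoid, (Ref{BigInt}, Ref{BigInt}), z, x)
    return z
end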

"Memoization" is a bit of a misnomer here --- the purpose of the dictionary is not to avoid redundant computation, it's to ensure the new object has the exact same reference structure as the original. So it's expected to be more expensive. However, of course this is not necessary just to deepcopy one BigFloat. So that's an optimization we could try to do in deepcopy.

Would that mean two classes of deepcopy-able things, those that replicate best without the support dictionary and all the others?

@narendrakpatel That is not a good approach. duplicate is used as a way to walk around terms that are given specific meaning/uses/constraints within Julia .. it is not intended to become another one.
(that would get confusing, imo). @JeffBezanson has the right approach in view. Let's conform to this.

a simpler option might be to add a forcenew keyword option to the BigFloat(::BigFloat) constructor:
https://github.com/JuliaLang/julia/blob/1fc43803696585bf4dc9792b34cf359e4611b270/base/mpfr.jl#L183-L192

So a = BigFloat(pi); b = BigFloat(a); c = BigFloat(a, forcenew=true) would mean
a == b, a == c, a === b, a !== c?

As a matter of personal programming policy, every type I create includes ThisType(x::ThisType) = x.
That the default is to give back what has been given makes sense to me.
I'm less comfortable with ThisType(x::ThisType; forcenew::Bool = false) because however we decide that replication is intended/required, that paradigm will sweep into preferred practice (which is good), and I find _forcenew_ to be, well, a bit forced. Would the more pithy _new_, in allegorical connection with struct ... return new() ..., be well received? ThisType(x::ThisType; new::Bool=false)

I think that the infra-deepcopy code will move to let values of some types be copied without involving a reference dict, and to provide reference-dict-enabled copying for types that have not demonstrated safety or have not assured logical consistency in duplication absent a reference dict. The internal_deepcopy specializations will continue to provide the buffer they were designed to provide, as they (the information and understanding required to better do that) improve [my present take].

The manner of supporting duplication absent a reference dict for types like BigFloat and, probably, BigInt (I am unfamiliar with the internals of that lib) should solidify as a consensus soon, now that our attention has been brought to bear. It is not necessary to pause this PR-in-development until that is provided. It is wise to develop the specifics of this PR in a manner that makes it easy to meld with the eventual solution to duplication sans reference dict for types where that is most desirable.

_sketched in straw_

Provide a BigFloat constructor with the keyword new, a Bool value that defaults to false.
- for internal use, so far as this PR effort is concerned
- probably exported as an enhancement to the BigFloat API along with a few other smoothifications
  at a later date, likely a follow-on working in concert with the deepcopy resolution

Use that constructor to duplicate BigFloat values where needed to perform prevfloat, nextfloat.

Follow @simonbyrne in implementing the constructor with the keyword new

Follow @StefanKarpinski in implementing prevfloat! nextfloat!
and use them in implementing prevfloat nextfloat

and I'll keep editing the code above to reflect new refinements from y'all
to make it easier for @narendrakpatel to move forward with a successful PR

@narendrakpatel please formulate some tests as you review the code above.
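A starting point for such tests might be something like this (a sketch only; the exact expectations are for the PR to settle):

using Test
setprecision(BigFloat, 100) do
    x = BigFloat(1)
    # the n-argument forms should agree with iterating the one-argument forms
    @test nextfloat(x, 3) == nextfloat(nextfloat(nextfloat(x)))
    @test prevfloat(x, 3) == prevfloat(prevfloat(prevfloat(x)))
    # negative counts go the other way, zero is a no-op
    @test nextfloat(x, -2) == prevfloat(x, 2)
    @test nextfloat(x, 0) == x
    # precision must be preserved
    @test precision(nextfloat(x, 5)) == precision(x)
end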

I noticed that BigFloat(x::BigFloat, r::MPFRRoundingMode=ROUNDING_MODE[]; precision::Integer=DEFAULT_PRECISION[], new::Bool=false) overwrites the method BigFloat(x::BigFloat; new::Bool=false), so it cannot be done in the way mentioned in the code above.
To solve this I wrote this code:

function BigFloat(x::BigFloat, r::MPFRRoundingMode=ROUNDING_MODE[];
                  precision::Integer=DEFAULT_PRECISION[], new::Bool=false)
    precision == MPFR.precision(x) && !new && return x
    z = BigFloat(; precision = new ? MPFR.precision(x) : precision)
    ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, MPFRRoundingMode),
              z, x, r)
    return z
end

It works perfectly, but this way is a bit slower than the one mentioned in the code above. The only way I could think of to get the performance benefits of the above function is to define a new method like duplicate(). But I guess it is not recommended to do things this way.
What are your suggestions on this? @JeffBezanson @JeffreySarnoff
Edit: Maybe we can define an internal method __BigFloat(). This way the semantics will be consistent and the performance will also improve.

please try this revision

One may request a BigFloat that is new and of a specified precision that differs from the precision of the BigFloat argument x. Your z = line presupposes that a new version of a given BigFloat must be of the same precision as that of the BigFloat given. Often that is the desired behaviour. Sometimes we want to increase the precision while preparing for a less stable calculation and, when done, wind the precision back down again.

Consider the case where the user wants to create a new copy of x: the copy would then have the default precision, i.e. to make a deepcopy we would have to call BigFloat(x, precision=precision(x), new=true).
But in my case:

julia> x = BigFloat(12.123,100) # x with precision 100
12.122999999999999332089828385506

julia> a = BigFloat(x) # x with default(high) precision
12.1229999999999993320898283855058252811431884765625

julia> (a==x, a===x)
(true, false)

julia> a = BigFloat(x, new=true) # deepcopy of x
12.122999999999999332089828385506

julia> (a==x, a===x)
(true, false)

julia> a = BigFloat(x, precision(x)) # copy of x
12.122999999999999332089828385506

julia> (a==x, a===x)
(true, true)

I believe this is a bit cleaner syntax and makes more sense semantically. What are your views?

Working from solution to implementation is sound.

FYI, the use of BigFloat(x, n) is being replaced by BigFloat(x, precision=n). This manner of reform, using keyword args to clarify and subspecify, is happening with many Julia functions (e.g. round(x, n) becomes round(x, digits=n) or round(x, sigdigits=n), as needed).

The new keyword is inspired by structs' use of new with an inner constructor. It is rather specific and semantically targeted: construct a new exemplar of the type with the value as provided (usually; sometimes construct an empty or zeroed instance).
I see no a priori commitment to promulgating a default precision, adopting a copied precision, or establishing a specified precision that attaches to new.

Of course, we may establish a convention; I'd encourage that which best serves Julia as it allows us to develop things that _just work_. And the other ways to assign precision would be available by following what obtains most properly therefrom and smoothly therewith.

a pleasing look

# either
# go with the initial setting for BigFloat precision as the default _256 bits_.
# (which occurs through doing nothing)
# or

setprecision(BigFloat, digits=D, base=B)

# establish a default precision, a contextually uniform value, a backstop.
# if `base` is given and `digits` is unspecified, `digits` is inferred
# to best match the current default (which is kept as a number of bits),
# so do nothing, or just keep the given base and the number of digits
# in that base which corresponds to the current default precision in bits.
# if `digits` is given and `base` is unspecified, `digits` is a bit count.
# (a small digits-to-bits helper is sketched after this whole example)

 bf = BigFloat(pi)
                   #  precision(bf) == <current default for BigFloats, a bit count>
# ----

 bf₁ = BigFloat(bf, precision=100)

 bf₂ = BigFloat(bf₁)                               #  !revised!
                   #  bf₂ === bf₁ && bf₂ == bf₁  &&  precisions match
                   #  ThisType(x::ThisType) === x #  correction

 bf₂ = BigFloat(bf₁, new=true)
                   #  bf₂ !== bf₁ && bf₂ == bf₁  &&  precisions match

 bf₂ = BigFloat(bf₁, new=false)
                   #  bf₂ === bf₁ && bf₂ == bf₁  &&  precisions match

# ----

 bf₂ = BigFloat(bf₁, precision=128)
                   #  bf₂ !== bf₁ && bf₂ == bf₁  &&  precisions differ

 bf₂ = BigFloat(bf₁, precision=128, new=true)
                   #  bf₂ !== bf₁ && bf₂ == bf₁  &&  precisions differ

 bf₂ = BigFloat(bf₁, precision=128, new=false)
                   #  bf₂ !== bf₁ && bf₂ == bf₁  &&  precisions differ
                   #  new=false is ignored because the precisions differ
                   #  a warning about ignoring `new=false` may be given
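Picking up the `digits`/`base` idea from the comment block above, the inference between digit counts and bit counts could be as simple as this (hypothetical helper names, not part of any proposal yet):

# hypothetical helpers: convert between a digit count in `base` and a bit count
bits_for(digits::Integer, base::Integer=10)  = ceil(Int, digits * log2(base))
digits_for(bits::Integer, base::Integer=10)  = floor(Int, bits / log2(base))

bits_for(30)      # 100 bits covers 30 decimal digits
digits_for(256)   # 77 decimal digits fit in the default 256 bits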

I came up with this implementation:

function BigFloat(x::BigFloat, r::MPFRRoundingMode=ROUNDING_MODE[]; precision::Integer=DEFAULT_PRECISION[], new::Bool=true)
    precision == DEFAULT_PRECISION[] && !new && return x
    z = BigFloat(; precision = precision == DEFAULT_PRECISION[] ? MPFR.precision(x) : precision)
    ccall((:mpfr_set, :libmpfr), Int32, (Ref{BigFloat}, Ref{BigFloat}, MPFRRoundingMode),
          z, x, r)
    return z
end

Although this implementation has the following bug: If the user passes precision=256, then the function neglects this and uses precision(x) instead.

julia> x = BigFloat(pi, 100)
3.1415926535897932384626433832793

julia> a = BigFloat(x, precision=256)
3.1415926535897932384626433832793

julia> (a!==x, a==x, a.prec!=x.prec)
(true, true, false)

Approach: replace DEFAULT_PRECISION[] with 0. Note that a BigFloat cannot have 0 precision, so no caller will be calling BigFloat(x, precision=0), unlike BigFloat(x, precision=256). This approach is inspired by how process ids are used in UNIX-based systems.
Shall I move forward with this approach?

We are grateful for your critical attention. As the specifics of the approach we adopt are likely to be followed elsewhere, and there are other stakeholders already joined to this issue, it makes sense to allow them time to see this and consider it -- so let's give the others at least a day and a half to weigh in before you take this to a PR (at which point, commentary and suggestions will appear in the PR's thread). At the very least, let me reread this from a more caffeinated perspective :coffee: upon rejoining this new day :sunrise_over_mountains:.

In any event, it is reasonable for you to move into PR preparation this evening or sometime tomorrow. I'll help you with that if you run into process questions; and there is the Slack channel #my-first-pr with more help and advice.

Elevating the integer zero to carry nonnumerical information when appearing as a precision is not an approach that plays well with Julia. A "float value" that is represented with a significand of 0 bits would be something akin to _the empty integer_ -- not knowing what that is, I prefer to compute using entities I grasp.

Walking away from C/Unix styles [no disrespect, C paved much] becomes refreshing. We avoid sentinelizing generic values, using call signatures that may incorporate singleton structs or fixedpoint value-types to guide multidispatch.

On rereading, I revised my "pleasing look" to respect Typ(x::Typ) === x.
(As it should be, one may extend copy!(z,x) which always has z !== x.)

The pleasing look entails a function BigFloat(x::BigFloat, _) wherein

(a) precision defaults to MPFR.precision(x) and
(b) when precision is set to a value that differs from MPFR.precision(x)
new is necessarily true, overriding the default or explicit new=false
(c) when precision takes its default value or is explicitly given that value
new is determinative: new=false returns x, new=true constructs.

function BigFloat(x::BigFloat, r::MPFRRoundingMode=ROUNDING_MODE[];
                  precision::Integer=MPFR.precision(x),
                  new::Bool=false)
    new = ifelse(precision != MPFR.precision(x), true, new)
    if new
        z = BigFloat(;precision=precision)
        ccall((:mpfr_set, :libmpfr), Int32, 
              (Ref{BigFloat}, Ref{BigFloat}, MPFRRoundingMode), z, x, r)
    else
        z = x
    end
    return z
end
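If that method lands as written, usage would look roughly like this (a sketch of the expected behaviour, not output from a real session):

julia> x = BigFloat(pi);

julia> y = BigFloat(x, precision=100);         # precision differs, so a new 100-bit copy

julia> BigFloat(y) === y                       # same precision, new defaults to false
true

julia> BigFloat(y, new=true) === y             # explicit copy
false

julia> precision(BigFloat(y, precision=128))   # differing precision forces a new object
128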

@narendrakpatel go ahead .. let's carry this forward as a PR
