author     Son Ho    2023-05-09 10:40:19 +0200
committer  Son HO    2023-06-04 21:44:33 +0200
commit     50d1542f830b7ceb73efd34573b6b56b4971a114 (patch)
tree       c73b6c415b39cdbc3d92ec3056a2521445ff8afc /tests/lean/misc-constants
parent     4078f2569b362920a648622be73761cddde8a288 (diff)
Regenerate the translated files for Lean
Diffstat
-rw-r--r--  tests/lean/misc-constants/Base/Primitives.lean  622
-rw-r--r--  tests/lean/misc-constants/Constants.lean        267
2 files changed, 532 insertions, 357 deletions
diff --git a/tests/lean/misc-constants/Base/Primitives.lean b/tests/lean/misc-constants/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/misc-constants/Base/Primitives.lean
+++ b/tests/lean/misc-constants/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use these because we can't compare to the Isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
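+
+-- Sanity checks (illustrative sketch): the conservative bounds are platform-independent,
+-- so they evaluate to concrete literals, unlike the Isize/Usize bounds.
+#assert (Scalar.cMin .Isize == I32.min)
+#assert (Scalar.cMax .Usize == U32.max)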
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
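+
+-- Illustrative behaviour (sketch; bound proofs admitted with `sorry`, as elsewhere in
+-- this file): a cast re-checks the bounds of the target type, so an out-of-range value
+-- fails instead of being silently truncated.
+#eval match Scalar.cast .U8 (Scalar.ofIntCore (ty := .U16) 300 (by sorry) (by sorry)) with
+  | .ret _ => "in bounds"
+  | .fail _ => "overflow"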
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution, for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would make the file harder to
+-- read, and the file is not supposed to change much anyway)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude, so we introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
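+
+-- Illustrative behaviour (sketch; bound proofs admitted with `sorry`): negating the
+-- minimal i8 value overflows, because 128 exceeds I8.max.
+#eval match - (Scalar.ofIntCore (ty := .I8) (-128) (by sorry) (by sorry)) with
+  | .ret _ => "in bounds"
+  | .fail _ => "overflow"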
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
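+
+-- Illustrative behaviour (sketch; bound proofs admitted with `sorry`): the overloaded
+-- operators return a `Result`, so an overflowing addition is reported as a failure
+-- instead of wrapping around.
+#eval match Scalar.ofIntCore (ty := .U8) 200 (by sorry) (by sorry) +
+            Scalar.ofIntCore (ty := .U8) 100 (by sorry) (by sorry) with
+  | .ret _ => "in bounds"
+  | .fail _ => "overflow"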
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
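+
+-- Illustrative consequence (sketch): because U8, U16, ... are reducible aliases of
+-- `Scalar ...`, the generic instances above (e.g. decidable equality) apply to them directly.
+example (x y : U8) : Decidable (x = y) := inferInstance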
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
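+
+-- Illustrative usage (sketch): `intlit` discharges the bound side-conditions of `ofInt`
+-- by reducing the goal to the conservative bounds first.
+#eval Scalar.toInt (U32.ofInt 42 (by intlit))
+#assert (Scalar.toInt (I32.ofInt (-42) (by intlit)) == -42)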
+
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/misc-constants/Constants.lean b/tests/lean/misc-constants/Constants.lean
index 937a15e5..8306ed85 100644
--- a/tests/lean/misc-constants/Constants.lean
+++ b/tests/lean/misc-constants/Constants.lean
@@ -2,143 +2,130 @@
-- [constants]
import Base.Primitives
-structure OpaqueDefs where
-
- /- [constants::X0] -/
- def x0_body : Result UInt32 := Result.ret (UInt32.ofNatCore 0 (by intlit))
- def x0_c : UInt32 := eval_global x0_body (by simp)
-
- /- [core::num::u32::{9}::MAX] -/
- def core_num_u32_max_body : Result UInt32 :=
- Result.ret (UInt32.ofNatCore 4294967295 (by intlit))
- def core_num_u32_max_c : UInt32 :=
- eval_global core_num_u32_max_body (by simp)
-
- /- [constants::X1] -/
- def x1_body : Result UInt32 := Result.ret core_num_u32_max_c
- def x1_c : UInt32 := eval_global x1_body (by simp)
-
- /- [constants::X2] -/
- def x2_body : Result UInt32 := Result.ret (UInt32.ofNatCore 3 (by intlit))
- def x2_c : UInt32 := eval_global x2_body (by simp)
-
- /- [constants::incr] -/
- def incr_fwd (n : UInt32) : Result UInt32 :=
- UInt32.checked_add n (UInt32.ofNatCore 1 (by intlit))
-
- /- [constants::X3] -/
- def x3_body : Result UInt32 := incr_fwd (UInt32.ofNatCore 32 (by intlit))
- def x3_c : UInt32 := eval_global x3_body (by simp)
-
- /- [constants::mk_pair0] -/
- def mk_pair0_fwd (x : UInt32) (y : UInt32) : Result (UInt32 × UInt32) :=
- Result.ret (x, y)
-
- /- [constants::Pair] -/
- structure pair_t (T1 T2 : Type) where
- pair_x : T1
- pair_y : T2
-
- /- [constants::mk_pair1] -/
- def mk_pair1_fwd (x : UInt32) (y : UInt32) : Result (pair_t UInt32 UInt32) :=
- Result.ret { pair_x := x, pair_y := y }
-
- /- [constants::P0] -/
- def p0_body : Result (UInt32 × UInt32) :=
- mk_pair0_fwd (UInt32.ofNatCore 0 (by intlit))
- (UInt32.ofNatCore 1 (by intlit))
- def p0_c : (UInt32 × UInt32) := eval_global p0_body (by simp)
-
- /- [constants::P1] -/
- def p1_body : Result (pair_t UInt32 UInt32) :=
- mk_pair1_fwd (UInt32.ofNatCore 0 (by intlit))
- (UInt32.ofNatCore 1 (by intlit))
- def p1_c : pair_t UInt32 UInt32 := eval_global p1_body (by simp)
-
- /- [constants::P2] -/
- def p2_body : Result (UInt32 × UInt32) :=
- Result.ret
- ((UInt32.ofNatCore 0 (by intlit)),
- (UInt32.ofNatCore 1 (by intlit)))
- def p2_c : (UInt32 × UInt32) := eval_global p2_body (by simp)
-
- /- [constants::P3] -/
- def p3_body : Result (pair_t UInt32 UInt32) :=
- Result.ret
- {
- pair_x := (UInt32.ofNatCore 0 (by intlit)),
- pair_y := (UInt32.ofNatCore 1 (by intlit))
- }
- def p3_c : pair_t UInt32 UInt32 := eval_global p3_body (by simp)
-
- /- [constants::Wrap] -/
- structure wrap_t (T : Type) where
- wrap_val : T
-
- /- [constants::Wrap::{0}::new] -/
- def wrap_new_fwd (T : Type) (val : T) : Result (wrap_t T) :=
- Result.ret { wrap_val := val }
-
- /- [constants::Y] -/
- def y_body : Result (wrap_t Int32) :=
- wrap_new_fwd Int32 (Int32.ofNatCore 2 (by intlit))
- def y_c : wrap_t Int32 := eval_global y_body (by simp)
-
- /- [constants::unwrap_y] -/
- def unwrap_y_fwd : Result Int32 :=
- Result.ret y_c.wrap_val
-
- /- [constants::YVAL] -/
- def yval_body : Result Int32 := unwrap_y_fwd
- def yval_c : Int32 := eval_global yval_body (by simp)
-
- /- [constants::get_z1::Z1] -/
- def get_z1_z1_body : Result Int32 :=
- Result.ret (Int32.ofNatCore 3 (by intlit))
- def get_z1_z1_c : Int32 := eval_global get_z1_z1_body (by simp)
-
- /- [constants::get_z1] -/
- def get_z1_fwd : Result Int32 :=
- Result.ret get_z1_z1_c
-
- /- [constants::add] -/
- def add_fwd (a : Int32) (b : Int32) : Result Int32 :=
- Int32.checked_add a b
-
- /- [constants::Q1] -/
- def q1_body : Result Int32 := Result.ret (Int32.ofNatCore 5 (by intlit))
- def q1_c : Int32 := eval_global q1_body (by simp)
-
- /- [constants::Q2] -/
- def q2_body : Result Int32 := Result.ret q1_c
- def q2_c : Int32 := eval_global q2_body (by simp)
-
- /- [constants::Q3] -/
- def q3_body : Result Int32 := add_fwd q2_c (Int32.ofNatCore 3 (by intlit))
- def q3_c : Int32 := eval_global q3_body (by simp)
-
- /- [constants::get_z2] -/
- def get_z2_fwd : Result Int32 :=
- do
- let i ← get_z1_fwd
- let i0 ← add_fwd i q3_c
- add_fwd q1_c i0
-
- /- [constants::S1] -/
- def s1_body : Result UInt32 := Result.ret (UInt32.ofNatCore 6 (by intlit))
- def s1_c : UInt32 := eval_global s1_body (by simp)
-
- /- [constants::S2] -/
- def s2_body : Result UInt32 := incr_fwd s1_c
- def s2_c : UInt32 := eval_global s2_body (by simp)
-
- /- [constants::S3] -/
- def s3_body : Result (pair_t UInt32 UInt32) := Result.ret p3_c
- def s3_c : pair_t UInt32 UInt32 := eval_global s3_body (by simp)
-
- /- [constants::S4] -/
- def s4_body : Result (pair_t UInt32 UInt32) :=
- mk_pair1_fwd (UInt32.ofNatCore 7 (by intlit))
- (UInt32.ofNatCore 8 (by intlit))
- def s4_c : pair_t UInt32 UInt32 := eval_global s4_body (by simp)
-
+/- [constants::X0] -/
+def x0_body : Result U32 := Result.ret (U32.ofInt 0 (by intlit))
+def x0_c : U32 := eval_global x0_body (by simp)
+
+/- [core::num::u32::{9}::MAX] -/
+def core_num_u32_max_body : Result U32 :=
+ Result.ret (U32.ofInt 4294967295 (by intlit))
+def core_num_u32_max_c : U32 := eval_global core_num_u32_max_body (by simp)
+
+/- [constants::X1] -/
+def x1_body : Result U32 := Result.ret core_num_u32_max_c
+def x1_c : U32 := eval_global x1_body (by simp)
+
+/- [constants::X2] -/
+def x2_body : Result U32 := Result.ret (U32.ofInt 3 (by intlit))
+def x2_c : U32 := eval_global x2_body (by simp)
+
+/- [constants::incr] -/
+def incr_fwd (n : U32) : Result U32 :=
+ n + (U32.ofInt 1 (by intlit))
+
+/- [constants::X3] -/
+def x3_body : Result U32 := incr_fwd (U32.ofInt 32 (by intlit))
+def x3_c : U32 := eval_global x3_body (by simp)
+
+/- [constants::mk_pair0] -/
+def mk_pair0_fwd (x : U32) (y : U32) : Result (U32 × U32) :=
+ Result.ret (x, y)
+
+/- [constants::Pair] -/
+structure pair_t (T1 T2 : Type) where
+ pair_x : T1
+ pair_y : T2
+
+/- [constants::mk_pair1] -/
+def mk_pair1_fwd (x : U32) (y : U32) : Result (pair_t U32 U32) :=
+ Result.ret { pair_x := x, pair_y := y }
+
+/- [constants::P0] -/
+def p0_body : Result (U32 × U32) :=
+ mk_pair0_fwd (U32.ofInt 0 (by intlit)) (U32.ofInt 1 (by intlit))
+def p0_c : (U32 × U32) := eval_global p0_body (by simp)
+
+/- [constants::P1] -/
+def p1_body : Result (pair_t U32 U32) :=
+ mk_pair1_fwd (U32.ofInt 0 (by intlit)) (U32.ofInt 1 (by intlit))
+def p1_c : pair_t U32 U32 := eval_global p1_body (by simp)
+
+/- [constants::P2] -/
+def p2_body : Result (U32 × U32) :=
+ Result.ret ((U32.ofInt 0 (by intlit)), (U32.ofInt 1 (by intlit)))
+def p2_c : (U32 × U32) := eval_global p2_body (by simp)
+
+/- [constants::P3] -/
+def p3_body : Result (pair_t U32 U32) :=
+ Result.ret
+ { pair_x := (U32.ofInt 0 (by intlit)), pair_y := (U32.ofInt 1 (by intlit)) }
+def p3_c : pair_t U32 U32 := eval_global p3_body (by simp)
+
+/- [constants::Wrap] -/
+structure wrap_t (T : Type) where
+ wrap_val : T
+
+/- [constants::Wrap::{0}::new] -/
+def wrap_new_fwd (T : Type) (val : T) : Result (wrap_t T) :=
+ Result.ret { wrap_val := val }
+
+/- [constants::Y] -/
+def y_body : Result (wrap_t I32) := wrap_new_fwd I32 (I32.ofInt 2 (by intlit))
+def y_c : wrap_t I32 := eval_global y_body (by simp)
+
+/- [constants::unwrap_y] -/
+def unwrap_y_fwd : Result I32 :=
+ Result.ret y_c.wrap_val
+
+/- [constants::YVAL] -/
+def yval_body : Result I32 := unwrap_y_fwd
+def yval_c : I32 := eval_global yval_body (by simp)
+
+/- [constants::get_z1::Z1] -/
+def get_z1_z1_body : Result I32 := Result.ret (I32.ofInt 3 (by intlit))
+def get_z1_z1_c : I32 := eval_global get_z1_z1_body (by simp)
+
+/- [constants::get_z1] -/
+def get_z1_fwd : Result I32 :=
+ Result.ret get_z1_z1_c
+
+/- [constants::add] -/
+def add_fwd (a : I32) (b : I32) : Result I32 :=
+ a + b
+
+/- [constants::Q1] -/
+def q1_body : Result I32 := Result.ret (I32.ofInt 5 (by intlit))
+def q1_c : I32 := eval_global q1_body (by simp)
+
+/- [constants::Q2] -/
+def q2_body : Result I32 := Result.ret q1_c
+def q2_c : I32 := eval_global q2_body (by simp)
+
+/- [constants::Q3] -/
+def q3_body : Result I32 := add_fwd q2_c (I32.ofInt 3 (by intlit))
+def q3_c : I32 := eval_global q3_body (by simp)
+
+/- [constants::get_z2] -/
+def get_z2_fwd : Result I32 :=
+ do
+ let i ← get_z1_fwd
+ let i0 ← add_fwd i q3_c
+ add_fwd q1_c i0
+
+/- [constants::S1] -/
+def s1_body : Result U32 := Result.ret (U32.ofInt 6 (by intlit))
+def s1_c : U32 := eval_global s1_body (by simp)
+
+/- [constants::S2] -/
+def s2_body : Result U32 := incr_fwd s1_c
+def s2_c : U32 := eval_global s2_body (by simp)
+
+/- [constants::S3] -/
+def s3_body : Result (pair_t U32 U32) := Result.ret p3_c
+def s3_c : pair_t U32 U32 := eval_global s3_body (by simp)
+
+/- [constants::S4] -/
+def s4_body : Result (pair_t U32 U32) :=
+ mk_pair1_fwd (U32.ofInt 7 (by intlit)) (U32.ofInt 8 (by intlit))
+def s4_c : pair_t U32 U32 := eval_global s4_body (by simp)
+