From 3e8060b5501ec83940a4309389a68898df26ebd0 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 17 Jul 2023 23:37:31 +0200 Subject: Reorganize the Lean backend --- backends/lean/Base/Primitives/Base.lean | 130 ++++++++ backends/lean/Base/Primitives/Scalar.lean | 507 ++++++++++++++++++++++++++++++ backends/lean/Base/Primitives/Vec.lean | 113 +++++++ 3 files changed, 750 insertions(+) create mode 100644 backends/lean/Base/Primitives/Base.lean create mode 100644 backends/lean/Base/Primitives/Scalar.lean create mode 100644 backends/lean/Base/Primitives/Vec.lean (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Base.lean b/backends/lean/Base/Primitives/Base.lean new file mode 100644 index 00000000..db462c38 --- /dev/null +++ b/backends/lean/Base/Primitives/Base.lean @@ -0,0 +1,130 @@ +import Lean + +namespace Primitives + +-------------------- +-- ASSERT COMMAND --Std. +-------------------- + +open Lean Elab Command Term Meta + +syntax (name := assert) "#assert" term: command + +@[command_elab assert] +unsafe +def assertImpl : CommandElab := fun (_stx: Syntax) => do + runTermElabM (fun _ => do + let r ← evalTerm Bool (mkConst ``Bool) _stx[1] + if not r then + logInfo ("Assertion failed for:\n" ++ _stx[1]) + throwError ("Expression reduced to false:\n" ++ _stx[1]) + pure ()) + +#eval 2 == 2 +#assert (2 == 2) + +------------- +-- PRELUDE -- +------------- + +-- Results & monadic combinators + +inductive Error where + | assertionFailure: Error + | integerOverflow: Error + | divisionByZero: Error + | arrayOutOfBounds: Error + | maximumSizeExceeded: Error + | panic: Error +deriving Repr, BEq + +open Error + +inductive Result (α : Type u) where + | ret (v: α): Result α + | fail (e: Error): Result α + | div +deriving Repr, BEq + +open Result + +instance Result_Inhabited (α : Type u) : Inhabited (Result α) := + Inhabited.mk (fail panic) + +instance Result_Nonempty (α : Type u) : Nonempty (Result α) := + Nonempty.intro div + +/- HELPERS -/ + +def ret? {α: Type u} (r: Result α): Bool := + match r with + | ret _ => true + | fail _ | div => false + +def div? {α: Type u} (r: Result α): Bool := + match r with + | div => true + | ret _ | fail _ => false + +def massert (b:Bool) : Result Unit := + if b then ret () else fail assertionFailure + +def eval_global {α: Type u} (x: Result α) (_: ret? x): α := + match x with + | fail _ | div => by contradiction + | ret x => x + +/- DO-DSL SUPPORT -/ + +def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := + match x with + | ret v => f v + | fail v => fail v + | div => div + +-- Allows using Result in do-blocks +instance : Bind Result where + bind := bind + +-- Allows using return x in do-blocks +instance : Pure Result where + pure := fun x => ret x + +@[simp] theorem bind_ret (x : α) (f : α → Result β) : bind (.ret x) f = f x := by simp [bind] +@[simp] theorem bind_fail (x : Error) (f : α → Result β) : bind (.fail x) f = .fail x := by simp [bind] +@[simp] theorem bind_div (f : α → Result β) : bind .div f = .div := by simp [bind] + +/- CUSTOM-DSL SUPPORT -/ + +-- Let-binding the Result of a monadic operation is oftentimes not sufficient, +-- because we may need a hypothesis for equational reasoning in the scope. 
We +-- rely on subtype, and a custom let-binding operator, in effect recreating our +-- own variant of the do-dsl + +def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } := + match o with + | ret x => ret ⟨x, rfl⟩ + | fail e => fail e + | div => div + +@[simp] theorem bind_tc_ret (x : α) (f : α → Result β) : + (do let y ← .ret x; f y) = f x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_fail (x : Error) (f : α → Result β) : + (do let y ← fail x; f y) = fail x := by simp [Bind.bind, bind] + +@[simp] theorem bind_tc_div (f : α → Result β) : + (do let y ← div; f y) = div := by simp [Bind.bind, bind] + +---------- +-- MISC -- +---------- + +@[simp] def mem.replace (a : Type) (x : a) (_ : a) : a := x +@[simp] def mem.replace_back (a : Type) (_ : a) (y : a) : a := y + +/-- Aeneas-translated function -- useful to reduce non-recursive definitions. + Use with `simp [ aeneas ]` -/ +register_simp_attr aeneas + +end Primitives diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean new file mode 100644 index 00000000..241dfa07 --- /dev/null +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -0,0 +1,507 @@ +import Lean +import Lean.Meta.Tactic.Simp +import Mathlib.Tactic.Linarith +import Base.Primitives.Base + +namespace Primitives + +---------------------- +-- MACHINE INTEGERS -- +---------------------- + +-- We redefine our machine integers types. + +-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits` +-- using the simplifier, meaning that proofs do not depend on the compile-time value of +-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at +-- least officially, 16-bit microcontrollers, so this seems like a fine design decision +-- for now.) + +-- Note from Chris Bailey: "If there's more than one salient property of your +-- definition then the subtyping strategy might get messy, and the property part +-- of a subtype is less discoverable by the simplifier or tactics like +-- library_search." So, we will not add refinements on the return values of the +-- operations defined on Primitives, but will rather rely on custom lemmas to +-- invert on possible return values of the primitive operations. + +-- Machine integer constants, done via `ofNatCore`, which requires a proof that +-- the `Nat` fits within the desired integer type. We provide a custom tactic. + +open Result Error +open System.Platform.getNumBits + +-- TODO: is there a way of only importing System.Platform.getNumBits? +-- +@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val + +-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention. 
+ +-- The "structured" bounds +def Isize.smin : Int := - (HPow.hPow 2 (size_num_bits - 1)) +def Isize.smax : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1 +def I8.smin : Int := - (HPow.hPow 2 7) +def I8.smax : Int := HPow.hPow 2 7 - 1 +def I16.smin : Int := - (HPow.hPow 2 15) +def I16.smax : Int := HPow.hPow 2 15 - 1 +def I32.smin : Int := -(HPow.hPow 2 31) +def I32.smax : Int := HPow.hPow 2 31 - 1 +def I64.smin : Int := -(HPow.hPow 2 63) +def I64.smax : Int := HPow.hPow 2 63 - 1 +def I128.smin : Int := -(HPow.hPow 2 127) +def I128.smax : Int := HPow.hPow 2 127 - 1 +def Usize.smin : Int := 0 +def Usize.smax : Int := HPow.hPow 2 size_num_bits - 1 +def U8.smin : Int := 0 +def U8.smax : Int := HPow.hPow 2 8 - 1 +def U16.smin : Int := 0 +def U16.smax : Int := HPow.hPow 2 16 - 1 +def U32.smin : Int := 0 +def U32.smax : Int := HPow.hPow 2 32 - 1 +def U64.smin : Int := 0 +def U64.smax : Int := HPow.hPow 2 64 - 1 +def U128.smin : Int := 0 +def U128.smax : Int := HPow.hPow 2 128 - 1 + +-- The "normalized" bounds, that we use in practice +def I8.min := -128 +def I8.max := 127 +def I16.min := -32768 +def I16.max := 32767 +def I32.min := -2147483648 +def I32.max := 2147483647 +def I64.min := -9223372036854775808 +def I64.max := 9223372036854775807 +def I128.min := -170141183460469231731687303715884105728 +def I128.max := 170141183460469231731687303715884105727 +@[simp] def U8.min := 0 +def U8.max := 255 +@[simp] def U16.min := 0 +def U16.max := 65535 +@[simp] def U32.min := 0 +def U32.max := 4294967295 +@[simp] def U64.min := 0 +def U64.max := 18446744073709551615 +@[simp] def U128.min := 0 +def U128.max := 340282366920938463463374607431768211455 +@[simp] def Usize.min := 0 + +def Isize.refined_min : { n:Int // n = I32.min ∨ n = I64.min } := + ⟨ Isize.smin, by + simp [Isize.smin] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Isize.refined_max : { n:Int // n = I32.max ∨ n = I64.max } := + ⟨ Isize.smax, by + simp [Isize.smax] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Usize.refined_max : { n:Int // n = U32.max ∨ n = U64.max } := + ⟨ Usize.smax, by + simp [Usize.smax] + cases System.Platform.numBits_eq <;> + unfold System.Platform.numBits at * <;> simp [*] ⟩ + +def Isize.min := Isize.refined_min.val +def Isize.max := Isize.refined_max.val +def Usize.max := Usize.refined_max.val + +inductive ScalarTy := +| Isize +| I8 +| I16 +| I32 +| I64 +| I128 +| Usize +| U8 +| U16 +| U32 +| U64 +| U128 + +def Scalar.smin (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.smin + | .I8 => I8.smin + | .I16 => I16.smin + | .I32 => I32.smin + | .I64 => I64.smin + | .I128 => I128.smin + | .Usize => Usize.smin + | .U8 => U8.smin + | .U16 => U16.smin + | .U32 => U32.smin + | .U64 => U64.smin + | .U128 => U128.smin + +def Scalar.smax (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.smax + | .I8 => I8.smax + | .I16 => I16.smax + | .I32 => I32.smax + | .I64 => I64.smax + | .I128 => I128.smax + | .Usize => Usize.smax + | .U8 => U8.smax + | .U16 => U16.smax + | .U32 => U32.smax + | .U64 => U64.smax + | .U128 => U128.smax + +def Scalar.min (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.min + | .I8 => I8.min + | .I16 => I16.min + | .I32 => I32.min + | .I64 => I64.min + | .I128 => I128.min + | .Usize => Usize.min + | .U8 => U8.min + | .U16 => U16.min + | .U32 => U32.min + | .U64 => U64.min + | .U128 => U128.min + +def Scalar.max (ty : ScalarTy) : Int := + match ty with + | .Isize => Isize.max 
+ | .I8 => I8.max + | .I16 => I16.max + | .I32 => I32.max + | .I64 => I64.max + | .I128 => I128.max + | .Usize => Usize.max + | .U8 => U8.max + | .U16 => U16.max + | .U32 => U32.max + | .U64 => U64.max + | .U128 => U128.max + +def Scalar.smin_eq (ty : ScalarTy) : Scalar.min ty = Scalar.smin ty := by + cases ty <;> rfl + +def Scalar.smax_eq (ty : ScalarTy) : Scalar.max ty = Scalar.smax ty := by + cases ty <;> rfl + +-- "Conservative" bounds +-- We use those because we can't compare to the isize bounds (which can't +-- reduce at compile-time). Whenever we perform an arithmetic operation like +-- addition we need to check that the result is in bounds: we first compare +-- to the conservative bounds, which reduce, then compare to the real bounds. +-- This is useful for the various #asserts that we want to reduce at +-- type-checking time. +def Scalar.cMin (ty : ScalarTy) : Int := + match ty with + | .Isize => Scalar.min .I32 + | _ => Scalar.min ty + +def Scalar.cMax (ty : ScalarTy) : Int := + match ty with + | .Isize => Scalar.max .I32 + | .Usize => Scalar.max .U32 + | _ => Scalar.max ty + +theorem Scalar.cMin_bound ty : Scalar.min ty ≤ Scalar.cMin ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * + have h := Isize.refined_min.property + cases h <;> simp [*, Isize.min] + +theorem Scalar.cMax_bound ty : Scalar.cMax ty ≤ Scalar.max ty := by + cases ty <;> simp [Scalar.min, Scalar.max, Scalar.cMin, Scalar.cMax] at * + . have h := Isize.refined_max.property + cases h <;> simp [*, Isize.max] + . have h := Usize.refined_max.property + cases h <;> simp [*, Usize.max] + +theorem Scalar.cMin_suffices ty (h : Scalar.cMin ty ≤ x) : Scalar.min ty ≤ x := by + have := Scalar.cMin_bound ty + linarith + +theorem Scalar.cMax_suffices ty (h : x ≤ Scalar.cMax ty) : x ≤ Scalar.max ty := by + have := Scalar.cMax_bound ty + linarith + +structure Scalar (ty : ScalarTy) where + val : Int + hmin : Scalar.min ty ≤ val + hmax : val ≤ Scalar.max ty +deriving Repr + +theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) : + Scalar.cMin ty ≤ x ∧ x ≤ Scalar.cMax ty -> + Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty + := + λ h => by + apply And.intro <;> have hmin := Scalar.cMin_bound ty <;> have hmax := Scalar.cMax_bound ty <;> linarith + +def Scalar.ofIntCore {ty : ScalarTy} (x : Int) + (hmin : Scalar.min ty ≤ x) (hmax : x ≤ Scalar.max ty) : Scalar ty := + { val := x, hmin := hmin, hmax := hmax } + +-- Tactic to prove that integers are in bounds +-- TODO: use this: https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/instance.20with.20tactic.20autoparam +syntax "intlit" : tactic +macro_rules + | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices; decide) + +def Scalar.ofInt {ty : ScalarTy} (x : Int) + (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by intlit) : Scalar ty := + -- Remark: we initially wrote: + -- let ⟨ hmin, hmax ⟩ := h + -- Scalar.ofIntCore x hmin hmax + -- We updated to the line below because a similar pattern in `Scalar.tryMk` + -- made reduction block. Both versions seem to work for `Scalar.ofInt`, though. 
+ -- TODO: investigate + Scalar.ofIntCore x h.left h.right + +@[simp] def Scalar.check_bounds (ty : ScalarTy) (x : Int) : Bool := + (Scalar.cMin ty ≤ x || Scalar.min ty ≤ x) ∧ (x ≤ Scalar.cMax ty || x ≤ Scalar.max ty) + +theorem Scalar.check_bounds_prop {ty : ScalarTy} {x : Int} (h: Scalar.check_bounds ty x) : + Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty := by + simp at * + have ⟨ hmin, hmax ⟩ := h + have hbmin := Scalar.cMin_bound ty + have hbmax := Scalar.cMax_bound ty + cases hmin <;> cases hmax <;> apply And.intro <;> linarith + +-- Further thoughts: look at what has been done here: +-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean +-- and +-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean +-- which both contain a fair amount of reasoning already! +def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := + if h:Scalar.check_bounds ty x then + -- If we do: + -- ``` + -- let ⟨ hmin, hmax ⟩ := (Scalar.check_bounds_prop h) + -- Scalar.ofIntCore x hmin hmax + -- ``` + -- then normalization blocks (for instance, some proofs which use reflexivity fail). + -- However, the version below doesn't block reduction (TODO: investigate): + return Scalar.ofInt x (Scalar.check_bounds_prop h) + else fail integerOverflow + +def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) + +def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero + +-- Our custom remainder operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_rem (x y : Int) : Int := + if 0 ≤ x then |x| % |y| + else - (|x| % |y|) + +-- Our custom division operation, which satisfies the semantics of Rust +-- TODO: is there a better way? +def scalar_div (x y : Int) : Int := + if 0 ≤ x && 0 ≤ y then |x| / |y| + else if 0 ≤ x && y < 0 then - (|x| / |y|) + else if x < 0 && 0 ≤ y then - (|x| / |y|) + else |x| / |y| + +-- Checking that the remainder operation is correct +#assert scalar_rem 1 2 = 1 +#assert scalar_rem (-1) 2 = -1 +#assert scalar_rem 1 (-2) = 1 +#assert scalar_rem (-1) (-2) = -1 +#assert scalar_rem 7 3 = (1:Int) +#assert scalar_rem (-7) 3 = -1 +#assert scalar_rem 7 (-3) = 1 +#assert scalar_rem (-7) (-3) = -1 + +-- Checking that the division operation is correct +#assert scalar_div 3 2 = 1 +#assert scalar_div (-3) 2 = -1 +#assert scalar_div 3 (-2) = -1 +#assert scalar_div (-3) (-2) = 1 +#assert scalar_div 7 3 = 2 +#assert scalar_div (-7) 3 = -2 +#assert scalar_div 7 (-3) = -2 +#assert scalar_div (-7) (-3) = 2 + +def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero + +def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val + y.val) + +def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val - y.val) + +def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + Scalar.tryMk ty (x.val * y.val) + +-- TODO: instances of +, -, * etc. 
for scalars + +-- Cast an integer from a [src_ty] to a [tgt_ty] +-- TODO: check the semantics of casts in Rust +def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) := + Scalar.tryMk tgt_ty x.val + +-- The scalar types +-- We declare the definitions as reducible so that Lean can unfold them (useful +-- for type class resolution for instance). +@[reducible] def Isize := Scalar .Isize +@[reducible] def I8 := Scalar .I8 +@[reducible] def I16 := Scalar .I16 +@[reducible] def I32 := Scalar .I32 +@[reducible] def I64 := Scalar .I64 +@[reducible] def I128 := Scalar .I128 +@[reducible] def Usize := Scalar .Usize +@[reducible] def U8 := Scalar .U8 +@[reducible] def U16 := Scalar .U16 +@[reducible] def U32 := Scalar .U32 +@[reducible] def U64 := Scalar .U64 +@[reducible] def U128 := Scalar .U128 + +-- TODO: below: not sure this is the best way. +-- Should we rather overload operations like +, -, etc.? +-- Also, it is possible to automate the generation of those definitions +-- with macros (but would it be a good idea? It would be less easy to +-- read the file, which is not supposed to change a lot) + +-- Negation + +/-- +Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce +one here. + +The notation typeclass for heterogeneous addition. +This enables the notation `- a : β` where `a : α`. +-/ +class HNeg (α : Type u) (β : outParam (Type v)) where + /-- `- a` computes the negation of `a`. + The meaning of this notation is type-dependent. -/ + hNeg : α → β + +prefix:75 "-" => HNeg.hNeg + +instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x +instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x +instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x +instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x +instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x +instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x + +-- Addition +instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hAdd x y := Scalar.add x y + +-- Substraction +instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hSub x y := Scalar.sub x y + +-- Multiplication +instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hMul x y := Scalar.mul x y + +-- Division +instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hDiv x y := Scalar.div x y + +-- Remainder +instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where + hMod x y := Scalar.rem x y + +-- ofIntCore +-- TODO: typeclass? +def Isize.ofIntCore := @Scalar.ofIntCore .Isize +def I8.ofIntCore := @Scalar.ofIntCore .I8 +def I16.ofIntCore := @Scalar.ofIntCore .I16 +def I32.ofIntCore := @Scalar.ofIntCore .I32 +def I64.ofIntCore := @Scalar.ofIntCore .I64 +def I128.ofIntCore := @Scalar.ofIntCore .I128 +def Usize.ofIntCore := @Scalar.ofIntCore .Usize +def U8.ofIntCore := @Scalar.ofIntCore .U8 +def U16.ofIntCore := @Scalar.ofIntCore .U16 +def U32.ofIntCore := @Scalar.ofIntCore .U32 +def U64.ofIntCore := @Scalar.ofIntCore .U64 +def U128.ofIntCore := @Scalar.ofIntCore .U128 + +-- ofInt +-- TODO: typeclass? 
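/- A small sketch of how the arithmetic instances above are meant to be used
   (`sumThenDouble` is a hypothetical name, not part of the patch): operations
   on machine scalars live in the `Result` monad, so overflow is explicit. -/
open Primitives in
def sumThenDouble {ty : ScalarTy} (x y : Scalar ty) : Result (Scalar ty) := do
  let s ← x + y   -- fails with `integerOverflow` if the sum is out of bounds
  s + s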
+def Isize.ofInt := @Scalar.ofInt .Isize +def I8.ofInt := @Scalar.ofInt .I8 +def I16.ofInt := @Scalar.ofInt .I16 +def I32.ofInt := @Scalar.ofInt .I32 +def I64.ofInt := @Scalar.ofInt .I64 +def I128.ofInt := @Scalar.ofInt .I128 +def Usize.ofInt := @Scalar.ofInt .Usize +def U8.ofInt := @Scalar.ofInt .U8 +def U16.ofInt := @Scalar.ofInt .U16 +def U32.ofInt := @Scalar.ofInt .U32 +def U64.ofInt := @Scalar.ofInt .U64 +def U128.ofInt := @Scalar.ofInt .U128 + +-- Comparisons +instance {ty} : LT (Scalar ty) where + lt a b := LT.lt a.val b.val + +instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val + +instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt .. +instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe .. + +theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j + | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl + +theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val := + h ▸ rfl + +theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) := + fun h' => absurd (val_eq_of_eq h') h + +instance (ty : ScalarTy) : DecidableEq (Scalar ty) := + fun i j => + match decEq i.val j.val with + | isTrue h => isTrue (Scalar.eq_of_val_eq h) + | isFalse h => isFalse (Scalar.ne_of_val_ne h) + +/- Remark: we can't write the following instance because of restrictions about + the type class parameters (`ty` doesn't appear in the return type, which is + forbidden): + + ``` + instance Scalar.cast (ty : ScalarTy) : Coe (Scalar ty) Int where coe := λ v => v.val + ``` + -/ +def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val + +-- -- We now define a type class that subsumes the various machine integer types, so +-- -- as to write a concise definition for scalar_cast, rather than exhaustively +-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics +-- -- and fails if a cast operation would involve a truncation or modulo. + +-- class MachineInteger (t: Type) where +-- size: Nat +-- val: t -> Fin size +-- ofNatCore: (n:Nat) -> LT.lt n size -> t + +-- set_option hygiene false in +-- run_cmd +-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do +-- Lean.Elab.Command.elabCommand (← `( +-- namespace $typeName +-- instance: MachineInteger $typeName where +-- size := size +-- val := val +-- ofNatCore := ofNatCore +-- end $typeName +-- )) + +-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on +-- -- Lean to infer `src`. + +-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst := +-- if h: MachineInteger.val x < MachineInteger.size dst then +-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h) +-- else +-- .fail integerOverflow + +end Primitives diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean new file mode 100644 index 00000000..7851a232 --- /dev/null +++ b/backends/lean/Base/Primitives/Vec.lean @@ -0,0 +1,113 @@ +import Lean +import Lean.Meta.Tactic.Simp +import Init.Data.List.Basic +import Mathlib.Tactic.RunCmd +import Mathlib.Tactic.Linarith +import Base.IList +import Base.Primitives.Scalar +import Base.Arith + +namespace Primitives + +open Result Error + +------------- +-- VECTORS -- +------------- + +def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } + +-- TODO: do we really need it? 
It should be with Subtype by default +instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val + +instance (a : Type) : Arith.HasIntProp (Vec a) where + prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize + prop := λ ⟨ _, l ⟩ => l + +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + intro_has_int_prop_instances + simp_all [Scalar.max, Scalar.min] + +example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by + scalar_tac + +def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ + +def Vec.len (α : Type u) (v : Vec α) : Usize := + let ⟨ v, l ⟩ := v + Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l + +def Vec.length {α : Type u} (v : Vec α) : Int := v.val.len + +-- This shouldn't be used +def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () + +-- This is actually the backward function +def Vec.push (α : Type u) (v : Vec α) (x : α) : Result (Vec α) + := + let nlen := List.length v.val + 1 + if h : nlen ≤ U32.max || nlen ≤ Usize.max then + have h : nlen ≤ Usize.max := by + simp [Usize.max] at * + have hm := Usize.refined_max.property + cases h <;> cases hm <;> simp [U32.max, U64.max] at * <;> try linarith + return ⟨ List.concat v.val x, by simp at *; assumption ⟩ + else + fail maximumSizeExceeded + +-- This shouldn't be used +def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := + if i.val < List.length v.val then + .ret () + else + .fail arrayOutOfBounds + +-- This is actually the backward function +def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := + if i.val < List.length v.val then + -- TODO: maybe we should redefine a list library which uses integers + -- (instead of natural numbers) + .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ + else + .fail arrayOutOfBounds + +-- TODO: remove +def Vec.index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : + Fin (List.length v.val) := + let j := i.val.toNat + let h: j < List.length v.val := by + have heq := @Int.toNat_lt (List.length v.val) i.val i.hmin + apply heq.mpr + assumption + ⟨j, h⟩ + +def Vec.index (α : Type u) (v: Vec α) (i: Usize): Result α := + match v.val.indexOpt i.val with + | none => fail .arrayOutOfBounds + | some x => ret x + +-- This shouldn't be used +def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := + if i.val < List.length v.val then + .ret () + else + .fail arrayOutOfBounds + +def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize): Result α := + if h: i.val < List.length v.val then + let i := Vec.index_to_fin h + .ret (List.get v.val i) + else + .fail arrayOutOfBounds + +def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := + if h: i.val < List.length v.val then + let i := Vec.index_to_fin h + .ret ⟨ List.set v.val i x, by + have h: List.length v.val ≤ Usize.max := v.property + simp [*] at * + ⟩ + else + .fail arrayOutOfBounds + +end Primitives -- cgit v1.2.3 From 2fa3cb8ee04dd7ff4184e3e1000fdc025abc50a4 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Mon, 17 Jul 2023 23:37:48 +0200 Subject: Start proving theorems for primitive definitions --- backends/lean/Base/Primitives/Scalar.lean | 1 + backends/lean/Base/Primitives/Vec.lean | 89 +++++++++++++++++++------------ 2 files changed, 57 insertions(+), 33 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Scalar.lean 
b/backends/lean/Base/Primitives/Scalar.lean index 241dfa07..3f88caa2 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -2,6 +2,7 @@ import Lean import Lean.Meta.Tactic.Simp import Mathlib.Tactic.Linarith import Base.Primitives.Base +import Base.Diverge.Base namespace Primitives diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 7851a232..4ecfa28f 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -6,6 +6,7 @@ import Mathlib.Tactic.Linarith import Base.IList import Base.Primitives.Scalar import Base.Arith +import Base.Progress.Base namespace Primitives @@ -56,58 +57,80 @@ def Vec.push (α : Type u) (v : Vec α) (x : α) : Result (Vec α) fail maximumSizeExceeded -- This shouldn't be used -def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := - if i.val < List.length v.val then +def Vec.insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α) : Result Unit := + if i.val < v.length then .ret () else .fail arrayOutOfBounds -- This is actually the backward function -def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if i.val < List.length v.val then - -- TODO: maybe we should redefine a list library which uses integers - -- (instead of natural numbers) +def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := + if i.val < v.length then .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ else .fail arrayOutOfBounds --- TODO: remove -def Vec.index_to_fin {α : Type u} {v: Vec α} {i: Usize} (h : i.val < List.length v.val) : - Fin (List.length v.val) := - let j := i.val.toNat - let h: j < List.length v.val := by - have heq := @Int.toNat_lt (List.length v.val) i.val i.hmin - apply heq.mpr - assumption - ⟨j, h⟩ - -def Vec.index (α : Type u) (v: Vec α) (i: Usize): Result α := +@[pspec] +theorem Vec.insert_spec {α : Type u} (v: Vec α) (i: Usize) (x: α) : + i.val < v.length → + ∃ nv, v.insert α i x = ret nv ∧ nv.val = v.val.update i.val x := by + intro h + simp [insert, *] + +def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := match v.val.indexOpt i.val with | none => fail .arrayOutOfBounds | some x => ret x +@[pspec] +theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : + i.val < v.length → + v.index α i = ret (v.val.index i.val) := by + intro + simp only [index] + -- TODO: dependent rewrite + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + simp only [*] + -- This shouldn't be used -def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit := +def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α) : Result Unit := if i.val < List.length v.val then .ret () else .fail arrayOutOfBounds -def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize): Result α := - if h: i.val < List.length v.val then - let i := Vec.index_to_fin h - .ret (List.get v.val i) - else - .fail arrayOutOfBounds +def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize) : Result α := + match v.val.indexOpt i.val with + | none => fail .arrayOutOfBounds + | some x => ret x -def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) := - if h: i.val < List.length v.val then - let i := Vec.index_to_fin h - .ret ⟨ List.set v.val i x, by - have h: List.length v.val ≤ Usize.max := v.property - simp [*] at * - ⟩ - else - .fail arrayOutOfBounds +@[pspec] +theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: 
Vec α) (i: Usize) : + i.val < v.length → + v.index_mut α i = ret (v.val.index i.val) := by + intro + simp only [index_mut] + -- TODO: dependent rewrite + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + simp only [*] + +def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := + match v.val.indexOpt i.val with + | none => fail .arrayOutOfBounds + | some _ => + .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ + +@[pspec] +theorem Vec.index_mut_back_spec {α : Type u} (v: Vec α) (i: Usize) (x : α) : + i.val < v.length → + ∃ nv, v.index_mut_back α i x = ret nv ∧ + nv.val = v.val.update i.val x + := by + intro + simp only [index_mut_back] + have h := List.indexOpt_bounds v.val i.val + split + . simp_all [length]; cases h <;> scalar_tac + . simp_all end Primitives -- cgit v1.2.3 From 0f430c055c3a531ceab83635adc5df92f0015c6e Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 18 Jul 2023 16:55:27 +0200 Subject: Make modifications to Vec.lean --- backends/lean/Base/Primitives/Vec.lean | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 4ecfa28f..be3a0e5b 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -38,7 +38,8 @@ def Vec.len (α : Type u) (v : Vec α) : Usize := let ⟨ v, l ⟩ := v Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l -def Vec.length {α : Type u} (v : Vec α) : Int := v.val.len +@[simp] +abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len -- This shouldn't be used def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () @@ -89,7 +90,7 @@ theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : intro simp only [index] -- TODO: dependent rewrite - have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) simp only [*] -- This shouldn't be used @@ -111,13 +112,14 @@ theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : intro simp only [index_mut] -- TODO: dependent rewrite - have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp[length] at *; simp [*]) + have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) simp only [*] def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := match v.val.indexOpt i.val with | none => fail .arrayOutOfBounds | some _ => + -- TODO: int_tac: introduce the refinements in the context? 
.ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ @[pspec] -- cgit v1.2.3 From 3df0b36891975935c3d8035f56389ee6bbcbf251 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 19 Jul 2023 18:13:31 +0200 Subject: Add arithmetic spec lemmas --- backends/lean/Base/Primitives/Scalar.lean | 167 ++++++++++++++++++++++++++++-- 1 file changed, 161 insertions(+), 6 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index 3f88caa2..aaa4027f 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -3,6 +3,8 @@ import Lean.Meta.Tactic.Simp import Mathlib.Tactic.Linarith import Base.Primitives.Base import Base.Diverge.Base +import Base.Progress.Base +import Base.Arith.Int namespace Primitives @@ -122,6 +124,22 @@ inductive ScalarTy := | U64 | U128 +def ScalarTy.isSigned (ty : ScalarTy) : Bool := + match ty with + | Isize + | I8 + | I16 + | I32 + | I64 + | I128 => true + | Usize + | U8 + | U16 + | U32 + | U64 + | U128 => false + + def Scalar.smin (ty : ScalarTy) : Int := match ty with | .Isize => Isize.smin @@ -289,23 +307,30 @@ def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) := def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val) -def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero - -- Our custom remainder operation, which satisfies the semantics of Rust -- TODO: is there a better way? def scalar_rem (x y : Int) : Int := - if 0 ≤ x then |x| % |y| + if 0 ≤ x then x % y else - (|x| % |y|) +@[simp] +def scalar_rem_nonneg {x y : Int} (hx : 0 ≤ x) : scalar_rem x y = x % y := by + intros + simp [*, scalar_rem] + -- Our custom division operation, which satisfies the semantics of Rust -- TODO: is there a better way? 
def scalar_div (x y : Int) : Int := - if 0 ≤ x && 0 ≤ y then |x| / |y| + if 0 ≤ x && 0 ≤ y then x / y else if 0 ≤ x && y < 0 then - (|x| / |y|) else if x < 0 && 0 ≤ y then - (|x| / |y|) else |x| / |y| +@[simp] +def scalar_div_nonneg {x y : Int} (hx : 0 ≤ x) (hy : 0 ≤ y) : scalar_div x y = x / y := by + intros + simp [*, scalar_div] + -- Checking that the remainder operation is correct #assert scalar_rem 1 2 = 1 #assert scalar_rem (-1) 2 = -1 @@ -326,8 +351,11 @@ def scalar_div (x y : Int) : Int := #assert scalar_div 7 (-3) = -2 #assert scalar_div (-7) (-3) = 2 +def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := + if y.val != 0 then Scalar.tryMk ty (scalar_div x.val y.val) else fail divisionByZero + def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := - if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero + if y.val != 0 then Scalar.tryMk ty (scalar_rem x.val y.val) else fail divisionByZero def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (x.val + y.val) @@ -410,6 +438,133 @@ instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where hMod x y := Scalar.rem x y +-- TODO: make progress work at a more fine grained level (see `Scalar.add_unsigned_spec`) +@[cpspec] +theorem Scalar.add_spec {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val + y.val) + (hmax : x.val + y.val ≤ Scalar.max ty) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + simp [HAdd.hAdd, add, Add.add] + simp [tryMk] + split + . simp [pure] + rfl + . tauto + +theorem Scalar.add_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} + (hmax : x.val + y.val ≤ Scalar.max ty) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + have hmin : Scalar.min ty ≤ x.val + y.val := by + have hx := x.hmin + have hy := y.hmin + cases ty <;> simp [min] at * <;> linarith + apply add_spec <;> assumption + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.sub_spec {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val - y.val) + (hmax : x.val - y.val ≤ Scalar.max ty) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + simp [HSub.hSub, sub, Sub.sub] + simp [tryMk] + split + . simp [pure] + rfl + . tauto + +theorem Scalar.sub_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + have : x.val - y.val ≤ Scalar.max ty := by + have hx := x.hmin + have hxm := x.hmax + have hy := y.hmin + cases ty <;> simp [min, max] at * <;> linarith + intros + apply sub_spec <;> assumption + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.mul_spec {ty} {x y : Scalar ty} + (hmin : Scalar.min ty ≤ x.val * y.val) + (hmax : x.val * y.val ≤ Scalar.max ty) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + simp [HMul.hMul, mul, Mul.mul] + simp [tryMk] + split + . simp [pure] + rfl + . 
tauto + +theorem Scalar.mul_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} + (hmax : x.val * y.val ≤ Scalar.max ty) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + have : Scalar.min ty ≤ x.val * y.val := by + have hx := x.hmin + have hy := y.hmin + cases ty <;> simp at * <;> apply mul_nonneg hx hy + apply mul_spec <;> assumption + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.div_spec {ty} {x y : Scalar ty} + (hnz : y.val ≠ 0) + (hmin : Scalar.min ty ≤ scalar_div x.val y.val) + (hmax : scalar_div x.val y.val ≤ Scalar.max ty) : + ∃ z, x / y = ret z ∧ z.val = scalar_div x.val y.val := by + simp [HDiv.hDiv, div, Div.div] + simp [tryMk, *] + simp [pure] + rfl + +theorem Scalar.div_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : Scalar ty} + (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + have h : Scalar.min ty = 0 := by cases ty <;> simp at * + have hx := x.hmin + have hy := y.hmin + simp [h] at hx hy + have hmin : 0 ≤ x.val / y.val := Int.ediv_nonneg hx hy + have hmax : x.val / y.val ≤ Scalar.max ty := by + have := Int.ediv_le_self y.val hx + have := x.hmax + linarith + have hs := @div_spec ty x y hnz + simp [*] at hs + apply hs + +-- TODO: make it finer grained +@[cpspec] +theorem Scalar.rem_spec {ty} {x y : Scalar ty} + (hnz : y.val ≠ 0) + (hmin : Scalar.min ty ≤ scalar_rem x.val y.val) + (hmax : scalar_rem x.val y.val ≤ Scalar.max ty) : + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := by + simp [HMod.hMod, rem] + simp [tryMk, *] + simp [pure] + rfl + +theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : Scalar ty} + (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := by + have h : Scalar.min ty = 0 := by cases ty <;> simp at * + have hx := x.hmin + have hy := y.hmin + simp [h] at hx hy + have hmin : 0 ≤ x.val % y.val := Int.emod_nonneg x.val hnz + have hmax : x.val % y.val ≤ Scalar.max ty := by + have h := @Int.ediv_emod_unique x.val y.val (x.val % y.val) (x.val / y.val) + simp at h + have : 0 < y.val := by int_tac + simp [*] at h + have := y.hmax + linarith + have hs := @rem_spec ty x y hnz + simp [*] at hs + simp [*] + -- ofIntCore -- TODO: typeclass? 
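/- A small sketch of how the generic spec lemmas above are used (illustrative
   only): for an unsigned type such as `U32`, a single upper bound suffices to
   show that an addition succeeds. -/
open Primitives in
example (x y : U32) (h : x.val + y.val ≤ Scalar.max .U32) :
    ∃ z, x + y = Result.ret z ∧ z.val = x.val + y.val :=
  Scalar.add_unsigned_spec (by decide) h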
def Isize.ofIntCore := @Scalar.ofIntCore .Isize -- cgit v1.2.3 From d87e35e1a53b2252cc2c8c554216115773fd9678 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Thu, 20 Jul 2023 11:38:55 +0200 Subject: Add fine-grained lemmas for the arithmetic operations --- backends/lean/Base/Primitives/Scalar.lean | 137 ++++++++++++++++++++++++++++-- 1 file changed, 130 insertions(+), 7 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index aaa4027f..1e9b51c2 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -438,7 +438,7 @@ instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where hMod x y := Scalar.rem x y --- TODO: make progress work at a more fine grained level (see `Scalar.add_unsigned_spec`) +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.add_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val + y.val) @@ -460,7 +460,32 @@ theorem Scalar.add_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} cases ty <;> simp [min] at * <;> linarith apply add_spec <;> assumption --- TODO: make it finer grained +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.add_spec {x y : Usize} (hmax : x.val + y.val ≤ Usize.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U8.add_spec {x y : U8} (hmax : x.val + y.val ≤ U8.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U16.add_spec {x y : U16} (hmax : x.val + y.val ≤ U16.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U32.add_spec {x y : U32} (hmax : x.val + y.val ≤ U32.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U64.add_spec {x y : U64} (hmax : x.val + y.val ≤ U64.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U128.add_spec {x y : U128} (hmax : x.val + y.val ≤ U128.max) : + ∃ z, x + y = ret z ∧ z.val = x.val + y.val := by + apply Scalar.add_unsigned_spec <;> simp only [Scalar.max, *] + +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.sub_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val - y.val) @@ -484,8 +509,32 @@ theorem Scalar.sub_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} intros apply sub_spec <;> assumption --- TODO: make it finer grained -@[cpspec] +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.sub_spec {x y : Usize} (hmin : Usize.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U8.sub_spec {x y : U8} (hmin : U8.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U16.sub_spec {x y : U16} (hmin : U16.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U32.sub_spec {x y : U32} (hmin : U32.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by 
+ apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U64.sub_spec {x y : U64} (hmin : U64.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +@[cepspec] theorem U128.sub_spec {x y : U128} (hmin : U128.min ≤ x.val - y.val) : + ∃ z, x - y = ret z ∧ z.val = x.val - y.val := by + apply Scalar.sub_unsigned_spec <;> simp only [Scalar.min, *] + +-- Generic theorem - shouldn't be used much theorem Scalar.mul_spec {ty} {x y : Scalar ty} (hmin : Scalar.min ty ≤ x.val * y.val) (hmax : x.val * y.val ≤ Scalar.max ty) : @@ -506,7 +555,32 @@ theorem Scalar.mul_unsigned_spec {ty} (s: ¬ ty.isSigned) {x y : Scalar ty} cases ty <;> simp at * <;> apply mul_nonneg hx hy apply mul_spec <;> assumption --- TODO: make it finer grained +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.mul_spec {x y : Usize} (hmax : x.val * y.val ≤ Usize.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U8.mul_spec {x y : U8} (hmax : x.val * y.val ≤ U8.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U16.mul_spec {x y : U16} (hmax : x.val * y.val ≤ U16.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U32.mul_spec {x y : U32} (hmax : x.val * y.val ≤ U32.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U64.mul_spec {x y : U64} (hmax : x.val * y.val ≤ U64.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +@[cepspec] theorem U128.mul_spec {x y : U128} (hmax : x.val * y.val ≤ U128.max) : + ∃ z, x * y = ret z ∧ z.val = x.val * y.val := by + apply Scalar.mul_unsigned_spec <;> simp only [Scalar.max, *] + +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.div_spec {ty} {x y : Scalar ty} (hnz : y.val ≠ 0) @@ -534,7 +608,32 @@ theorem Scalar.div_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S simp [*] at hs apply hs --- TODO: make it finer grained +/- Fine-grained theorems -/ +@[cepspec] theorem Usize.div_spec (x : Usize) {y : Usize} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [*] + +@[cepspec] theorem U8.div_spec (x : U8) {y : U8} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U16.div_spec (x : U16) {y : U16} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U32.div_spec (x : U32) {y : U32} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U64.div_spec (x : U64) {y : U64} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U128.div_spec (x : U128) {y : U128} (hnz : y.val ≠ 0) : + ∃ z, x / y = ret z ∧ z.val = x.val / y.val := by + apply Scalar.div_unsigned_spec <;> simp [Scalar.max, *] + +-- Generic theorem - shouldn't be used much @[cpspec] theorem Scalar.rem_spec {ty} {x y : Scalar 
ty} (hnz : y.val ≠ 0) @@ -548,7 +647,7 @@ theorem Scalar.rem_spec {ty} {x y : Scalar ty} theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : Scalar ty} (hnz : y.val ≠ 0) : - ∃ z, x % y = ret z ∧ z.val = scalar_rem x.val y.val := by + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by have h : Scalar.min ty = 0 := by cases ty <;> simp at * have hx := x.hmin have hy := y.hmin @@ -565,6 +664,30 @@ theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S simp [*] at hs simp [*] +@[cepspec] theorem Usize.rem_spec (x : Usize) {y : Usize} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [*] + +@[cepspec] theorem U8.rem_spec (x : U8) {y : U8} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U16.rem_spec (x : U16) {y : U16} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U32.rem_spec (x : U32) {y : U32} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U64.rem_spec (x : U64) {y : U64} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + +@[cepspec] theorem U128.rem_spec (x : U128) {y : U128} (hnz : y.val ≠ 0) : + ∃ z, x % y = ret z ∧ z.val = x.val % y.val := by + apply Scalar.rem_unsigned_spec <;> simp [Scalar.max, *] + -- ofIntCore -- TODO: typeclass? def Isize.ofIntCore := @Scalar.ofIntCore .Isize -- cgit v1.2.3 From 876137dff361620d8ade1a4ee94fa9274df0bdc6 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 14:08:44 +0200 Subject: Improve int_tac and scalar_tac --- backends/lean/Base/Primitives/Vec.lean | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index be3a0e5b..35092c29 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -16,20 +16,19 @@ open Result Error -- VECTORS -- ------------- -def Vec (α : Type u) := { l : List α // List.length l ≤ Usize.max } +def Vec (α : Type u) := { l : List α // l.length ≤ Usize.max } -- TODO: do we really need it? 
It should be with Subtype by default -instance Vec.cast (a : Type): Coe (Vec a) (List a) where coe := λ v => v.val +instance Vec.cast (a : Type u): Coe (Vec a) (List a) where coe := λ v => v.val -instance (a : Type) : Arith.HasIntProp (Vec a) where - prop_ty := λ v => v.val.length ≤ Scalar.max ScalarTy.Usize - prop := λ ⟨ _, l ⟩ => l +instance (a : Type u) : Arith.HasIntProp (Vec a) where + prop_ty := λ v => v.val.len ≤ Scalar.max ScalarTy.Usize + prop := λ ⟨ _, l ⟩ => by simp[Scalar.max, List.len_eq_length, *] -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by - intro_has_int_prop_instances - simp_all [Scalar.max, Scalar.min] +@[simp] +abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len -example {a: Type} (v : Vec a) : v.val.length ≤ Scalar.max ScalarTy.Usize := by +example {a: Type u} (v : Vec a) : v.length ≤ Scalar.max ScalarTy.Usize := by scalar_tac def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ @@ -38,9 +37,6 @@ def Vec.len (α : Type u) (v : Vec α) : Usize := let ⟨ v, l ⟩ := v Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l -@[simp] -abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len - -- This shouldn't be used def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () @@ -115,11 +111,14 @@ theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) simp only [*] +instance {α : Type u} (p : Vec α → Prop) : Arith.HasIntProp (Subtype p) where + prop_ty := λ x => p x + prop := λ x => x.property + def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := match v.val.indexOpt i.val with | none => fail .arrayOutOfBounds | some _ => - -- TODO: int_tac: introduce the refinements in the context? 
.ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ @[pspec] -- cgit v1.2.3 From 1854c631a6a7a3f8d45ad18e05547f9d3782c3ee Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 16:26:08 +0200 Subject: Make progress on the hashmap properties --- backends/lean/Base/Primitives/Scalar.lean | 48 +++++++++++++++++-------------- backends/lean/Base/Primitives/Vec.lean | 13 +++++++-- 2 files changed, 37 insertions(+), 24 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index 1e9b51c2..3beb7527 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -66,27 +66,33 @@ def U128.smin : Int := 0 def U128.smax : Int := HPow.hPow 2 128 - 1 -- The "normalized" bounds, that we use in practice -def I8.min := -128 -def I8.max := 127 -def I16.min := -32768 -def I16.max := 32767 -def I32.min := -2147483648 -def I32.max := 2147483647 -def I64.min := -9223372036854775808 -def I64.max := 9223372036854775807 -def I128.min := -170141183460469231731687303715884105728 -def I128.max := 170141183460469231731687303715884105727 -@[simp] def U8.min := 0 -def U8.max := 255 -@[simp] def U16.min := 0 -def U16.max := 65535 -@[simp] def U32.min := 0 -def U32.max := 4294967295 -@[simp] def U64.min := 0 -def U64.max := 18446744073709551615 -@[simp] def U128.min := 0 -def U128.max := 340282366920938463463374607431768211455 -@[simp] def Usize.min := 0 +def I8.min : Int := -128 +def I8.max : Int := 127 +def I16.min : Int := -32768 +def I16.max : Int := 32767 +def I32.min : Int := -2147483648 +def I32.max : Int := 2147483647 +def I64.min : Int := -9223372036854775808 +def I64.max : Int := 9223372036854775807 +def I128.min : Int := -170141183460469231731687303715884105728 +def I128.max : Int := 170141183460469231731687303715884105727 +@[simp] +def U8.min : Int := 0 +def U8.max : Int := 255 +@[simp] +def U16.min : Int := 0 +def U16.max : Int := 65535 +@[simp] +def U32.min : Int := 0 +def U32.max : Int := 4294967295 +@[simp] +def U64.min : Int := 0 +def U64.max : Int := 18446744073709551615 +@[simp] +def U128.min : Int := 0 +def U128.max : Int := 340282366920938463463374607431768211455 +@[simp] +def Usize.min : Int := 0 def Isize.refined_min : { n:Int // n = I32.min ∨ n = I64.min } := ⟨ Isize.smin, by diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 35092c29..5a709566 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -22,20 +22,27 @@ def Vec (α : Type u) := { l : List α // l.length ≤ Usize.max } instance Vec.cast (a : Type u): Coe (Vec a) (List a) where coe := λ v => v.val instance (a : Type u) : Arith.HasIntProp (Vec a) where - prop_ty := λ v => v.val.len ≤ Scalar.max ScalarTy.Usize + prop_ty := λ v => 0 ≤ v.val.len ∧ v.val.len ≤ Scalar.max ScalarTy.Usize prop := λ ⟨ _, l ⟩ => by simp[Scalar.max, List.len_eq_length, *] @[simp] abbrev Vec.length {α : Type u} (v : Vec α) : Int := v.val.len +@[simp] +abbrev Vec.v {α : Type u} (v : Vec α) : List α := v.val + example {a: Type u} (v : Vec a) : v.length ≤ Scalar.max ScalarTy.Usize := by scalar_tac def Vec.new (α : Type u): Vec α := ⟨ [], by apply Scalar.cMax_suffices .Usize; simp ⟩ +-- TODO: very annoying that the α is an explicit parameter def Vec.len (α : Type u) (v : Vec α) : Usize := - let ⟨ v, l ⟩ := v - Usize.ofIntCore (List.length v) (by simp [Scalar.min, Usize.min]) l + Usize.ofIntCore v.val.len (by scalar_tac) (by 
scalar_tac) + +@[simp] +theorem Vec.len_val {α : Type u} (v : Vec α) : (Vec.len α v).val = v.length := + by rfl -- This shouldn't be used def Vec.push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := () -- cgit v1.2.3 From 0cc3c78137434d848188eee2a66b1e2cacfd102e Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 19:06:05 +0200 Subject: Make progress on the proofs of the hashmap --- backends/lean/Base/Primitives/Base.lean | 2 +- backends/lean/Base/Primitives/Vec.lean | 20 ++++++++------------ 2 files changed, 9 insertions(+), 13 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Base.lean b/backends/lean/Base/Primitives/Base.lean index db462c38..7c0fa3bb 100644 --- a/backends/lean/Base/Primitives/Base.lean +++ b/backends/lean/Base/Primitives/Base.lean @@ -76,7 +76,7 @@ def eval_global {α: Type u} (x: Result α) (_: ret? x): α := /- DO-DSL SUPPORT -/ -def bind {α : Type u} {β : Type v} (x: Result α) (f: α -> Result β) : Result β := +def bind {α : Type u} {β : Type v} (x: Result α) (f: α → Result β) : Result β := match x with | ret v => f v | fail v => fail v diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 5a709566..523372bb 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -75,10 +75,9 @@ def Vec.insert (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Vec α) := .fail arrayOutOfBounds @[pspec] -theorem Vec.insert_spec {α : Type u} (v: Vec α) (i: Usize) (x: α) : - i.val < v.length → +theorem Vec.insert_spec {α : Type u} (v: Vec α) (i: Usize) (x: α) + (hbound : i.val < v.length) : ∃ nv, v.insert α i x = ret nv ∧ nv.val = v.val.update i.val x := by - intro h simp [insert, *] def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := @@ -87,10 +86,9 @@ def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := | some x => ret x @[pspec] -theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : - i.val < v.length → +theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) + (hbound : i.val < v.length) : v.index α i = ret (v.val.index i.val) := by - intro simp only [index] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) @@ -109,10 +107,9 @@ def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize) : Result α := | some x => ret x @[pspec] -theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) : - i.val < v.length → +theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) + (hbound : i.val < v.length) : v.index_mut α i = ret (v.val.index i.val) := by - intro simp only [index_mut] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) @@ -129,12 +126,11 @@ def Vec.index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α) : Result (Ve .ret ⟨ v.val.update i.val x, by have := v.property; simp [*] ⟩ @[pspec] -theorem Vec.index_mut_back_spec {α : Type u} (v: Vec α) (i: Usize) (x : α) : - i.val < v.length → +theorem Vec.index_mut_back_spec {α : Type u} (v: Vec α) (i: Usize) (x : α) + (hbound : i.val < v.length) : ∃ nv, v.index_mut_back α i x = ret nv ∧ nv.val = v.val.update i.val x := by - intro simp only [index_mut_back] have h := List.indexOpt_bounds v.val i.val split -- cgit v1.2.3 From 9e8fccbe4b667fc341b6544030f85af05fe89307 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Tue, 25 Jul 2023 20:12:48 +0200 Subject: Make progress on the proofs of the hashmap --- 
backends/lean/Base/Primitives/Scalar.lean | 47 ++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 4 deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean index 3beb7527..2e5be8bf 100644 --- a/backends/lean/Base/Primitives/Scalar.lean +++ b/backends/lean/Base/Primitives/Scalar.lean @@ -660,10 +660,8 @@ theorem Scalar.rem_unsigned_spec {ty} (s: ¬ ty.isSigned) (x : Scalar ty) {y : S simp [h] at hx hy have hmin : 0 ≤ x.val % y.val := Int.emod_nonneg x.val hnz have hmax : x.val % y.val ≤ Scalar.max ty := by - have h := @Int.ediv_emod_unique x.val y.val (x.val % y.val) (x.val / y.val) - simp at h - have : 0 < y.val := by int_tac - simp [*] at h + have h : 0 < y.val := by int_tac + have h := Int.emod_lt_of_pos x.val h have := y.hmax linarith have hs := @rem_spec ty x y hnz @@ -724,6 +722,47 @@ def U32.ofInt := @Scalar.ofInt .U32 def U64.ofInt := @Scalar.ofInt .U64 def U128.ofInt := @Scalar.ofInt .U128 +-- TODO: factor those lemmas out +@[simp] theorem Scalar.ofInt_val_eq {ty} (h : Scalar.min ty ≤ x ∧ x ≤ Scalar.max ty) : (Scalar.ofInt x h).val = x := by + simp [Scalar.ofInt, Scalar.ofIntCore] + +@[simp] theorem Isize.ofInt_val_eq (h : Scalar.min ScalarTy.Isize ≤ x ∧ x ≤ Scalar.max ScalarTy.Isize) : (Isize.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I8.ofInt_val_eq (h : Scalar.min ScalarTy.I8 ≤ x ∧ x ≤ Scalar.max ScalarTy.I8) : (I8.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I16.ofInt_val_eq (h : Scalar.min ScalarTy.I16 ≤ x ∧ x ≤ Scalar.max ScalarTy.I16) : (I16.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I32.ofInt_val_eq (h : Scalar.min ScalarTy.I32 ≤ x ∧ x ≤ Scalar.max ScalarTy.I32) : (I32.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I64.ofInt_val_eq (h : Scalar.min ScalarTy.I64 ≤ x ∧ x ≤ Scalar.max ScalarTy.I64) : (I64.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem I128.ofInt_val_eq (h : Scalar.min ScalarTy.I128 ≤ x ∧ x ≤ Scalar.max ScalarTy.I128) : (I128.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem Usize.ofInt_val_eq (h : Scalar.min ScalarTy.Usize ≤ x ∧ x ≤ Scalar.max ScalarTy.Usize) : (Usize.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U8.ofInt_val_eq (h : Scalar.min ScalarTy.U8 ≤ x ∧ x ≤ Scalar.max ScalarTy.U8) : (U8.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U16.ofInt_val_eq (h : Scalar.min ScalarTy.U16 ≤ x ∧ x ≤ Scalar.max ScalarTy.U16) : (U16.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U32.ofInt_val_eq (h : Scalar.min ScalarTy.U32 ≤ x ∧ x ≤ Scalar.max ScalarTy.U32) : (U32.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U64.ofInt_val_eq (h : Scalar.min ScalarTy.U64 ≤ x ∧ x ≤ Scalar.max ScalarTy.U64) : (U64.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + +@[simp] theorem U128.ofInt_val_eq (h : Scalar.min ScalarTy.U128 ≤ x ∧ x ≤ Scalar.max ScalarTy.U128) : (U128.ofInt x h).val = x := by + apply Scalar.ofInt_val_eq h + + -- Comparisons instance {ty} : LT (Scalar ty) where lt a b := LT.lt a.val b.val -- cgit v1.2.3 From 3337c4ac3326c3132dcc322f55f23a7d2054ceb0 Mon Sep 17 00:00:00 2001 From: Son Ho Date: Wed, 26 Jul 2023 15:00:11 +0200 Subject: Update some of the Vec function specs --- backends/lean/Base/Primitives/Vec.lean | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 
deletions(-) (limited to 'backends/lean/Base/Primitives') diff --git a/backends/lean/Base/Primitives/Vec.lean b/backends/lean/Base/Primitives/Vec.lean index 523372bb..a09d6ac2 100644 --- a/backends/lean/Base/Primitives/Vec.lean +++ b/backends/lean/Base/Primitives/Vec.lean @@ -85,14 +85,19 @@ def Vec.index (α : Type u) (v: Vec α) (i: Usize) : Result α := | none => fail .arrayOutOfBounds | some x => ret x +/- In the theorems below: we don't always need the `∃ ..`, but we use one + so that `progress` introduces an opaque variable and an equality. This + helps control the context. + -/ + @[pspec] theorem Vec.index_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) (hbound : i.val < v.length) : - v.index α i = ret (v.val.index i.val) := by + ∃ x, v.index α i = ret x ∧ x = v.val.index i.val := by simp only [index] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) - simp only [*] + simp [*] -- This shouldn't be used def Vec.index_back (α : Type u) (v: Vec α) (i: Usize) (_: α) : Result Unit := @@ -109,11 +114,11 @@ def Vec.index_mut (α : Type u) (v: Vec α) (i: Usize) : Result α := @[pspec] theorem Vec.index_mut_spec {α : Type u} [Inhabited α] (v: Vec α) (i: Usize) (hbound : i.val < v.length) : - v.index_mut α i = ret (v.val.index i.val) := by + ∃ x, v.index_mut α i = ret x ∧ x = v.val.index i.val := by simp only [index_mut] -- TODO: dependent rewrite have h := List.indexOpt_eq_index v.val i.val (by scalar_tac) (by simp [*]) - simp only [*] + simp [*] instance {α : Type u} (p : Vec α → Prop) : Arith.HasIntProp (Subtype p) where prop_ty := λ x => p x -- cgit v1.2.3
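/- A closing sketch (illustrative only: `copyWithin` and `copyWithin_spec` are
   hypothetical names, not part of the patch). It shows the shape of code
   Aeneas produces for vectors and how the ∃-style specifications above are
   meant to compose when proving it. -/
section
open Primitives

-- Read the element at index `i` and write it back at index `j`.
def copyWithin {α : Type} (v : Vec α) (i j : Usize) : Result (Vec α) := do
  let x ← Vec.index α v i
  Vec.index_mut_back α v j x

theorem copyWithin_spec {α : Type} [Inhabited α] (v : Vec α) (i j : Usize)
    (hi : i.val < v.length) (hj : j.val < v.length) :
    ∃ nv, copyWithin v i j = Result.ret nv ∧
      nv.val = v.val.update j.val (v.val.index i.val) := by
  have ⟨ x, hx, hxv ⟩ := Vec.index_spec v i hi
  have ⟨ nv, hnv, hnvv ⟩ := Vec.index_mut_back_spec v j x hj
  exists nv
  simp [copyWithin, hx, hnv, hxv, hnvv]

end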