path: root/tests/lean
author     Son Ho   2023-05-09 10:40:19 +0200
committer  Son HO   2023-06-04 21:44:33 +0200
commit     50d1542f830b7ceb73efd34573b6b56b4971a114 (patch)
tree       c73b6c415b39cdbc3d92ec3056a2521445ff8afc /tests/lean
parent     4078f2569b362920a648622be73761cddde8a288 (diff)
Regenerate the translated files for Lean
Diffstat (limited to 'tests/lean')
-rw-r--r--  tests/lean/betree/Base/Primitives.lean | 392
-rw-r--r--  tests/lean/betree/BetreeMain.lean | 1
-rw-r--r--  tests/lean/betree/BetreeMain/Clauses/Template.lean | 185
-rw-r--r--  tests/lean/betree/BetreeMain/Funs.lean | 1167
-rw-r--r--  tests/lean/betree/BetreeMain/Opaque.lean | 33
-rw-r--r--  tests/lean/betree/BetreeMain/Types.lean | 55
-rw-r--r--  tests/lean/betree/lake-manifest.json | 27
-rw-r--r--  tests/lean/betree/lakefile.lean | 12
-rw-r--r--  tests/lean/betree/lean-toolchain | 1
-rw-r--r--  tests/lean/hashmap/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/hashmap/Hashmap/Clauses/Clauses.lean | 18
-rw-r--r--  tests/lean/hashmap/Hashmap/Clauses/Template.lean | 16
-rw-r--r--  tests/lean/hashmap/Hashmap/Funs.lean | 183
-rw-r--r--  tests/lean/hashmap/Hashmap/Types.lean | 8
-rw-r--r--  tests/lean/hashmap_on_disk/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/hashmap_on_disk/HashmapMain/Clauses/Clauses.lean | 44
-rw-r--r--  tests/lean/hashmap_on_disk/HashmapMain/Clauses/Template.lean | 16
-rw-r--r--  tests/lean/hashmap_on_disk/HashmapMain/ExternalFuns.lean | 5
-rw-r--r--  tests/lean/hashmap_on_disk/HashmapMain/Funs.lean | 205
-rw-r--r--  tests/lean/hashmap_on_disk/HashmapMain/Opaque.lean | 4
-rw-r--r--  tests/lean/hashmap_on_disk/HashmapMain/Types.lean | 8
-rw-r--r--  tests/lean/misc-constants/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/misc-constants/Constants.lean | 267
-rw-r--r--  tests/lean/misc-external/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/misc-external/External/ExternalFuns.lean | 5
-rw-r--r--  tests/lean/misc-external/External/Funs.lean | 35
-rw-r--r--  tests/lean/misc-external/External/Opaque.lean | 3
-rw-r--r--  tests/lean/misc-loops/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/misc-loops/Loops/Clauses/Clauses.lean | 42
-rw-r--r--  tests/lean/misc-loops/Loops/Clauses/Template.lean | 45
-rw-r--r--  tests/lean/misc-loops/Loops/Funs.lean | 281
-rw-r--r--  tests/lean/misc-no_nested_borrows/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/misc-no_nested_borrows/NoNestedBorrows.lean | 1050
-rw-r--r--  tests/lean/misc-paper/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/misc-paper/Paper.lean | 240
-rw-r--r--  tests/lean/misc-polonius_list/Base/Primitives.lean | 622
-rw-r--r--  tests/lean/misc-polonius_list/PoloniusList.lean | 59
37 files changed, 4452 insertions, 4931 deletions
diff --git a/tests/lean/betree/Base/Primitives.lean b/tests/lean/betree/Base/Primitives.lean
deleted file mode 100644
index 5b64e908..00000000
--- a/tests/lean/betree/Base/Primitives.lean
+++ /dev/null
@@ -1,392 +0,0 @@
-import Lean
-import Lean.Meta.Tactic.Simp
-import Init.Data.List.Basic
-import Mathlib.Tactic.RunCmd
-
--------------
--- PRELUDE --
--------------
-
--- Results & monadic combinators
-
-inductive Error where
- | assertionFailure: Error
- | integerOverflow: Error
- | arrayOutOfBounds: Error
- | maximumSizeExceeded: Error
- | panic: Error
-deriving Repr, BEq
-
-open Error
-
-inductive Result (α : Type u) where
- | ret (v: α): Result α
- | fail (e: Error): Result α
-deriving Repr, BEq
-
-open Result
-
-/- HELPERS -/
-
-def ret? {α: Type} (r: Result α): Bool :=
- match r with
- | Result.ret _ => true
- | Result.fail _ => false
-
-def massert (b:Bool) : Result Unit :=
- if b then .ret () else fail assertionFailure
-
-def eval_global {α: Type} (x: Result α) (_: ret? x): α :=
- match x with
- | Result.fail _ => by contradiction
- | Result.ret x => x
-
-/- DO-DSL SUPPORT -/
-
-def bind (x: Result α) (f: α -> Result β) : Result β :=
- match x with
- | ret v => f v
- | fail v => fail v
-
--- Allows using Result in do-blocks
-instance : Bind Result where
- bind := bind
-
--- Allows using return x in do-blocks
-instance : Pure Result where
- pure := fun x => ret x
-
-/- CUSTOM-DSL SUPPORT -/
-
--- Let-binding the Result of a monadic operation is oftentimes not sufficient,
--- because we may need a hypothesis for equational reasoning in the scope. We
--- rely on subtype, and a custom let-binding operator, in effect recreating our
--- own variant of the do-dsl
-
-def Result.attach {α: Type} (o : Result α): Result { x : α // o = ret x } :=
- match o with
- | .ret x => .ret ⟨x, rfl⟩
- | .fail e => .fail e
-
-macro "let" e:term " ⟵ " f:term : doElem =>
- `(doElem| let ⟨$e, h⟩ ← Result.attach $f)
-
--- TODO: any way to factorize both definitions?
-macro "let" e:term " <-- " f:term : doElem =>
- `(doElem| let ⟨$e, h⟩ ← Result.attach $f)
-
--- We call the hypothesis `h`, in effect making it unavailable to the user
--- (because too much shadowing). But in practice, once can use the French single
--- quote notation (input with f< and f>), where `‹ h ›` finds a suitable
--- hypothesis in the context, this is equivalent to `have x: h := by assumption in x`
-#eval do
- let y <-- .ret (0: Nat)
- let _: y = 0 := by cases ‹ ret 0 = ret y › ; decide
- let r: { x: Nat // x = 0 } := ⟨ y, by assumption ⟩
- .ret r
-
-----------------------
--- MACHINE INTEGERS --
-----------------------
-
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
-
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
-
--- Note from Chris Bailey: "If there's more than one salient property of your
--- definition then the subtyping strategy might get messy, and the property part
--- of a subtype is less discoverable by the simplifier or tactics like
--- library_search." So, we will not add refinements on the return values of the
--- operations defined on Primitives, but will rather rely on custom lemmas to
--- invert on possible return values of the primitive operations.
-
--- Machine integer constants, done via `ofNatCore`, which requires a proof that
--- the `Nat` fits within the desired integer type. We provide a custom tactic.
-
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
--- This is useful for the various #asserts that we want to reduce at
--- type-checking time.
-
--- Further thoughts: look at what has been done here:
--- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
--- and
--- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
--- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
-
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
-
--------------
--- VECTORS --
--------------
-
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
-
-#check vec_new
-
-def vec_len (α : Type u) (v : Vec α) : USize :=
- let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
-
-def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
-
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
-def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
- :=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
- else
- fail maximumSizeExceeded
-
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
- if i.val < List.length v.val then
- .ret ()
- else
- .fail arrayOutOfBounds
-
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
- if i.val < List.length v.val then
- .ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
- rewrite [ List.length_set v.val i.val x ]
- assumption
- ⟩
- else
- .fail arrayOutOfBounds
-
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
- .ret (List.get v.val ⟨i.val, h⟩)
- else
- .fail arrayOutOfBounds
-
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
- if i.val < List.length v.val then
- .ret ()
- else
- .fail arrayOutOfBounds
-
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
- .ret (List.get v.val ⟨i.val, h⟩)
- else
- .fail arrayOutOfBounds
-
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
- if i.val < List.length v.val then
- .ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
- rewrite [ List.length_set v.val i.val x ]
- assumption
- ⟩
- else
- .fail arrayOutOfBounds
-
-----------
--- MISC --
-----------
-
-def mem_replace_fwd (a : Type) (x : a) (_ : a) : a :=
- x
-
-def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
- y
-
-/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
- Use with `simp [ aeneas ]` -/
-register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/betree/BetreeMain.lean b/tests/lean/betree/BetreeMain.lean
deleted file mode 100644
index 5f307877..00000000
--- a/tests/lean/betree/BetreeMain.lean
+++ /dev/null
@@ -1 +0,0 @@
-import BetreeMain.Funs
diff --git a/tests/lean/betree/BetreeMain/Clauses/Template.lean b/tests/lean/betree/BetreeMain/Clauses/Template.lean
deleted file mode 100644
index 1d18174e..00000000
--- a/tests/lean/betree/BetreeMain/Clauses/Template.lean
+++ /dev/null
@@ -1,185 +0,0 @@
--- THIS FILE WAS AUTOMATICALLY GENERATED BY AENEAS
--- [betree_main]: templates for the decreases clauses
-import Base.Primitives
-import BetreeMain.Types
-
-/- [betree_main::betree::List::{1}::len]: termination measure -/
-@[simp]
-def betree_list_len_terminates (T : Type) (self : betree_list_t T) := self
-
-/- [betree_main::betree::List::{1}::len]: decreases_by tactic -/
-syntax "betree_list_len_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_list_len_decreases $self) =>`(tactic| sorry)
-
-/- [betree_main::betree::List::{1}::split_at]: termination measure -/
-@[simp]
-def betree_list_split_at_terminates (T : Type) (self : betree_list_t T)
- (n : UInt64) :=
- (self, n)
-
-/- [betree_main::betree::List::{1}::split_at]: decreases_by tactic -/
-syntax "betree_list_split_at_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_list_split_at_decreases $self $n) =>`(tactic| sorry)
-
-/- [betree_main::betree::List::{2}::partition_at_pivot]: termination measure -/
-@[simp]
-def betree_list_partition_at_pivot_terminates (T : Type)
- (self : betree_list_t (UInt64 × T)) (pivot : UInt64) :=
- (self, pivot)
-
-/- [betree_main::betree::List::{2}::partition_at_pivot]: decreases_by tactic -/
-syntax "betree_list_partition_at_pivot_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_list_partition_at_pivot_decreases $self $pivot) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::lookup_in_bindings]: termination measure -/
-@[simp]
-def betree_node_lookup_in_bindings_terminates (key : UInt64)
- (bindings : betree_list_t (UInt64 × UInt64)) :=
- (key, bindings)
-
-/- [betree_main::betree::Node::{5}::lookup_in_bindings]: decreases_by tactic -/
-syntax "betree_node_lookup_in_bindings_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_lookup_in_bindings_decreases $key $bindings) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_for_key]: termination measure -/
-@[simp]
-def betree_node_lookup_first_message_for_key_terminates (key : UInt64)
- (msgs : betree_list_t (UInt64 × betree_message_t)) :=
- (key, msgs)
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_for_key]: decreases_by tactic -/
-syntax "betree_node_lookup_first_message_for_key_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_lookup_first_message_for_key_decreases $key $msgs) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::apply_upserts]: termination measure -/
-@[simp]
-def betree_node_apply_upserts_terminates
- (msgs : betree_list_t (UInt64 × betree_message_t)) (prev : Option UInt64)
- (key : UInt64) (st : State) :=
- (msgs, prev, key, st)
-
-/- [betree_main::betree::Node::{5}::apply_upserts]: decreases_by tactic -/
-syntax "betree_node_apply_upserts_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_apply_upserts_decreases $msgs $prev $key $st) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::lookup]: termination measure -/
-@[simp]
-def betree_node_lookup_terminates (self : betree_node_t) (key : UInt64)
- (st : State) :=
- (self, key, st)
-
-/- [betree_main::betree::Node::{5}::lookup]: decreases_by tactic -/
-syntax "betree_node_lookup_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_lookup_decreases $self $key $st) =>`(tactic| sorry)
-
-/- [betree_main::betree::Internal::{4}::lookup_in_children]: termination measure -/
-@[simp]
-def betree_internal_lookup_in_children_terminates (self : betree_internal_t)
- (key : UInt64) (st : State) :=
- (self, key, st)
-
-/- [betree_main::betree::Internal::{4}::lookup_in_children]: decreases_by tactic -/
-syntax "betree_internal_lookup_in_children_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_internal_lookup_in_children_decreases $self $key $st) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::lookup_mut_in_bindings]: termination measure -/
-@[simp]
-def betree_node_lookup_mut_in_bindings_terminates (key : UInt64)
- (bindings : betree_list_t (UInt64 × UInt64)) :=
- (key, bindings)
-
-/- [betree_main::betree::Node::{5}::lookup_mut_in_bindings]: decreases_by tactic -/
-syntax "betree_node_lookup_mut_in_bindings_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_lookup_mut_in_bindings_decreases $key $bindings) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::apply_messages_to_leaf]: termination measure -/
-@[simp]
-def betree_node_apply_messages_to_leaf_terminates
- (bindings : betree_list_t (UInt64 × UInt64))
- (new_msgs : betree_list_t (UInt64 × betree_message_t)) :=
- (bindings, new_msgs)
-
-/- [betree_main::betree::Node::{5}::apply_messages_to_leaf]: decreases_by tactic -/
-syntax "betree_node_apply_messages_to_leaf_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_apply_messages_to_leaf_decreases $bindings
-$new_msgs) =>`(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::filter_messages_for_key]: termination measure -/
-@[simp]
-def betree_node_filter_messages_for_key_terminates (key : UInt64)
- (msgs : betree_list_t (UInt64 × betree_message_t)) :=
- (key, msgs)
-
-/- [betree_main::betree::Node::{5}::filter_messages_for_key]: decreases_by tactic -/
-syntax "betree_node_filter_messages_for_key_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_filter_messages_for_key_decreases $key $msgs) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_after_key]: termination measure -/
-@[simp]
-def betree_node_lookup_first_message_after_key_terminates (key : UInt64)
- (msgs : betree_list_t (UInt64 × betree_message_t)) :=
- (key, msgs)
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_after_key]: decreases_by tactic -/
-syntax "betree_node_lookup_first_message_after_key_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_lookup_first_message_after_key_decreases $key $msgs) =>
- `(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::apply_messages_to_internal]: termination measure -/
-@[simp]
-def betree_node_apply_messages_to_internal_terminates
- (msgs : betree_list_t (UInt64 × betree_message_t))
- (new_msgs : betree_list_t (UInt64 × betree_message_t)) :=
- (msgs, new_msgs)
-
-/- [betree_main::betree::Node::{5}::apply_messages_to_internal]: decreases_by tactic -/
-syntax "betree_node_apply_messages_to_internal_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_apply_messages_to_internal_decreases $msgs
-$new_msgs) =>`(tactic| sorry)
-
-/- [betree_main::betree::Node::{5}::apply_messages]: termination measure -/
-@[simp]
-def betree_node_apply_messages_terminates (self : betree_node_t)
- (params : betree_params_t) (node_id_cnt : betree_node_id_counter_t)
- (msgs : betree_list_t (UInt64 × betree_message_t)) (st : State) :=
- (self, params, node_id_cnt, msgs, st)
-
-/- [betree_main::betree::Node::{5}::apply_messages]: decreases_by tactic -/
-syntax "betree_node_apply_messages_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_node_apply_messages_decreases $self $params $node_id_cnt
-$msgs $st) =>`(tactic| sorry)
-
-/- [betree_main::betree::Internal::{4}::flush]: termination measure -/
-@[simp]
-def betree_internal_flush_terminates (self : betree_internal_t)
- (params : betree_params_t) (node_id_cnt : betree_node_id_counter_t)
- (content : betree_list_t (UInt64 × betree_message_t)) (st : State) :=
- (self, params, node_id_cnt, content, st)
-
-/- [betree_main::betree::Internal::{4}::flush]: decreases_by tactic -/
-syntax "betree_internal_flush_decreases" term+ : tactic
-macro_rules
-| `(tactic| betree_internal_flush_decreases $self $params $node_id_cnt $content
-$st) =>`(tactic| sorry)
-
diff --git a/tests/lean/betree/BetreeMain/Funs.lean b/tests/lean/betree/BetreeMain/Funs.lean
deleted file mode 100644
index 7177e2ec..00000000
--- a/tests/lean/betree/BetreeMain/Funs.lean
+++ /dev/null
@@ -1,1167 +0,0 @@
--- THIS FILE WAS AUTOMATICALLY GENERATED BY AENEAS
--- [betree_main]: function definitions
-import Base.Primitives
-import BetreeMain.Types
-import BetreeMain.Opaque
-import BetreeMain.Clauses.Clauses
-
-section variable (opaque_defs: OpaqueDefs)
-
-/- [betree_main::betree::load_internal_node] -/
-def betree_load_internal_node_fwd
- (id : UInt64) (st : State) :
- Result (State × (betree_list_t (UInt64 × betree_message_t)))
- :=
- opaque_defs.betree_utils_load_internal_node_fwd id st
-
-/- [betree_main::betree::store_internal_node] -/
-def betree_store_internal_node_fwd
- (id : UInt64) (content : betree_list_t (UInt64 × betree_message_t))
- (st : State) :
- Result (State × Unit)
- :=
- do
- let (st0, _) ←
- opaque_defs.betree_utils_store_internal_node_fwd id content st
- Result.ret (st0, ())
-
-/- [betree_main::betree::load_leaf_node] -/
-def betree_load_leaf_node_fwd
- (id : UInt64) (st : State) :
- Result (State × (betree_list_t (UInt64 × UInt64)))
- :=
- opaque_defs.betree_utils_load_leaf_node_fwd id st
-
-/- [betree_main::betree::store_leaf_node] -/
-def betree_store_leaf_node_fwd
- (id : UInt64) (content : betree_list_t (UInt64 × UInt64)) (st : State) :
- Result (State × Unit)
- :=
- do
- let (st0, _) ← opaque_defs.betree_utils_store_leaf_node_fwd id content st
- Result.ret (st0, ())
-
-/- [betree_main::betree::fresh_node_id] -/
-def betree_fresh_node_id_fwd (counter : UInt64) : Result UInt64 :=
- do
- let _ ← UInt64.checked_add counter (UInt64.ofNatCore 1 (by intlit))
- Result.ret counter
-
-/- [betree_main::betree::fresh_node_id] -/
-def betree_fresh_node_id_back (counter : UInt64) : Result UInt64 :=
- UInt64.checked_add counter (UInt64.ofNatCore 1 (by intlit))
-
-/- [betree_main::betree::NodeIdCounter::{0}::new] -/
-def betree_node_id_counter_new_fwd : Result betree_node_id_counter_t :=
- Result.ret
- { betree_node_id_counter_next_node_id := (UInt64.ofNatCore 0 (by intlit)) }
-
-/- [betree_main::betree::NodeIdCounter::{0}::fresh_id] -/
-def betree_node_id_counter_fresh_id_fwd
- (self : betree_node_id_counter_t) : Result UInt64 :=
- do
- let _ ← UInt64.checked_add self.betree_node_id_counter_next_node_id
- (UInt64.ofNatCore 1 (by intlit))
- Result.ret self.betree_node_id_counter_next_node_id
-
-/- [betree_main::betree::NodeIdCounter::{0}::fresh_id] -/
-def betree_node_id_counter_fresh_id_back
- (self : betree_node_id_counter_t) : Result betree_node_id_counter_t :=
- do
- let i ← UInt64.checked_add self.betree_node_id_counter_next_node_id
- (UInt64.ofNatCore 1 (by intlit))
- Result.ret { betree_node_id_counter_next_node_id := i }
-
-/- [core::num::u64::{10}::MAX] -/
-def core_num_u64_max_body : Result UInt64 :=
- Result.ret (UInt64.ofNatCore 18446744073709551615 (by intlit))
-def core_num_u64_max_c : UInt64 := eval_global core_num_u64_max_body (by simp)
-
-/- [betree_main::betree::upsert_update] -/
-def betree_upsert_update_fwd
- (prev : Option UInt64) (st : betree_upsert_fun_state_t) : Result UInt64 :=
- match h: prev with
- | Option.none =>
- match h: st with
- | betree_upsert_fun_state_t.Add v => Result.ret v
- | betree_upsert_fun_state_t.Sub i =>
- Result.ret (UInt64.ofNatCore 0 (by intlit))
- | Option.some prev0 =>
- match h: st with
- | betree_upsert_fun_state_t.Add v =>
- do
- let margin ← UInt64.checked_sub core_num_u64_max_c prev0
- if h: margin >= v
- then UInt64.checked_add prev0 v
- else Result.ret core_num_u64_max_c
- | betree_upsert_fun_state_t.Sub v =>
- if h: prev0 >= v
- then UInt64.checked_sub prev0 v
- else Result.ret (UInt64.ofNatCore 0 (by intlit))
-
-/- [betree_main::betree::List::{1}::len] -/
-def betree_list_len_fwd
- (T : Type) (self : betree_list_t T) : (Result UInt64) :=
- match h: self with
- | betree_list_t.Cons t tl =>
- do
- let i ← betree_list_len_fwd T tl
- UInt64.checked_add (UInt64.ofNatCore 1 (by intlit)) i
- | betree_list_t.Nil => Result.ret (UInt64.ofNatCore 0 (by intlit))
-termination_by betree_list_len_fwd self => betree_list_len_terminates T self
-decreasing_by betree_list_len_decreases self
-
-/- [betree_main::betree::List::{1}::split_at] -/
-def betree_list_split_at_fwd
- (T : Type) (self : betree_list_t T) (n : UInt64) :
- (Result ((betree_list_t T) × (betree_list_t T)))
- :=
- if h: n = (UInt64.ofNatCore 0 (by intlit))
- then Result.ret (betree_list_t.Nil, self)
- else
- match h: self with
- | betree_list_t.Cons hd tl =>
- do
- let i ← UInt64.checked_sub n (UInt64.ofNatCore 1 (by intlit))
- let p ← betree_list_split_at_fwd T tl i
- let (ls0, ls1) := p
- let l := ls0
- Result.ret (betree_list_t.Cons hd l, ls1)
- | betree_list_t.Nil => Result.fail Error.panic
-termination_by betree_list_split_at_fwd self n =>
- betree_list_split_at_terminates T self n
-decreasing_by betree_list_split_at_decreases self n
-
-/- [betree_main::betree::List::{1}::push_front] -/
-def betree_list_push_front_fwd_back
- (T : Type) (self : betree_list_t T) (x : T) : Result (betree_list_t T) :=
- let tl := mem_replace_fwd (betree_list_t T) self betree_list_t.Nil
- let l := tl
- Result.ret (betree_list_t.Cons x l)
-
-/- [betree_main::betree::List::{1}::pop_front] -/
-def betree_list_pop_front_fwd (T : Type) (self : betree_list_t T) : Result T :=
- let ls := mem_replace_fwd (betree_list_t T) self betree_list_t.Nil
- match h: ls with
- | betree_list_t.Cons x tl => Result.ret x
- | betree_list_t.Nil => Result.fail Error.panic
-
-/- [betree_main::betree::List::{1}::pop_front] -/
-def betree_list_pop_front_back
- (T : Type) (self : betree_list_t T) : Result (betree_list_t T) :=
- let ls := mem_replace_fwd (betree_list_t T) self betree_list_t.Nil
- match h: ls with
- | betree_list_t.Cons x tl => Result.ret tl
- | betree_list_t.Nil => Result.fail Error.panic
-
-/- [betree_main::betree::List::{1}::hd] -/
-def betree_list_hd_fwd (T : Type) (self : betree_list_t T) : Result T :=
- match h: self with
- | betree_list_t.Cons hd l => Result.ret hd
- | betree_list_t.Nil => Result.fail Error.panic
-
-/- [betree_main::betree::List::{2}::head_has_key] -/
-def betree_list_head_has_key_fwd
- (T : Type) (self : betree_list_t (UInt64 × T)) (key : UInt64) :
- Result Bool
- :=
- match h: self with
- | betree_list_t.Cons hd l => let (i, _) := hd
- Result.ret (i = key)
- | betree_list_t.Nil => Result.ret false
-
-/- [betree_main::betree::List::{2}::partition_at_pivot] -/
-def betree_list_partition_at_pivot_fwd
- (T : Type) (self : betree_list_t (UInt64 × T)) (pivot : UInt64) :
- (Result ((betree_list_t (UInt64 × T)) × (betree_list_t (UInt64 × T))))
- :=
- match h: self with
- | betree_list_t.Cons hd tl =>
- let (i, t) := hd
- if h: i >= pivot
- then Result.ret (betree_list_t.Nil, betree_list_t.Cons (i, t) tl)
- else
- do
- let p ← betree_list_partition_at_pivot_fwd T tl pivot
- let (ls0, ls1) := p
- let l := ls0
- Result.ret (betree_list_t.Cons (i, t) l, ls1)
- | betree_list_t.Nil => Result.ret (betree_list_t.Nil, betree_list_t.Nil)
-termination_by betree_list_partition_at_pivot_fwd self pivot =>
- betree_list_partition_at_pivot_terminates T self pivot
-decreasing_by betree_list_partition_at_pivot_decreases self pivot
-
-/- [betree_main::betree::Leaf::{3}::split] -/
-def betree_leaf_split_fwd
- (self : betree_leaf_t) (content : betree_list_t (UInt64 × UInt64))
- (params : betree_params_t) (node_id_cnt : betree_node_id_counter_t)
- (st : State) :
- Result (State × betree_internal_t)
- :=
- do
- let p ←
- betree_list_split_at_fwd (UInt64 × UInt64) content
- params.betree_params_split_size
- let (content0, content1) := p
- let p0 ← betree_list_hd_fwd (UInt64 × UInt64) content1
- let (pivot, _) := p0
- let id0 ← betree_node_id_counter_fresh_id_fwd node_id_cnt
- let node_id_cnt0 ← betree_node_id_counter_fresh_id_back node_id_cnt
- let id1 ← betree_node_id_counter_fresh_id_fwd node_id_cnt0
- let (st0, _) ← betree_store_leaf_node_fwd id0 content0 st
- let (st1, _) ← betree_store_leaf_node_fwd id1 content1 st0
- let n := betree_node_t.Leaf
- {
- betree_leaf_id := id0,
- betree_leaf_size := params.betree_params_split_size
- }
- let n0 := betree_node_t.Leaf
- {
- betree_leaf_id := id1,
- betree_leaf_size := params.betree_params_split_size
- }
- Result.ret (st1, mkbetree_internal_t self.betree_leaf_id pivot n n0)
-
-/- [betree_main::betree::Leaf::{3}::split] -/
-def betree_leaf_split_back
- (self : betree_leaf_t) (content : betree_list_t (UInt64 × UInt64))
- (params : betree_params_t) (node_id_cnt : betree_node_id_counter_t)
- (st : State) :
- Result betree_node_id_counter_t
- :=
- do
- let p ←
- betree_list_split_at_fwd (UInt64 × UInt64) content
- params.betree_params_split_size
- let (content0, content1) := p
- let _ ← betree_list_hd_fwd (UInt64 × UInt64) content1
- let id0 ← betree_node_id_counter_fresh_id_fwd node_id_cnt
- let node_id_cnt0 ← betree_node_id_counter_fresh_id_back node_id_cnt
- let id1 ← betree_node_id_counter_fresh_id_fwd node_id_cnt0
- let (st0, _) ← betree_store_leaf_node_fwd id0 content0 st
- let _ ← betree_store_leaf_node_fwd id1 content1 st0
- betree_node_id_counter_fresh_id_back node_id_cnt0
-
-/- [betree_main::betree::Node::{5}::lookup_in_bindings] -/
-def betree_node_lookup_in_bindings_fwd
- (key : UInt64) (bindings : betree_list_t (UInt64 × UInt64)) :
- (Result (Option UInt64))
- :=
- match h: bindings with
- | betree_list_t.Cons hd tl =>
- let (i, i0) := hd
- if h: i = key
- then Result.ret (Option.some i0)
- else
- if h: i > key
- then Result.ret Option.none
- else betree_node_lookup_in_bindings_fwd key tl
- | betree_list_t.Nil => Result.ret Option.none
-termination_by betree_node_lookup_in_bindings_fwd key bindings =>
- betree_node_lookup_in_bindings_terminates key bindings
-decreasing_by betree_node_lookup_in_bindings_decreases key bindings
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_for_key] -/
-def betree_node_lookup_first_message_for_key_fwd
- (key : UInt64) (msgs : betree_list_t (UInt64 × betree_message_t)) :
- (Result (betree_list_t (UInt64 × betree_message_t)))
- :=
- match h: msgs with
- | betree_list_t.Cons x next_msgs =>
- let (i, m) := x
- if h: i >= key
- then Result.ret (betree_list_t.Cons (i, m) next_msgs)
- else betree_node_lookup_first_message_for_key_fwd key next_msgs
- | betree_list_t.Nil => Result.ret betree_list_t.Nil
-termination_by betree_node_lookup_first_message_for_key_fwd key msgs =>
- betree_node_lookup_first_message_for_key_terminates key msgs
-decreasing_by betree_node_lookup_first_message_for_key_decreases key msgs
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_for_key] -/
-def betree_node_lookup_first_message_for_key_back
- (key : UInt64) (msgs : betree_list_t (UInt64 × betree_message_t))
- (ret0 : betree_list_t (UInt64 × betree_message_t)) :
- (Result (betree_list_t (UInt64 × betree_message_t)))
- :=
- match h: msgs with
- | betree_list_t.Cons x next_msgs =>
- let (i, m) := x
- if h: i >= key
- then Result.ret ret0
- else
- do
- let next_msgs0 ←
- betree_node_lookup_first_message_for_key_back key next_msgs ret0
- Result.ret (betree_list_t.Cons (i, m) next_msgs0)
- | betree_list_t.Nil => Result.ret ret0
-termination_by betree_node_lookup_first_message_for_key_back key msgs ret0 =>
- betree_node_lookup_first_message_for_key_terminates key msgs
-decreasing_by betree_node_lookup_first_message_for_key_decreases key msgs
-
-/- [betree_main::betree::Node::{5}::apply_upserts] -/
-def betree_node_apply_upserts_fwd
- (msgs : betree_list_t (UInt64 × betree_message_t)) (prev : Option UInt64)
- (key : UInt64) (st : State) :
- (Result (State × UInt64))
- :=
- do
- let b ← betree_list_head_has_key_fwd betree_message_t msgs key
- if h: b
- then
- do
- let msg ← betree_list_pop_front_fwd (UInt64 × betree_message_t) msgs
- let (_, m) := msg
- match h: m with
- | betree_message_t.Insert i => Result.fail Error.panic
- | betree_message_t.Delete => Result.fail Error.panic
- | betree_message_t.Upsert s =>
- do
- let v ← betree_upsert_update_fwd prev s
- let msgs0 ←
- betree_list_pop_front_back (UInt64 × betree_message_t) msgs
- betree_node_apply_upserts_fwd msgs0 (Option.some v) key st
- else
- do
- let (st0, v) ←
- opaque_defs.core_option_option_unwrap_fwd UInt64 prev st
- let _ ←
- betree_list_push_front_fwd_back (UInt64 × betree_message_t) msgs
- (key, betree_message_t.Insert v)
- Result.ret (st0, v)
-termination_by betree_node_apply_upserts_fwd msgs prev key st =>
- betree_node_apply_upserts_terminates msgs prev key st
-decreasing_by betree_node_apply_upserts_decreases msgs prev key st
-
-/- [betree_main::betree::Node::{5}::apply_upserts] -/
-def betree_node_apply_upserts_back
- (msgs : betree_list_t (UInt64 × betree_message_t)) (prev : Option UInt64)
- (key : UInt64) (st : State) :
- (Result (betree_list_t (UInt64 × betree_message_t)))
- :=
- do
- let b ← betree_list_head_has_key_fwd betree_message_t msgs key
- if h: b
- then
- do
- let msg ← betree_list_pop_front_fwd (UInt64 × betree_message_t) msgs
- let (_, m) := msg
- match h: m with
- | betree_message_t.Insert i => Result.fail Error.panic
- | betree_message_t.Delete => Result.fail Error.panic
- | betree_message_t.Upsert s =>
- do
- let v ← betree_upsert_update_fwd prev s
- let msgs0 ←
- betree_list_pop_front_back (UInt64 × betree_message_t) msgs
- betree_node_apply_upserts_back msgs0 (Option.some v) key st
- else
- do
- let (_, v) ← opaque_defs.core_option_option_unwrap_fwd UInt64 prev st
- betree_list_push_front_fwd_back (UInt64 × betree_message_t) msgs (key,
- betree_message_t.Insert v)
-termination_by betree_node_apply_upserts_back msgs prev key st =>
- betree_node_apply_upserts_terminates msgs prev key st
-decreasing_by betree_node_apply_upserts_decreases msgs prev key st
-
-/- [betree_main::betree::Node::{5}::lookup] -/
-mutual def betree_node_lookup_fwd
- (self : betree_node_t) (key : UInt64) (st : State) :
- (Result (State × (Option UInt64)))
- :=
- match h: self with
- | betree_node_t.Internal node =>
- do
- let (mkbetree_internal_t i i0 n n0) := node
- let (st0, msgs) ← betree_load_internal_node_fwd i st
- let pending ← betree_node_lookup_first_message_for_key_fwd key msgs
- match h: pending with
- | betree_list_t.Cons p l =>
- let (k, msg) := p
- if h: k != key
- then
- do
- let (st1, opt) ←
- betree_internal_lookup_in_children_fwd (mkbetree_internal_t i i0
- n n0) key st0
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- (betree_list_t.Cons (k, msg) l)
- Result.ret (st1, opt)
- else
- match h: msg with
- | betree_message_t.Insert v =>
- do
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- (betree_list_t.Cons (k, betree_message_t.Insert v) l)
- Result.ret (st0, Option.some v)
- | betree_message_t.Delete =>
- do
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- (betree_list_t.Cons (k, betree_message_t.Delete) l)
- Result.ret (st0, Option.none)
- | betree_message_t.Upsert ufs =>
- do
- let (st1, v) ←
- betree_internal_lookup_in_children_fwd (mkbetree_internal_t i
- i0 n n0) key st0
- let (st2, v0) ←
- betree_node_apply_upserts_fwd (betree_list_t.Cons (k,
- betree_message_t.Upsert ufs) l) v key st1
- let node0 ←
- betree_internal_lookup_in_children_back (mkbetree_internal_t i
- i0 n n0) key st0
- let (mkbetree_internal_t i1 _ _ _) := node0
- let pending0 ←
- betree_node_apply_upserts_back (betree_list_t.Cons (k,
- betree_message_t.Upsert ufs) l) v key st1
- let msgs0 ←
- betree_node_lookup_first_message_for_key_back key msgs pending0
- let (st3, _) ← betree_store_internal_node_fwd i1 msgs0 st2
- Result.ret (st3, Option.some v0)
- | betree_list_t.Nil =>
- do
- let (st1, opt) ←
- betree_internal_lookup_in_children_fwd (mkbetree_internal_t i i0 n
- n0) key st0
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- betree_list_t.Nil
- Result.ret (st1, opt)
- | betree_node_t.Leaf node =>
- do
- let (st0, bindings) ← betree_load_leaf_node_fwd node.betree_leaf_id st
- let opt ← betree_node_lookup_in_bindings_fwd key bindings
- Result.ret (st0, opt)
-termination_by betree_node_lookup_fwd self key st =>
- betree_node_lookup_terminates self key st
-decreasing_by betree_node_lookup_decreases self key st
-
-/- [betree_main::betree::Node::{5}::lookup] -/
-def betree_node_lookup_back
- (self : betree_node_t) (key : UInt64) (st : State) :
- (Result betree_node_t)
- :=
- match h: self with
- | betree_node_t.Internal node =>
- do
- let (mkbetree_internal_t i i0 n n0) := node
- let (st0, msgs) ← betree_load_internal_node_fwd i st
- let pending ← betree_node_lookup_first_message_for_key_fwd key msgs
- match h: pending with
- | betree_list_t.Cons p l =>
- let (k, msg) := p
- if h: k != key
- then
- do
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- (betree_list_t.Cons (k, msg) l)
- let node0 ←
- betree_internal_lookup_in_children_back (mkbetree_internal_t i i0
- n n0) key st0
- Result.ret (betree_node_t.Internal node0)
- else
- match h: msg with
- | betree_message_t.Insert v =>
- do
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- (betree_list_t.Cons (k, betree_message_t.Insert v) l)
- Result.ret (betree_node_t.Internal (mkbetree_internal_t i i0 n
- n0))
- | betree_message_t.Delete =>
- do
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- (betree_list_t.Cons (k, betree_message_t.Delete) l)
- Result.ret (betree_node_t.Internal (mkbetree_internal_t i i0 n
- n0))
- | betree_message_t.Upsert ufs =>
- do
- let (st1, v) ←
- betree_internal_lookup_in_children_fwd (mkbetree_internal_t i
- i0 n n0) key st0
- let (st2, _) ←
- betree_node_apply_upserts_fwd (betree_list_t.Cons (k,
- betree_message_t.Upsert ufs) l) v key st1
- let node0 ←
- betree_internal_lookup_in_children_back (mkbetree_internal_t i
- i0 n n0) key st0
- let (mkbetree_internal_t i1 i2 n1 n2) := node0
- let pending0 ←
- betree_node_apply_upserts_back (betree_list_t.Cons (k,
- betree_message_t.Upsert ufs) l) v key st1
- let msgs0 ←
- betree_node_lookup_first_message_for_key_back key msgs pending0
- let _ ← betree_store_internal_node_fwd i1 msgs0 st2
- Result.ret (betree_node_t.Internal (mkbetree_internal_t i1 i2 n1
- n2))
- | betree_list_t.Nil =>
- do
- let _ ←
- betree_node_lookup_first_message_for_key_back key msgs
- betree_list_t.Nil
- let node0 ←
- betree_internal_lookup_in_children_back (mkbetree_internal_t i i0 n
- n0) key st0
- Result.ret (betree_node_t.Internal node0)
- | betree_node_t.Leaf node =>
- do
- let (_, bindings) ← betree_load_leaf_node_fwd node.betree_leaf_id st
- let _ ← betree_node_lookup_in_bindings_fwd key bindings
- Result.ret (betree_node_t.Leaf node)
-termination_by betree_node_lookup_back self key st =>
- betree_node_lookup_terminates self key st
-decreasing_by betree_node_lookup_decreases self key st
-
-/- [betree_main::betree::Internal::{4}::lookup_in_children] -/
-def betree_internal_lookup_in_children_fwd
- (self : betree_internal_t) (key : UInt64) (st : State) :
- (Result (State × (Option UInt64)))
- :=
- let (mkbetree_internal_t _ i n n0) := self
- if h: key < i
- then betree_node_lookup_fwd n key st
- else betree_node_lookup_fwd n0 key st
-termination_by betree_internal_lookup_in_children_fwd self key st =>
- betree_internal_lookup_in_children_terminates self key st
-decreasing_by betree_internal_lookup_in_children_decreases self key st
-
-/- [betree_main::betree::Internal::{4}::lookup_in_children] -/
-def betree_internal_lookup_in_children_back
- (self : betree_internal_t) (key : UInt64) (st : State) :
- (Result betree_internal_t)
- :=
- let (mkbetree_internal_t i i0 n n0) := self
- if h: key < i0
- then
- do
- let n1 ← betree_node_lookup_back n key st
- Result.ret (mkbetree_internal_t i i0 n1 n0)
- else
- do
- let n1 ← betree_node_lookup_back n0 key st
- Result.ret (mkbetree_internal_t i i0 n n1)
-termination_by betree_internal_lookup_in_children_back self key st =>
- betree_internal_lookup_in_children_terminates self key st
-decreasing_by betree_internal_lookup_in_children_decreases self key st
-
-/- [betree_main::betree::Node::{5}::lookup_mut_in_bindings] -/
-def betree_node_lookup_mut_in_bindings_fwd
- (key : UInt64) (bindings : betree_list_t (UInt64 × UInt64)) :
- (Result (betree_list_t (UInt64 × UInt64)))
- :=
- match h: bindings with
- | betree_list_t.Cons hd tl =>
- let (i, i0) := hd
- if h: i >= key
- then Result.ret (betree_list_t.Cons (i, i0) tl)
- else betree_node_lookup_mut_in_bindings_fwd key tl
- | betree_list_t.Nil => Result.ret betree_list_t.Nil
-termination_by betree_node_lookup_mut_in_bindings_fwd key bindings =>
- betree_node_lookup_mut_in_bindings_terminates key bindings
-decreasing_by betree_node_lookup_mut_in_bindings_decreases key bindings
-
-/- [betree_main::betree::Node::{5}::lookup_mut_in_bindings] -/
-def betree_node_lookup_mut_in_bindings_back
- (key : UInt64) (bindings : betree_list_t (UInt64 × UInt64))
- (ret0 : betree_list_t (UInt64 × UInt64)) :
- (Result (betree_list_t (UInt64 × UInt64)))
- :=
- match h: bindings with
- | betree_list_t.Cons hd tl =>
- let (i, i0) := hd
- if h: i >= key
- then Result.ret ret0
- else
- do
- let tl0 ← betree_node_lookup_mut_in_bindings_back key tl ret0
- Result.ret (betree_list_t.Cons (i, i0) tl0)
- | betree_list_t.Nil => Result.ret ret0
-termination_by betree_node_lookup_mut_in_bindings_back key bindings ret0 =>
- betree_node_lookup_mut_in_bindings_terminates key bindings
-decreasing_by betree_node_lookup_mut_in_bindings_decreases key bindings
-
-/- [betree_main::betree::Node::{5}::apply_to_leaf] -/
-def betree_node_apply_to_leaf_fwd_back
- (bindings : betree_list_t (UInt64 × UInt64)) (key : UInt64)
- (new_msg : betree_message_t) :
- Result (betree_list_t (UInt64 × UInt64))
- :=
- do
- let bindings0 ← betree_node_lookup_mut_in_bindings_fwd key bindings
- let b ← betree_list_head_has_key_fwd UInt64 bindings0 key
- if h: b
- then
- do
- let hd ← betree_list_pop_front_fwd (UInt64 × UInt64) bindings0
- match h: new_msg with
- | betree_message_t.Insert v =>
- do
- let bindings1 ←
- betree_list_pop_front_back (UInt64 × UInt64) bindings0
- let bindings2 ←
- betree_list_push_front_fwd_back (UInt64 × UInt64) bindings1
- (key, v)
- betree_node_lookup_mut_in_bindings_back key bindings bindings2
- | betree_message_t.Delete =>
- do
- let bindings1 ←
- betree_list_pop_front_back (UInt64 × UInt64) bindings0
- betree_node_lookup_mut_in_bindings_back key bindings bindings1
- | betree_message_t.Upsert s =>
- do
- let (_, i) := hd
- let v ← betree_upsert_update_fwd (Option.some i) s
- let bindings1 ←
- betree_list_pop_front_back (UInt64 × UInt64) bindings0
- let bindings2 ←
- betree_list_push_front_fwd_back (UInt64 × UInt64) bindings1
- (key, v)
- betree_node_lookup_mut_in_bindings_back key bindings bindings2
- else
- match h: new_msg with
- | betree_message_t.Insert v =>
- do
- let bindings1 ←
- betree_list_push_front_fwd_back (UInt64 × UInt64) bindings0 (key,
- v)
- betree_node_lookup_mut_in_bindings_back key bindings bindings1
- | betree_message_t.Delete =>
- betree_node_lookup_mut_in_bindings_back key bindings bindings0
- | betree_message_t.Upsert s =>
- do
- let v ← betree_upsert_update_fwd Option.none s
- let bindings1 ←
- betree_list_push_front_fwd_back (UInt64 × UInt64) bindings0 (key,
- v)
- betree_node_lookup_mut_in_bindings_back key bindings bindings1
-
-/- [betree_main::betree::Node::{5}::apply_messages_to_leaf] -/
-def betree_node_apply_messages_to_leaf_fwd_back
- (bindings : betree_list_t (UInt64 × UInt64))
- (new_msgs : betree_list_t (UInt64 × betree_message_t)) :
- (Result (betree_list_t (UInt64 × UInt64)))
- :=
- match h: new_msgs with
- | betree_list_t.Cons new_msg new_msgs_tl =>
- do
- let (i, m) := new_msg
- let bindings0 ← betree_node_apply_to_leaf_fwd_back bindings i m
- betree_node_apply_messages_to_leaf_fwd_back bindings0 new_msgs_tl
- | betree_list_t.Nil => Result.ret bindings
-termination_by betree_node_apply_messages_to_leaf_fwd_back bindings new_msgs =>
- betree_node_apply_messages_to_leaf_terminates bindings new_msgs
-decreasing_by betree_node_apply_messages_to_leaf_decreases bindings new_msgs
-
-/- [betree_main::betree::Node::{5}::filter_messages_for_key] -/
-def betree_node_filter_messages_for_key_fwd_back
- (key : UInt64) (msgs : betree_list_t (UInt64 × betree_message_t)) :
- (Result (betree_list_t (UInt64 × betree_message_t)))
- :=
- match h: msgs with
- | betree_list_t.Cons p l =>
- let (k, m) := p
- if h: k = key
- then
- do
- let msgs0 ←
- betree_list_pop_front_back (UInt64 × betree_message_t)
- (betree_list_t.Cons (k, m) l)
- betree_node_filter_messages_for_key_fwd_back key msgs0
- else Result.ret (betree_list_t.Cons (k, m) l)
- | betree_list_t.Nil => Result.ret betree_list_t.Nil
-termination_by betree_node_filter_messages_for_key_fwd_back key msgs =>
- betree_node_filter_messages_for_key_terminates key msgs
-decreasing_by betree_node_filter_messages_for_key_decreases key msgs
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_after_key] -/
-def betree_node_lookup_first_message_after_key_fwd
- (key : UInt64) (msgs : betree_list_t (UInt64 × betree_message_t)) :
- (Result (betree_list_t (UInt64 × betree_message_t)))
- :=
- match h: msgs with
- | betree_list_t.Cons p next_msgs =>
- let (k, m) := p
- if h: k = key
- then betree_node_lookup_first_message_after_key_fwd key next_msgs
- else Result.ret (betree_list_t.Cons (k, m) next_msgs)
- | betree_list_t.Nil => Result.ret betree_list_t.Nil
-termination_by betree_node_lookup_first_message_after_key_fwd key msgs =>
- betree_node_lookup_first_message_after_key_terminates key msgs
-decreasing_by betree_node_lookup_first_message_after_key_decreases key msgs
-
-/- [betree_main::betree::Node::{5}::lookup_first_message_after_key] -/
-def betree_node_lookup_first_message_after_key_back
- (key : UInt64) (msgs : betree_list_t (UInt64 × betree_message_t))
- (ret0 : betree_list_t (UInt64 × betree_message_t)) :
- (Result (betree_list_t (UInt64 × betree_message_t)))
- :=
- match h: msgs with
- | betree_list_t.Cons p next_msgs =>
- let (k, m) := p
- if h: k = key
- then
- do
- let next_msgs0 ←
- betree_node_lookup_first_message_after_key_back key next_msgs ret0
- Result.ret (betree_list_t.Cons (k, m) next_msgs0)
- else Result.ret ret0
- | betree_list_t.Nil => Result.ret ret0
-termination_by betree_node_lookup_first_message_after_key_back key msgs ret0 =>
- betree_node_lookup_first_message_after_key_terminates key msgs
-decreasing_by betree_node_lookup_first_message_after_key_decreases key msgs
-
-/- [betree_main::betree::Node::{5}::apply_to_internal] -/
-def betree_node_apply_to_internal_fwd_back
- (msgs : betree_list_t (UInt64 × betree_message_t)) (key : UInt64)
- (new_msg : betree_message_t) :
- Result (betree_list_t (UInt64 × betree_message_t))
- :=
- do
- let msgs0 ← betree_node_lookup_first_message_for_key_fwd key msgs
- let b ← betree_list_head_has_key_fwd betree_message_t msgs0 key
- if h: b
- then
- match h: new_msg with
- | betree_message_t.Insert i =>
- do
- let msgs1 ← betree_node_filter_messages_for_key_fwd_back key msgs0
- let msgs2 ←
- betree_list_push_front_fwd_back (UInt64 × betree_message_t) msgs1
- (key, betree_message_t.Insert i)
- betree_node_lookup_first_message_for_key_back key msgs msgs2
- | betree_message_t.Delete =>
- do
- let msgs1 ← betree_node_filter_messages_for_key_fwd_back key msgs0
- let msgs2 ←
- betree_list_push_front_fwd_back (UInt64 × betree_message_t) msgs1
- (key, betree_message_t.Delete)
- betree_node_lookup_first_message_for_key_back key msgs msgs2
- | betree_message_t.Upsert s =>
- do
- let p ← betree_list_hd_fwd (UInt64 × betree_message_t) msgs0
- let (_, m) := p
- match h: m with
- | betree_message_t.Insert prev =>
- do
- let v ← betree_upsert_update_fwd (Option.some prev) s
- let msgs1 ←
- betree_list_pop_front_back (UInt64 × betree_message_t) msgs0
- let msgs2 ←
- betree_list_push_front_fwd_back (UInt64 × betree_message_t)
- msgs1 (key, betree_message_t.Insert v)
- betree_node_lookup_first_message_for_key_back key msgs msgs2
- | betree_message_t.Delete =>
- do
- let v ← betree_upsert_update_fwd Option.none s
- let msgs1 ←
- betree_list_pop_front_back (UInt64 × betree_message_t) msgs0
- let msgs2 ←
- betree_list_push_front_fwd_back (UInt64 × betree_message_t)
- msgs1 (key, betree_message_t.Insert v)
- betree_node_lookup_first_message_for_key_back key msgs msgs2
- | betree_message_t.Upsert ufs =>
- do
- let msgs1 ←
- betree_node_lookup_first_message_after_key_fwd key msgs0
- let msgs2 ←
- betree_list_push_front_fwd_back (UInt64 × betree_message_t)
- msgs1 (key, betree_message_t.Upsert s)
- let msgs3 ←
- betree_node_lookup_first_message_after_key_back key msgs0 msgs2
- betree_node_lookup_first_message_for_key_back key msgs msgs3
- else
- do
- let msgs1 ←
- betree_list_push_front_fwd_back (UInt64 × betree_message_t) msgs0
- (key, new_msg)
- betree_node_lookup_first_message_for_key_back key msgs msgs1
-
-/- [betree_main::betree::Node::{5}::apply_messages_to_internal] -/
-def betree_node_apply_messages_to_internal_fwd_back
- (msgs : betree_list_t (UInt64 × betree_message_t))
- (new_msgs : betree_list_t (UInt64 × betree_message_t)) :
- (Result (betree_list_t (UInt64 × betree_message_t)))
- :=
- match h: new_msgs with
- | betree_list_t.Cons new_msg new_msgs_tl =>
- do
- let (i, m) := new_msg
- let msgs0 ← betree_node_apply_to_internal_fwd_back msgs i m
- betree_node_apply_messages_to_internal_fwd_back msgs0 new_msgs_tl
- | betree_list_t.Nil => Result.ret msgs
-termination_by betree_node_apply_messages_to_internal_fwd_back msgs new_msgs =>
- betree_node_apply_messages_to_internal_terminates msgs new_msgs
-decreasing_by betree_node_apply_messages_to_internal_decreases msgs new_msgs
-
-/- [betree_main::betree::Node::{5}::apply_messages] -/
-mutual def betree_node_apply_messages_fwd
- (self : betree_node_t) (params : betree_params_t)
- (node_id_cnt : betree_node_id_counter_t)
- (msgs : betree_list_t (UInt64 × betree_message_t)) (st : State) :
- (Result (State × Unit))
- :=
- match h: self with
- | betree_node_t.Internal node =>
- do
- let (mkbetree_internal_t i i0 n n0) := node
- let (st0, content) ← betree_load_internal_node_fwd i st
- let content0 ←
- betree_node_apply_messages_to_internal_fwd_back content msgs
- let num_msgs ←
- betree_list_len_fwd (UInt64 × betree_message_t) content0
- if h: num_msgs >= params.betree_params_min_flush_size
- then
- do
- let (st1, content1) ←
- betree_internal_flush_fwd (mkbetree_internal_t i i0 n n0) params
- node_id_cnt content0 st0
- let (node0, _) ←
- betree_internal_flush_back (mkbetree_internal_t i i0 n n0) params
- node_id_cnt content0 st0
- let (mkbetree_internal_t i1 _ _ _) := node0
- let (st2, _) ← betree_store_internal_node_fwd i1 content1 st1
- Result.ret (st2, ())
- else
- do
- let (st1, _) ← betree_store_internal_node_fwd i content0 st0
- Result.ret (st1, ())
- | betree_node_t.Leaf node =>
- do
- let (st0, content) ← betree_load_leaf_node_fwd node.betree_leaf_id st
- let content0 ← betree_node_apply_messages_to_leaf_fwd_back content msgs
- let len ← betree_list_len_fwd (UInt64 × UInt64) content0
- let i ← UInt64.checked_mul (UInt64.ofNatCore 2 (by intlit))
- params.betree_params_split_size
- if h: len >= i
- then
- do
- let (st1, _) ←
- betree_leaf_split_fwd node content0 params node_id_cnt st0
- let (st2, _) ←
- betree_store_leaf_node_fwd node.betree_leaf_id betree_list_t.Nil
- st1
- Result.ret (st2, ())
- else
- do
- let (st1, _) ←
- betree_store_leaf_node_fwd node.betree_leaf_id content0 st0
- Result.ret (st1, ())
-termination_by betree_node_apply_messages_fwd self params node_id_cnt msgs st
- =>
- betree_node_apply_messages_terminates self params node_id_cnt msgs st
-decreasing_by
- betree_node_apply_messages_decreases self params node_id_cnt msgs st
-
-/- [betree_main::betree::Node::{5}::apply_messages] -/
-def betree_node_apply_messages_back
- (self : betree_node_t) (params : betree_params_t)
- (node_id_cnt : betree_node_id_counter_t)
- (msgs : betree_list_t (UInt64 × betree_message_t)) (st : State) :
- (Result (betree_node_t × betree_node_id_counter_t))
- :=
- match h: self with
- | betree_node_t.Internal node =>
- do
- let (mkbetree_internal_t i i0 n n0) := node
- let (st0, content) ← betree_load_internal_node_fwd i st
- let content0 ←
- betree_node_apply_messages_to_internal_fwd_back content msgs
- let num_msgs ←
- betree_list_len_fwd (UInt64 × betree_message_t) content0
- if h: num_msgs >= params.betree_params_min_flush_size
- then
- do
- let (st1, content1) ←
- betree_internal_flush_fwd (mkbetree_internal_t i i0 n n0) params
- node_id_cnt content0 st0
- let (node0, node_id_cnt0) ←
- betree_internal_flush_back (mkbetree_internal_t i i0 n n0) params
- node_id_cnt content0 st0
- let (mkbetree_internal_t i1 i2 n1 n2) := node0
- let _ ← betree_store_internal_node_fwd i1 content1 st1
- Result.ret (betree_node_t.Internal (mkbetree_internal_t i1 i2 n1 n2),
- node_id_cnt0)
- else
- do
- let _ ← betree_store_internal_node_fwd i content0 st0
- Result.ret (betree_node_t.Internal (mkbetree_internal_t i i0 n n0),
- node_id_cnt)
- | betree_node_t.Leaf node =>
- do
- let (st0, content) ← betree_load_leaf_node_fwd node.betree_leaf_id st
- let content0 ← betree_node_apply_messages_to_leaf_fwd_back content msgs
- let len ← betree_list_len_fwd (UInt64 × UInt64) content0
- let i ← UInt64.checked_mul (UInt64.ofNatCore 2 (by intlit))
- params.betree_params_split_size
- if h: len >= i
- then
- do
- let (st1, new_node) ←
- betree_leaf_split_fwd node content0 params node_id_cnt st0
- let _ ←
- betree_store_leaf_node_fwd node.betree_leaf_id betree_list_t.Nil
- st1
- let node_id_cnt0 ←
- betree_leaf_split_back node content0 params node_id_cnt st0
- Result.ret (betree_node_t.Internal new_node, node_id_cnt0)
- else
- do
- let _ ← betree_store_leaf_node_fwd node.betree_leaf_id content0 st0
- Result.ret (betree_node_t.Leaf { node with betree_leaf_size := len },
- node_id_cnt)
-termination_by betree_node_apply_messages_back self params node_id_cnt msgs st
- =>
- betree_node_apply_messages_terminates self params node_id_cnt msgs st
-decreasing_by
- betree_node_apply_messages_decreases self params node_id_cnt msgs st
-
-/- [betree_main::betree::Internal::{4}::flush] -/
-def betree_internal_flush_fwd
- (self : betree_internal_t) (params : betree_params_t)
- (node_id_cnt : betree_node_id_counter_t)
- (content : betree_list_t (UInt64 × betree_message_t)) (st : State) :
- (Result (State × (betree_list_t (UInt64 × betree_message_t))))
- :=
- do
- let (mkbetree_internal_t _ i n n0) := self
- let p ← betree_list_partition_at_pivot_fwd betree_message_t content i
- let (msgs_left, msgs_right) := p
- let len_left ← betree_list_len_fwd (UInt64 × betree_message_t) msgs_left
- if h: len_left >= params.betree_params_min_flush_size
- then
- do
- let (st0, _) ←
- betree_node_apply_messages_fwd n params node_id_cnt msgs_left st
- let (_, node_id_cnt0) ←
- betree_node_apply_messages_back n params node_id_cnt msgs_left st
- let len_right ←
- betree_list_len_fwd (UInt64 × betree_message_t) msgs_right
- if h: len_right >= params.betree_params_min_flush_size
- then
- do
- let (st1, _) ←
- betree_node_apply_messages_fwd n0 params node_id_cnt0 msgs_right
- st0
- let _ ←
- betree_node_apply_messages_back n0 params node_id_cnt0 msgs_right
- st0
- Result.ret (st1, betree_list_t.Nil)
- else Result.ret (st0, msgs_right)
- else
- do
- let (st0, _) ←
- betree_node_apply_messages_fwd n0 params node_id_cnt msgs_right st
- let _ ←
- betree_node_apply_messages_back n0 params node_id_cnt msgs_right st
- Result.ret (st0, msgs_left)
-termination_by betree_internal_flush_fwd self params node_id_cnt content st =>
- betree_internal_flush_terminates self params node_id_cnt content st
-decreasing_by
- betree_internal_flush_decreases self params node_id_cnt content st
-
-/- [betree_main::betree::Internal::{4}::flush] -/
-def betree_internal_flush_back
- (self : betree_internal_t) (params : betree_params_t)
- (node_id_cnt : betree_node_id_counter_t)
- (content : betree_list_t (UInt64 × betree_message_t)) (st : State) :
- (Result (betree_internal_t × betree_node_id_counter_t))
- :=
- do
- let (mkbetree_internal_t i i0 n n0) := self
- let p ← betree_list_partition_at_pivot_fwd betree_message_t content i0
- let (msgs_left, msgs_right) := p
- let len_left ← betree_list_len_fwd (UInt64 × betree_message_t) msgs_left
- if h: len_left >= params.betree_params_min_flush_size
- then
- do
- let (st0, _) ←
- betree_node_apply_messages_fwd n params node_id_cnt msgs_left st
- let (n1, node_id_cnt0) ←
- betree_node_apply_messages_back n params node_id_cnt msgs_left st
- let len_right ←
- betree_list_len_fwd (UInt64 × betree_message_t) msgs_right
- if h: len_right >= params.betree_params_min_flush_size
- then
- do
- let (n2, node_id_cnt1) ←
- betree_node_apply_messages_back n0 params node_id_cnt0 msgs_right
- st0
- Result.ret (mkbetree_internal_t i i0 n1 n2, node_id_cnt1)
- else Result.ret (mkbetree_internal_t i i0 n1 n0, node_id_cnt0)
- else
- do
- let (n1, node_id_cnt0) ←
- betree_node_apply_messages_back n0 params node_id_cnt msgs_right st
- Result.ret (mkbetree_internal_t i i0 n n1, node_id_cnt0)
-termination_by betree_internal_flush_back self params node_id_cnt content st =>
- betree_internal_flush_terminates self params node_id_cnt content st
-decreasing_by
- betree_internal_flush_decreases self params node_id_cnt content st
-
-/- [betree_main::betree::Node::{5}::apply] -/
-def betree_node_apply_fwd
- (self : betree_node_t) (params : betree_params_t)
- (node_id_cnt : betree_node_id_counter_t) (key : UInt64)
- (new_msg : betree_message_t) (st : State) :
- Result (State × Unit)
- :=
- do
- let l := betree_list_t.Nil
- let (st0, _) ←
- betree_node_apply_messages_fwd self params node_id_cnt
- (betree_list_t.Cons (key, new_msg) l) st
- let _ ←
- betree_node_apply_messages_back self params node_id_cnt
- (betree_list_t.Cons (key, new_msg) l) st
- Result.ret (st0, ())
-
-/- [betree_main::betree::Node::{5}::apply] -/
-def betree_node_apply_back
- (self : betree_node_t) (params : betree_params_t)
- (node_id_cnt : betree_node_id_counter_t) (key : UInt64)
- (new_msg : betree_message_t) (st : State) :
- Result (betree_node_t × betree_node_id_counter_t)
- :=
- let l := betree_list_t.Nil
- betree_node_apply_messages_back self params node_id_cnt (betree_list_t.Cons
- (key, new_msg) l) st
-
-/- [betree_main::betree::BeTree::{6}::new] -/
-def betree_be_tree_new_fwd
- (min_flush_size : UInt64) (split_size : UInt64) (st : State) :
- Result (State × betree_be_tree_t)
- :=
- do
- let node_id_cnt ← betree_node_id_counter_new_fwd
- let id ← betree_node_id_counter_fresh_id_fwd node_id_cnt
- let (st0, _) ← betree_store_leaf_node_fwd id betree_list_t.Nil st
- let node_id_cnt0 ← betree_node_id_counter_fresh_id_back node_id_cnt
- Result.ret (st0,
- {
- betree_be_tree_params :=
- {
- betree_params_min_flush_size := min_flush_size,
- betree_params_split_size := split_size
- },
- betree_be_tree_node_id_cnt := node_id_cnt0,
- betree_be_tree_root :=
- (betree_node_t.Leaf
- {
- betree_leaf_id := id,
- betree_leaf_size := (UInt64.ofNatCore 0 (by intlit))
- })
- })
-
-/- [betree_main::betree::BeTree::{6}::apply] -/
-def betree_be_tree_apply_fwd
- (self : betree_be_tree_t) (key : UInt64) (msg : betree_message_t)
- (st : State) :
- Result (State × Unit)
- :=
- do
- let (st0, _) ←
- betree_node_apply_fwd self.betree_be_tree_root self.betree_be_tree_params
- self.betree_be_tree_node_id_cnt key msg st
- let _ ←
- betree_node_apply_back self.betree_be_tree_root
- self.betree_be_tree_params self.betree_be_tree_node_id_cnt key msg st
- Result.ret (st0, ())
-
-/- [betree_main::betree::BeTree::{6}::apply] -/
-def betree_be_tree_apply_back
- (self : betree_be_tree_t) (key : UInt64) (msg : betree_message_t)
- (st : State) :
- Result betree_be_tree_t
- :=
- do
- let (n, nic) ←
- betree_node_apply_back self.betree_be_tree_root
- self.betree_be_tree_params self.betree_be_tree_node_id_cnt key msg st
- Result.ret
- { self with betree_be_tree_node_id_cnt := nic, betree_be_tree_root := n }
-
-/- [betree_main::betree::BeTree::{6}::insert] -/
-def betree_be_tree_insert_fwd
- (self : betree_be_tree_t) (key : UInt64) (value : UInt64) (st : State) :
- Result (State × Unit)
- :=
- do
- let (st0, _) ←
- betree_be_tree_apply_fwd self key (betree_message_t.Insert value) st
- let _ ←
- betree_be_tree_apply_back self key (betree_message_t.Insert value) st
- Result.ret (st0, ())
-
-/- [betree_main::betree::BeTree::{6}::insert] -/
-def betree_be_tree_insert_back
- (self : betree_be_tree_t) (key : UInt64) (value : UInt64) (st : State) :
- Result betree_be_tree_t
- :=
- betree_be_tree_apply_back self key (betree_message_t.Insert value) st
-
-/- [betree_main::betree::BeTree::{6}::delete] -/
-def betree_be_tree_delete_fwd
- (self : betree_be_tree_t) (key : UInt64) (st : State) :
- Result (State × Unit)
- :=
- do
- let (st0, _) ←
- betree_be_tree_apply_fwd self key betree_message_t.Delete st
- let _ ← betree_be_tree_apply_back self key betree_message_t.Delete st
- Result.ret (st0, ())
-
-/- [betree_main::betree::BeTree::{6}::delete] -/
-def betree_be_tree_delete_back
- (self : betree_be_tree_t) (key : UInt64) (st : State) :
- Result betree_be_tree_t
- :=
- betree_be_tree_apply_back self key betree_message_t.Delete st
-
-/- [betree_main::betree::BeTree::{6}::upsert] -/
-def betree_be_tree_upsert_fwd
- (self : betree_be_tree_t) (key : UInt64) (upd : betree_upsert_fun_state_t)
- (st : State) :
- Result (State × Unit)
- :=
- do
- let (st0, _) ←
- betree_be_tree_apply_fwd self key (betree_message_t.Upsert upd) st
- let _ ←
- betree_be_tree_apply_back self key (betree_message_t.Upsert upd) st
- Result.ret (st0, ())
-
-/- [betree_main::betree::BeTree::{6}::upsert] -/
-def betree_be_tree_upsert_back
- (self : betree_be_tree_t) (key : UInt64) (upd : betree_upsert_fun_state_t)
- (st : State) :
- Result betree_be_tree_t
- :=
- betree_be_tree_apply_back self key (betree_message_t.Upsert upd) st
-
-/- [betree_main::betree::BeTree::{6}::lookup] -/
-def betree_be_tree_lookup_fwd
- (self : betree_be_tree_t) (key : UInt64) (st : State) :
- Result (State × (Option UInt64))
- :=
- betree_node_lookup_fwd self.betree_be_tree_root key st
-
-/- [betree_main::betree::BeTree::{6}::lookup] -/
-def betree_be_tree_lookup_back
- (self : betree_be_tree_t) (key : UInt64) (st : State) :
- Result betree_be_tree_t
- :=
- do
- let n ← betree_node_lookup_back self.betree_be_tree_root key st
- Result.ret { self with betree_be_tree_root := n }
-
-/- [betree_main::main] -/
-def main_fwd : Result Unit :=
- Result.ret ()
-
-/- Unit test for [betree_main::main] -/
-#assert (main_fwd == .ret ())
-
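A note on the translation scheme visible in the deleted file above: Aeneas splits every Rust function that updates data through a mutable borrow into a forward function, which computes the value returned to the caller, and a backward function, which computes the value given back through the borrow; both live in the Result monad. Below is a minimal sketch of the pattern for a hypothetical Rust function fn double(x: &mut u64) -> u64 { *x *= 2; *x }, using only the old Primitives helpers that appear above; it is an illustration, not part of the generated files.

-- forward function: the value the call returns
def double_fwd (x : UInt64) : Result UInt64 :=
  UInt64.checked_mul x (UInt64.ofNatCore 2 (by intlit))

-- backward function: the value written back through the mutable borrow
def double_back (x : UInt64) : Result UInt64 :=
  UInt64.checked_mul x (UInt64.ofNatCore 2 (by intlit))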
diff --git a/tests/lean/betree/BetreeMain/Opaque.lean b/tests/lean/betree/BetreeMain/Opaque.lean
deleted file mode 100644
index b3db37c2..00000000
--- a/tests/lean/betree/BetreeMain/Opaque.lean
+++ /dev/null
@@ -1,33 +0,0 @@
--- THIS FILE WAS AUTOMATICALLY GENERATED BY AENEAS
--- [betree_main]: opaque function definitions
-import Base.Primitives
-import BetreeMain.Types
-
-structure OpaqueDefs where
-
- /- [betree_main::betree_utils::load_internal_node] -/
- betree_utils_load_internal_node_fwd
- :
- UInt64 -> State -> Result (State × (betree_list_t (UInt64 ×
- betree_message_t)))
-
- /- [betree_main::betree_utils::store_internal_node] -/
- betree_utils_store_internal_node_fwd
- :
- UInt64 -> betree_list_t (UInt64 × betree_message_t) -> State -> Result
- (State × Unit)
-
- /- [betree_main::betree_utils::load_leaf_node] -/
- betree_utils_load_leaf_node_fwd
- : UInt64 -> State -> Result (State × (betree_list_t (UInt64 × UInt64)))
-
- /- [betree_main::betree_utils::store_leaf_node] -/
- betree_utils_store_leaf_node_fwd
- :
- UInt64 -> betree_list_t (UInt64 × UInt64) -> State -> Result (State ×
- Unit)
-
- /- [core::option::Option::{0}::unwrap] -/
- core_option_option_unwrap_fwd
- (T : Type) : Option T -> State -> Result (State × T)
-
diff --git a/tests/lean/betree/BetreeMain/Types.lean b/tests/lean/betree/BetreeMain/Types.lean
deleted file mode 100644
index 32634e30..00000000
--- a/tests/lean/betree/BetreeMain/Types.lean
+++ /dev/null
@@ -1,55 +0,0 @@
--- THIS FILE WAS AUTOMATICALLY GENERATED BY AENEAS
--- [betree_main]: type definitions
-import Base.Primitives
-
-/- [betree_main::betree::List] -/
-inductive betree_list_t (T : Type) :=
-| Cons : T -> betree_list_t T -> betree_list_t T
-| Nil : betree_list_t T
-
-/- [betree_main::betree::UpsertFunState] -/
-inductive betree_upsert_fun_state_t :=
-| Add : UInt64 -> betree_upsert_fun_state_t
-| Sub : UInt64 -> betree_upsert_fun_state_t
-
-/- [betree_main::betree::Message] -/
-inductive betree_message_t :=
-| Insert : UInt64 -> betree_message_t
-| Delete : betree_message_t
-| Upsert : betree_upsert_fun_state_t -> betree_message_t
-
-/- [betree_main::betree::Leaf] -/
-structure betree_leaf_t where
- betree_leaf_id : UInt64
- betree_leaf_size : UInt64
-
-/- [betree_main::betree::Node] -/
-mutual inductive betree_node_t :=
-| Internal : betree_internal_t -> betree_node_t
-| Leaf : betree_leaf_t -> betree_node_t
-
-/- [betree_main::betree::Internal] -/
-inductive betree_internal_t :=
- betree_internal_id : UInt64
- betree_internal_pivot : UInt64
- betree_internal_left : betree_node_t
- betree_internal_right : betree_node_t
-
-/- [betree_main::betree::Params] -/
-structure betree_params_t where
- betree_params_min_flush_size : UInt64
- betree_params_split_size : UInt64
-
-/- [betree_main::betree::NodeIdCounter] -/
-structure betree_node_id_counter_t where
- betree_node_id_counter_next_node_id : UInt64
-
-/- [betree_main::betree::BeTree] -/
-structure betree_be_tree_t where
- betree_be_tree_params : betree_params_t
- betree_be_tree_node_id_cnt : betree_node_id_counter_t
- betree_be_tree_root : betree_node_t
-
-/- The state type used in the state-error monad -/
-axiom State : Type
-
diff --git a/tests/lean/betree/lake-manifest.json b/tests/lean/betree/lake-manifest.json
deleted file mode 100644
index 57b071ca..00000000
--- a/tests/lean/betree/lake-manifest.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{"version": 4,
- "packagesDir": "./lake-packages",
- "packages":
- [{"git":
- {"url": "https://github.com/leanprover-community/mathlib4.git",
- "subDir?": null,
- "rev": "4037792ead804d7bfa8868e2c4684d4223c15ece",
- "name": "mathlib",
- "inputRev?": null}},
- {"git":
- {"url": "https://github.com/gebner/quote4",
- "subDir?": null,
- "rev": "2412c4fdf4a8b689f4467618e5e7b371ae5014aa",
- "name": "Qq",
- "inputRev?": "master"}},
- {"git":
- {"url": "https://github.com/JLimperg/aesop",
- "subDir?": null,
- "rev": "7fe9ecd9339b0e1796e89d243b776849c305c690",
- "name": "aesop",
- "inputRev?": "master"}},
- {"git":
- {"url": "https://github.com/leanprover/std4",
- "subDir?": null,
- "rev": "24897887905b3a1254b244369f5dd2cf6174b0ee",
- "name": "std",
- "inputRev?": "main"}}]}
diff --git a/tests/lean/betree/lakefile.lean b/tests/lean/betree/lakefile.lean
deleted file mode 100644
index ac2cc2d8..00000000
--- a/tests/lean/betree/lakefile.lean
+++ /dev/null
@@ -1,12 +0,0 @@
-import Lake
-open Lake DSL
-
-require mathlib from git
- "https://github.com/leanprover-community/mathlib4.git"
-
-package «betree_main» {}
-
-lean_lib «Base» {}
-
-@[default_target]
-lean_lib «BetreeMain» {}
diff --git a/tests/lean/betree/lean-toolchain b/tests/lean/betree/lean-toolchain
deleted file mode 100644
index bbf57f10..00000000
--- a/tests/lean/betree/lean-toolchain
+++ /dev/null
@@ -1 +0,0 @@
-leanprover/lean4:nightly-2023-01-21
diff --git a/tests/lean/hashmap/Base/Primitives.lean b/tests/lean/hashmap/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/hashmap/Base/Primitives.lean
+++ b/tests/lean/hashmap/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
+
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
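To see the effect of this Primitives overhaul in one place: the USize.checked_* helpers are removed, and bounded arithmetic now goes through the Scalar instances of +, -, *, / and %, each of which returns a Result (failing on overflow or division by zero). Below is a minimal usage sketch, assuming only the definitions added above; the helper name incr_usize is hypothetical and not part of the generated files.

-- Adding one to a Usize: `+` yields a `Result Usize`, so it is bound
-- with `←` inside the Result monad, exactly as in the Funs.lean diffs below.
def incr_usize (x : Usize) : Result Usize :=
  do
    let y ← x + (Usize.ofInt 1 (by intlit))
    Result.ret y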
diff --git a/tests/lean/hashmap/Hashmap/Clauses/Clauses.lean b/tests/lean/hashmap/Hashmap/Clauses/Clauses.lean
index fad5c11a..197b0a6a 100644
--- a/tests/lean/hashmap/Hashmap/Clauses/Clauses.lean
+++ b/tests/lean/hashmap/Hashmap/Clauses/Clauses.lean
@@ -1,11 +1,11 @@
--- [hashmap]: the decreases clauses
+-- [hashmap]: templates for the decreases clauses
import Base.Primitives
import Hashmap.Types
/- [hashmap::HashMap::{0}::allocate_slots]: termination measure -/
@[simp]
def hash_map_allocate_slots_loop_terminates (T : Type) (slots : Vec (list_t T))
- (n : USize) :=
+ (n : Usize) :=
(slots, n)
/- [hashmap::HashMap::{0}::allocate_slots]: decreases_by tactic -/
@@ -16,7 +16,7 @@ macro_rules
/- [hashmap::HashMap::{0}::clear]: termination measure -/
@[simp]
def hash_map_clear_loop_terminates (T : Type) (slots : Vec (list_t T))
- (i : USize) :=
+ (i : Usize) :=
(slots, i)
/- [hashmap::HashMap::{0}::clear]: decreases_by tactic -/
@@ -26,7 +26,7 @@ macro_rules
/- [hashmap::HashMap::{0}::insert_in_list]: termination measure -/
@[simp]
-def hash_map_insert_in_list_loop_terminates (T : Type) (key : USize)
+def hash_map_insert_in_list_loop_terminates (T : Type) (key : Usize)
(value : T) (ls : list_t T) :=
(key, value, ls)
@@ -51,7 +51,7 @@ macro_rules
/- [hashmap::HashMap::{0}::move_elements]: termination measure -/
@[simp]
def hash_map_move_elements_loop_terminates (T : Type) (ntable : hash_map_t T)
- (slots : Vec (list_t T)) (i : USize) :=
+ (slots : Vec (list_t T)) (i : Usize) :=
(ntable, slots, i)
/- [hashmap::HashMap::{0}::move_elements]: decreases_by tactic -/
@@ -62,7 +62,7 @@ macro_rules
/- [hashmap::HashMap::{0}::contains_key_in_list]: termination measure -/
@[simp]
-def hash_map_contains_key_in_list_loop_terminates (T : Type) (key : USize)
+def hash_map_contains_key_in_list_loop_terminates (T : Type) (key : Usize)
(ls : list_t T) :=
(key, ls)
@@ -74,7 +74,7 @@ macro_rules
/- [hashmap::HashMap::{0}::get_in_list]: termination measure -/
@[simp]
-def hash_map_get_in_list_loop_terminates (T : Type) (key : USize)
+def hash_map_get_in_list_loop_terminates (T : Type) (key : Usize)
(ls : list_t T) :=
(key, ls)
@@ -86,7 +86,7 @@ macro_rules
/- [hashmap::HashMap::{0}::get_mut_in_list]: termination measure -/
@[simp]
def hash_map_get_mut_in_list_loop_terminates (T : Type) (ls : list_t T)
- (key : USize) :=
+ (key : Usize) :=
(ls, key)
/- [hashmap::HashMap::{0}::get_mut_in_list]: decreases_by tactic -/
@@ -96,7 +96,7 @@ macro_rules
/- [hashmap::HashMap::{0}::remove_from_list]: termination measure -/
@[simp]
-def hash_map_remove_from_list_loop_terminates (T : Type) (key : USize)
+def hash_map_remove_from_list_loop_terminates (T : Type) (key : Usize)
(ls : list_t T) :=
(key, ls)
diff --git a/tests/lean/hashmap/Hashmap/Clauses/Template.lean b/tests/lean/hashmap/Hashmap/Clauses/Template.lean
index 7ba079f2..560592c8 100644
--- a/tests/lean/hashmap/Hashmap/Clauses/Template.lean
+++ b/tests/lean/hashmap/Hashmap/Clauses/Template.lean
@@ -6,7 +6,7 @@ import Hashmap.Types
/- [hashmap::HashMap::{0}::allocate_slots]: termination measure -/
@[simp]
def hash_map_allocate_slots_loop_terminates (T : Type) (slots : Vec (list_t T))
- (n : USize) :=
+ (n : Usize) :=
(slots, n)
/- [hashmap::HashMap::{0}::allocate_slots]: decreases_by tactic -/
@@ -17,7 +17,7 @@ macro_rules
/- [hashmap::HashMap::{0}::clear]: termination measure -/
@[simp]
def hash_map_clear_loop_terminates (T : Type) (slots : Vec (list_t T))
- (i : USize) :=
+ (i : Usize) :=
(slots, i)
/- [hashmap::HashMap::{0}::clear]: decreases_by tactic -/
@@ -27,7 +27,7 @@ macro_rules
/- [hashmap::HashMap::{0}::insert_in_list]: termination measure -/
@[simp]
-def hash_map_insert_in_list_loop_terminates (T : Type) (key : USize)
+def hash_map_insert_in_list_loop_terminates (T : Type) (key : Usize)
(value : T) (ls : list_t T) :=
(key, value, ls)
@@ -52,7 +52,7 @@ macro_rules
/- [hashmap::HashMap::{0}::move_elements]: termination measure -/
@[simp]
def hash_map_move_elements_loop_terminates (T : Type) (ntable : hash_map_t T)
- (slots : Vec (list_t T)) (i : USize) :=
+ (slots : Vec (list_t T)) (i : Usize) :=
(ntable, slots, i)
/- [hashmap::HashMap::{0}::move_elements]: decreases_by tactic -/
@@ -63,7 +63,7 @@ macro_rules
/- [hashmap::HashMap::{0}::contains_key_in_list]: termination measure -/
@[simp]
-def hash_map_contains_key_in_list_loop_terminates (T : Type) (key : USize)
+def hash_map_contains_key_in_list_loop_terminates (T : Type) (key : Usize)
(ls : list_t T) :=
(key, ls)
@@ -75,7 +75,7 @@ macro_rules
/- [hashmap::HashMap::{0}::get_in_list]: termination measure -/
@[simp]
-def hash_map_get_in_list_loop_terminates (T : Type) (key : USize)
+def hash_map_get_in_list_loop_terminates (T : Type) (key : Usize)
(ls : list_t T) :=
(key, ls)
@@ -87,7 +87,7 @@ macro_rules
/- [hashmap::HashMap::{0}::get_mut_in_list]: termination measure -/
@[simp]
def hash_map_get_mut_in_list_loop_terminates (T : Type) (ls : list_t T)
- (key : USize) :=
+ (key : Usize) :=
(ls, key)
/- [hashmap::HashMap::{0}::get_mut_in_list]: decreases_by tactic -/
@@ -97,7 +97,7 @@ macro_rules
/- [hashmap::HashMap::{0}::remove_from_list]: termination measure -/
@[simp]
-def hash_map_remove_from_list_loop_terminates (T : Type) (key : USize)
+def hash_map_remove_from_list_loop_terminates (T : Type) (key : Usize)
(ls : list_t T) :=
(key, ls)
diff --git a/tests/lean/hashmap/Hashmap/Funs.lean b/tests/lean/hashmap/Hashmap/Funs.lean
index 535ac9b2..77b1a157 100644
--- a/tests/lean/hashmap/Hashmap/Funs.lean
+++ b/tests/lean/hashmap/Hashmap/Funs.lean
@@ -5,19 +5,19 @@ import Hashmap.Types
import Hashmap.Clauses.Clauses
/- [hashmap::hash_key] -/
-def hash_key_fwd (k : USize) : Result USize :=
+def hash_key_fwd (k : Usize) : Result Usize :=
Result.ret k
/- [hashmap::HashMap::{0}::allocate_slots] -/
def hash_map_allocate_slots_loop_fwd
- (T : Type) (slots : Vec (list_t T)) (n : USize) :
+ (T : Type) (slots : Vec (list_t T)) (n : Usize) :
(Result (Vec (list_t T)))
:=
- if h: n > (USize.ofNatCore 0 (by intlit))
+ if h: n > (Usize.ofInt 0 (by intlit))
then
do
let slots0 ← vec_push_back (list_t T) slots list_t.Nil
- let n0 ← USize.checked_sub n (USize.ofNatCore 1 (by intlit))
+ let n0 ← n - (Usize.ofInt 1 (by intlit))
hash_map_allocate_slots_loop_fwd T slots0 n0
else Result.ret slots
termination_by hash_map_allocate_slots_loop_fwd slots n =>
@@ -26,23 +26,23 @@ decreasing_by hash_map_allocate_slots_loop_decreases slots n
/- [hashmap::HashMap::{0}::allocate_slots] -/
def hash_map_allocate_slots_fwd
- (T : Type) (slots : Vec (list_t T)) (n : USize) : Result (Vec (list_t T)) :=
+ (T : Type) (slots : Vec (list_t T)) (n : Usize) : Result (Vec (list_t T)) :=
hash_map_allocate_slots_loop_fwd T slots n
/- [hashmap::HashMap::{0}::new_with_capacity] -/
def hash_map_new_with_capacity_fwd
- (T : Type) (capacity : USize) (max_load_dividend : USize)
- (max_load_divisor : USize) :
+ (T : Type) (capacity : Usize) (max_load_dividend : Usize)
+ (max_load_divisor : Usize) :
Result (hash_map_t T)
:=
do
let v := vec_new (list_t T)
let slots ← hash_map_allocate_slots_fwd T v capacity
- let i ← USize.checked_mul capacity max_load_dividend
- let i0 ← USize.checked_div i max_load_divisor
+ let i ← capacity * max_load_dividend
+ let i0 ← i / max_load_divisor
Result.ret
{
- hash_map_num_entries := (USize.ofNatCore 0 (by intlit)),
+ hash_map_num_entries := (Usize.ofInt 0 (by intlit)),
hash_map_max_load_factor := (max_load_dividend, max_load_divisor),
hash_map_max_load := i0,
hash_map_slots := slots
@@ -50,19 +50,19 @@ def hash_map_new_with_capacity_fwd
/- [hashmap::HashMap::{0}::new] -/
def hash_map_new_fwd (T : Type) : Result (hash_map_t T) :=
- hash_map_new_with_capacity_fwd T (USize.ofNatCore 32 (by intlit))
- (USize.ofNatCore 4 (by intlit)) (USize.ofNatCore 5 (by intlit))
+ hash_map_new_with_capacity_fwd T (Usize.ofInt 32 (by intlit))
+ (Usize.ofInt 4 (by intlit)) (Usize.ofInt 5 (by intlit))
/- [hashmap::HashMap::{0}::clear] -/
def hash_map_clear_loop_fwd_back
- (T : Type) (slots : Vec (list_t T)) (i : USize) :
+ (T : Type) (slots : Vec (list_t T)) (i : Usize) :
(Result (Vec (list_t T)))
:=
let i0 := vec_len (list_t T) slots
if h: i < i0
then
do
- let i1 ← USize.checked_add i (USize.ofNatCore 1 (by intlit))
+ let i1 ← i + (Usize.ofInt 1 (by intlit))
let slots0 ← vec_index_mut_back (list_t T) slots i list_t.Nil
hash_map_clear_loop_fwd_back T slots0 i1
else Result.ret slots
@@ -76,22 +76,22 @@ def hash_map_clear_fwd_back
do
let v ←
hash_map_clear_loop_fwd_back T self.hash_map_slots
- (USize.ofNatCore 0 (by intlit))
+ (Usize.ofInt 0 (by intlit))
Result.ret
{
self
with
- hash_map_num_entries := (USize.ofNatCore 0 (by intlit)),
+ hash_map_num_entries := (Usize.ofInt 0 (by intlit)),
hash_map_slots := v
}
/- [hashmap::HashMap::{0}::len] -/
-def hash_map_len_fwd (T : Type) (self : hash_map_t T) : Result USize :=
+def hash_map_len_fwd (T : Type) (self : hash_map_t T) : Result Usize :=
Result.ret self.hash_map_num_entries
/- [hashmap::HashMap::{0}::insert_in_list] -/
def hash_map_insert_in_list_loop_fwd
- (T : Type) (key : USize) (value : T) (ls : list_t T) : (Result Bool) :=
+ (T : Type) (key : Usize) (value : T) (ls : list_t T) : (Result Bool) :=
match h: ls with
| list_t.Cons ckey cvalue tl =>
if h: ckey = key
@@ -104,12 +104,12 @@ decreasing_by hash_map_insert_in_list_loop_decreases key value ls
/- [hashmap::HashMap::{0}::insert_in_list] -/
def hash_map_insert_in_list_fwd
- (T : Type) (key : USize) (value : T) (ls : list_t T) : Result Bool :=
+ (T : Type) (key : Usize) (value : T) (ls : list_t T) : Result Bool :=
hash_map_insert_in_list_loop_fwd T key value ls
/- [hashmap::HashMap::{0}::insert_in_list] -/
def hash_map_insert_in_list_loop_back
- (T : Type) (key : USize) (value : T) (ls : list_t T) : (Result (list_t T)) :=
+ (T : Type) (key : Usize) (value : T) (ls : list_t T) : (Result (list_t T)) :=
match h: ls with
| list_t.Cons ckey cvalue tl =>
if h: ckey = key
@@ -126,25 +126,24 @@ decreasing_by hash_map_insert_in_list_loop_decreases key value ls
/- [hashmap::HashMap::{0}::insert_in_list] -/
def hash_map_insert_in_list_back
- (T : Type) (key : USize) (value : T) (ls : list_t T) : Result (list_t T) :=
+ (T : Type) (key : Usize) (value : T) (ls : list_t T) : Result (list_t T) :=
hash_map_insert_in_list_loop_back T key value ls
/- [hashmap::HashMap::{0}::insert_no_resize] -/
def hash_map_insert_no_resize_fwd_back
- (T : Type) (self : hash_map_t T) (key : USize) (value : T) :
+ (T : Type) (self : hash_map_t T) (key : Usize) (value : T) :
Result (hash_map_t T)
:=
do
let hash ← hash_key_fwd key
let i := vec_len (list_t T) self.hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ← vec_index_mut_fwd (list_t T) self.hash_map_slots hash_mod
let inserted ← hash_map_insert_in_list_fwd T key value l
if h: inserted
then
do
- let i0 ← USize.checked_add self.hash_map_num_entries
- (USize.ofNatCore 1 (by intlit))
+ let i0 ← self.hash_map_num_entries + (Usize.ofInt 1 (by intlit))
let l0 ← hash_map_insert_in_list_back T key value l
let v ← vec_index_mut_back (list_t T) self.hash_map_slots hash_mod l0
Result.ret
@@ -156,9 +155,9 @@ def hash_map_insert_no_resize_fwd_back
Result.ret { self with hash_map_slots := v }
/- [core::num::u32::{9}::MAX] -/
-def core_num_u32_max_body : Result UInt32 :=
- Result.ret (UInt32.ofNatCore 4294967295 (by intlit))
-def core_num_u32_max_c : UInt32 := eval_global core_num_u32_max_body (by simp)
+def core_num_u32_max_body : Result U32 :=
+ Result.ret (U32.ofInt 4294967295 (by intlit))
+def core_num_u32_max_c : U32 := eval_global core_num_u32_max_body (by simp)
/- [hashmap::HashMap::{0}::move_elements_from_list] -/
def hash_map_move_elements_from_list_loop_fwd_back
@@ -182,7 +181,7 @@ def hash_map_move_elements_from_list_fwd_back
/- [hashmap::HashMap::{0}::move_elements] -/
def hash_map_move_elements_loop_fwd_back
- (T : Type) (ntable : hash_map_t T) (slots : Vec (list_t T)) (i : USize) :
+ (T : Type) (ntable : hash_map_t T) (slots : Vec (list_t T)) (i : Usize) :
(Result ((hash_map_t T) × (Vec (list_t T))))
:=
let i0 := vec_len (list_t T) slots
@@ -192,7 +191,7 @@ def hash_map_move_elements_loop_fwd_back
let l ← vec_index_mut_fwd (list_t T) slots i
let ls := mem_replace_fwd (list_t T) l list_t.Nil
let ntable0 ← hash_map_move_elements_from_list_fwd_back T ntable ls
- let i1 ← USize.checked_add i (USize.ofNatCore 1 (by intlit))
+ let i1 ← i + (Usize.ofInt 1 (by intlit))
let l0 := mem_replace_back (list_t T) l list_t.Nil
let slots0 ← vec_index_mut_back (list_t T) slots i l0
hash_map_move_elements_loop_fwd_back T ntable0 slots0 i1
@@ -203,7 +202,7 @@ decreasing_by hash_map_move_elements_loop_decreases ntable slots i
/- [hashmap::HashMap::{0}::move_elements] -/
def hash_map_move_elements_fwd_back
- (T : Type) (ntable : hash_map_t T) (slots : Vec (list_t T)) (i : USize) :
+ (T : Type) (ntable : hash_map_t T) (slots : Vec (list_t T)) (i : Usize) :
Result ((hash_map_t T) × (Vec (list_t T)))
:=
hash_map_move_elements_loop_fwd_back T ntable slots i
@@ -212,19 +211,19 @@ def hash_map_move_elements_fwd_back
def hash_map_try_resize_fwd_back
(T : Type) (self : hash_map_t T) : Result (hash_map_t T) :=
do
- let max_usize ← scalar_cast USize core_num_u32_max_c
+ let max_usize ← Scalar.cast .Usize core_num_u32_max_c
let capacity := vec_len (list_t T) self.hash_map_slots
- let n1 ← USize.checked_div max_usize (USize.ofNatCore 2 (by intlit))
+ let n1 ← max_usize / (Usize.ofInt 2 (by intlit))
let (i, i0) := self.hash_map_max_load_factor
- let i1 ← USize.checked_div n1 i
+ let i1 ← n1 / i
if h: capacity <= i1
then
do
- let i2 ← USize.checked_mul capacity (USize.ofNatCore 2 (by intlit))
+ let i2 ← capacity * (Usize.ofInt 2 (by intlit))
let ntable ← hash_map_new_with_capacity_fwd T i2 i i0
let (ntable0, _) ←
hash_map_move_elements_fwd_back T ntable self.hash_map_slots
- (USize.ofNatCore 0 (by intlit))
+ (Usize.ofInt 0 (by intlit))
Result.ret
{
ntable0
@@ -236,7 +235,7 @@ def hash_map_try_resize_fwd_back
/- [hashmap::HashMap::{0}::insert] -/
def hash_map_insert_fwd_back
- (T : Type) (self : hash_map_t T) (key : USize) (value : T) :
+ (T : Type) (self : hash_map_t T) (key : Usize) (value : T) :
Result (hash_map_t T)
:=
do
@@ -248,7 +247,7 @@ def hash_map_insert_fwd_back
/- [hashmap::HashMap::{0}::contains_key_in_list] -/
def hash_map_contains_key_in_list_loop_fwd
- (T : Type) (key : USize) (ls : list_t T) : (Result Bool) :=
+ (T : Type) (key : Usize) (ls : list_t T) : (Result Bool) :=
match h: ls with
| list_t.Cons ckey t tl =>
if h: ckey = key
@@ -261,22 +260,22 @@ decreasing_by hash_map_contains_key_in_list_loop_decreases key ls
/- [hashmap::HashMap::{0}::contains_key_in_list] -/
def hash_map_contains_key_in_list_fwd
- (T : Type) (key : USize) (ls : list_t T) : Result Bool :=
+ (T : Type) (key : Usize) (ls : list_t T) : Result Bool :=
hash_map_contains_key_in_list_loop_fwd T key ls
/- [hashmap::HashMap::{0}::contains_key] -/
def hash_map_contains_key_fwd
- (T : Type) (self : hash_map_t T) (key : USize) : Result Bool :=
+ (T : Type) (self : hash_map_t T) (key : Usize) : Result Bool :=
do
let hash ← hash_key_fwd key
let i := vec_len (list_t T) self.hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ← vec_index_fwd (list_t T) self.hash_map_slots hash_mod
hash_map_contains_key_in_list_fwd T key l
/- [hashmap::HashMap::{0}::get_in_list] -/
def hash_map_get_in_list_loop_fwd
- (T : Type) (key : USize) (ls : list_t T) : (Result T) :=
+ (T : Type) (key : Usize) (ls : list_t T) : (Result T) :=
match h: ls with
| list_t.Cons ckey cvalue tl =>
if h: ckey = key
@@ -289,22 +288,22 @@ decreasing_by hash_map_get_in_list_loop_decreases key ls
/- [hashmap::HashMap::{0}::get_in_list] -/
def hash_map_get_in_list_fwd
- (T : Type) (key : USize) (ls : list_t T) : Result T :=
+ (T : Type) (key : Usize) (ls : list_t T) : Result T :=
hash_map_get_in_list_loop_fwd T key ls
/- [hashmap::HashMap::{0}::get] -/
def hash_map_get_fwd
- (T : Type) (self : hash_map_t T) (key : USize) : Result T :=
+ (T : Type) (self : hash_map_t T) (key : Usize) : Result T :=
do
let hash ← hash_key_fwd key
let i := vec_len (list_t T) self.hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ← vec_index_fwd (list_t T) self.hash_map_slots hash_mod
hash_map_get_in_list_fwd T key l
/- [hashmap::HashMap::{0}::get_mut_in_list] -/
def hash_map_get_mut_in_list_loop_fwd
- (T : Type) (ls : list_t T) (key : USize) : (Result T) :=
+ (T : Type) (ls : list_t T) (key : Usize) : (Result T) :=
match h: ls with
| list_t.Cons ckey cvalue tl =>
if h: ckey = key
@@ -317,12 +316,12 @@ decreasing_by hash_map_get_mut_in_list_loop_decreases ls key
/- [hashmap::HashMap::{0}::get_mut_in_list] -/
def hash_map_get_mut_in_list_fwd
- (T : Type) (ls : list_t T) (key : USize) : Result T :=
+ (T : Type) (ls : list_t T) (key : Usize) : Result T :=
hash_map_get_mut_in_list_loop_fwd T ls key
/- [hashmap::HashMap::{0}::get_mut_in_list] -/
def hash_map_get_mut_in_list_loop_back
- (T : Type) (ls : list_t T) (key : USize) (ret0 : T) : (Result (list_t T)) :=
+ (T : Type) (ls : list_t T) (key : Usize) (ret0 : T) : (Result (list_t T)) :=
match h: ls with
| list_t.Cons ckey cvalue tl =>
if h: ckey = key
@@ -338,28 +337,28 @@ decreasing_by hash_map_get_mut_in_list_loop_decreases ls key
/- [hashmap::HashMap::{0}::get_mut_in_list] -/
def hash_map_get_mut_in_list_back
- (T : Type) (ls : list_t T) (key : USize) (ret0 : T) : Result (list_t T) :=
+ (T : Type) (ls : list_t T) (key : Usize) (ret0 : T) : Result (list_t T) :=
hash_map_get_mut_in_list_loop_back T ls key ret0
/- [hashmap::HashMap::{0}::get_mut] -/
def hash_map_get_mut_fwd
- (T : Type) (self : hash_map_t T) (key : USize) : Result T :=
+ (T : Type) (self : hash_map_t T) (key : Usize) : Result T :=
do
let hash ← hash_key_fwd key
let i := vec_len (list_t T) self.hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ← vec_index_mut_fwd (list_t T) self.hash_map_slots hash_mod
hash_map_get_mut_in_list_fwd T l key
/- [hashmap::HashMap::{0}::get_mut] -/
def hash_map_get_mut_back
- (T : Type) (self : hash_map_t T) (key : USize) (ret0 : T) :
+ (T : Type) (self : hash_map_t T) (key : Usize) (ret0 : T) :
Result (hash_map_t T)
:=
do
let hash ← hash_key_fwd key
let i := vec_len (list_t T) self.hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ← vec_index_mut_fwd (list_t T) self.hash_map_slots hash_mod
let l0 ← hash_map_get_mut_in_list_back T l key ret0
let v ← vec_index_mut_back (list_t T) self.hash_map_slots hash_mod l0
@@ -367,7 +366,7 @@ def hash_map_get_mut_back
/- [hashmap::HashMap::{0}::remove_from_list] -/
def hash_map_remove_from_list_loop_fwd
- (T : Type) (key : USize) (ls : list_t T) : (Result (Option T)) :=
+ (T : Type) (key : Usize) (ls : list_t T) : (Result (Option T)) :=
match h: ls with
| list_t.Cons ckey t tl =>
if h: ckey = key
@@ -385,12 +384,12 @@ decreasing_by hash_map_remove_from_list_loop_decreases key ls
/- [hashmap::HashMap::{0}::remove_from_list] -/
def hash_map_remove_from_list_fwd
- (T : Type) (key : USize) (ls : list_t T) : Result (Option T) :=
+ (T : Type) (key : Usize) (ls : list_t T) : Result (Option T) :=
hash_map_remove_from_list_loop_fwd T key ls
/- [hashmap::HashMap::{0}::remove_from_list] -/
def hash_map_remove_from_list_loop_back
- (T : Type) (key : USize) (ls : list_t T) : (Result (list_t T)) :=
+ (T : Type) (key : Usize) (ls : list_t T) : (Result (list_t T)) :=
match h: ls with
| list_t.Cons ckey t tl =>
if h: ckey = key
@@ -411,33 +410,32 @@ decreasing_by hash_map_remove_from_list_loop_decreases key ls
/- [hashmap::HashMap::{0}::remove_from_list] -/
def hash_map_remove_from_list_back
- (T : Type) (key : USize) (ls : list_t T) : Result (list_t T) :=
+ (T : Type) (key : Usize) (ls : list_t T) : Result (list_t T) :=
hash_map_remove_from_list_loop_back T key ls
/- [hashmap::HashMap::{0}::remove] -/
def hash_map_remove_fwd
- (T : Type) (self : hash_map_t T) (key : USize) : Result (Option T) :=
+ (T : Type) (self : hash_map_t T) (key : Usize) : Result (Option T) :=
do
let hash ← hash_key_fwd key
let i := vec_len (list_t T) self.hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ← vec_index_mut_fwd (list_t T) self.hash_map_slots hash_mod
let x ← hash_map_remove_from_list_fwd T key l
match h: x with
| Option.none => Result.ret Option.none
| Option.some x0 =>
do
- let _ ← USize.checked_sub self.hash_map_num_entries
- (USize.ofNatCore 1 (by intlit))
+ let _ ← self.hash_map_num_entries - (Usize.ofInt 1 (by intlit))
Result.ret (Option.some x0)
/- [hashmap::HashMap::{0}::remove] -/
def hash_map_remove_back
- (T : Type) (self : hash_map_t T) (key : USize) : Result (hash_map_t T) :=
+ (T : Type) (self : hash_map_t T) (key : Usize) : Result (hash_map_t T) :=
do
let hash ← hash_key_fwd key
let i := vec_len (list_t T) self.hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ← vec_index_mut_fwd (list_t T) self.hash_map_slots hash_mod
let x ← hash_map_remove_from_list_fwd T key l
match h: x with
@@ -448,8 +446,7 @@ def hash_map_remove_back
Result.ret { self with hash_map_slots := v }
| Option.some x0 =>
do
- let i0 ← USize.checked_sub self.hash_map_num_entries
- (USize.ofNatCore 1 (by intlit))
+ let i0 ← self.hash_map_num_entries - (Usize.ofInt 1 (by intlit))
let l0 ← hash_map_remove_from_list_back T key l
let v ← vec_index_mut_back (list_t T) self.hash_map_slots hash_mod l0
Result.ret
@@ -458,65 +455,59 @@ def hash_map_remove_back
/- [hashmap::test1] -/
def test1_fwd : Result Unit :=
do
- let hm ← hash_map_new_fwd UInt64
+ let hm ← hash_map_new_fwd U64
let hm0 ←
- hash_map_insert_fwd_back UInt64 hm (USize.ofNatCore 0 (by intlit))
- (UInt64.ofNatCore 42 (by intlit))
+ hash_map_insert_fwd_back U64 hm (Usize.ofInt 0 (by intlit))
+ (U64.ofInt 42 (by intlit))
let hm1 ←
- hash_map_insert_fwd_back UInt64 hm0 (USize.ofNatCore 128 (by intlit))
- (UInt64.ofNatCore 18 (by intlit))
+ hash_map_insert_fwd_back U64 hm0 (Usize.ofInt 128 (by intlit))
+ (U64.ofInt 18 (by intlit))
let hm2 ←
- hash_map_insert_fwd_back UInt64 hm1 (USize.ofNatCore 1024 (by intlit))
- (UInt64.ofNatCore 138 (by intlit))
+ hash_map_insert_fwd_back U64 hm1 (Usize.ofInt 1024 (by intlit))
+ (U64.ofInt 138 (by intlit))
let hm3 ←
- hash_map_insert_fwd_back UInt64 hm2 (USize.ofNatCore 1056 (by intlit))
- (UInt64.ofNatCore 256 (by intlit))
- let i ← hash_map_get_fwd UInt64 hm3 (USize.ofNatCore 128 (by intlit))
- if h: not (i = (UInt64.ofNatCore 18 (by intlit)))
+ hash_map_insert_fwd_back U64 hm2 (Usize.ofInt 1056 (by intlit))
+ (U64.ofInt 256 (by intlit))
+ let i ← hash_map_get_fwd U64 hm3 (Usize.ofInt 128 (by intlit))
+ if h: not (i = (U64.ofInt 18 (by intlit)))
then Result.fail Error.panic
else
do
let hm4 ←
- hash_map_get_mut_back UInt64 hm3 (USize.ofNatCore 1024 (by intlit))
- (UInt64.ofNatCore 56 (by intlit))
- let i0 ←
- hash_map_get_fwd UInt64 hm4 (USize.ofNatCore 1024 (by intlit))
- if h: not (i0 = (UInt64.ofNatCore 56 (by intlit)))
+ hash_map_get_mut_back U64 hm3 (Usize.ofInt 1024 (by intlit))
+ (U64.ofInt 56 (by intlit))
+ let i0 ← hash_map_get_fwd U64 hm4 (Usize.ofInt 1024 (by intlit))
+ if h: not (i0 = (U64.ofInt 56 (by intlit)))
then Result.fail Error.panic
else
do
let x ←
- hash_map_remove_fwd UInt64 hm4 (USize.ofNatCore 1024 (by intlit))
+ hash_map_remove_fwd U64 hm4 (Usize.ofInt 1024 (by intlit))
match h: x with
| Option.none => Result.fail Error.panic
| Option.some x0 =>
- if h: not (x0 = (UInt64.ofNatCore 56 (by intlit)))
+ if h: not (x0 = (U64.ofInt 56 (by intlit)))
then Result.fail Error.panic
else
do
let hm5 ←
- hash_map_remove_back UInt64 hm4
- (USize.ofNatCore 1024 (by intlit))
+ hash_map_remove_back U64 hm4 (Usize.ofInt 1024 (by intlit))
let i1 ←
- hash_map_get_fwd UInt64 hm5 (USize.ofNatCore 0 (by intlit))
- if h: not (i1 = (UInt64.ofNatCore 42 (by intlit)))
+ hash_map_get_fwd U64 hm5 (Usize.ofInt 0 (by intlit))
+ if h: not (i1 = (U64.ofInt 42 (by intlit)))
then Result.fail Error.panic
else
do
let i2 ←
- hash_map_get_fwd UInt64 hm5
- (USize.ofNatCore 128 (by intlit))
- if h: not (i2 = (UInt64.ofNatCore 18 (by intlit)))
+ hash_map_get_fwd U64 hm5 (Usize.ofInt 128 (by intlit))
+ if h: not (i2 = (U64.ofInt 18 (by intlit)))
then Result.fail Error.panic
else
do
let i3 ←
- hash_map_get_fwd UInt64 hm5
- (USize.ofNatCore 1056 (by intlit))
- if h: not (i3 = (UInt64.ofNatCore 256 (by intlit)))
+ hash_map_get_fwd U64 hm5
+ (Usize.ofInt 1056 (by intlit))
+ if h: not (i3 = (U64.ofInt 256 (by intlit)))
then Result.fail Error.panic
else Result.ret ()
-/- Unit test for [hashmap::test1] -/
-#assert (test1_fwd == .ret ())
-
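Note on the pattern above: the regenerated functions now thread scalar arithmetic through `Result` instead of calling the old `USize.checked_*` helpers. A minimal sketch of the decrement used in `hash_map_remove_fwd`, assuming the `Scalar`/`Usize` definitions introduced later in this patch (the function name is hypothetical):

def decrement_entries (n : Usize) : Result Usize := do
  -- `-` on scalars returns a `Result`, so the bind propagates `integerOverflow`
  -- when the subtraction would fall below `Usize.min`
  let n0 ← n - (Usize.ofInt 1 (by intlit))
  Result.ret n0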
diff --git a/tests/lean/hashmap/Hashmap/Types.lean b/tests/lean/hashmap/Hashmap/Types.lean
index 9e9e5c03..6eabf7da 100644
--- a/tests/lean/hashmap/Hashmap/Types.lean
+++ b/tests/lean/hashmap/Hashmap/Types.lean
@@ -4,13 +4,13 @@ import Base.Primitives
/- [hashmap::List] -/
inductive list_t (T : Type) :=
-| Cons : USize -> T -> list_t T -> list_t T
+| Cons : Usize -> T -> list_t T -> list_t T
| Nil : list_t T
/- [hashmap::HashMap] -/
structure hash_map_t (T : Type) where
- hash_map_num_entries : USize
- hash_map_max_load_factor : (USize × USize)
- hash_map_max_load : USize
+ hash_map_num_entries : Usize
+ hash_map_max_load_factor : (Usize × Usize)
+ hash_map_max_load : Usize
hash_map_slots : Vec (list_t T)
diff --git a/tests/lean/hashmap_on_disk/Base/Primitives.lean b/tests/lean/hashmap_on_disk/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/hashmap_on_disk/Base/Primitives.lean
+++ b/tests/lean/hashmap_on_disk/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
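The point of the conservative bounds is that comparisons against them reduce at elaboration time, unlike `Usize.max` and `Isize.min`, which are stated in terms of `getNumBits`. A minimal illustration of a bound that `decide` can discharge (not part of the generated file):

example : (32 : Int) <= Scalar.cMax .Usize := by decide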
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
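`Scalar.cast` replaces the previous `scalar_cast` and only takes the target type explicitly; the source type is inferred, and the cast fails if the value does not fit. A minimal sketch, assuming the `U32`/`Usize` abbreviations declared just below (the helper name is hypothetical):

def u32_to_usize (x : U32) : Result Usize :=
  -- succeeds when `x.val` also satisfies the Usize bounds,
  -- fails with `integerOverflow` otherwise
  Scalar.cast .Usize x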
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful,
+-- for instance, for type class resolution).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
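These instances are what let the regenerated code write plain operators inside `do` blocks: `hash % i` elaborates to `Scalar.rem` and yields a `Result`, so a zero divisor surfaces as `divisionByZero` instead of silently wrapping. A minimal sketch mirroring the slot-lookup pattern used throughout `Funs.lean` (the function name is hypothetical):

def slot_index (hash len : Usize) : Result Usize := do
  -- fails with `divisionByZero` when `len` is zero
  let hash_mod ← hash % len
  Result.ret hash_mod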
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
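The `DecidableEq` instance above is what allows the regenerated functions to branch on key equality with `if h: ckey = key`. A minimal sketch of the same mechanism (illustrative only, the function name is hypothetical):

def keys_equal (a b : Usize) : Bool :=
  -- relies on the `DecidableEq (Scalar ty)` instance above
  if a = b then true else false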
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
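The new `intlit` tactic discharges the bound hypothesis of `Scalar.ofInt` by first applying `Scalar.bound_suffices` (weakening to the conservative bounds) and then calling `decide`; this is how every literal in the regenerated files, e.g. `Usize.ofInt 0 (by intlit)`, is built. A minimal sketch (illustrative, not part of the generated output):

def thirty_two : Usize := Usize.ofInt 32 (by intlit)
#eval thirty_two.val -- expected to print 32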
+
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
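With `Vec` now bounded by `Usize.max` instead of `USize.size`, pushes remain checked operations in `Result`. A minimal usage sketch, assuming the scalar constructors above (the definition name is hypothetical):

def one_element_vec : Result (Vec U64) := do
  let v := vec_new U64
  -- fails with `maximumSizeExceeded` if the length bound would be violated
  vec_push_back U64 v (U64.ofInt 42 (by intlit))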
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Clauses.lean b/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Clauses.lean
index 1b69eb2f..a4dc996a 100644
--- a/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Clauses.lean
+++ b/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Clauses.lean
@@ -1,39 +1,37 @@
--- [hashmap_main]: the decreases clauses
import Base.Primitives
import HashmapMain.Types
/- [hashmap_main::hashmap::HashMap::{0}::allocate_slots]: termination measure -/
@[simp]
def hashmap_hash_map_allocate_slots_loop_terminates (T : Type)
- (slots : Vec (hashmap_list_t T)) (n : USize) :=
+ (slots : Vec (hashmap_list_t T)) (n : Usize) :=
(slots, n)
+/- [hashmap_main::hashmap::HashMap::{0}::allocate_slots]: decreases_by tactic -/
syntax "hashmap_hash_map_allocate_slots_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_allocate_slots_loop_decreases $slots $n) =>
`(tactic| sorry)
-/- [hashmap_main::hashmap::HashMap::{0}::clear_slots]: termination measure -/
+/- [hashmap_main::hashmap::HashMap::{0}::clear]: termination measure -/
@[simp]
-def hashmap_hash_map_clear_slots_loop_terminates (T : Type)
- (slots : Vec (hashmap_list_t T)) (i : USize) :=
+def hashmap_hash_map_clear_loop_terminates (T : Type)
+ (slots : Vec (hashmap_list_t T)) (i : Usize) :=
(slots, i)
-syntax "hashmap_hash_map_clear_slots_loop_decreases" term+ : tactic
-
+/- [hashmap_main::hashmap::HashMap::{0}::clear]: decreases_by tactic -/
+syntax "hashmap_hash_map_clear_loop_decreases" term+ : tactic
macro_rules
-| `(tactic| hashmap_hash_map_clear_slots_loop_decreases $slots $i) =>
- `(tactic| sorry)
+| `(tactic| hashmap_hash_map_clear_loop_decreases $slots $i) =>`(tactic| sorry)
/- [hashmap_main::hashmap::HashMap::{0}::insert_in_list]: termination measure -/
@[simp]
-def hashmap_hash_map_insert_in_list_loop_terminates (T : Type) (key : USize)
+def hashmap_hash_map_insert_in_list_loop_terminates (T : Type) (key : Usize)
(value : T) (ls : hashmap_list_t T) :=
(key, value, ls)
+/- [hashmap_main::hashmap::HashMap::{0}::insert_in_list]: decreases_by tactic -/
syntax "hashmap_hash_map_insert_in_list_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_insert_in_list_loop_decreases $key $value $ls) =>
`(tactic| sorry)
@@ -44,8 +42,8 @@ def hashmap_hash_map_move_elements_from_list_loop_terminates (T : Type)
(ntable : hashmap_hash_map_t T) (ls : hashmap_list_t T) :=
(ntable, ls)
+/- [hashmap_main::hashmap::HashMap::{0}::move_elements_from_list]: decreases_by tactic -/
syntax "hashmap_hash_map_move_elements_from_list_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_move_elements_from_list_loop_decreases $ntable
$ls) =>`(tactic| sorry)
@@ -53,12 +51,12 @@ $ls) =>`(tactic| sorry)
/- [hashmap_main::hashmap::HashMap::{0}::move_elements]: termination measure -/
@[simp]
def hashmap_hash_map_move_elements_loop_terminates (T : Type)
- (ntable : hashmap_hash_map_t T) (slots : Vec (hashmap_list_t T)) (i : USize)
+ (ntable : hashmap_hash_map_t T) (slots : Vec (hashmap_list_t T)) (i : Usize)
:=
(ntable, slots, i)
+/- [hashmap_main::hashmap::HashMap::{0}::move_elements]: decreases_by tactic -/
syntax "hashmap_hash_map_move_elements_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_move_elements_loop_decreases $ntable $slots $i) =>
`(tactic| sorry)
@@ -66,46 +64,46 @@ macro_rules
/- [hashmap_main::hashmap::HashMap::{0}::contains_key_in_list]: termination measure -/
@[simp]
def hashmap_hash_map_contains_key_in_list_loop_terminates (T : Type)
- (key : USize) (ls : hashmap_list_t T) :=
+ (key : Usize) (ls : hashmap_list_t T) :=
(key, ls)
+/- [hashmap_main::hashmap::HashMap::{0}::contains_key_in_list]: decreases_by tactic -/
syntax "hashmap_hash_map_contains_key_in_list_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_contains_key_in_list_loop_decreases $key $ls) =>
`(tactic| sorry)
/- [hashmap_main::hashmap::HashMap::{0}::get_in_list]: termination measure -/
@[simp]
-def hashmap_hash_map_get_in_list_loop_terminates (T : Type) (key : USize)
+def hashmap_hash_map_get_in_list_loop_terminates (T : Type) (key : Usize)
(ls : hashmap_list_t T) :=
(key, ls)
+/- [hashmap_main::hashmap::HashMap::{0}::get_in_list]: decreases_by tactic -/
syntax "hashmap_hash_map_get_in_list_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_get_in_list_loop_decreases $key $ls) =>`(tactic| sorry)
/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list]: termination measure -/
@[simp]
def hashmap_hash_map_get_mut_in_list_loop_terminates (T : Type)
- (ls : hashmap_list_t T) (key : USize) :=
+ (ls : hashmap_list_t T) (key : Usize) :=
(ls, key)
+/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list]: decreases_by tactic -/
syntax "hashmap_hash_map_get_mut_in_list_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_get_mut_in_list_loop_decreases $ls $key) =>
`(tactic| sorry)
/- [hashmap_main::hashmap::HashMap::{0}::remove_from_list]: termination measure -/
@[simp]
-def hashmap_hash_map_remove_from_list_loop_terminates (T : Type) (key : USize)
+def hashmap_hash_map_remove_from_list_loop_terminates (T : Type) (key : Usize)
(ls : hashmap_list_t T) :=
(key, ls)
+/- [hashmap_main::hashmap::HashMap::{0}::remove_from_list]: decreases_by tactic -/
syntax "hashmap_hash_map_remove_from_list_loop_decreases" term+ : tactic
-
macro_rules
| `(tactic| hashmap_hash_map_remove_from_list_loop_decreases $key $ls) =>
`(tactic| sorry)
diff --git a/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Template.lean b/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Template.lean
index 753c92ac..33802597 100644
--- a/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Template.lean
+++ b/tests/lean/hashmap_on_disk/HashmapMain/Clauses/Template.lean
@@ -6,7 +6,7 @@ import HashmapMain.Types
/- [hashmap_main::hashmap::HashMap::{0}::allocate_slots]: termination measure -/
@[simp]
def hashmap_hash_map_allocate_slots_loop_terminates (T : Type)
- (slots : Vec (hashmap_list_t T)) (n : USize) :=
+ (slots : Vec (hashmap_list_t T)) (n : Usize) :=
(slots, n)
/- [hashmap_main::hashmap::HashMap::{0}::allocate_slots]: decreases_by tactic -/
@@ -18,7 +18,7 @@ macro_rules
/- [hashmap_main::hashmap::HashMap::{0}::clear]: termination measure -/
@[simp]
def hashmap_hash_map_clear_loop_terminates (T : Type)
- (slots : Vec (hashmap_list_t T)) (i : USize) :=
+ (slots : Vec (hashmap_list_t T)) (i : Usize) :=
(slots, i)
/- [hashmap_main::hashmap::HashMap::{0}::clear]: decreases_by tactic -/
@@ -28,7 +28,7 @@ macro_rules
/- [hashmap_main::hashmap::HashMap::{0}::insert_in_list]: termination measure -/
@[simp]
-def hashmap_hash_map_insert_in_list_loop_terminates (T : Type) (key : USize)
+def hashmap_hash_map_insert_in_list_loop_terminates (T : Type) (key : Usize)
(value : T) (ls : hashmap_list_t T) :=
(key, value, ls)
@@ -53,7 +53,7 @@ $ls) =>`(tactic| sorry)
/- [hashmap_main::hashmap::HashMap::{0}::move_elements]: termination measure -/
@[simp]
def hashmap_hash_map_move_elements_loop_terminates (T : Type)
- (ntable : hashmap_hash_map_t T) (slots : Vec (hashmap_list_t T)) (i : USize)
+ (ntable : hashmap_hash_map_t T) (slots : Vec (hashmap_list_t T)) (i : Usize)
:=
(ntable, slots, i)
@@ -66,7 +66,7 @@ macro_rules
/- [hashmap_main::hashmap::HashMap::{0}::contains_key_in_list]: termination measure -/
@[simp]
def hashmap_hash_map_contains_key_in_list_loop_terminates (T : Type)
- (key : USize) (ls : hashmap_list_t T) :=
+ (key : Usize) (ls : hashmap_list_t T) :=
(key, ls)
/- [hashmap_main::hashmap::HashMap::{0}::contains_key_in_list]: decreases_by tactic -/
@@ -77,7 +77,7 @@ macro_rules
/- [hashmap_main::hashmap::HashMap::{0}::get_in_list]: termination measure -/
@[simp]
-def hashmap_hash_map_get_in_list_loop_terminates (T : Type) (key : USize)
+def hashmap_hash_map_get_in_list_loop_terminates (T : Type) (key : Usize)
(ls : hashmap_list_t T) :=
(key, ls)
@@ -89,7 +89,7 @@ macro_rules
/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list]: termination measure -/
@[simp]
def hashmap_hash_map_get_mut_in_list_loop_terminates (T : Type)
- (ls : hashmap_list_t T) (key : USize) :=
+ (ls : hashmap_list_t T) (key : Usize) :=
(ls, key)
/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list]: decreases_by tactic -/
@@ -100,7 +100,7 @@ macro_rules
/- [hashmap_main::hashmap::HashMap::{0}::remove_from_list]: termination measure -/
@[simp]
-def hashmap_hash_map_remove_from_list_loop_terminates (T : Type) (key : USize)
+def hashmap_hash_map_remove_from_list_loop_terminates (T : Type) (key : Usize)
(ls : hashmap_list_t T) :=
(key, ls)
diff --git a/tests/lean/hashmap_on_disk/HashmapMain/ExternalFuns.lean b/tests/lean/hashmap_on_disk/HashmapMain/ExternalFuns.lean
new file mode 100644
index 00000000..a5103acc
--- /dev/null
+++ b/tests/lean/hashmap_on_disk/HashmapMain/ExternalFuns.lean
@@ -0,0 +1,5 @@
+import Base.Primitives
+import HashmapMain.Types
+import HashmapMain.Opaque
+
+def opaque_defs : OpaqueDefs := by sorry
diff --git a/tests/lean/hashmap_on_disk/HashmapMain/Funs.lean b/tests/lean/hashmap_on_disk/HashmapMain/Funs.lean
index 2be03d98..342c3833 100644
--- a/tests/lean/hashmap_on_disk/HashmapMain/Funs.lean
+++ b/tests/lean/hashmap_on_disk/HashmapMain/Funs.lean
@@ -2,25 +2,23 @@
-- [hashmap_main]: function definitions
import Base.Primitives
import HashmapMain.Types
-import HashmapMain.Opaque
+import HashmapMain.ExternalFuns
import HashmapMain.Clauses.Clauses
-section variable (opaque_defs: OpaqueDefs)
-
/- [hashmap_main::hashmap::hash_key] -/
-def hashmap_hash_key_fwd (k : USize) : Result USize :=
+def hashmap_hash_key_fwd (k : Usize) : Result Usize :=
Result.ret k
/- [hashmap_main::hashmap::HashMap::{0}::allocate_slots] -/
def hashmap_hash_map_allocate_slots_loop_fwd
- (T : Type) (slots : Vec (hashmap_list_t T)) (n : USize) :
+ (T : Type) (slots : Vec (hashmap_list_t T)) (n : Usize) :
(Result (Vec (hashmap_list_t T)))
:=
- if h: n > (USize.ofNatCore 0 (by intlit))
+ if h: n > (Usize.ofInt 0 (by intlit))
then
do
let slots0 ← vec_push_back (hashmap_list_t T) slots hashmap_list_t.Nil
- let n0 ← USize.checked_sub n (USize.ofNatCore 1 (by intlit))
+ let n0 ← n - (Usize.ofInt 1 (by intlit))
hashmap_hash_map_allocate_slots_loop_fwd T slots0 n0
else Result.ret slots
termination_by hashmap_hash_map_allocate_slots_loop_fwd slots n =>
@@ -29,25 +27,25 @@ decreasing_by hashmap_hash_map_allocate_slots_loop_decreases slots n
/- [hashmap_main::hashmap::HashMap::{0}::allocate_slots] -/
def hashmap_hash_map_allocate_slots_fwd
- (T : Type) (slots : Vec (hashmap_list_t T)) (n : USize) :
+ (T : Type) (slots : Vec (hashmap_list_t T)) (n : Usize) :
Result (Vec (hashmap_list_t T))
:=
hashmap_hash_map_allocate_slots_loop_fwd T slots n
/- [hashmap_main::hashmap::HashMap::{0}::new_with_capacity] -/
def hashmap_hash_map_new_with_capacity_fwd
- (T : Type) (capacity : USize) (max_load_dividend : USize)
- (max_load_divisor : USize) :
+ (T : Type) (capacity : Usize) (max_load_dividend : Usize)
+ (max_load_divisor : Usize) :
Result (hashmap_hash_map_t T)
:=
do
let v := vec_new (hashmap_list_t T)
let slots ← hashmap_hash_map_allocate_slots_fwd T v capacity
- let i ← USize.checked_mul capacity max_load_dividend
- let i0 ← USize.checked_div i max_load_divisor
+ let i ← capacity * max_load_dividend
+ let i0 ← i / max_load_divisor
Result.ret
{
- hashmap_hash_map_num_entries := (USize.ofNatCore 0 (by intlit)),
+ hashmap_hash_map_num_entries := (Usize.ofInt 0 (by intlit)),
hashmap_hash_map_max_load_factor :=
(max_load_dividend, max_load_divisor),
hashmap_hash_map_max_load := i0,
@@ -56,19 +54,19 @@ def hashmap_hash_map_new_with_capacity_fwd
/- [hashmap_main::hashmap::HashMap::{0}::new] -/
def hashmap_hash_map_new_fwd (T : Type) : Result (hashmap_hash_map_t T) :=
- hashmap_hash_map_new_with_capacity_fwd T (USize.ofNatCore 32 (by intlit))
- (USize.ofNatCore 4 (by intlit)) (USize.ofNatCore 5 (by intlit))
+ hashmap_hash_map_new_with_capacity_fwd T (Usize.ofInt 32 (by intlit))
+ (Usize.ofInt 4 (by intlit)) (Usize.ofInt 5 (by intlit))
/- [hashmap_main::hashmap::HashMap::{0}::clear] -/
def hashmap_hash_map_clear_loop_fwd_back
- (T : Type) (slots : Vec (hashmap_list_t T)) (i : USize) :
+ (T : Type) (slots : Vec (hashmap_list_t T)) (i : Usize) :
(Result (Vec (hashmap_list_t T)))
:=
let i0 := vec_len (hashmap_list_t T) slots
if h: i < i0
then
do
- let i1 ← USize.checked_add i (USize.ofNatCore 1 (by intlit))
+ let i1 ← i + (Usize.ofInt 1 (by intlit))
let slots0 ←
vec_index_mut_back (hashmap_list_t T) slots i hashmap_list_t.Nil
hashmap_hash_map_clear_loop_fwd_back T slots0 i1
@@ -83,23 +81,23 @@ def hashmap_hash_map_clear_fwd_back
do
let v ←
hashmap_hash_map_clear_loop_fwd_back T self.hashmap_hash_map_slots
- (USize.ofNatCore 0 (by intlit))
+ (Usize.ofInt 0 (by intlit))
Result.ret
{
self
with
- hashmap_hash_map_num_entries := (USize.ofNatCore 0 (by intlit)),
+ hashmap_hash_map_num_entries := (Usize.ofInt 0 (by intlit)),
hashmap_hash_map_slots := v
}
/- [hashmap_main::hashmap::HashMap::{0}::len] -/
def hashmap_hash_map_len_fwd
- (T : Type) (self : hashmap_hash_map_t T) : Result USize :=
+ (T : Type) (self : hashmap_hash_map_t T) : Result Usize :=
Result.ret self.hashmap_hash_map_num_entries
/- [hashmap_main::hashmap::HashMap::{0}::insert_in_list] -/
def hashmap_hash_map_insert_in_list_loop_fwd
- (T : Type) (key : USize) (value : T) (ls : hashmap_list_t T) :
+ (T : Type) (key : Usize) (value : T) (ls : hashmap_list_t T) :
(Result Bool)
:=
match h: ls with
@@ -114,12 +112,12 @@ decreasing_by hashmap_hash_map_insert_in_list_loop_decreases key value ls
/- [hashmap_main::hashmap::HashMap::{0}::insert_in_list] -/
def hashmap_hash_map_insert_in_list_fwd
- (T : Type) (key : USize) (value : T) (ls : hashmap_list_t T) : Result Bool :=
+ (T : Type) (key : Usize) (value : T) (ls : hashmap_list_t T) : Result Bool :=
hashmap_hash_map_insert_in_list_loop_fwd T key value ls
/- [hashmap_main::hashmap::HashMap::{0}::insert_in_list] -/
def hashmap_hash_map_insert_in_list_loop_back
- (T : Type) (key : USize) (value : T) (ls : hashmap_list_t T) :
+ (T : Type) (key : Usize) (value : T) (ls : hashmap_list_t T) :
(Result (hashmap_list_t T))
:=
match h: ls with
@@ -139,28 +137,28 @@ decreasing_by hashmap_hash_map_insert_in_list_loop_decreases key value ls
/- [hashmap_main::hashmap::HashMap::{0}::insert_in_list] -/
def hashmap_hash_map_insert_in_list_back
- (T : Type) (key : USize) (value : T) (ls : hashmap_list_t T) :
+ (T : Type) (key : Usize) (value : T) (ls : hashmap_list_t T) :
Result (hashmap_list_t T)
:=
hashmap_hash_map_insert_in_list_loop_back T key value ls
/- [hashmap_main::hashmap::HashMap::{0}::insert_no_resize] -/
def hashmap_hash_map_insert_no_resize_fwd_back
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) (value : T) :
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) (value : T) :
Result (hashmap_hash_map_t T)
:=
do
let hash ← hashmap_hash_key_fwd key
let i := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ←
vec_index_mut_fwd (hashmap_list_t T) self.hashmap_hash_map_slots hash_mod
let inserted ← hashmap_hash_map_insert_in_list_fwd T key value l
if h: inserted
then
do
- let i0 ← USize.checked_add self.hashmap_hash_map_num_entries
- (USize.ofNatCore 1 (by intlit))
+ let i0 ← self.hashmap_hash_map_num_entries +
+ (Usize.ofInt 1 (by intlit))
let l0 ← hashmap_hash_map_insert_in_list_back T key value l
let v ←
vec_index_mut_back (hashmap_list_t T) self.hashmap_hash_map_slots
@@ -180,9 +178,9 @@ def hashmap_hash_map_insert_no_resize_fwd_back
Result.ret { self with hashmap_hash_map_slots := v }
/- [core::num::u32::{9}::MAX] -/
-def core_num_u32_max_body : Result UInt32 :=
- Result.ret (UInt32.ofNatCore 4294967295 (by intlit))
-def core_num_u32_max_c : UInt32 := eval_global core_num_u32_max_body (by simp)
+def core_num_u32_max_body : Result U32 :=
+ Result.ret (U32.ofInt 4294967295 (by intlit))
+def core_num_u32_max_c : U32 := eval_global core_num_u32_max_body (by simp)
/- [hashmap_main::hashmap::HashMap::{0}::move_elements_from_list] -/
def hashmap_hash_map_move_elements_from_list_loop_fwd_back
@@ -210,7 +208,7 @@ def hashmap_hash_map_move_elements_from_list_fwd_back
/- [hashmap_main::hashmap::HashMap::{0}::move_elements] -/
def hashmap_hash_map_move_elements_loop_fwd_back
(T : Type) (ntable : hashmap_hash_map_t T) (slots : Vec (hashmap_list_t T))
- (i : USize) :
+ (i : Usize) :
(Result ((hashmap_hash_map_t T) × (Vec (hashmap_list_t T))))
:=
let i0 := vec_len (hashmap_list_t T) slots
@@ -221,7 +219,7 @@ def hashmap_hash_map_move_elements_loop_fwd_back
let ls := mem_replace_fwd (hashmap_list_t T) l hashmap_list_t.Nil
let ntable0 ←
hashmap_hash_map_move_elements_from_list_fwd_back T ntable ls
- let i1 ← USize.checked_add i (USize.ofNatCore 1 (by intlit))
+ let i1 ← i + (Usize.ofInt 1 (by intlit))
let l0 := mem_replace_back (hashmap_list_t T) l hashmap_list_t.Nil
let slots0 ← vec_index_mut_back (hashmap_list_t T) slots i l0
hashmap_hash_map_move_elements_loop_fwd_back T ntable0 slots0 i1
@@ -233,7 +231,7 @@ decreasing_by hashmap_hash_map_move_elements_loop_decreases ntable slots i
/- [hashmap_main::hashmap::HashMap::{0}::move_elements] -/
def hashmap_hash_map_move_elements_fwd_back
(T : Type) (ntable : hashmap_hash_map_t T) (slots : Vec (hashmap_list_t T))
- (i : USize) :
+ (i : Usize) :
Result ((hashmap_hash_map_t T) × (Vec (hashmap_list_t T)))
:=
hashmap_hash_map_move_elements_loop_fwd_back T ntable slots i
@@ -242,19 +240,19 @@ def hashmap_hash_map_move_elements_fwd_back
def hashmap_hash_map_try_resize_fwd_back
(T : Type) (self : hashmap_hash_map_t T) : Result (hashmap_hash_map_t T) :=
do
- let max_usize ← scalar_cast USize core_num_u32_max_c
+ let max_usize ← Scalar.cast .Usize core_num_u32_max_c
let capacity := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let n1 ← USize.checked_div max_usize (USize.ofNatCore 2 (by intlit))
+ let n1 ← max_usize / (Usize.ofInt 2 (by intlit))
let (i, i0) := self.hashmap_hash_map_max_load_factor
- let i1 ← USize.checked_div n1 i
+ let i1 ← n1 / i
if h: capacity <= i1
then
do
- let i2 ← USize.checked_mul capacity (USize.ofNatCore 2 (by intlit))
+ let i2 ← capacity * (Usize.ofInt 2 (by intlit))
let ntable ← hashmap_hash_map_new_with_capacity_fwd T i2 i i0
let (ntable0, _) ←
hashmap_hash_map_move_elements_fwd_back T ntable
- self.hashmap_hash_map_slots (USize.ofNatCore 0 (by intlit))
+ self.hashmap_hash_map_slots (Usize.ofInt 0 (by intlit))
Result.ret
{
ntable0
@@ -266,7 +264,7 @@ def hashmap_hash_map_try_resize_fwd_back
/- [hashmap_main::hashmap::HashMap::{0}::insert] -/
def hashmap_hash_map_insert_fwd_back
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) (value : T) :
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) (value : T) :
Result (hashmap_hash_map_t T)
:=
do
@@ -278,7 +276,7 @@ def hashmap_hash_map_insert_fwd_back
/- [hashmap_main::hashmap::HashMap::{0}::contains_key_in_list] -/
def hashmap_hash_map_contains_key_in_list_loop_fwd
- (T : Type) (key : USize) (ls : hashmap_list_t T) : (Result Bool) :=
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) : (Result Bool) :=
match h: ls with
| hashmap_list_t.Cons ckey t tl =>
if h: ckey = key
@@ -291,23 +289,23 @@ decreasing_by hashmap_hash_map_contains_key_in_list_loop_decreases key ls
/- [hashmap_main::hashmap::HashMap::{0}::contains_key_in_list] -/
def hashmap_hash_map_contains_key_in_list_fwd
- (T : Type) (key : USize) (ls : hashmap_list_t T) : Result Bool :=
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) : Result Bool :=
hashmap_hash_map_contains_key_in_list_loop_fwd T key ls
/- [hashmap_main::hashmap::HashMap::{0}::contains_key] -/
def hashmap_hash_map_contains_key_fwd
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) : Result Bool :=
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) : Result Bool :=
do
let hash ← hashmap_hash_key_fwd key
let i := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ←
vec_index_fwd (hashmap_list_t T) self.hashmap_hash_map_slots hash_mod
hashmap_hash_map_contains_key_in_list_fwd T key l
/- [hashmap_main::hashmap::HashMap::{0}::get_in_list] -/
def hashmap_hash_map_get_in_list_loop_fwd
- (T : Type) (key : USize) (ls : hashmap_list_t T) : (Result T) :=
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) : (Result T) :=
match h: ls with
| hashmap_list_t.Cons ckey cvalue tl =>
if h: ckey = key
@@ -320,23 +318,23 @@ decreasing_by hashmap_hash_map_get_in_list_loop_decreases key ls
/- [hashmap_main::hashmap::HashMap::{0}::get_in_list] -/
def hashmap_hash_map_get_in_list_fwd
- (T : Type) (key : USize) (ls : hashmap_list_t T) : Result T :=
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) : Result T :=
hashmap_hash_map_get_in_list_loop_fwd T key ls
/- [hashmap_main::hashmap::HashMap::{0}::get] -/
def hashmap_hash_map_get_fwd
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) : Result T :=
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) : Result T :=
do
let hash ← hashmap_hash_key_fwd key
let i := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ←
vec_index_fwd (hashmap_list_t T) self.hashmap_hash_map_slots hash_mod
hashmap_hash_map_get_in_list_fwd T key l
/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list] -/
def hashmap_hash_map_get_mut_in_list_loop_fwd
- (T : Type) (ls : hashmap_list_t T) (key : USize) : (Result T) :=
+ (T : Type) (ls : hashmap_list_t T) (key : Usize) : (Result T) :=
match h: ls with
| hashmap_list_t.Cons ckey cvalue tl =>
if h: ckey = key
@@ -349,12 +347,12 @@ decreasing_by hashmap_hash_map_get_mut_in_list_loop_decreases ls key
/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list] -/
def hashmap_hash_map_get_mut_in_list_fwd
- (T : Type) (ls : hashmap_list_t T) (key : USize) : Result T :=
+ (T : Type) (ls : hashmap_list_t T) (key : Usize) : Result T :=
hashmap_hash_map_get_mut_in_list_loop_fwd T ls key
/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list] -/
def hashmap_hash_map_get_mut_in_list_loop_back
- (T : Type) (ls : hashmap_list_t T) (key : USize) (ret0 : T) :
+ (T : Type) (ls : hashmap_list_t T) (key : Usize) (ret0 : T) :
(Result (hashmap_list_t T))
:=
match h: ls with
@@ -372,31 +370,31 @@ decreasing_by hashmap_hash_map_get_mut_in_list_loop_decreases ls key
/- [hashmap_main::hashmap::HashMap::{0}::get_mut_in_list] -/
def hashmap_hash_map_get_mut_in_list_back
- (T : Type) (ls : hashmap_list_t T) (key : USize) (ret0 : T) :
+ (T : Type) (ls : hashmap_list_t T) (key : Usize) (ret0 : T) :
Result (hashmap_list_t T)
:=
hashmap_hash_map_get_mut_in_list_loop_back T ls key ret0
/- [hashmap_main::hashmap::HashMap::{0}::get_mut] -/
def hashmap_hash_map_get_mut_fwd
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) : Result T :=
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) : Result T :=
do
let hash ← hashmap_hash_key_fwd key
let i := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ←
vec_index_mut_fwd (hashmap_list_t T) self.hashmap_hash_map_slots hash_mod
hashmap_hash_map_get_mut_in_list_fwd T l key
/- [hashmap_main::hashmap::HashMap::{0}::get_mut] -/
def hashmap_hash_map_get_mut_back
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) (ret0 : T) :
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) (ret0 : T) :
Result (hashmap_hash_map_t T)
:=
do
let hash ← hashmap_hash_key_fwd key
let i := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ←
vec_index_mut_fwd (hashmap_list_t T) self.hashmap_hash_map_slots hash_mod
let l0 ← hashmap_hash_map_get_mut_in_list_back T l key ret0
@@ -407,7 +405,7 @@ def hashmap_hash_map_get_mut_back
/- [hashmap_main::hashmap::HashMap::{0}::remove_from_list] -/
def hashmap_hash_map_remove_from_list_loop_fwd
- (T : Type) (key : USize) (ls : hashmap_list_t T) : (Result (Option T)) :=
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) : (Result (Option T)) :=
match h: ls with
| hashmap_list_t.Cons ckey t tl =>
if h: ckey = key
@@ -426,12 +424,12 @@ decreasing_by hashmap_hash_map_remove_from_list_loop_decreases key ls
/- [hashmap_main::hashmap::HashMap::{0}::remove_from_list] -/
def hashmap_hash_map_remove_from_list_fwd
- (T : Type) (key : USize) (ls : hashmap_list_t T) : Result (Option T) :=
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) : Result (Option T) :=
hashmap_hash_map_remove_from_list_loop_fwd T key ls
/- [hashmap_main::hashmap::HashMap::{0}::remove_from_list] -/
def hashmap_hash_map_remove_from_list_loop_back
- (T : Type) (key : USize) (ls : hashmap_list_t T) :
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) :
(Result (hashmap_list_t T))
:=
match h: ls with
@@ -455,18 +453,18 @@ decreasing_by hashmap_hash_map_remove_from_list_loop_decreases key ls
/- [hashmap_main::hashmap::HashMap::{0}::remove_from_list] -/
def hashmap_hash_map_remove_from_list_back
- (T : Type) (key : USize) (ls : hashmap_list_t T) :
+ (T : Type) (key : Usize) (ls : hashmap_list_t T) :
Result (hashmap_list_t T)
:=
hashmap_hash_map_remove_from_list_loop_back T key ls
/- [hashmap_main::hashmap::HashMap::{0}::remove] -/
def hashmap_hash_map_remove_fwd
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) : Result (Option T) :=
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) : Result (Option T) :=
do
let hash ← hashmap_hash_key_fwd key
let i := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ←
vec_index_mut_fwd (hashmap_list_t T) self.hashmap_hash_map_slots hash_mod
let x ← hashmap_hash_map_remove_from_list_fwd T key l
@@ -474,19 +472,19 @@ def hashmap_hash_map_remove_fwd
| Option.none => Result.ret Option.none
| Option.some x0 =>
do
- let _ ← USize.checked_sub self.hashmap_hash_map_num_entries
- (USize.ofNatCore 1 (by intlit))
+ let _ ← self.hashmap_hash_map_num_entries -
+ (Usize.ofInt 1 (by intlit))
Result.ret (Option.some x0)
/- [hashmap_main::hashmap::HashMap::{0}::remove] -/
def hashmap_hash_map_remove_back
- (T : Type) (self : hashmap_hash_map_t T) (key : USize) :
+ (T : Type) (self : hashmap_hash_map_t T) (key : Usize) :
Result (hashmap_hash_map_t T)
:=
do
let hash ← hashmap_hash_key_fwd key
let i := vec_len (hashmap_list_t T) self.hashmap_hash_map_slots
- let hash_mod ← USize.checked_rem hash i
+ let hash_mod ← hash % i
let l ←
vec_index_mut_fwd (hashmap_list_t T) self.hashmap_hash_map_slots hash_mod
let x ← hashmap_hash_map_remove_from_list_fwd T key l
@@ -500,8 +498,8 @@ def hashmap_hash_map_remove_back
Result.ret { self with hashmap_hash_map_slots := v }
| Option.some x0 =>
do
- let i0 ← USize.checked_sub self.hashmap_hash_map_num_entries
- (USize.ofNatCore 1 (by intlit))
+ let i0 ← self.hashmap_hash_map_num_entries -
+ (Usize.ofInt 1 (by intlit))
let l0 ← hashmap_hash_map_remove_from_list_back T key l
let v ←
vec_index_mut_back (hashmap_list_t T) self.hashmap_hash_map_slots
@@ -516,79 +514,73 @@ def hashmap_hash_map_remove_back
/- [hashmap_main::hashmap::test1] -/
def hashmap_test1_fwd : Result Unit :=
do
- let hm ← hashmap_hash_map_new_fwd UInt64
+ let hm ← hashmap_hash_map_new_fwd U64
let hm0 ←
- hashmap_hash_map_insert_fwd_back UInt64 hm
- (USize.ofNatCore 0 (by intlit)) (UInt64.ofNatCore 42 (by intlit))
+ hashmap_hash_map_insert_fwd_back U64 hm (Usize.ofInt 0 (by intlit))
+ (U64.ofInt 42 (by intlit))
let hm1 ←
- hashmap_hash_map_insert_fwd_back UInt64 hm0
- (USize.ofNatCore 128 (by intlit)) (UInt64.ofNatCore 18 (by intlit))
+ hashmap_hash_map_insert_fwd_back U64 hm0 (Usize.ofInt 128 (by intlit))
+ (U64.ofInt 18 (by intlit))
let hm2 ←
- hashmap_hash_map_insert_fwd_back UInt64 hm1
- (USize.ofNatCore 1024 (by intlit)) (UInt64.ofNatCore 138 (by intlit))
+ hashmap_hash_map_insert_fwd_back U64 hm1 (Usize.ofInt 1024 (by intlit))
+ (U64.ofInt 138 (by intlit))
let hm3 ←
- hashmap_hash_map_insert_fwd_back UInt64 hm2
- (USize.ofNatCore 1056 (by intlit)) (UInt64.ofNatCore 256 (by intlit))
- let i ←
- hashmap_hash_map_get_fwd UInt64 hm3 (USize.ofNatCore 128 (by intlit))
- if h: not (i = (UInt64.ofNatCore 18 (by intlit)))
+ hashmap_hash_map_insert_fwd_back U64 hm2 (Usize.ofInt 1056 (by intlit))
+ (U64.ofInt 256 (by intlit))
+ let i ← hashmap_hash_map_get_fwd U64 hm3 (Usize.ofInt 128 (by intlit))
+ if h: not (i = (U64.ofInt 18 (by intlit)))
then Result.fail Error.panic
else
do
let hm4 ←
- hashmap_hash_map_get_mut_back UInt64 hm3
- (USize.ofNatCore 1024 (by intlit))
- (UInt64.ofNatCore 56 (by intlit))
+ hashmap_hash_map_get_mut_back U64 hm3 (Usize.ofInt 1024 (by intlit))
+ (U64.ofInt 56 (by intlit))
let i0 ←
- hashmap_hash_map_get_fwd UInt64 hm4
- (USize.ofNatCore 1024 (by intlit))
- if h: not (i0 = (UInt64.ofNatCore 56 (by intlit)))
+ hashmap_hash_map_get_fwd U64 hm4 (Usize.ofInt 1024 (by intlit))
+ if h: not (i0 = (U64.ofInt 56 (by intlit)))
then Result.fail Error.panic
else
do
let x ←
- hashmap_hash_map_remove_fwd UInt64 hm4
- (USize.ofNatCore 1024 (by intlit))
+ hashmap_hash_map_remove_fwd U64 hm4
+ (Usize.ofInt 1024 (by intlit))
match h: x with
| Option.none => Result.fail Error.panic
| Option.some x0 =>
- if h: not (x0 = (UInt64.ofNatCore 56 (by intlit)))
+ if h: not (x0 = (U64.ofInt 56 (by intlit)))
then Result.fail Error.panic
else
do
let hm5 ←
- hashmap_hash_map_remove_back UInt64 hm4
- (USize.ofNatCore 1024 (by intlit))
+ hashmap_hash_map_remove_back U64 hm4
+ (Usize.ofInt 1024 (by intlit))
let i1 ←
- hashmap_hash_map_get_fwd UInt64 hm5
- (USize.ofNatCore 0 (by intlit))
- if h: not (i1 = (UInt64.ofNatCore 42 (by intlit)))
+ hashmap_hash_map_get_fwd U64 hm5
+ (Usize.ofInt 0 (by intlit))
+ if h: not (i1 = (U64.ofInt 42 (by intlit)))
then Result.fail Error.panic
else
do
let i2 ←
- hashmap_hash_map_get_fwd UInt64 hm5
- (USize.ofNatCore 128 (by intlit))
- if h: not (i2 = (UInt64.ofNatCore 18 (by intlit)))
+ hashmap_hash_map_get_fwd U64 hm5
+ (Usize.ofInt 128 (by intlit))
+ if h: not (i2 = (U64.ofInt 18 (by intlit)))
then Result.fail Error.panic
else
do
let i3 ←
- hashmap_hash_map_get_fwd UInt64 hm5
- (USize.ofNatCore 1056 (by intlit))
- if h: not (i3 = (UInt64.ofNatCore 256 (by intlit)))
+ hashmap_hash_map_get_fwd U64 hm5
+ (Usize.ofInt 1056 (by intlit))
+ if h: not (i3 = (U64.ofInt 256 (by intlit)))
then Result.fail Error.panic
else Result.ret ()
-/- Unit test for [hashmap_main::hashmap::test1] -/
-#assert (hashmap_test1_fwd == .ret ())
-
/- [hashmap_main::insert_on_disk] -/
def insert_on_disk_fwd
- (key : USize) (value : UInt64) (st : State) : Result (State × Unit) :=
+ (key : Usize) (value : U64) (st : State) : Result (State × Unit) :=
do
let (st0, hm) ← opaque_defs.hashmap_utils_deserialize_fwd st
- let hm0 ← hashmap_hash_map_insert_fwd_back UInt64 hm key value
+ let hm0 ← hashmap_hash_map_insert_fwd_back U64 hm key value
let (st1, _) ← opaque_defs.hashmap_utils_serialize_fwd hm0 st0
Result.ret (st1, ())
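/- A minimal sketch (hypothetical helper, not part of the translated output) of how
   the state-error monad threads `State` through successive calls to
   `insert_on_disk_fwd` above. -/
def insert_twice_fwd
  (k0 k1 : Usize) (value : U64) (st : State) : Result (State × Unit) :=
  do
    let (st0, _) ← insert_on_disk_fwd k0 value st
    insert_on_disk_fwd k1 value st0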
@@ -596,6 +588,3 @@ def insert_on_disk_fwd
def main_fwd : Result Unit :=
Result.ret ()
-/- Unit test for [hashmap_main::main] -/
-#assert (main_fwd == .ret ())
-
diff --git a/tests/lean/hashmap_on_disk/HashmapMain/Opaque.lean b/tests/lean/hashmap_on_disk/HashmapMain/Opaque.lean
index 3531e6e0..d98f431a 100644
--- a/tests/lean/hashmap_on_disk/HashmapMain/Opaque.lean
+++ b/tests/lean/hashmap_on_disk/HashmapMain/Opaque.lean
@@ -7,9 +7,9 @@ structure OpaqueDefs where
/- [hashmap_main::hashmap_utils::deserialize] -/
hashmap_utils_deserialize_fwd
- : State -> Result (State × (hashmap_hash_map_t UInt64))
+ : State -> Result (State × (hashmap_hash_map_t U64))
/- [hashmap_main::hashmap_utils::serialize] -/
hashmap_utils_serialize_fwd
- : hashmap_hash_map_t UInt64 -> State -> Result (State × Unit)
+ : hashmap_hash_map_t U64 -> State -> Result (State × Unit)
diff --git a/tests/lean/hashmap_on_disk/HashmapMain/Types.lean b/tests/lean/hashmap_on_disk/HashmapMain/Types.lean
index 989dd2a9..0509fbbd 100644
--- a/tests/lean/hashmap_on_disk/HashmapMain/Types.lean
+++ b/tests/lean/hashmap_on_disk/HashmapMain/Types.lean
@@ -4,14 +4,14 @@ import Base.Primitives
/- [hashmap_main::hashmap::List] -/
inductive hashmap_list_t (T : Type) :=
-| Cons : USize -> T -> hashmap_list_t T -> hashmap_list_t T
+| Cons : Usize -> T -> hashmap_list_t T -> hashmap_list_t T
| Nil : hashmap_list_t T
/- [hashmap_main::hashmap::HashMap] -/
structure hashmap_hash_map_t (T : Type) where
- hashmap_hash_map_num_entries : USize
- hashmap_hash_map_max_load_factor : (USize × USize)
- hashmap_hash_map_max_load : USize
+ hashmap_hash_map_num_entries : Usize
+ hashmap_hash_map_max_load_factor : (Usize × Usize)
+ hashmap_hash_map_max_load : Usize
hashmap_hash_map_slots : Vec (hashmap_list_t T)
/- The state type used in the state-error monad -/
diff --git a/tests/lean/misc-constants/Base/Primitives.lean b/tests/lean/misc-constants/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/misc-constants/Base/Primitives.lean
+++ b/tests/lean/misc-constants/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
+
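-- A quick sanity check, as a sketch: the conservative bounds are fixed literals,
-- so comparisons against them reduce at type-checking time, unlike the
-- platform-dependent `Isize`/`Usize` bounds, which are stuck on `getNumBits`.
#assert (Scalar.cMin .Isize == I32.min)
#assert (Scalar.cMax .Usize == U32.max)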
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
+
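-- Illustrative only (hypothetical helper, not a committed definition): with the
-- `Result`-valued instances above, checked operations chain in `do`-notation;
-- this is the shape the translated code relies on when it writes, e.g.,
-- `let hash_mod ← hash % i`.
def rem_then_add {ty : ScalarTy} (x y : Scalar ty) : Result (Scalar ty) :=
  do
    let r ← x % y
    r + r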
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
+
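-- For illustration (a sketch, not part of the committed file): with `intlit` in
-- place, in-bounds literals are written by discharging the bound proof with the
-- tactic, exactly as in the translated code.
#assert ((U32.ofInt 42 (by intlit)).val == 42)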
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
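-- A small usage sketch (hypothetical helper): pushing onto a fresh vector and
-- returning its length, chaining the `Result`-valued vector operations.
def push_and_len (x : Nat) : Result Usize :=
  do
    let v ← vec_push_back Nat (vec_new Nat) x
    Result.ret (vec_len Nat v)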
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/misc-constants/Constants.lean b/tests/lean/misc-constants/Constants.lean
index 937a15e5..8306ed85 100644
--- a/tests/lean/misc-constants/Constants.lean
+++ b/tests/lean/misc-constants/Constants.lean
@@ -2,143 +2,130 @@
-- [constants]
import Base.Primitives
-structure OpaqueDefs where
-
- /- [constants::X0] -/
- def x0_body : Result UInt32 := Result.ret (UInt32.ofNatCore 0 (by intlit))
- def x0_c : UInt32 := eval_global x0_body (by simp)
-
- /- [core::num::u32::{9}::MAX] -/
- def core_num_u32_max_body : Result UInt32 :=
- Result.ret (UInt32.ofNatCore 4294967295 (by intlit))
- def core_num_u32_max_c : UInt32 :=
- eval_global core_num_u32_max_body (by simp)
-
- /- [constants::X1] -/
- def x1_body : Result UInt32 := Result.ret core_num_u32_max_c
- def x1_c : UInt32 := eval_global x1_body (by simp)
-
- /- [constants::X2] -/
- def x2_body : Result UInt32 := Result.ret (UInt32.ofNatCore 3 (by intlit))
- def x2_c : UInt32 := eval_global x2_body (by simp)
-
- /- [constants::incr] -/
- def incr_fwd (n : UInt32) : Result UInt32 :=
- UInt32.checked_add n (UInt32.ofNatCore 1 (by intlit))
-
- /- [constants::X3] -/
- def x3_body : Result UInt32 := incr_fwd (UInt32.ofNatCore 32 (by intlit))
- def x3_c : UInt32 := eval_global x3_body (by simp)
-
- /- [constants::mk_pair0] -/
- def mk_pair0_fwd (x : UInt32) (y : UInt32) : Result (UInt32 × UInt32) :=
- Result.ret (x, y)
-
- /- [constants::Pair] -/
- structure pair_t (T1 T2 : Type) where
- pair_x : T1
- pair_y : T2
-
- /- [constants::mk_pair1] -/
- def mk_pair1_fwd (x : UInt32) (y : UInt32) : Result (pair_t UInt32 UInt32) :=
- Result.ret { pair_x := x, pair_y := y }
-
- /- [constants::P0] -/
- def p0_body : Result (UInt32 × UInt32) :=
- mk_pair0_fwd (UInt32.ofNatCore 0 (by intlit))
- (UInt32.ofNatCore 1 (by intlit))
- def p0_c : (UInt32 × UInt32) := eval_global p0_body (by simp)
-
- /- [constants::P1] -/
- def p1_body : Result (pair_t UInt32 UInt32) :=
- mk_pair1_fwd (UInt32.ofNatCore 0 (by intlit))
- (UInt32.ofNatCore 1 (by intlit))
- def p1_c : pair_t UInt32 UInt32 := eval_global p1_body (by simp)
-
- /- [constants::P2] -/
- def p2_body : Result (UInt32 × UInt32) :=
- Result.ret
- ((UInt32.ofNatCore 0 (by intlit)),
- (UInt32.ofNatCore 1 (by intlit)))
- def p2_c : (UInt32 × UInt32) := eval_global p2_body (by simp)
-
- /- [constants::P3] -/
- def p3_body : Result (pair_t UInt32 UInt32) :=
- Result.ret
- {
- pair_x := (UInt32.ofNatCore 0 (by intlit)),
- pair_y := (UInt32.ofNatCore 1 (by intlit))
- }
- def p3_c : pair_t UInt32 UInt32 := eval_global p3_body (by simp)
-
- /- [constants::Wrap] -/
- structure wrap_t (T : Type) where
- wrap_val : T
-
- /- [constants::Wrap::{0}::new] -/
- def wrap_new_fwd (T : Type) (val : T) : Result (wrap_t T) :=
- Result.ret { wrap_val := val }
-
- /- [constants::Y] -/
- def y_body : Result (wrap_t Int32) :=
- wrap_new_fwd Int32 (Int32.ofNatCore 2 (by intlit))
- def y_c : wrap_t Int32 := eval_global y_body (by simp)
-
- /- [constants::unwrap_y] -/
- def unwrap_y_fwd : Result Int32 :=
- Result.ret y_c.wrap_val
-
- /- [constants::YVAL] -/
- def yval_body : Result Int32 := unwrap_y_fwd
- def yval_c : Int32 := eval_global yval_body (by simp)
-
- /- [constants::get_z1::Z1] -/
- def get_z1_z1_body : Result Int32 :=
- Result.ret (Int32.ofNatCore 3 (by intlit))
- def get_z1_z1_c : Int32 := eval_global get_z1_z1_body (by simp)
-
- /- [constants::get_z1] -/
- def get_z1_fwd : Result Int32 :=
- Result.ret get_z1_z1_c
-
- /- [constants::add] -/
- def add_fwd (a : Int32) (b : Int32) : Result Int32 :=
- Int32.checked_add a b
-
- /- [constants::Q1] -/
- def q1_body : Result Int32 := Result.ret (Int32.ofNatCore 5 (by intlit))
- def q1_c : Int32 := eval_global q1_body (by simp)
-
- /- [constants::Q2] -/
- def q2_body : Result Int32 := Result.ret q1_c
- def q2_c : Int32 := eval_global q2_body (by simp)
-
- /- [constants::Q3] -/
- def q3_body : Result Int32 := add_fwd q2_c (Int32.ofNatCore 3 (by intlit))
- def q3_c : Int32 := eval_global q3_body (by simp)
-
- /- [constants::get_z2] -/
- def get_z2_fwd : Result Int32 :=
- do
- let i ← get_z1_fwd
- let i0 ← add_fwd i q3_c
- add_fwd q1_c i0
-
- /- [constants::S1] -/
- def s1_body : Result UInt32 := Result.ret (UInt32.ofNatCore 6 (by intlit))
- def s1_c : UInt32 := eval_global s1_body (by simp)
-
- /- [constants::S2] -/
- def s2_body : Result UInt32 := incr_fwd s1_c
- def s2_c : UInt32 := eval_global s2_body (by simp)
-
- /- [constants::S3] -/
- def s3_body : Result (pair_t UInt32 UInt32) := Result.ret p3_c
- def s3_c : pair_t UInt32 UInt32 := eval_global s3_body (by simp)
-
- /- [constants::S4] -/
- def s4_body : Result (pair_t UInt32 UInt32) :=
- mk_pair1_fwd (UInt32.ofNatCore 7 (by intlit))
- (UInt32.ofNatCore 8 (by intlit))
- def s4_c : pair_t UInt32 UInt32 := eval_global s4_body (by simp)
-
+/- [constants::X0] -/
+def x0_body : Result U32 := Result.ret (U32.ofInt 0 (by intlit))
+def x0_c : U32 := eval_global x0_body (by simp)
+
+/- [core::num::u32::{9}::MAX] -/
+def core_num_u32_max_body : Result U32 :=
+ Result.ret (U32.ofInt 4294967295 (by intlit))
+def core_num_u32_max_c : U32 := eval_global core_num_u32_max_body (by simp)
+
+/- [constants::X1] -/
+def x1_body : Result U32 := Result.ret core_num_u32_max_c
+def x1_c : U32 := eval_global x1_body (by simp)
+
+/- [constants::X2] -/
+def x2_body : Result U32 := Result.ret (U32.ofInt 3 (by intlit))
+def x2_c : U32 := eval_global x2_body (by simp)
+
+/- [constants::incr] -/
+def incr_fwd (n : U32) : Result U32 :=
+ n + (U32.ofInt 1 (by intlit))
+
+/- [constants::X3] -/
+def x3_body : Result U32 := incr_fwd (U32.ofInt 32 (by intlit))
+def x3_c : U32 := eval_global x3_body (by simp)
+
+/- [constants::mk_pair0] -/
+def mk_pair0_fwd (x : U32) (y : U32) : Result (U32 × U32) :=
+ Result.ret (x, y)
+
+/- [constants::Pair] -/
+structure pair_t (T1 T2 : Type) where
+ pair_x : T1
+ pair_y : T2
+
+/- [constants::mk_pair1] -/
+def mk_pair1_fwd (x : U32) (y : U32) : Result (pair_t U32 U32) :=
+ Result.ret { pair_x := x, pair_y := y }
+
+/- [constants::P0] -/
+def p0_body : Result (U32 × U32) :=
+ mk_pair0_fwd (U32.ofInt 0 (by intlit)) (U32.ofInt 1 (by intlit))
+def p0_c : (U32 × U32) := eval_global p0_body (by simp)
+
+/- [constants::P1] -/
+def p1_body : Result (pair_t U32 U32) :=
+ mk_pair1_fwd (U32.ofInt 0 (by intlit)) (U32.ofInt 1 (by intlit))
+def p1_c : pair_t U32 U32 := eval_global p1_body (by simp)
+
+/- [constants::P2] -/
+def p2_body : Result (U32 × U32) :=
+ Result.ret ((U32.ofInt 0 (by intlit)), (U32.ofInt 1 (by intlit)))
+def p2_c : (U32 × U32) := eval_global p2_body (by simp)
+
+/- [constants::P3] -/
+def p3_body : Result (pair_t U32 U32) :=
+ Result.ret
+ { pair_x := (U32.ofInt 0 (by intlit)), pair_y := (U32.ofInt 1 (by intlit)) }
+def p3_c : pair_t U32 U32 := eval_global p3_body (by simp)
+
+/- [constants::Wrap] -/
+structure wrap_t (T : Type) where
+ wrap_val : T
+
+/- [constants::Wrap::{0}::new] -/
+def wrap_new_fwd (T : Type) (val : T) : Result (wrap_t T) :=
+ Result.ret { wrap_val := val }
+
+/- [constants::Y] -/
+def y_body : Result (wrap_t I32) := wrap_new_fwd I32 (I32.ofInt 2 (by intlit))
+def y_c : wrap_t I32 := eval_global y_body (by simp)
+
+/- [constants::unwrap_y] -/
+def unwrap_y_fwd : Result I32 :=
+ Result.ret y_c.wrap_val
+
+/- [constants::YVAL] -/
+def yval_body : Result I32 := unwrap_y_fwd
+def yval_c : I32 := eval_global yval_body (by simp)
+
+/- [constants::get_z1::Z1] -/
+def get_z1_z1_body : Result I32 := Result.ret (I32.ofInt 3 (by intlit))
+def get_z1_z1_c : I32 := eval_global get_z1_z1_body (by simp)
+
+/- [constants::get_z1] -/
+def get_z1_fwd : Result I32 :=
+ Result.ret get_z1_z1_c
+
+/- [constants::add] -/
+def add_fwd (a : I32) (b : I32) : Result I32 :=
+ a + b
+
+/- [constants::Q1] -/
+def q1_body : Result I32 := Result.ret (I32.ofInt 5 (by intlit))
+def q1_c : I32 := eval_global q1_body (by simp)
+
+/- [constants::Q2] -/
+def q2_body : Result I32 := Result.ret q1_c
+def q2_c : I32 := eval_global q2_body (by simp)
+
+/- [constants::Q3] -/
+def q3_body : Result I32 := add_fwd q2_c (I32.ofInt 3 (by intlit))
+def q3_c : I32 := eval_global q3_body (by simp)
+
+/- [constants::get_z2] -/
+def get_z2_fwd : Result I32 :=
+ do
+ let i ← get_z1_fwd
+ let i0 ← add_fwd i q3_c
+ add_fwd q1_c i0
+
+/- [constants::S1] -/
+def s1_body : Result U32 := Result.ret (U32.ofInt 6 (by intlit))
+def s1_c : U32 := eval_global s1_body (by simp)
+
+/- [constants::S2] -/
+def s2_body : Result U32 := incr_fwd s1_c
+def s2_c : U32 := eval_global s2_body (by simp)
+
+/- [constants::S3] -/
+def s3_body : Result (pair_t U32 U32) := Result.ret p3_c
+def s3_c : pair_t U32 U32 := eval_global s3_body (by simp)
+
+/- [constants::S4] -/
+def s4_body : Result (pair_t U32 U32) :=
+ mk_pair1_fwd (U32.ofInt 7 (by intlit)) (U32.ofInt 8 (by intlit))
+def s4_c : pair_t U32 U32 := eval_global s4_body (by simp)
+
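/- The constants above all follow the same two-step shape: a `_body` computation
   in `Result`, then a pure `_c` value extracted with `eval_global`, whose
   `(by simp)` argument shows the body evaluates to `.ret`. A hypothetical
   constant would be translated the same way: -/
def five_body : Result U32 := Result.ret (U32.ofInt 5 (by intlit))
def five_c : U32 := eval_global five_body (by simp)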
diff --git a/tests/lean/misc-external/Base/Primitives.lean b/tests/lean/misc-external/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/misc-external/Base/Primitives.lean
+++ b/tests/lean/misc-external/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
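
-- Illustrative sketch (the constant below is not part of the generated file):
-- `intlit` discharges the bound side-condition of `Scalar.ofInt` for literals.
def u32_forty_two : U32 := U32.ofInt 42 (by intlit)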
+
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
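
-- Illustrative sketch (assuming the vector operations above; not part of the
-- generated file): push an element and read it back, in the `Result` monad.
def vec_roundtrip : Result U32 := do
  let v ← vec_push_back U32 (vec_new U32) (U32.ofInt 42 (by intlit))
  vec_index_fwd U32 v (Usize.ofInt 0 (by intlit))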
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/misc-external/External/ExternalFuns.lean b/tests/lean/misc-external/External/ExternalFuns.lean
new file mode 100644
index 00000000..6bd4f4a9
--- /dev/null
+++ b/tests/lean/misc-external/External/ExternalFuns.lean
@@ -0,0 +1,5 @@
+import Base.Primitives
+import External.Types
+import External.Opaque
+
+def opaque_defs : OpaqueDefs := sorry
diff --git a/tests/lean/misc-external/External/Funs.lean b/tests/lean/misc-external/External/Funs.lean
index 4e1f36a1..eeb83989 100644
--- a/tests/lean/misc-external/External/Funs.lean
+++ b/tests/lean/misc-external/External/Funs.lean
@@ -2,9 +2,7 @@
-- [external]: function definitions
import Base.Primitives
import External.Types
-import External.Opaque
-
-section variable (opaque_defs: OpaqueDefs)
+import External.ExternalFuns
/- [external::swap] -/
def swap_fwd
@@ -28,9 +26,7 @@ def swap_back
/- [external::test_new_non_zero_u32] -/
def test_new_non_zero_u32_fwd
- (x : UInt32) (st : State) :
- Result (State × core_num_nonzero_non_zero_u32_t)
- :=
+ (x : U32) (st : State) : Result (State × core_num_nonzero_non_zero_u32_t) :=
do
let (st0, opt) ← opaque_defs.core_num_nonzero_non_zero_u32_new_fwd x st
opaque_defs.core_option_option_unwrap_fwd core_num_nonzero_non_zero_u32_t
@@ -39,13 +35,10 @@ def test_new_non_zero_u32_fwd
/- [external::test_vec] -/
def test_vec_fwd : Result Unit :=
do
- let v := vec_new UInt32
- let _ ← vec_push_back UInt32 v (UInt32.ofNatCore 0 (by intlit))
+ let v := vec_new U32
+ let _ ← vec_push_back U32 v (U32.ofInt 0 (by intlit))
Result.ret ()
-/- Unit test for [external::test_vec] -/
-#assert (test_vec_fwd == .ret ())
-
/- [external::custom_swap] -/
def custom_swap_fwd
(T : Type) (x : T) (y : T) (st : State) : Result (State × T) :=
@@ -68,26 +61,24 @@ def custom_swap_back
/- [external::test_custom_swap] -/
def test_custom_swap_fwd
- (x : UInt32) (y : UInt32) (st : State) : Result (State × Unit) :=
+ (x : U32) (y : U32) (st : State) : Result (State × Unit) :=
do
- let (st0, _) ← custom_swap_fwd UInt32 x y st
+ let (st0, _) ← custom_swap_fwd U32 x y st
Result.ret (st0, ())
/- [external::test_custom_swap] -/
def test_custom_swap_back
- (x : UInt32) (y : UInt32) (st : State) (st0 : State) :
- Result (State × (UInt32 × UInt32))
+ (x : U32) (y : U32) (st : State) (st0 : State) :
+ Result (State × (U32 × U32))
:=
- custom_swap_back UInt32 x y st (UInt32.ofNatCore 1 (by intlit)) st0
+ custom_swap_back U32 x y st (U32.ofInt 1 (by intlit)) st0
/- [external::test_swap_non_zero] -/
-def test_swap_non_zero_fwd
- (x : UInt32) (st : State) : Result (State × UInt32) :=
+def test_swap_non_zero_fwd (x : U32) (st : State) : Result (State × U32) :=
do
- let (st0, _) ← swap_fwd UInt32 x (UInt32.ofNatCore 0 (by intlit)) st
- let (st1, (x0, _)) ←
- swap_back UInt32 x (UInt32.ofNatCore 0 (by intlit)) st st0
- if h: x0 = (UInt32.ofNatCore 0 (by intlit))
+ let (st0, _) ← swap_fwd U32 x (U32.ofInt 0 (by intlit)) st
+ let (st1, (x0, _)) ← swap_back U32 x (U32.ofInt 0 (by intlit)) st st0
+ if h: x0 = (U32.ofInt 0 (by intlit))
then Result.fail Error.panic
else Result.ret (st1, x0)
diff --git a/tests/lean/misc-external/External/Opaque.lean b/tests/lean/misc-external/External/Opaque.lean
index d3582de3..d641912b 100644
--- a/tests/lean/misc-external/External/Opaque.lean
+++ b/tests/lean/misc-external/External/Opaque.lean
@@ -19,8 +19,7 @@ structure OpaqueDefs where
/- [core::num::nonzero::NonZeroU32::{14}::new] -/
core_num_nonzero_non_zero_u32_new_fwd
:
- UInt32 -> State -> Result (State × (Option
- core_num_nonzero_non_zero_u32_t))
+ U32 -> State -> Result (State × (Option core_num_nonzero_non_zero_u32_t))
/- [core::option::Option::{0}::unwrap] -/
core_option_option_unwrap_fwd
diff --git a/tests/lean/misc-loops/Base/Primitives.lean b/tests/lean/misc-loops/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/misc-loops/Base/Primitives.lean
+++ b/tests/lean/misc-loops/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
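
-- Illustrative sketch: `size_num_bits` evaluates to 32 or 64 at run time, but
-- (as noted above) proofs should not rely on its concrete value.
#eval size_num_bits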
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
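
-- Illustrative sketch: unlike the Isize/Usize bounds, the conservative bounds
-- are closed terms, so they reduce at type-checking time.
#assert (Scalar.cMin .Isize == I32.min && Scalar.cMax .Usize == U32.max)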
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
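
-- Illustrative sketch: `Scalar.tryMk` is the common entry point of the checked
-- operations below; it fails when the result does not fit, e.g. 300 in a `U8`.
#eval ret? (Scalar.tryMk .U8 300)  -- expected: false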
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
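
-- Illustrative sketch (`u8_to_u32` is not part of the generated file):
-- `Scalar.cast` goes through `Scalar.tryMk`, so a widening cast like this one
-- always succeeds, while a narrowing cast may overflow.
def u8_to_u32 (x : U8) : Result U32 := Scalar.cast .U32 x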
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would make the file harder to
+-- read, and the file is not supposed to change much anyway)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude, so we introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
+
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/misc-loops/Loops/Clauses/Clauses.lean b/tests/lean/misc-loops/Loops/Clauses/Clauses.lean
index 5ddb65ca..89a7ce34 100644
--- a/tests/lean/misc-loops/Loops/Clauses/Clauses.lean
+++ b/tests/lean/misc-loops/Loops/Clauses/Clauses.lean
@@ -4,7 +4,7 @@ import Loops.Types
/- [loops::sum]: termination measure -/
@[simp]
-def sum_loop_terminates (max : UInt32) (i : UInt32) (s : UInt32) := (max, i, s)
+def sum_loop_terminates (max : U32) (i : U32) (s : U32) := (max, i, s)
syntax "sum_loop_decreases" term+ : tactic
@@ -13,8 +13,7 @@ macro_rules
/- [loops::sum_with_mut_borrows]: termination measure -/
@[simp]
-def sum_with_mut_borrows_loop_terminates (max : UInt32) (mi : UInt32)
- (ms : UInt32) :=
+def sum_with_mut_borrows_loop_terminates (max : U32) (mi : U32) (ms : U32) :=
(max, mi, ms)
syntax "sum_with_mut_borrows_loop_decreases" term+ : tactic
@@ -24,8 +23,7 @@ macro_rules
/- [loops::sum_with_shared_borrows]: termination measure -/
@[simp]
-def sum_with_shared_borrows_loop_terminates (max : UInt32) (i : UInt32)
- (s : UInt32) :=
+def sum_with_shared_borrows_loop_terminates (max : U32) (i : U32) (s : U32) :=
(max, i, s)
syntax "sum_with_shared_borrows_loop_decreases" term+ : tactic
@@ -34,7 +32,7 @@ macro_rules
| `(tactic| sum_with_shared_borrows_loop_decreases $max $i $s) =>`(tactic| sorry)
/- [loops::clear]: termination measure -/
-@[simp] def clear_loop_terminates (v : vec UInt32) (i : USize) := (v, i)
+@[simp] def clear_loop_terminates (v : Vec U32) (i : Usize) := (v, i)
syntax "clear_loop_decreases" term+ : tactic
@@ -43,7 +41,7 @@ macro_rules
/- [loops::list_mem]: termination measure -/
@[simp]
-def list_mem_loop_terminates (x : UInt32) (ls : list_t UInt32) := (x, ls)
+def list_mem_loop_terminates (x : U32) (ls : list_t U32) := (x, ls)
syntax "list_mem_loop_decreases" term+ : tactic
@@ -52,8 +50,7 @@ macro_rules
/- [loops::list_nth_mut_loop]: termination measure -/
@[simp]
-def list_nth_mut_loop_loop_terminates (T : Type) (ls : list_t T) (i : UInt32)
- :=
+def list_nth_mut_loop_loop_terminates (T : Type) (ls : list_t T) (i : U32) :=
(ls, i)
syntax "list_nth_mut_loop_loop_decreases" term+ : tactic
@@ -63,8 +60,7 @@ macro_rules
/- [loops::list_nth_shared_loop]: termination measure -/
@[simp]
-def list_nth_shared_loop_loop_terminates (T : Type) (ls : list_t T)
- (i : UInt32) :=
+def list_nth_shared_loop_loop_terminates (T : Type) (ls : list_t T) (i : U32) :=
(ls, i)
syntax "list_nth_shared_loop_loop_decreases" term+ : tactic
@@ -74,7 +70,7 @@ macro_rules
/- [loops::get_elem_mut]: termination measure -/
@[simp]
-def get_elem_mut_loop_terminates (x : USize) (ls : list_t USize) := (x, ls)
+def get_elem_mut_loop_terminates (x : Usize) (ls : list_t Usize) := (x, ls)
syntax "get_elem_mut_loop_decreases" term+ : tactic
@@ -83,7 +79,7 @@ macro_rules
/- [loops::get_elem_shared]: termination measure -/
@[simp]
-def get_elem_shared_loop_terminates (x : USize) (ls : list_t USize) := (x, ls)
+def get_elem_shared_loop_terminates (x : Usize) (ls : list_t Usize) := (x, ls)
syntax "get_elem_shared_loop_decreases" term+ : tactic
@@ -92,7 +88,7 @@ macro_rules
/- [loops::list_nth_mut_loop_with_id]: termination measure -/
@[simp]
-def list_nth_mut_loop_with_id_loop_terminates (T : Type) (i : UInt32)
+def list_nth_mut_loop_with_id_loop_terminates (T : Type) (i : U32)
(ls : list_t T) :=
(i, ls)
@@ -103,7 +99,7 @@ macro_rules
/- [loops::list_nth_shared_loop_with_id]: termination measure -/
@[simp]
-def list_nth_shared_loop_with_id_loop_terminates (T : Type) (i : UInt32)
+def list_nth_shared_loop_with_id_loop_terminates (T : Type) (i : U32)
(ls : list_t T) :=
(i, ls)
@@ -115,7 +111,7 @@ macro_rules
/- [loops::list_nth_mut_loop_pair]: termination measure -/
@[simp]
def list_nth_mut_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_mut_loop_pair_loop_decreases" term+ : tactic
@@ -126,7 +122,7 @@ macro_rules
/- [loops::list_nth_shared_loop_pair]: termination measure -/
@[simp]
def list_nth_shared_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_shared_loop_pair_loop_decreases" term+ : tactic
@@ -138,7 +134,7 @@ macro_rules
/- [loops::list_nth_mut_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_mut_loop_pair_merge_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_mut_loop_pair_merge_loop_decreases" term+ : tactic
@@ -150,7 +146,7 @@ macro_rules
/- [loops::list_nth_shared_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_shared_loop_pair_merge_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_shared_loop_pair_merge_loop_decreases" term+ : tactic
@@ -162,7 +158,7 @@ macro_rules
/- [loops::list_nth_mut_shared_loop_pair]: termination measure -/
@[simp]
def list_nth_mut_shared_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_mut_shared_loop_pair_loop_decreases" term+ : tactic
@@ -174,7 +170,7 @@ macro_rules
/- [loops::list_nth_mut_shared_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_mut_shared_loop_pair_merge_loop_terminates (T : Type)
- (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :=
+ (ls0 : list_t T) (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_mut_shared_loop_pair_merge_loop_decreases" term+ : tactic
@@ -186,7 +182,7 @@ macro_rules
/- [loops::list_nth_shared_mut_loop_pair]: termination measure -/
@[simp]
def list_nth_shared_mut_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_shared_mut_loop_pair_loop_decreases" term+ : tactic
@@ -198,7 +194,7 @@ macro_rules
/- [loops::list_nth_shared_mut_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_shared_mut_loop_pair_merge_loop_terminates (T : Type)
- (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :=
+ (ls0 : list_t T) (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
syntax "list_nth_shared_mut_loop_pair_merge_loop_decreases" term+ : tactic
diff --git a/tests/lean/misc-loops/Loops/Clauses/Template.lean b/tests/lean/misc-loops/Loops/Clauses/Template.lean
index d1e72d65..2e28a6c0 100644
--- a/tests/lean/misc-loops/Loops/Clauses/Template.lean
+++ b/tests/lean/misc-loops/Loops/Clauses/Template.lean
@@ -4,8 +4,7 @@ import Base.Primitives
import Loops.Types
/- [loops::sum]: termination measure -/
-@[simp]
-def sum_loop_terminates (max : UInt32) (i : UInt32) (s : UInt32) := (max, i, s)
+@[simp] def sum_loop_terminates (max : U32) (i : U32) (s : U32) := (max, i, s)
/- [loops::sum]: decreases_by tactic -/
syntax "sum_loop_decreases" term+ : tactic
@@ -14,8 +13,7 @@ macro_rules
/- [loops::sum_with_mut_borrows]: termination measure -/
@[simp]
-def sum_with_mut_borrows_loop_terminates (max : UInt32) (mi : UInt32)
- (ms : UInt32) :=
+def sum_with_mut_borrows_loop_terminates (max : U32) (mi : U32) (ms : U32) :=
(max, mi, ms)
/- [loops::sum_with_mut_borrows]: decreases_by tactic -/
@@ -25,8 +23,7 @@ macro_rules
/- [loops::sum_with_shared_borrows]: termination measure -/
@[simp]
-def sum_with_shared_borrows_loop_terminates (max : UInt32) (i : UInt32)
- (s : UInt32) :=
+def sum_with_shared_borrows_loop_terminates (max : U32) (i : U32) (s : U32) :=
(max, i, s)
/- [loops::sum_with_shared_borrows]: decreases_by tactic -/
@@ -35,7 +32,7 @@ macro_rules
| `(tactic| sum_with_shared_borrows_loop_decreases $max $i $s) =>`(tactic| sorry)
/- [loops::clear]: termination measure -/
-@[simp] def clear_loop_terminates (v : Vec UInt32) (i : USize) := (v, i)
+@[simp] def clear_loop_terminates (v : Vec U32) (i : Usize) := (v, i)
/- [loops::clear]: decreases_by tactic -/
syntax "clear_loop_decreases" term+ : tactic
@@ -43,8 +40,7 @@ macro_rules
| `(tactic| clear_loop_decreases $v $i) =>`(tactic| sorry)
/- [loops::list_mem]: termination measure -/
-@[simp]
-def list_mem_loop_terminates (x : UInt32) (ls : list_t UInt32) := (x, ls)
+@[simp] def list_mem_loop_terminates (x : U32) (ls : list_t U32) := (x, ls)
/- [loops::list_mem]: decreases_by tactic -/
syntax "list_mem_loop_decreases" term+ : tactic
@@ -53,8 +49,7 @@ macro_rules
/- [loops::list_nth_mut_loop]: termination measure -/
@[simp]
-def list_nth_mut_loop_loop_terminates (T : Type) (ls : list_t T) (i : UInt32)
- :=
+def list_nth_mut_loop_loop_terminates (T : Type) (ls : list_t T) (i : U32) :=
(ls, i)
/- [loops::list_nth_mut_loop]: decreases_by tactic -/
@@ -64,8 +59,8 @@ macro_rules
/- [loops::list_nth_shared_loop]: termination measure -/
@[simp]
-def list_nth_shared_loop_loop_terminates (T : Type) (ls : list_t T)
- (i : UInt32) :=
+def list_nth_shared_loop_loop_terminates (T : Type) (ls : list_t T) (i : U32)
+ :=
(ls, i)
/- [loops::list_nth_shared_loop]: decreases_by tactic -/
@@ -75,7 +70,7 @@ macro_rules
/- [loops::get_elem_mut]: termination measure -/
@[simp]
-def get_elem_mut_loop_terminates (x : USize) (ls : list_t USize) := (x, ls)
+def get_elem_mut_loop_terminates (x : Usize) (ls : list_t Usize) := (x, ls)
/- [loops::get_elem_mut]: decreases_by tactic -/
syntax "get_elem_mut_loop_decreases" term+ : tactic
@@ -84,7 +79,7 @@ macro_rules
/- [loops::get_elem_shared]: termination measure -/
@[simp]
-def get_elem_shared_loop_terminates (x : USize) (ls : list_t USize) := (x, ls)
+def get_elem_shared_loop_terminates (x : Usize) (ls : list_t Usize) := (x, ls)
/- [loops::get_elem_shared]: decreases_by tactic -/
syntax "get_elem_shared_loop_decreases" term+ : tactic
@@ -93,7 +88,7 @@ macro_rules
/- [loops::list_nth_mut_loop_with_id]: termination measure -/
@[simp]
-def list_nth_mut_loop_with_id_loop_terminates (T : Type) (i : UInt32)
+def list_nth_mut_loop_with_id_loop_terminates (T : Type) (i : U32)
(ls : list_t T) :=
(i, ls)
@@ -104,7 +99,7 @@ macro_rules
/- [loops::list_nth_shared_loop_with_id]: termination measure -/
@[simp]
-def list_nth_shared_loop_with_id_loop_terminates (T : Type) (i : UInt32)
+def list_nth_shared_loop_with_id_loop_terminates (T : Type) (i : U32)
(ls : list_t T) :=
(i, ls)
@@ -116,7 +111,7 @@ macro_rules
/- [loops::list_nth_mut_loop_pair]: termination measure -/
@[simp]
def list_nth_mut_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_mut_loop_pair]: decreases_by tactic -/
@@ -127,7 +122,7 @@ macro_rules
/- [loops::list_nth_shared_loop_pair]: termination measure -/
@[simp]
def list_nth_shared_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_shared_loop_pair]: decreases_by tactic -/
@@ -139,7 +134,7 @@ macro_rules
/- [loops::list_nth_mut_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_mut_loop_pair_merge_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_mut_loop_pair_merge]: decreases_by tactic -/
@@ -151,7 +146,7 @@ macro_rules
/- [loops::list_nth_shared_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_shared_loop_pair_merge_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_shared_loop_pair_merge]: decreases_by tactic -/
@@ -163,7 +158,7 @@ macro_rules
/- [loops::list_nth_mut_shared_loop_pair]: termination measure -/
@[simp]
def list_nth_mut_shared_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_mut_shared_loop_pair]: decreases_by tactic -/
@@ -175,7 +170,7 @@ macro_rules
/- [loops::list_nth_mut_shared_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_mut_shared_loop_pair_merge_loop_terminates (T : Type)
- (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :=
+ (ls0 : list_t T) (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_mut_shared_loop_pair_merge]: decreases_by tactic -/
@@ -187,7 +182,7 @@ macro_rules
/- [loops::list_nth_shared_mut_loop_pair]: termination measure -/
@[simp]
def list_nth_shared_mut_loop_pair_loop_terminates (T : Type) (ls0 : list_t T)
- (ls1 : list_t T) (i : UInt32) :=
+ (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_shared_mut_loop_pair]: decreases_by tactic -/
@@ -199,7 +194,7 @@ macro_rules
/- [loops::list_nth_shared_mut_loop_pair_merge]: termination measure -/
@[simp]
def list_nth_shared_mut_loop_pair_merge_loop_terminates (T : Type)
- (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :=
+ (ls0 : list_t T) (ls1 : list_t T) (i : U32) :=
(ls0, ls1, i)
/- [loops::list_nth_shared_mut_loop_pair_merge]: decreases_by tactic -/
diff --git a/tests/lean/misc-loops/Loops/Funs.lean b/tests/lean/misc-loops/Loops/Funs.lean
index f79a27a9..fd8d62d7 100644
--- a/tests/lean/misc-loops/Loops/Funs.lean
+++ b/tests/lean/misc-loops/Loops/Funs.lean
@@ -5,79 +5,78 @@ import Loops.Types
import Loops.Clauses.Clauses
/- [loops::sum] -/
-def sum_loop_fwd (max : UInt32) (i : UInt32) (s : UInt32) : (Result UInt32) :=
+def sum_loop_fwd (max : U32) (i : U32) (s : U32) : (Result U32) :=
if h: i < max
then
do
- let s0 ← UInt32.checked_add s i
- let i0 ← UInt32.checked_add i (UInt32.ofNatCore 1 (by intlit))
+ let s0 ← s + i
+ let i0 ← i + (U32.ofInt 1 (by intlit))
sum_loop_fwd max i0 s0
- else UInt32.checked_mul s (UInt32.ofNatCore 2 (by intlit))
+ else s * (U32.ofInt 2 (by intlit))
termination_by sum_loop_fwd max i s => sum_loop_terminates max i s
decreasing_by sum_loop_decreases max i s
/- [loops::sum] -/
-def sum_fwd (max : UInt32) : Result UInt32 :=
- sum_loop_fwd max (UInt32.ofNatCore 0 (by intlit))
- (UInt32.ofNatCore 0 (by intlit))
+def sum_fwd (max : U32) : Result U32 :=
+ sum_loop_fwd max (U32.ofInt 0 (by intlit)) (U32.ofInt 0 (by intlit))
/- [loops::sum_with_mut_borrows] -/
def sum_with_mut_borrows_loop_fwd
- (max : UInt32) (mi : UInt32) (ms : UInt32) : (Result UInt32) :=
+ (max : U32) (mi : U32) (ms : U32) : (Result U32) :=
if h: mi < max
then
do
- let ms0 ← UInt32.checked_add ms mi
- let mi0 ← UInt32.checked_add mi (UInt32.ofNatCore 1 (by intlit))
+ let ms0 ← ms + mi
+ let mi0 ← mi + (U32.ofInt 1 (by intlit))
sum_with_mut_borrows_loop_fwd max mi0 ms0
- else UInt32.checked_mul ms (UInt32.ofNatCore 2 (by intlit))
+ else ms * (U32.ofInt 2 (by intlit))
termination_by sum_with_mut_borrows_loop_fwd max mi ms =>
sum_with_mut_borrows_loop_terminates max mi ms
decreasing_by sum_with_mut_borrows_loop_decreases max mi ms
/- [loops::sum_with_mut_borrows] -/
-def sum_with_mut_borrows_fwd (max : UInt32) : Result UInt32 :=
- sum_with_mut_borrows_loop_fwd max (UInt32.ofNatCore 0 (by intlit))
- (UInt32.ofNatCore 0 (by intlit))
+def sum_with_mut_borrows_fwd (max : U32) : Result U32 :=
+ sum_with_mut_borrows_loop_fwd max (U32.ofInt 0 (by intlit))
+ (U32.ofInt 0 (by intlit))
/- [loops::sum_with_shared_borrows] -/
def sum_with_shared_borrows_loop_fwd
- (max : UInt32) (i : UInt32) (s : UInt32) : (Result UInt32) :=
+ (max : U32) (i : U32) (s : U32) : (Result U32) :=
if h: i < max
then
do
- let i0 ← UInt32.checked_add i (UInt32.ofNatCore 1 (by intlit))
- let s0 ← UInt32.checked_add s i0
+ let i0 ← i + (U32.ofInt 1 (by intlit))
+ let s0 ← s + i0
sum_with_shared_borrows_loop_fwd max i0 s0
- else UInt32.checked_mul s (UInt32.ofNatCore 2 (by intlit))
+ else s * (U32.ofInt 2 (by intlit))
termination_by sum_with_shared_borrows_loop_fwd max i s =>
sum_with_shared_borrows_loop_terminates max i s
decreasing_by sum_with_shared_borrows_loop_decreases max i s
/- [loops::sum_with_shared_borrows] -/
-def sum_with_shared_borrows_fwd (max : UInt32) : Result UInt32 :=
- sum_with_shared_borrows_loop_fwd max (UInt32.ofNatCore 0 (by intlit))
- (UInt32.ofNatCore 0 (by intlit))
+def sum_with_shared_borrows_fwd (max : U32) : Result U32 :=
+ sum_with_shared_borrows_loop_fwd max (U32.ofInt 0 (by intlit))
+ (U32.ofInt 0 (by intlit))
/- [loops::clear] -/
-def clear_loop_fwd_back (v : Vec UInt32) (i : USize) : (Result (Vec UInt32)) :=
- let i0 := vec_len UInt32 v
+def clear_loop_fwd_back (v : Vec U32) (i : Usize) : (Result (Vec U32)) :=
+ let i0 := vec_len U32 v
if h: i < i0
then
do
- let i1 ← USize.checked_add i (USize.ofNatCore 1 (by intlit))
- let v0 ← vec_index_mut_back UInt32 v i (UInt32.ofNatCore 0 (by intlit))
+ let i1 ← i + (Usize.ofInt 1 (by intlit))
+ let v0 ← vec_index_mut_back U32 v i (U32.ofInt 0 (by intlit))
clear_loop_fwd_back v0 i1
else Result.ret v
termination_by clear_loop_fwd_back v i => clear_loop_terminates v i
decreasing_by clear_loop_decreases v i
/- [loops::clear] -/
-def clear_fwd_back (v : Vec UInt32) : Result (Vec UInt32) :=
- clear_loop_fwd_back v (USize.ofNatCore 0 (by intlit))
+def clear_fwd_back (v : Vec U32) : Result (Vec U32) :=
+ clear_loop_fwd_back v (Usize.ofInt 0 (by intlit))
/- [loops::list_mem] -/
-def list_mem_loop_fwd (x : UInt32) (ls : list_t UInt32) : (Result Bool) :=
+def list_mem_loop_fwd (x : U32) (ls : list_t U32) : (Result Bool) :=
match h: ls with
| list_t.Cons y tl =>
if h: y = x
@@ -88,19 +87,19 @@ termination_by list_mem_loop_fwd x ls => list_mem_loop_terminates x ls
decreasing_by list_mem_loop_decreases x ls
/- [loops::list_mem] -/
-def list_mem_fwd (x : UInt32) (ls : list_t UInt32) : Result Bool :=
+def list_mem_fwd (x : U32) (ls : list_t U32) : Result Bool :=
list_mem_loop_fwd x ls
/- [loops::list_nth_mut_loop] -/
def list_nth_mut_loop_loop_fwd
- (T : Type) (ls : list_t T) (i : UInt32) : (Result T) :=
+ (T : Type) (ls : list_t T) (i : U32) : (Result T) :=
match h: ls with
| list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret x
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_mut_loop_loop_fwd T tl i0
| list_t.Nil => Result.fail Error.panic
termination_by list_nth_mut_loop_loop_fwd ls i =>
@@ -108,19 +107,19 @@ termination_by list_nth_mut_loop_loop_fwd ls i =>
decreasing_by list_nth_mut_loop_loop_decreases ls i
/- [loops::list_nth_mut_loop] -/
-def list_nth_mut_loop_fwd (T : Type) (ls : list_t T) (i : UInt32) : Result T :=
+def list_nth_mut_loop_fwd (T : Type) (ls : list_t T) (i : U32) : Result T :=
list_nth_mut_loop_loop_fwd T ls i
/- [loops::list_nth_mut_loop] -/
def list_nth_mut_loop_loop_back
- (T : Type) (ls : list_t T) (i : UInt32) (ret0 : T) : (Result (list_t T)) :=
+ (T : Type) (ls : list_t T) (i : U32) (ret0 : T) : (Result (list_t T)) :=
match h: ls with
| list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl0 ← list_nth_mut_loop_loop_back T tl i0 ret0
Result.ret (list_t.Cons x tl0)
| list_t.Nil => Result.fail Error.panic
@@ -130,19 +129,19 @@ decreasing_by list_nth_mut_loop_loop_decreases ls i
/- [loops::list_nth_mut_loop] -/
def list_nth_mut_loop_back
- (T : Type) (ls : list_t T) (i : UInt32) (ret0 : T) : Result (list_t T) :=
+ (T : Type) (ls : list_t T) (i : U32) (ret0 : T) : Result (list_t T) :=
list_nth_mut_loop_loop_back T ls i ret0
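
-- Editor's sketch (hypothetical helper, not part of the original patch): a
-- `_fwd`/`_back` pair models a mutable borrow; the forward function reads the
-- borrowed element and the backward function rebuilds the list with the value
-- given back, so the two are typically used together:
def list_nth_mut_loop_set (T : Type) (ls : list_t T) (i : U32) (x : T) : Result (list_t T) :=
  do
    let _ ← list_nth_mut_loop_fwd T ls i
    list_nth_mut_loop_back T ls i x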
/- [loops::list_nth_shared_loop] -/
def list_nth_shared_loop_loop_fwd
- (T : Type) (ls : list_t T) (i : UInt32) : (Result T) :=
+ (T : Type) (ls : list_t T) (i : U32) : (Result T) :=
match h: ls with
| list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret x
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_shared_loop_loop_fwd T tl i0
| list_t.Nil => Result.fail Error.panic
termination_by list_nth_shared_loop_loop_fwd ls i =>
@@ -150,12 +149,11 @@ termination_by list_nth_shared_loop_loop_fwd ls i =>
decreasing_by list_nth_shared_loop_loop_decreases ls i
/- [loops::list_nth_shared_loop] -/
-def list_nth_shared_loop_fwd
- (T : Type) (ls : list_t T) (i : UInt32) : Result T :=
+def list_nth_shared_loop_fwd (T : Type) (ls : list_t T) (i : U32) : Result T :=
list_nth_shared_loop_loop_fwd T ls i
/- [loops::get_elem_mut] -/
-def get_elem_mut_loop_fwd (x : USize) (ls : list_t USize) : (Result USize) :=
+def get_elem_mut_loop_fwd (x : Usize) (ls : list_t Usize) : (Result Usize) :=
match h: ls with
| list_t.Cons y tl =>
if h: y = x
@@ -166,15 +164,15 @@ termination_by get_elem_mut_loop_fwd x ls => get_elem_mut_loop_terminates x ls
decreasing_by get_elem_mut_loop_decreases x ls
/- [loops::get_elem_mut] -/
-def get_elem_mut_fwd (slots : Vec (list_t USize)) (x : USize) : Result USize :=
+def get_elem_mut_fwd (slots : Vec (list_t Usize)) (x : Usize) : Result Usize :=
do
let l ←
- vec_index_mut_fwd (list_t USize) slots (USize.ofNatCore 0 (by intlit))
+ vec_index_mut_fwd (list_t Usize) slots (Usize.ofInt 0 (by intlit))
get_elem_mut_loop_fwd x l
/- [loops::get_elem_mut] -/
def get_elem_mut_loop_back
- (x : USize) (ls : list_t USize) (ret0 : USize) : (Result (list_t USize)) :=
+ (x : Usize) (ls : list_t Usize) (ret0 : Usize) : (Result (list_t Usize)) :=
match h: ls with
| list_t.Cons y tl =>
if h: y = x
@@ -190,18 +188,18 @@ decreasing_by get_elem_mut_loop_decreases x ls
/- [loops::get_elem_mut] -/
def get_elem_mut_back
- (slots : Vec (list_t USize)) (x : USize) (ret0 : USize) :
- Result (Vec (list_t USize))
+ (slots : Vec (list_t Usize)) (x : Usize) (ret0 : Usize) :
+ Result (Vec (list_t Usize))
:=
do
let l ←
- vec_index_mut_fwd (list_t USize) slots (USize.ofNatCore 0 (by intlit))
+ vec_index_mut_fwd (list_t Usize) slots (Usize.ofInt 0 (by intlit))
let l0 ← get_elem_mut_loop_back x l ret0
- vec_index_mut_back (list_t USize) slots (USize.ofNatCore 0 (by intlit)) l0
+ vec_index_mut_back (list_t Usize) slots (Usize.ofInt 0 (by intlit)) l0
/- [loops::get_elem_shared] -/
def get_elem_shared_loop_fwd
- (x : USize) (ls : list_t USize) : (Result USize) :=
+ (x : Usize) (ls : list_t Usize) : (Result Usize) :=
match h: ls with
| list_t.Cons y tl =>
if h: y = x
@@ -214,10 +212,9 @@ decreasing_by get_elem_shared_loop_decreases x ls
/- [loops::get_elem_shared] -/
def get_elem_shared_fwd
- (slots : Vec (list_t USize)) (x : USize) : Result USize :=
+ (slots : Vec (list_t Usize)) (x : Usize) : Result Usize :=
do
- let l ←
- vec_index_fwd (list_t USize) slots (USize.ofNatCore 0 (by intlit))
+ let l ← vec_index_fwd (list_t Usize) slots (Usize.ofInt 0 (by intlit))
get_elem_shared_loop_fwd x l
/- [loops::id_mut] -/
@@ -235,14 +232,14 @@ def id_shared_fwd (T : Type) (ls : list_t T) : Result (list_t T) :=
/- [loops::list_nth_mut_loop_with_id] -/
def list_nth_mut_loop_with_id_loop_fwd
- (T : Type) (i : UInt32) (ls : list_t T) : (Result T) :=
+ (T : Type) (i : U32) (ls : list_t T) : (Result T) :=
match h: ls with
| list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret x
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_mut_loop_with_id_loop_fwd T i0 tl
| list_t.Nil => Result.fail Error.panic
termination_by list_nth_mut_loop_with_id_loop_fwd i ls =>
@@ -251,21 +248,21 @@ decreasing_by list_nth_mut_loop_with_id_loop_decreases i ls
/- [loops::list_nth_mut_loop_with_id] -/
def list_nth_mut_loop_with_id_fwd
- (T : Type) (ls : list_t T) (i : UInt32) : Result T :=
+ (T : Type) (ls : list_t T) (i : U32) : Result T :=
do
let ls0 ← id_mut_fwd T ls
list_nth_mut_loop_with_id_loop_fwd T i ls0
/- [loops::list_nth_mut_loop_with_id] -/
def list_nth_mut_loop_with_id_loop_back
- (T : Type) (i : UInt32) (ls : list_t T) (ret0 : T) : (Result (list_t T)) :=
+ (T : Type) (i : U32) (ls : list_t T) (ret0 : T) : (Result (list_t T)) :=
match h: ls with
| list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl0 ← list_nth_mut_loop_with_id_loop_back T i0 tl ret0
Result.ret (list_t.Cons x tl0)
| list_t.Nil => Result.fail Error.panic
@@ -275,7 +272,7 @@ decreasing_by list_nth_mut_loop_with_id_loop_decreases i ls
/- [loops::list_nth_mut_loop_with_id] -/
def list_nth_mut_loop_with_id_back
- (T : Type) (ls : list_t T) (i : UInt32) (ret0 : T) : Result (list_t T) :=
+ (T : Type) (ls : list_t T) (i : U32) (ret0 : T) : Result (list_t T) :=
do
let ls0 ← id_mut_fwd T ls
let l ← list_nth_mut_loop_with_id_loop_back T i ls0 ret0
@@ -283,14 +280,14 @@ def list_nth_mut_loop_with_id_back
/- [loops::list_nth_shared_loop_with_id] -/
def list_nth_shared_loop_with_id_loop_fwd
- (T : Type) (i : UInt32) (ls : list_t T) : (Result T) :=
+ (T : Type) (i : U32) (ls : list_t T) : (Result T) :=
match h: ls with
| list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret x
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_shared_loop_with_id_loop_fwd T i0 tl
| list_t.Nil => Result.fail Error.panic
termination_by list_nth_shared_loop_with_id_loop_fwd i ls =>
@@ -299,25 +296,23 @@ decreasing_by list_nth_shared_loop_with_id_loop_decreases i ls
/- [loops::list_nth_shared_loop_with_id] -/
def list_nth_shared_loop_with_id_fwd
- (T : Type) (ls : list_t T) (i : UInt32) : Result T :=
+ (T : Type) (ls : list_t T) (i : U32) : Result T :=
do
let ls0 ← id_shared_fwd T ls
list_nth_shared_loop_with_id_loop_fwd T i ls0
/- [loops::list_nth_mut_loop_pair] -/
def list_nth_mut_loop_pair_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_mut_loop_pair_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -327,25 +322,23 @@ decreasing_by list_nth_mut_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_loop_pair] -/
def list_nth_mut_loop_pair_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_mut_loop_pair_loop_fwd T ls0 ls1 i
/- [loops::list_nth_mut_loop_pair] -/
def list_nth_mut_loop_pair_loop_back'a
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
(Result (list_t T))
:=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl0)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl00 ← list_nth_mut_loop_pair_loop_back'a T tl0 tl1 i0 ret0
Result.ret (list_t.Cons x0 tl00)
| list_t.Nil => Result.fail Error.panic
@@ -356,25 +349,25 @@ decreasing_by list_nth_mut_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_loop_pair] -/
def list_nth_mut_loop_pair_back'a
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
Result (list_t T)
:=
list_nth_mut_loop_pair_loop_back'a T ls0 ls1 i ret0
/- [loops::list_nth_mut_loop_pair] -/
def list_nth_mut_loop_pair_loop_back'b
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
(Result (list_t T))
:=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl10 ← list_nth_mut_loop_pair_loop_back'b T tl0 tl1 i0 ret0
Result.ret (list_t.Cons x1 tl10)
| list_t.Nil => Result.fail Error.panic
@@ -385,25 +378,23 @@ decreasing_by list_nth_mut_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_loop_pair] -/
def list_nth_mut_loop_pair_back'b
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
Result (list_t T)
:=
list_nth_mut_loop_pair_loop_back'b T ls0 ls1 i ret0
/- [loops::list_nth_shared_loop_pair] -/
def list_nth_shared_loop_pair_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_shared_loop_pair_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -413,25 +404,21 @@ decreasing_by list_nth_shared_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_shared_loop_pair] -/
def list_nth_shared_loop_pair_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_shared_loop_pair_loop_fwd T ls0 ls1 i
/- [loops::list_nth_mut_loop_pair_merge] -/
def list_nth_mut_loop_pair_merge_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_mut_loop_pair_merge_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -441,27 +428,25 @@ decreasing_by list_nth_mut_loop_pair_merge_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_loop_pair_merge] -/
def list_nth_mut_loop_pair_merge_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_mut_loop_pair_merge_loop_fwd T ls0 ls1 i
/- [loops::list_nth_mut_loop_pair_merge] -/
def list_nth_mut_loop_pair_merge_loop_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : (T × T)) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : (T × T)) :
(Result ((list_t T) × (list_t T)))
:=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then
let (t, t0) := ret0
Result.ret (list_t.Cons t tl0, list_t.Cons t0 tl1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let (tl00, tl10) ←
list_nth_mut_loop_pair_merge_loop_back T tl0 tl1 i0 ret0
Result.ret (list_t.Cons x0 tl00, list_t.Cons x1 tl10)
@@ -473,25 +458,23 @@ decreasing_by list_nth_mut_loop_pair_merge_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_loop_pair_merge] -/
def list_nth_mut_loop_pair_merge_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : (T × T)) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : (T × T)) :
Result ((list_t T) × (list_t T))
:=
list_nth_mut_loop_pair_merge_loop_back T ls0 ls1 i ret0
/- [loops::list_nth_shared_loop_pair_merge] -/
def list_nth_shared_loop_pair_merge_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_shared_loop_pair_merge_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -501,25 +484,21 @@ decreasing_by list_nth_shared_loop_pair_merge_loop_decreases ls0 ls1 i
/- [loops::list_nth_shared_loop_pair_merge] -/
def list_nth_shared_loop_pair_merge_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_shared_loop_pair_merge_loop_fwd T ls0 ls1 i
/- [loops::list_nth_mut_shared_loop_pair] -/
def list_nth_mut_shared_loop_pair_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_mut_shared_loop_pair_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -529,25 +508,23 @@ decreasing_by list_nth_mut_shared_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_shared_loop_pair] -/
def list_nth_mut_shared_loop_pair_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_mut_shared_loop_pair_loop_fwd T ls0 ls1 i
/- [loops::list_nth_mut_shared_loop_pair] -/
def list_nth_mut_shared_loop_pair_loop_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
(Result (list_t T))
:=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl0)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl00 ←
list_nth_mut_shared_loop_pair_loop_back T tl0 tl1 i0 ret0
Result.ret (list_t.Cons x0 tl00)
@@ -559,25 +536,23 @@ decreasing_by list_nth_mut_shared_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_shared_loop_pair] -/
def list_nth_mut_shared_loop_pair_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
Result (list_t T)
:=
list_nth_mut_shared_loop_pair_loop_back T ls0 ls1 i ret0
/- [loops::list_nth_mut_shared_loop_pair_merge] -/
def list_nth_mut_shared_loop_pair_merge_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_mut_shared_loop_pair_merge_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -587,25 +562,23 @@ decreasing_by list_nth_mut_shared_loop_pair_merge_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_shared_loop_pair_merge] -/
def list_nth_mut_shared_loop_pair_merge_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_mut_shared_loop_pair_merge_loop_fwd T ls0 ls1 i
/- [loops::list_nth_mut_shared_loop_pair_merge] -/
def list_nth_mut_shared_loop_pair_merge_loop_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
(Result (list_t T))
:=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl0)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl00 ←
list_nth_mut_shared_loop_pair_merge_loop_back T tl0 tl1 i0 ret0
Result.ret (list_t.Cons x0 tl00)
@@ -617,25 +590,23 @@ decreasing_by list_nth_mut_shared_loop_pair_merge_loop_decreases ls0 ls1 i
/- [loops::list_nth_mut_shared_loop_pair_merge] -/
def list_nth_mut_shared_loop_pair_merge_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
Result (list_t T)
:=
list_nth_mut_shared_loop_pair_merge_loop_back T ls0 ls1 i ret0
/- [loops::list_nth_shared_mut_loop_pair] -/
def list_nth_shared_mut_loop_pair_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_shared_mut_loop_pair_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -645,25 +616,23 @@ decreasing_by list_nth_shared_mut_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_shared_mut_loop_pair] -/
def list_nth_shared_mut_loop_pair_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_shared_mut_loop_pair_loop_fwd T ls0 ls1 i
/- [loops::list_nth_shared_mut_loop_pair] -/
def list_nth_shared_mut_loop_pair_loop_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
(Result (list_t T))
:=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl10 ←
list_nth_shared_mut_loop_pair_loop_back T tl0 tl1 i0 ret0
Result.ret (list_t.Cons x1 tl10)
@@ -675,25 +644,23 @@ decreasing_by list_nth_shared_mut_loop_pair_loop_decreases ls0 ls1 i
/- [loops::list_nth_shared_mut_loop_pair] -/
def list_nth_shared_mut_loop_pair_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
Result (list_t T)
:=
list_nth_shared_mut_loop_pair_loop_back T ls0 ls1 i ret0
/- [loops::list_nth_shared_mut_loop_pair_merge] -/
def list_nth_shared_mut_loop_pair_merge_loop_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- (Result (T × T))
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : (Result (T × T)) :=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (x0, x1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
list_nth_shared_mut_loop_pair_merge_loop_fwd T tl0 tl1 i0
| list_t.Nil => Result.fail Error.panic
| list_t.Nil => Result.fail Error.panic
@@ -703,25 +670,23 @@ decreasing_by list_nth_shared_mut_loop_pair_merge_loop_decreases ls0 ls1 i
/- [loops::list_nth_shared_mut_loop_pair_merge] -/
def list_nth_shared_mut_loop_pair_merge_fwd
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) :
- Result (T × T)
- :=
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) : Result (T × T) :=
list_nth_shared_mut_loop_pair_merge_loop_fwd T ls0 ls1 i
/- [loops::list_nth_shared_mut_loop_pair_merge] -/
def list_nth_shared_mut_loop_pair_merge_loop_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
(Result (list_t T))
:=
match h: ls0 with
| list_t.Cons x0 tl0 =>
match h: ls1 with
| list_t.Cons x1 tl1 =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
+ if h: i = (U32.ofInt 0 (by intlit))
then Result.ret (list_t.Cons ret0 tl1)
else
do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
+ let i0 ← i - (U32.ofInt 1 (by intlit))
let tl10 ←
list_nth_shared_mut_loop_pair_merge_loop_back T tl0 tl1 i0 ret0
Result.ret (list_t.Cons x1 tl10)
@@ -733,7 +698,7 @@ decreasing_by list_nth_shared_mut_loop_pair_merge_loop_decreases ls0 ls1 i
/- [loops::list_nth_shared_mut_loop_pair_merge] -/
def list_nth_shared_mut_loop_pair_merge_back
- (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : UInt32) (ret0 : T) :
+ (T : Type) (ls0 : list_t T) (ls1 : list_t T) (i : U32) (ret0 : T) :
Result (list_t T)
:=
list_nth_shared_mut_loop_pair_merge_loop_back T ls0 ls1 i ret0
diff --git a/tests/lean/misc-no_nested_borrows/Base/Primitives.lean b/tests/lean/misc-no_nested_borrows/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/misc-no_nested_borrows/Base/Primitives.lean
+++ b/tests/lean/misc-no_nested_borrows/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integers types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
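+
+-- Editor's sketch (illustrative, not part of the original patch): the
+-- conservative bounds are concrete numerals, so comparisons against them reduce
+-- by `decide` even though the Isize/Usize bounds themselves cannot:
+example : Scalar.cMin .Isize ≤ 42 ∧ 42 ≤ Scalar.cMax .Isize := by decide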
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
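+-- Editor's sketch (illustrative, not part of the original patch): `Scalar.tryMk`
+-- is the runtime bound check shared by all the arithmetic operations below:
+#eval ret? (Scalar.tryMk .U8 256) -- expected: false (out of bounds)
+#eval ret? (Scalar.tryMk .U8 255) -- expected: true
+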
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
+
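+-- Editor's sketch (illustrative, not part of the original patch): a cast goes
+-- through `tryMk`, so a narrowing cast whose value does not fit the target fails:
+#eval ret? (do let x ← Scalar.tryMk .U32 300; Scalar.cast .I8 x) -- expected: false
+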
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
+
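+-- Editor's sketch (hypothetical helper, not part of the original patch): with
+-- the instances above, scalar arithmetic returns a `Result`, so computations are
+-- chained monadically, just like in the translated functions:
+def Scalar.add3 {ty} (x y z : Scalar ty) : Result (Scalar ty) := do
+  let s ← x + y
+  s + z
+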
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
+
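+-- Editor's sketch (illustrative, not part of the original patch): with `intlit`,
+-- a literal discharges its bound obligation automatically, as used throughout
+-- the translated files:
+#eval (U32.ofInt 42 (by intlit)).val -- expected: 42
+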
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
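
-- Editor's sketch (illustrative, not part of the original patch): a freshly
-- created vector has length zero:
#eval (vec_len Nat (vec_new Nat)).val -- expected: 0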
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/misc-no_nested_borrows/NoNestedBorrows.lean b/tests/lean/misc-no_nested_borrows/NoNestedBorrows.lean
index e2697385..a73848de 100644
--- a/tests/lean/misc-no_nested_borrows/NoNestedBorrows.lean
+++ b/tests/lean/misc-no_nested_borrows/NoNestedBorrows.lean
@@ -2,556 +2,534 @@
-- [no_nested_borrows]
import Base.Primitives
-structure OpaqueDefs where
-
- /- [no_nested_borrows::Pair] -/
- structure pair_t (T1 T2 : Type) where
- pair_x : T1
- pair_y : T2
-
- /- [no_nested_borrows::List] -/
- inductive list_t (T : Type) :=
- | Cons : T -> list_t T -> list_t T
- | Nil : list_t T
-
- /- [no_nested_borrows::One] -/
- inductive one_t (T1 : Type) :=
- | One : T1 -> one_t T1
-
- /- [no_nested_borrows::EmptyEnum] -/
- inductive empty_enum_t :=
- | Empty : empty_enum_t
-
- /- [no_nested_borrows::Enum] -/
- inductive enum_t :=
- | Variant1 : enum_t
- | Variant2 : enum_t
-
- /- [no_nested_borrows::EmptyStruct] -/
- structure empty_struct_t where
-
- /- [no_nested_borrows::Sum] -/
- inductive sum_t (T1 T2 : Type) :=
- | Left : T1 -> sum_t T1 T2
- | Right : T2 -> sum_t T1 T2
-
- /- [no_nested_borrows::neg_test] -/
- def neg_test_fwd (x : Int32) : Result Int32 :=
- Int32.checked_neg x
-
- /- [no_nested_borrows::add_test] -/
- def add_test_fwd (x : UInt32) (y : UInt32) : Result UInt32 :=
- UInt32.checked_add x y
-
- /- [no_nested_borrows::subs_test] -/
- def subs_test_fwd (x : UInt32) (y : UInt32) : Result UInt32 :=
- UInt32.checked_sub x y
-
- /- [no_nested_borrows::div_test] -/
- def div_test_fwd (x : UInt32) (y : UInt32) : Result UInt32 :=
- UInt32.checked_div x y
-
- /- [no_nested_borrows::div_test1] -/
- def div_test1_fwd (x : UInt32) : Result UInt32 :=
- UInt32.checked_div x (UInt32.ofNatCore 2 (by intlit))
-
- /- [no_nested_borrows::rem_test] -/
- def rem_test_fwd (x : UInt32) (y : UInt32) : Result UInt32 :=
- UInt32.checked_rem x y
-
- /- [no_nested_borrows::cast_test] -/
- def cast_test_fwd (x : UInt32) : Result Int32 :=
- scalar_cast Int32 x
-
- /- [no_nested_borrows::test2] -/
- def test2_fwd : Result Unit :=
- do
- let _ ← UInt32.checked_add (UInt32.ofNatCore 23 (by intlit))
- (UInt32.ofNatCore 44 (by intlit))
- Result.ret ()
-
- /- Unit test for [no_nested_borrows::test2] -/
- #assert (test2_fwd == .ret ())
-
- /- [no_nested_borrows::get_max] -/
- def get_max_fwd (x : UInt32) (y : UInt32) : Result UInt32 :=
- if h: x >= y
- then Result.ret x
- else Result.ret y
-
- /- [no_nested_borrows::test3] -/
- def test3_fwd : Result Unit :=
- do
- let x ←
- get_max_fwd (UInt32.ofNatCore 4 (by intlit))
- (UInt32.ofNatCore 3 (by intlit))
- let y ←
- get_max_fwd (UInt32.ofNatCore 10 (by intlit))
- (UInt32.ofNatCore 11 (by intlit))
- let z ← UInt32.checked_add x y
- if h: not (z = (UInt32.ofNatCore 15 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test3] -/
- #assert (test3_fwd == .ret ())
-
- /- [no_nested_borrows::test_neg1] -/
- def test_neg1_fwd : Result Unit :=
- do
- let y ← Int32.checked_neg (Int32.ofNatCore 3 (by intlit))
- if h: not (y = (Int32.ofNatCore -3 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_neg1] -/
- #assert (test_neg1_fwd == .ret ())
-
- /- [no_nested_borrows::refs_test1] -/
- def refs_test1_fwd : Result Unit :=
- if h: not ((Int32.ofNatCore 1 (by intlit)) =
- (Int32.ofNatCore 1 (by intlit)))
+/- [no_nested_borrows::Pair] -/
+structure pair_t (T1 T2 : Type) where
+ pair_x : T1
+ pair_y : T2
+
+/- [no_nested_borrows::List] -/
+inductive list_t (T : Type) :=
+| Cons : T -> list_t T -> list_t T
+| Nil : list_t T
+
+/- [no_nested_borrows::One] -/
+inductive one_t (T1 : Type) :=
+| One : T1 -> one_t T1
+
+/- [no_nested_borrows::EmptyEnum] -/
+inductive empty_enum_t :=
+| Empty : empty_enum_t
+
+/- [no_nested_borrows::Enum] -/
+inductive enum_t :=
+| Variant1 : enum_t
+| Variant2 : enum_t
+
+/- [no_nested_borrows::EmptyStruct] -/
+structure empty_struct_t where
+
+/- [no_nested_borrows::Sum] -/
+inductive sum_t (T1 T2 : Type) :=
+| Left : T1 -> sum_t T1 T2
+| Right : T2 -> sum_t T1 T2
+
+/- [no_nested_borrows::neg_test] -/
+def neg_test_fwd (x : I32) : Result I32 :=
+ - x
+
+/- [no_nested_borrows::add_test] -/
+def add_test_fwd (x : U32) (y : U32) : Result U32 :=
+ x + y
+
+/- [no_nested_borrows::subs_test] -/
+def subs_test_fwd (x : U32) (y : U32) : Result U32 :=
+ x - y
+
+/- [no_nested_borrows::div_test] -/
+def div_test_fwd (x : U32) (y : U32) : Result U32 :=
+ x / y
+
+/- [no_nested_borrows::div_test1] -/
+def div_test1_fwd (x : U32) : Result U32 :=
+ x / (U32.ofInt 2 (by intlit))
+
+/- [no_nested_borrows::rem_test] -/
+def rem_test_fwd (x : U32) (y : U32) : Result U32 :=
+ x % y
+
+/- [no_nested_borrows::cast_test] -/
+def cast_test_fwd (x : U32) : Result I32 :=
+ Scalar.cast .I32 x
+
+/- [no_nested_borrows::test2] -/
+def test2_fwd : Result Unit :=
+ do
+ let _ ← (U32.ofInt 23 (by intlit)) + (U32.ofInt 44 (by intlit))
+ Result.ret ()
+
+/- Unit test for [no_nested_borrows::test2] -/
+#assert (test2_fwd == .ret ())
+
+/- [no_nested_borrows::get_max] -/
+def get_max_fwd (x : U32) (y : U32) : Result U32 :=
+ if h: x >= y
+ then Result.ret x
+ else Result.ret y
+
+/- [no_nested_borrows::test3] -/
+def test3_fwd : Result Unit :=
+ do
+ let x ← get_max_fwd (U32.ofInt 4 (by intlit)) (U32.ofInt 3 (by intlit))
+ let y ← get_max_fwd (U32.ofInt 10 (by intlit)) (U32.ofInt 11 (by intlit))
+ let z ← x + y
+ if h: not (z = (U32.ofInt 15 (by intlit)))
then Result.fail Error.panic
else Result.ret ()
-
- /- Unit test for [no_nested_borrows::refs_test1] -/
- #assert (refs_test1_fwd == .ret ())
-
- /- [no_nested_borrows::refs_test2] -/
- def refs_test2_fwd : Result Unit :=
- if h: not ((Int32.ofNatCore 2 (by intlit)) =
- (Int32.ofNatCore 2 (by intlit)))
+
+/- Unit test for [no_nested_borrows::test3] -/
+#assert (test3_fwd == .ret ())
+
+/- [no_nested_borrows::test_neg1] -/
+def test_neg1_fwd : Result Unit :=
+ do
+ let y ← - (I32.ofInt 3 (by intlit))
+ if h: not (y = (I32.ofInt (-(3:Int)) (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- Unit test for [no_nested_borrows::test_neg1] -/
+#assert (test_neg1_fwd == .ret ())
+
+/- [no_nested_borrows::refs_test1] -/
+def refs_test1_fwd : Result Unit :=
+ if h: not ((I32.ofInt 1 (by intlit)) = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- Unit test for [no_nested_borrows::refs_test1] -/
+#assert (refs_test1_fwd == .ret ())
+
+/- [no_nested_borrows::refs_test2] -/
+def refs_test2_fwd : Result Unit :=
+ if h: not ((I32.ofInt 2 (by intlit)) = (I32.ofInt 2 (by intlit)))
+ then Result.fail Error.panic
+ else
+ if h: not ((I32.ofInt 0 (by intlit)) = (I32.ofInt 0 (by intlit)))
then Result.fail Error.panic
else
- if h: not ((Int32.ofNatCore 0 (by intlit)) =
- (Int32.ofNatCore 0 (by intlit)))
+ if h: not ((I32.ofInt 2 (by intlit)) = (I32.ofInt 2 (by intlit)))
then Result.fail Error.panic
else
- if h: not ((Int32.ofNatCore 2 (by intlit)) =
- (Int32.ofNatCore 2 (by intlit)))
+ if h: not ((I32.ofInt 2 (by intlit)) = (I32.ofInt 2 (by intlit)))
then Result.fail Error.panic
- else
- if h: not ((Int32.ofNatCore 2 (by intlit)) =
- (Int32.ofNatCore 2 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::refs_test2] -/
- #assert (refs_test2_fwd == .ret ())
-
- /- [no_nested_borrows::test_list1] -/
- def test_list1_fwd : Result Unit :=
- Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_list1] -/
- #assert (test_list1_fwd == .ret ())
-
- /- [no_nested_borrows::test_box1] -/
- def test_box1_fwd : Result Unit :=
- let b := (Int32.ofNatCore 1 (by intlit))
- let x := b
- if h: not (x = (Int32.ofNatCore 1 (by intlit)))
+ else Result.ret ()
+
+/- Unit test for [no_nested_borrows::refs_test2] -/
+#assert (refs_test2_fwd == .ret ())
+
+/- [no_nested_borrows::test_list1] -/
+def test_list1_fwd : Result Unit :=
+ Result.ret ()
+
+/- Unit test for [no_nested_borrows::test_list1] -/
+#assert (test_list1_fwd == .ret ())
+
+/- [no_nested_borrows::test_box1] -/
+def test_box1_fwd : Result Unit :=
+ let b := (I32.ofInt 1 (by intlit))
+ let x := b
+ if h: not (x = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- Unit test for [no_nested_borrows::test_box1] -/
+#assert (test_box1_fwd == .ret ())
+
+/- [no_nested_borrows::copy_int] -/
+def copy_int_fwd (x : I32) : Result I32 :=
+ Result.ret x
+
+/- [no_nested_borrows::test_unreachable] -/
+def test_unreachable_fwd (b : Bool) : Result Unit :=
+ if h: b
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- [no_nested_borrows::test_panic] -/
+def test_panic_fwd (b : Bool) : Result Unit :=
+ if h: b
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- [no_nested_borrows::test_copy_int] -/
+def test_copy_int_fwd : Result Unit :=
+ do
+ let y ← copy_int_fwd (I32.ofInt 0 (by intlit))
+ if h: not ((I32.ofInt 0 (by intlit)) = y)
then Result.fail Error.panic
else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_box1] -/
- #assert (test_box1_fwd == .ret ())
-
- /- [no_nested_borrows::copy_int] -/
- def copy_int_fwd (x : Int32) : Result Int32 :=
- Result.ret x
-
- /- [no_nested_borrows::test_unreachable] -/
- def test_unreachable_fwd (b : Bool) : Result Unit :=
- if h: b
+
+/- Unit test for [no_nested_borrows::test_copy_int] -/
+#assert (test_copy_int_fwd == .ret ())
+
+/- [no_nested_borrows::is_cons] -/
+def is_cons_fwd (T : Type) (l : list_t T) : Result Bool :=
+ match h: l with
+ | list_t.Cons t l0 => Result.ret true
+ | list_t.Nil => Result.ret false
+
+/- [no_nested_borrows::test_is_cons] -/
+def test_is_cons_fwd : Result Unit :=
+ do
+ let l := list_t.Nil
+ let b ← is_cons_fwd I32 (list_t.Cons (I32.ofInt 0 (by intlit)) l)
+ if h: not b
then Result.fail Error.panic
else Result.ret ()
-
- /- [no_nested_borrows::test_panic] -/
- def test_panic_fwd (b : Bool) : Result Unit :=
- if h: b
+
+/- Unit test for [no_nested_borrows::test_is_cons] -/
+#assert (test_is_cons_fwd == .ret ())
+
+/- [no_nested_borrows::split_list] -/
+def split_list_fwd (T : Type) (l : list_t T) : Result (T × (list_t T)) :=
+ match h: l with
+ | list_t.Cons hd tl => Result.ret (hd, tl)
+ | list_t.Nil => Result.fail Error.panic
+
+/- [no_nested_borrows::test_split_list] -/
+def test_split_list_fwd : Result Unit :=
+ do
+ let l := list_t.Nil
+ let p ← split_list_fwd I32 (list_t.Cons (I32.ofInt 0 (by intlit)) l)
+ let (hd, _) := p
+ if h: not (hd = (I32.ofInt 0 (by intlit)))
then Result.fail Error.panic
else Result.ret ()
-
- /- [no_nested_borrows::test_copy_int] -/
- def test_copy_int_fwd : Result Unit :=
- do
- let y ← copy_int_fwd (Int32.ofNatCore 0 (by intlit))
- if h: not ((Int32.ofNatCore 0 (by intlit)) = y)
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_copy_int] -/
- #assert (test_copy_int_fwd == .ret ())
-
- /- [no_nested_borrows::is_cons] -/
- def is_cons_fwd (T : Type) (l : list_t T) : Result Bool :=
- match h: l with
- | list_t.Cons t l0 => Result.ret true
- | list_t.Nil => Result.ret false
-
- /- [no_nested_borrows::test_is_cons] -/
- def test_is_cons_fwd : Result Unit :=
- do
- let l := list_t.Nil
- let b ←
- is_cons_fwd Int32 (list_t.Cons (Int32.ofNatCore 0 (by intlit)) l)
- if h: not b
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_is_cons] -/
- #assert (test_is_cons_fwd == .ret ())
-
- /- [no_nested_borrows::split_list] -/
- def split_list_fwd (T : Type) (l : list_t T) : Result (T × (list_t T)) :=
- match h: l with
- | list_t.Cons hd tl => Result.ret (hd, tl)
- | list_t.Nil => Result.fail Error.panic
-
- /- [no_nested_borrows::test_split_list] -/
- def test_split_list_fwd : Result Unit :=
- do
- let l := list_t.Nil
- let p ←
- split_list_fwd Int32 (list_t.Cons (Int32.ofNatCore 0 (by intlit)) l)
- let (hd, _) := p
- if h: not (hd = (Int32.ofNatCore 0 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_split_list] -/
- #assert (test_split_list_fwd == .ret ())
-
- /- [no_nested_borrows::choose] -/
- def choose_fwd (T : Type) (b : Bool) (x : T) (y : T) : Result T :=
- if h: b
- then Result.ret x
- else Result.ret y
-
- /- [no_nested_borrows::choose] -/
- def choose_back
- (T : Type) (b : Bool) (x : T) (y : T) (ret0 : T) : Result (T × T) :=
- if h: b
- then Result.ret (ret0, y)
- else Result.ret (x, ret0)
-
- /- [no_nested_borrows::choose_test] -/
- def choose_test_fwd : Result Unit :=
- do
- let z ←
- choose_fwd Int32 true (Int32.ofNatCore 0 (by intlit))
- (Int32.ofNatCore 0 (by intlit))
- let z0 ← Int32.checked_add z (Int32.ofNatCore 1 (by intlit))
- if h: not (z0 = (Int32.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let (x, y) ←
- choose_back Int32 true (Int32.ofNatCore 0 (by intlit))
- (Int32.ofNatCore 0 (by intlit)) z0
- if h: not (x = (Int32.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else
- if h: not (y = (Int32.ofNatCore 0 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::choose_test] -/
- #assert (choose_test_fwd == .ret ())
-
- /- [no_nested_borrows::test_char] -/
- def test_char_fwd : Result Char :=
- Result.ret 'a'
-
- /- [no_nested_borrows::NodeElem] -/
- mutual inductive node_elem_t (T : Type) :=
- | Cons : tree_t T -> node_elem_t T -> node_elem_t T
- | Nil : node_elem_t T
-
- /- [no_nested_borrows::Tree] -/
- inductive tree_t (T : Type) :=
- | Leaf : T -> tree_t T
- | Node : T -> node_elem_t T -> tree_t T -> tree_t T
-
- /- [no_nested_borrows::list_length] -/
- def list_length_fwd (T : Type) (l : list_t T) : Result UInt32 :=
- match h: l with
- | list_t.Cons t l1 =>
+
+/- Unit test for [no_nested_borrows::test_split_list] -/
+#assert (test_split_list_fwd == .ret ())
+
+/- [no_nested_borrows::choose] -/
+def choose_fwd (T : Type) (b : Bool) (x : T) (y : T) : Result T :=
+ if h: b
+ then Result.ret x
+ else Result.ret y
+
+/- [no_nested_borrows::choose] -/
+def choose_back
+ (T : Type) (b : Bool) (x : T) (y : T) (ret0 : T) : Result (T × T) :=
+ if h: b
+ then Result.ret (ret0, y)
+ else Result.ret (x, ret0)
+
+/- [no_nested_borrows::choose_test] -/
+def choose_test_fwd : Result Unit :=
+ do
+ let z ←
+ choose_fwd I32 true (I32.ofInt 0 (by intlit)) (I32.ofInt 0 (by intlit))
+ let z0 ← z + (I32.ofInt 1 (by intlit))
+ if h: not (z0 = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else
do
- let i ← list_length_fwd T l1
- UInt32.checked_add (UInt32.ofNatCore 1 (by intlit)) i
- | list_t.Nil => Result.ret (UInt32.ofNatCore 0 (by intlit))
-
- /- [no_nested_borrows::list_nth_shared] -/
- def list_nth_shared_fwd (T : Type) (l : list_t T) (i : UInt32) : Result T :=
- match h: l with
- | list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
- then Result.ret x
- else
- do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
- list_nth_shared_fwd T tl i0
- | list_t.Nil => Result.fail Error.panic
-
- /- [no_nested_borrows::list_nth_mut] -/
- def list_nth_mut_fwd (T : Type) (l : list_t T) (i : UInt32) : Result T :=
- match h: l with
- | list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
- then Result.ret x
- else
- do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
- list_nth_mut_fwd T tl i0
- | list_t.Nil => Result.fail Error.panic
-
- /- [no_nested_borrows::list_nth_mut] -/
- def list_nth_mut_back
- (T : Type) (l : list_t T) (i : UInt32) (ret0 : T) : Result (list_t T) :=
- match h: l with
- | list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
- then Result.ret (list_t.Cons ret0 tl)
- else
- do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
- let tl0 ← list_nth_mut_back T tl i0 ret0
- Result.ret (list_t.Cons x tl0)
- | list_t.Nil => Result.fail Error.panic
-
- /- [no_nested_borrows::list_rev_aux] -/
- def list_rev_aux_fwd
- (T : Type) (li : list_t T) (lo : list_t T) : Result (list_t T) :=
- match h: li with
- | list_t.Cons hd tl => list_rev_aux_fwd T tl (list_t.Cons hd lo)
- | list_t.Nil => Result.ret lo
-
- /- [no_nested_borrows::list_rev] -/
- def list_rev_fwd_back (T : Type) (l : list_t T) : Result (list_t T) :=
- let li := mem_replace_fwd (list_t T) l list_t.Nil
- list_rev_aux_fwd T li list_t.Nil
-
- /- [no_nested_borrows::test_list_functions] -/
- def test_list_functions_fwd : Result Unit :=
- do
- let l := list_t.Nil
- let l0 := list_t.Cons (Int32.ofNatCore 2 (by intlit)) l
- let l1 := list_t.Cons (Int32.ofNatCore 1 (by intlit)) l0
- let i ←
- list_length_fwd Int32 (list_t.Cons (Int32.ofNatCore 0 (by intlit)) l1)
- if h: not (i = (UInt32.ofNatCore 3 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let i0 ←
- list_nth_shared_fwd Int32 (list_t.Cons
- (Int32.ofNatCore 0 (by intlit)) l1)
- (UInt32.ofNatCore 0 (by intlit))
- if h: not (i0 = (Int32.ofNatCore 0 (by intlit)))
+ let (x, y) ←
+ choose_back I32 true (I32.ofInt 0 (by intlit))
+ (I32.ofInt 0 (by intlit)) z0
+ if h: not (x = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else
+ if h: not (y = (I32.ofInt 0 (by intlit)))
then Result.fail Error.panic
- else
- do
- let i1 ←
- list_nth_shared_fwd Int32 (list_t.Cons
- (Int32.ofNatCore 0 (by intlit)) l1)
- (UInt32.ofNatCore 1 (by intlit))
- if h: not (i1 = (Int32.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let i2 ←
- list_nth_shared_fwd Int32 (list_t.Cons
- (Int32.ofNatCore 0 (by intlit)) l1)
- (UInt32.ofNatCore 2 (by intlit))
- if h: not (i2 = (Int32.ofNatCore 2 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let ls ←
- list_nth_mut_back Int32 (list_t.Cons
- (Int32.ofNatCore 0 (by intlit)) l1)
- (UInt32.ofNatCore 1 (by intlit))
- (Int32.ofNatCore 3 (by intlit))
- let i3 ←
- list_nth_shared_fwd Int32 ls
- (UInt32.ofNatCore 0 (by intlit))
- if h: not (i3 = (Int32.ofNatCore 0 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let i4 ←
- list_nth_shared_fwd Int32 ls
- (UInt32.ofNatCore 1 (by intlit))
- if h: not (i4 = (Int32.ofNatCore 3 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let i5 ←
- list_nth_shared_fwd Int32 ls
- (UInt32.ofNatCore 2 (by intlit))
- if h: not (i5 = (Int32.ofNatCore 2 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_list_functions] -/
- #assert (test_list_functions_fwd == .ret ())
-
- /- [no_nested_borrows::id_mut_pair1] -/
- def id_mut_pair1_fwd (T1 T2 : Type) (x : T1) (y : T2) : Result (T1 × T2) :=
- Result.ret (x, y)
-
- /- [no_nested_borrows::id_mut_pair1] -/
- def id_mut_pair1_back
- (T1 T2 : Type) (x : T1) (y : T2) (ret0 : (T1 × T2)) : Result (T1 × T2) :=
- let (t, t0) := ret0
- Result.ret (t, t0)
-
- /- [no_nested_borrows::id_mut_pair2] -/
- def id_mut_pair2_fwd (T1 T2 : Type) (p : (T1 × T2)) : Result (T1 × T2) :=
- let (t, t0) := p
- Result.ret (t, t0)
-
- /- [no_nested_borrows::id_mut_pair2] -/
- def id_mut_pair2_back
- (T1 T2 : Type) (p : (T1 × T2)) (ret0 : (T1 × T2)) : Result (T1 × T2) :=
- let (t, t0) := ret0
- Result.ret (t, t0)
-
- /- [no_nested_borrows::id_mut_pair3] -/
- def id_mut_pair3_fwd (T1 T2 : Type) (x : T1) (y : T2) : Result (T1 × T2) :=
- Result.ret (x, y)
-
- /- [no_nested_borrows::id_mut_pair3] -/
- def id_mut_pair3_back'a
- (T1 T2 : Type) (x : T1) (y : T2) (ret0 : T1) : Result T1 :=
- Result.ret ret0
-
- /- [no_nested_borrows::id_mut_pair3] -/
- def id_mut_pair3_back'b
- (T1 T2 : Type) (x : T1) (y : T2) (ret0 : T2) : Result T2 :=
- Result.ret ret0
-
- /- [no_nested_borrows::id_mut_pair4] -/
- def id_mut_pair4_fwd (T1 T2 : Type) (p : (T1 × T2)) : Result (T1 × T2) :=
- let (t, t0) := p
- Result.ret (t, t0)
-
- /- [no_nested_borrows::id_mut_pair4] -/
- def id_mut_pair4_back'a
- (T1 T2 : Type) (p : (T1 × T2)) (ret0 : T1) : Result T1 :=
- Result.ret ret0
-
- /- [no_nested_borrows::id_mut_pair4] -/
- def id_mut_pair4_back'b
- (T1 T2 : Type) (p : (T1 × T2)) (ret0 : T2) : Result T2 :=
- Result.ret ret0
-
- /- [no_nested_borrows::StructWithTuple] -/
- structure struct_with_tuple_t (T1 T2 : Type) where
- struct_with_tuple_p : (T1 × T2)
-
- /- [no_nested_borrows::new_tuple1] -/
- def new_tuple1_fwd : Result (struct_with_tuple_t UInt32 UInt32) :=
- Result.ret
- {
- struct_with_tuple_p :=
- ((UInt32.ofNatCore 1 (by intlit)), (UInt32.ofNatCore 2 (by intlit)))
- }
-
- /- [no_nested_borrows::new_tuple2] -/
- def new_tuple2_fwd : Result (struct_with_tuple_t Int16 Int16) :=
- Result.ret
- {
- struct_with_tuple_p :=
- ((Int16.ofNatCore 1 (by intlit)), (Int16.ofNatCore 2 (by intlit)))
- }
-
- /- [no_nested_borrows::new_tuple3] -/
- def new_tuple3_fwd : Result (struct_with_tuple_t UInt64 Int64) :=
- Result.ret
- {
- struct_with_tuple_p :=
- ((UInt64.ofNatCore 1 (by intlit)), (Int64.ofNatCore 2 (by intlit)))
- }
-
- /- [no_nested_borrows::StructWithPair] -/
- structure struct_with_pair_t (T1 T2 : Type) where
- struct_with_pair_p : pair_t T1 T2
-
- /- [no_nested_borrows::new_pair1] -/
- def new_pair1_fwd : Result (struct_with_pair_t UInt32 UInt32) :=
- Result.ret
- {
- struct_with_pair_p :=
- {
- pair_x := (UInt32.ofNatCore 1 (by intlit)),
- pair_y := (UInt32.ofNatCore 2 (by intlit))
- }
- }
-
- /- [no_nested_borrows::test_constants] -/
- def test_constants_fwd : Result Unit :=
+ else Result.ret ()
+
+/- Unit test for [no_nested_borrows::choose_test] -/
+#assert (choose_test_fwd == .ret ())
+
+/- [no_nested_borrows::test_char] -/
+def test_char_fwd : Result Char :=
+ Result.ret 'a'
+
+/- [no_nested_borrows::NodeElem] -/
+mutual inductive node_elem_t (T : Type) :=
+| Cons : tree_t T -> node_elem_t T -> node_elem_t T
+| Nil : node_elem_t T
+
+/- [no_nested_borrows::Tree] -/
+inductive tree_t (T : Type) :=
+| Leaf : T -> tree_t T
+| Node : T -> node_elem_t T -> tree_t T -> tree_t T
+end
+
+/- [no_nested_borrows::list_length] -/
+def list_length_fwd (T : Type) (l : list_t T) : Result U32 :=
+ match h: l with
+ | list_t.Cons t l1 =>
do
- let swt ← new_tuple1_fwd
- let (i, _) := swt.struct_with_tuple_p
- if h: not (i = (UInt32.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let swt0 ← new_tuple2_fwd
- let (i0, _) := swt0.struct_with_tuple_p
- if h: not (i0 = (Int16.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let swt1 ← new_tuple3_fwd
- let (i1, _) := swt1.struct_with_tuple_p
- if h: not (i1 = (UInt64.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let swp ← new_pair1_fwd
- if h: not (swp.struct_with_pair_p.pair_x =
- (UInt32.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_constants] -/
- #assert (test_constants_fwd == .ret ())
-
- /- [no_nested_borrows::test_weird_borrows1] -/
- def test_weird_borrows1_fwd : Result Unit :=
- Result.ret ()
-
- /- Unit test for [no_nested_borrows::test_weird_borrows1] -/
- #assert (test_weird_borrows1_fwd == .ret ())
-
- /- [no_nested_borrows::test_mem_replace] -/
- def test_mem_replace_fwd_back (px : UInt32) : Result UInt32 :=
- let y := mem_replace_fwd UInt32 px (UInt32.ofNatCore 1 (by intlit))
- if h: not (y = (UInt32.ofNatCore 0 (by intlit)))
+ let i ← list_length_fwd T l1
+ (U32.ofInt 1 (by intlit)) + i
+ | list_t.Nil => Result.ret (U32.ofInt 0 (by intlit))
+
+/- [no_nested_borrows::list_nth_shared] -/
+def list_nth_shared_fwd (T : Type) (l : list_t T) (i : U32) : Result T :=
+ match h: l with
+ | list_t.Cons x tl =>
+ if h: i = (U32.ofInt 0 (by intlit))
+ then Result.ret x
+ else
+ do
+ let i0 ← i - (U32.ofInt 1 (by intlit))
+ list_nth_shared_fwd T tl i0
+ | list_t.Nil => Result.fail Error.panic
+
+/- [no_nested_borrows::list_nth_mut] -/
+def list_nth_mut_fwd (T : Type) (l : list_t T) (i : U32) : Result T :=
+ match h: l with
+ | list_t.Cons x tl =>
+ if h: i = (U32.ofInt 0 (by intlit))
+ then Result.ret x
+ else do
+ let i0 ← i - (U32.ofInt 1 (by intlit))
+ list_nth_mut_fwd T tl i0
+ | list_t.Nil => Result.fail Error.panic
+
+/- [no_nested_borrows::list_nth_mut] -/
+def list_nth_mut_back
+ (T : Type) (l : list_t T) (i : U32) (ret0 : T) : Result (list_t T) :=
+ match h: l with
+ | list_t.Cons x tl =>
+ if h: i = (U32.ofInt 0 (by intlit))
+ then Result.ret (list_t.Cons ret0 tl)
+ else
+ do
+ let i0 ← i - (U32.ofInt 1 (by intlit))
+ let tl0 ← list_nth_mut_back T tl i0 ret0
+ Result.ret (list_t.Cons x tl0)
+ | list_t.Nil => Result.fail Error.panic
+
+/- [no_nested_borrows::list_rev_aux] -/
+def list_rev_aux_fwd
+ (T : Type) (li : list_t T) (lo : list_t T) : Result (list_t T) :=
+ match h: li with
+ | list_t.Cons hd tl => list_rev_aux_fwd T tl (list_t.Cons hd lo)
+ | list_t.Nil => Result.ret lo
+
+/- [no_nested_borrows::list_rev] -/
+def list_rev_fwd_back (T : Type) (l : list_t T) : Result (list_t T) :=
+ let li := mem_replace_fwd (list_t T) l list_t.Nil
+ list_rev_aux_fwd T li list_t.Nil
+
+/- [no_nested_borrows::test_list_functions] -/
+def test_list_functions_fwd : Result Unit :=
+ do
+ let l := list_t.Nil
+ let l0 := list_t.Cons (I32.ofInt 2 (by intlit)) l
+ let l1 := list_t.Cons (I32.ofInt 1 (by intlit)) l0
+ let i ← list_length_fwd I32 (list_t.Cons (I32.ofInt 0 (by intlit)) l1)
+ if h: not (i = (U32.ofInt 3 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let i0 ←
+ list_nth_shared_fwd I32 (list_t.Cons (I32.ofInt 0 (by intlit)) l1)
+ (U32.ofInt 0 (by intlit))
+ if h: not (i0 = (I32.ofInt 0 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let i1 ←
+ list_nth_shared_fwd I32 (list_t.Cons (I32.ofInt 0 (by intlit))
+ l1) (U32.ofInt 1 (by intlit))
+ if h: not (i1 = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let i2 ←
+ list_nth_shared_fwd I32 (list_t.Cons
+ (I32.ofInt 0 (by intlit)) l1) (U32.ofInt 2 (by intlit))
+ if h: not (i2 = (I32.ofInt 2 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let ls ←
+ list_nth_mut_back I32 (list_t.Cons
+ (I32.ofInt 0 (by intlit)) l1) (U32.ofInt 1 (by intlit))
+ (I32.ofInt 3 (by intlit))
+ let i3 ←
+ list_nth_shared_fwd I32 ls (U32.ofInt 0 (by intlit))
+ if h: not (i3 = (I32.ofInt 0 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let i4 ←
+ list_nth_shared_fwd I32 ls (U32.ofInt 1 (by intlit))
+ if h: not (i4 = (I32.ofInt 3 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let i5 ←
+ list_nth_shared_fwd I32 ls
+ (U32.ofInt 2 (by intlit))
+ if h: not (i5 = (I32.ofInt 2 (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- Unit test for [no_nested_borrows::test_list_functions] -/
+#assert (test_list_functions_fwd == .ret ())
+
+/- [no_nested_borrows::id_mut_pair1] -/
+def id_mut_pair1_fwd (T1 T2 : Type) (x : T1) (y : T2) : Result (T1 × T2) :=
+ Result.ret (x, y)
+
+/- [no_nested_borrows::id_mut_pair1] -/
+def id_mut_pair1_back
+ (T1 T2 : Type) (x : T1) (y : T2) (ret0 : (T1 × T2)) : Result (T1 × T2) :=
+ let (t, t0) := ret0
+ Result.ret (t, t0)
+
+/- [no_nested_borrows::id_mut_pair2] -/
+def id_mut_pair2_fwd (T1 T2 : Type) (p : (T1 × T2)) : Result (T1 × T2) :=
+ let (t, t0) := p
+ Result.ret (t, t0)
+
+/- [no_nested_borrows::id_mut_pair2] -/
+def id_mut_pair2_back
+ (T1 T2 : Type) (p : (T1 × T2)) (ret0 : (T1 × T2)) : Result (T1 × T2) :=
+ let (t, t0) := ret0
+ Result.ret (t, t0)
+
+/- [no_nested_borrows::id_mut_pair3] -/
+def id_mut_pair3_fwd (T1 T2 : Type) (x : T1) (y : T2) : Result (T1 × T2) :=
+ Result.ret (x, y)
+
+/- [no_nested_borrows::id_mut_pair3] -/
+def id_mut_pair3_back'a
+ (T1 T2 : Type) (x : T1) (y : T2) (ret0 : T1) : Result T1 :=
+ Result.ret ret0
+
+/- [no_nested_borrows::id_mut_pair3] -/
+def id_mut_pair3_back'b
+ (T1 T2 : Type) (x : T1) (y : T2) (ret0 : T2) : Result T2 :=
+ Result.ret ret0
+
+/- [no_nested_borrows::id_mut_pair4] -/
+def id_mut_pair4_fwd (T1 T2 : Type) (p : (T1 × T2)) : Result (T1 × T2) :=
+ let (t, t0) := p
+ Result.ret (t, t0)
+
+/- [no_nested_borrows::id_mut_pair4] -/
+def id_mut_pair4_back'a
+ (T1 T2 : Type) (p : (T1 × T2)) (ret0 : T1) : Result T1 :=
+ Result.ret ret0
+
+/- [no_nested_borrows::id_mut_pair4] -/
+def id_mut_pair4_back'b
+ (T1 T2 : Type) (p : (T1 × T2)) (ret0 : T2) : Result T2 :=
+ Result.ret ret0
+
+/- [no_nested_borrows::StructWithTuple] -/
+structure struct_with_tuple_t (T1 T2 : Type) where
+ struct_with_tuple_p : (T1 × T2)
+
+/- [no_nested_borrows::new_tuple1] -/
+def new_tuple1_fwd : Result (struct_with_tuple_t U32 U32) :=
+ Result.ret
+ {
+ struct_with_tuple_p :=
+ ((U32.ofInt 1 (by intlit)), (U32.ofInt 2 (by intlit)))
+ }
+
+/- [no_nested_borrows::new_tuple2] -/
+def new_tuple2_fwd : Result (struct_with_tuple_t I16 I16) :=
+ Result.ret
+ {
+ struct_with_tuple_p :=
+ ((I16.ofInt 1 (by intlit)), (I16.ofInt 2 (by intlit)))
+ }
+
+/- [no_nested_borrows::new_tuple3] -/
+def new_tuple3_fwd : Result (struct_with_tuple_t U64 I64) :=
+ Result.ret
+ {
+ struct_with_tuple_p :=
+ ((U64.ofInt 1 (by intlit)), (I64.ofInt 2 (by intlit)))
+ }
+
+/- [no_nested_borrows::StructWithPair] -/
+structure struct_with_pair_t (T1 T2 : Type) where
+ struct_with_pair_p : pair_t T1 T2
+
+/- [no_nested_borrows::new_pair1] -/
+def new_pair1_fwd : Result (struct_with_pair_t U32 U32) :=
+ Result.ret
+ {
+ struct_with_pair_p :=
+ {
+ pair_x := (U32.ofInt 1 (by intlit)),
+ pair_y := (U32.ofInt 2 (by intlit))
+ }
+ }
+
+/- [no_nested_borrows::test_constants] -/
+def test_constants_fwd : Result Unit :=
+ do
+ let swt ← new_tuple1_fwd
+ let (i, _) := swt.struct_with_tuple_p
+ if h: not (i = (U32.ofInt 1 (by intlit)))
then Result.fail Error.panic
- else Result.ret (UInt32.ofNatCore 2 (by intlit))
-
- /- [no_nested_borrows::test_shared_borrow_bool1] -/
- def test_shared_borrow_bool1_fwd (b : Bool) : Result UInt32 :=
- if h: b
- then Result.ret (UInt32.ofNatCore 0 (by intlit))
- else Result.ret (UInt32.ofNatCore 1 (by intlit))
-
- /- [no_nested_borrows::test_shared_borrow_bool2] -/
- def test_shared_borrow_bool2_fwd : Result UInt32 :=
- Result.ret (UInt32.ofNatCore 0 (by intlit))
-
- /- [no_nested_borrows::test_shared_borrow_enum1] -/
- def test_shared_borrow_enum1_fwd (l : list_t UInt32) : Result UInt32 :=
- match h: l with
- | list_t.Cons i l0 => Result.ret (UInt32.ofNatCore 1 (by intlit))
- | list_t.Nil => Result.ret (UInt32.ofNatCore 0 (by intlit))
-
- /- [no_nested_borrows::test_shared_borrow_enum2] -/
- def test_shared_borrow_enum2_fwd : Result UInt32 :=
- Result.ret (UInt32.ofNatCore 0 (by intlit))
-
+ else
+ do
+ let swt0 ← new_tuple2_fwd
+ let (i0, _) := swt0.struct_with_tuple_p
+ if h: not (i0 = (I16.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let swt1 ← new_tuple3_fwd
+ let (i1, _) := swt1.struct_with_tuple_p
+ if h: not (i1 = (U64.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let swp ← new_pair1_fwd
+ if h: not (swp.struct_with_pair_p.pair_x =
+ (U32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- Unit test for [no_nested_borrows::test_constants] -/
+#assert (test_constants_fwd == .ret ())
+
+/- [no_nested_borrows::test_weird_borrows1] -/
+def test_weird_borrows1_fwd : Result Unit :=
+ Result.ret ()
+
+/- Unit test for [no_nested_borrows::test_weird_borrows1] -/
+#assert (test_weird_borrows1_fwd == .ret ())
+
+/- [no_nested_borrows::test_mem_replace] -/
+def test_mem_replace_fwd_back (px : U32) : Result U32 :=
+ let y := mem_replace_fwd U32 px (U32.ofInt 1 (by intlit))
+ if h: not (y = (U32.ofInt 0 (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret (U32.ofInt 2 (by intlit))
+
+/- [no_nested_borrows::test_shared_borrow_bool1] -/
+def test_shared_borrow_bool1_fwd (b : Bool) : Result U32 :=
+ if h: b
+ then Result.ret (U32.ofInt 0 (by intlit))
+ else Result.ret (U32.ofInt 1 (by intlit))
+
+/- [no_nested_borrows::test_shared_borrow_bool2] -/
+def test_shared_borrow_bool2_fwd : Result U32 :=
+ Result.ret (U32.ofInt 0 (by intlit))
+
+/- [no_nested_borrows::test_shared_borrow_enum1] -/
+def test_shared_borrow_enum1_fwd (l : list_t U32) : Result U32 :=
+ match h: l with
+ | list_t.Cons i l0 => Result.ret (U32.ofInt 1 (by intlit))
+ | list_t.Nil => Result.ret (U32.ofInt 0 (by intlit))
+
+/- [no_nested_borrows::test_shared_borrow_enum2] -/
+def test_shared_borrow_enum2_fwd : Result U32 :=
+ Result.ret (U32.ofInt 0 (by intlit))
+
diff --git a/tests/lean/misc-paper/Base/Primitives.lean b/tests/lean/misc-paper/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/misc-paper/Base/Primitives.lean
+++ b/tests/lean/misc-paper/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
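+
+-- Checking that the conservative `Isize` bounds reduce to the `I32` bounds
+-- (an illustrative sanity check, in the spirit of the assertions above):
+#assert (Scalar.cMin .Isize == I32.min)
+#assert (Scalar.cMax .Isize == I32.max)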
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful
+-- for type class resolution for instance).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
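+
+-- For instance (illustrative): with the instances above, `- x` for `x : I32`
+-- elaborates at type `Result I32`:
+#check fun (x : I32) => - x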
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
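+
+-- This is how the tactic is expected to be used, for instance (illustrative):
+#assert ((U32.ofInt 2 (by intlit)).val == 2)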
+
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/misc-paper/Paper.lean b/tests/lean/misc-paper/Paper.lean
index 05fde52c..0b16fb8e 100644
--- a/tests/lean/misc-paper/Paper.lean
+++ b/tests/lean/misc-paper/Paper.lean
@@ -2,126 +2,122 @@
-- [paper]
import Base.Primitives
-structure OpaqueDefs where
-
- /- [paper::ref_incr] -/
- def ref_incr_fwd_back (x : Int32) : Result Int32 :=
- Int32.checked_add x (Int32.ofNatCore 1 (by intlit))
-
- /- [paper::test_incr] -/
- def test_incr_fwd : Result Unit :=
- do
- let x ← ref_incr_fwd_back (Int32.ofNatCore 0 (by intlit))
- if h: not (x = (Int32.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [paper::test_incr] -/
- #assert (test_incr_fwd == .ret ())
-
- /- [paper::choose] -/
- def choose_fwd (T : Type) (b : Bool) (x : T) (y : T) : Result T :=
- if h: b
- then Result.ret x
- else Result.ret y
-
- /- [paper::choose] -/
- def choose_back
- (T : Type) (b : Bool) (x : T) (y : T) (ret0 : T) : Result (T × T) :=
- if h: b
- then Result.ret (ret0, y)
- else Result.ret (x, ret0)
-
- /- [paper::test_choose] -/
- def test_choose_fwd : Result Unit :=
- do
- let z ←
- choose_fwd Int32 true (Int32.ofNatCore 0 (by intlit))
- (Int32.ofNatCore 0 (by intlit))
- let z0 ← Int32.checked_add z (Int32.ofNatCore 1 (by intlit))
- if h: not (z0 = (Int32.ofNatCore 1 (by intlit)))
- then Result.fail Error.panic
- else
- do
- let (x, y) ←
- choose_back Int32 true (Int32.ofNatCore 0 (by intlit))
- (Int32.ofNatCore 0 (by intlit)) z0
- if h: not (x = (Int32.ofNatCore 1 (by intlit)))
+/- [paper::ref_incr] -/
+def ref_incr_fwd_back (x : I32) : Result I32 :=
+ x + (I32.ofInt 1 (by intlit))
+
+/- [paper::test_incr] -/
+def test_incr_fwd : Result Unit :=
+ do
+ let x ← ref_incr_fwd_back (I32.ofInt 0 (by intlit))
+ if h: not (x = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- Unit test for [paper::test_incr] -/
+#assert (test_incr_fwd == .ret ())
+
+/- [paper::choose] -/
+def choose_fwd (T : Type) (b : Bool) (x : T) (y : T) : Result T :=
+ if h: b
+ then Result.ret x
+ else Result.ret y
+
+/- [paper::choose] -/
+def choose_back
+ (T : Type) (b : Bool) (x : T) (y : T) (ret0 : T) : Result (T × T) :=
+ if h: b
+ then Result.ret (ret0, y)
+ else Result.ret (x, ret0)
+
+/- [paper::test_choose] -/
+def test_choose_fwd : Result Unit :=
+ do
+ let z ←
+ choose_fwd I32 true (I32.ofInt 0 (by intlit)) (I32.ofInt 0 (by intlit))
+ let z0 ← z + (I32.ofInt 1 (by intlit))
+ if h: not (z0 = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else
+ do
+ let (x, y) ←
+ choose_back I32 true (I32.ofInt 0 (by intlit))
+ (I32.ofInt 0 (by intlit)) z0
+ if h: not (x = (I32.ofInt 1 (by intlit)))
+ then Result.fail Error.panic
+ else
+ if h: not (y = (I32.ofInt 0 (by intlit)))
then Result.fail Error.panic
- else
- if h: not (y = (Int32.ofNatCore 0 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [paper::test_choose] -/
- #assert (test_choose_fwd == .ret ())
-
- /- [paper::List] -/
- inductive list_t (T : Type) :=
- | Cons : T -> list_t T -> list_t T
- | Nil : list_t T
-
- /- [paper::list_nth_mut] -/
- def list_nth_mut_fwd (T : Type) (l : list_t T) (i : UInt32) : Result T :=
- match h: l with
- | list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
- then Result.ret x
- else
- do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
- list_nth_mut_fwd T tl i0
- | list_t.Nil => Result.fail Error.panic
-
- /- [paper::list_nth_mut] -/
- def list_nth_mut_back
- (T : Type) (l : list_t T) (i : UInt32) (ret0 : T) : Result (list_t T) :=
- match h: l with
- | list_t.Cons x tl =>
- if h: i = (UInt32.ofNatCore 0 (by intlit))
- then Result.ret (list_t.Cons ret0 tl)
- else
- do
- let i0 ← UInt32.checked_sub i (UInt32.ofNatCore 1 (by intlit))
- let tl0 ← list_nth_mut_back T tl i0 ret0
- Result.ret (list_t.Cons x tl0)
- | list_t.Nil => Result.fail Error.panic
-
- /- [paper::sum] -/
- def sum_fwd (l : list_t Int32) : Result Int32 :=
- match h: l with
- | list_t.Cons x tl => do
- let i ← sum_fwd tl
- Int32.checked_add x i
- | list_t.Nil => Result.ret (Int32.ofNatCore 0 (by intlit))
-
- /- [paper::test_nth] -/
- def test_nth_fwd : Result Unit :=
- do
- let l := list_t.Nil
- let l0 := list_t.Cons (Int32.ofNatCore 3 (by intlit)) l
- let l1 := list_t.Cons (Int32.ofNatCore 2 (by intlit)) l0
- let x ←
- list_nth_mut_fwd Int32 (list_t.Cons (Int32.ofNatCore 1 (by intlit)) l1)
- (UInt32.ofNatCore 2 (by intlit))
- let x0 ← Int32.checked_add x (Int32.ofNatCore 1 (by intlit))
- let l2 ←
- list_nth_mut_back Int32 (list_t.Cons (Int32.ofNatCore 1 (by intlit))
- l1) (UInt32.ofNatCore 2 (by intlit)) x0
- let i ← sum_fwd l2
- if h: not (i = (Int32.ofNatCore 7 (by intlit)))
- then Result.fail Error.panic
- else Result.ret ()
-
- /- Unit test for [paper::test_nth] -/
- #assert (test_nth_fwd == .ret ())
-
- /- [paper::call_choose] -/
- def call_choose_fwd (p : (UInt32 × UInt32)) : Result UInt32 :=
- do
- let (px, py) := p
- let pz ← choose_fwd UInt32 true px py
- let pz0 ← UInt32.checked_add pz (UInt32.ofNatCore 1 (by intlit))
- let (px0, _) ← choose_back UInt32 true px py pz0
- Result.ret px0
-
+ else Result.ret ()
+
+/- Unit test for [paper::test_choose] -/
+#assert (test_choose_fwd == .ret ())
+
+/- [paper::List] -/
+inductive list_t (T : Type) :=
+| Cons : T -> list_t T -> list_t T
+| Nil : list_t T
+
+/- [paper::list_nth_mut] -/
+def list_nth_mut_fwd (T : Type) (l : list_t T) (i : U32) : Result T :=
+ match h: l with
+ | list_t.Cons x tl =>
+ if h: i = (U32.ofInt 0 (by intlit))
+ then Result.ret x
+ else do
+ let i0 ← i - (U32.ofInt 1 (by intlit))
+ list_nth_mut_fwd T tl i0
+ | list_t.Nil => Result.fail Error.panic
+
+/- [paper::list_nth_mut] -/
+def list_nth_mut_back
+ (T : Type) (l : list_t T) (i : U32) (ret0 : T) : Result (list_t T) :=
+ match h: l with
+ | list_t.Cons x tl =>
+ if h: i = (U32.ofInt 0 (by intlit))
+ then Result.ret (list_t.Cons ret0 tl)
+ else
+ do
+ let i0 ← i - (U32.ofInt 1 (by intlit))
+ let tl0 ← list_nth_mut_back T tl i0 ret0
+ Result.ret (list_t.Cons x tl0)
+ | list_t.Nil => Result.fail Error.panic
+
+/- [paper::sum] -/
+def sum_fwd (l : list_t I32) : Result I32 :=
+ match h: l with
+ | list_t.Cons x tl => do
+ let i ← sum_fwd tl
+ x + i
+ | list_t.Nil => Result.ret (I32.ofInt 0 (by intlit))
+
+/- [paper::test_nth] -/
+def test_nth_fwd : Result Unit :=
+ do
+ let l := list_t.Nil
+ let l0 := list_t.Cons (I32.ofInt 3 (by intlit)) l
+ let l1 := list_t.Cons (I32.ofInt 2 (by intlit)) l0
+ let x ←
+ list_nth_mut_fwd I32 (list_t.Cons (I32.ofInt 1 (by intlit)) l1)
+ (U32.ofInt 2 (by intlit))
+ let x0 ← x + (I32.ofInt 1 (by intlit))
+ let l2 ←
+ list_nth_mut_back I32 (list_t.Cons (I32.ofInt 1 (by intlit)) l1)
+ (U32.ofInt 2 (by intlit)) x0
+ let i ← sum_fwd l2
+ if h: not (i = (I32.ofInt 7 (by intlit)))
+ then Result.fail Error.panic
+ else Result.ret ()
+
+/- Unit test for [paper::test_nth] -/
+#assert (test_nth_fwd == .ret ())
+
+/- [paper::call_choose] -/
+def call_choose_fwd (p : (U32 × U32)) : Result U32 :=
+ do
+ let (px, py) := p
+ let pz ← choose_fwd U32 true px py
+ let pz0 ← pz + (U32.ofInt 1 (by intlit))
+ let (px0, _) ← choose_back U32 true px py pz0
+ Result.ret px0
+
diff --git a/tests/lean/misc-polonius_list/Base/Primitives.lean b/tests/lean/misc-polonius_list/Base/Primitives.lean
index 5b64e908..034f41b2 100644
--- a/tests/lean/misc-polonius_list/Base/Primitives.lean
+++ b/tests/lean/misc-polonius_list/Base/Primitives.lean
@@ -3,6 +3,28 @@ import Lean.Meta.Tactic.Simp
import Init.Data.List.Basic
import Mathlib.Tactic.RunCmd
+--------------------
+-- ASSERT COMMAND --
+--------------------
+
+open Lean Elab Command Term Meta
+
+syntax (name := assert) "#assert" term: command
+
+@[command_elab assert]
+unsafe
+def assertImpl : CommandElab := fun (_stx: Syntax) => do
+ runTermElabM (fun _ => do
+ let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
+ if not r then
+ logInfo "Assertion failed for: "
+ logInfo _stx[1]
+ logError "Expression reduced to false"
+ pure ())
+
+#eval 2 == 2
+#assert (2 == 2)
+
-------------
-- PRELUDE --
-------------
@@ -12,6 +34,7 @@ import Mathlib.Tactic.RunCmd
inductive Error where
| assertionFailure: Error
| integerOverflow: Error
+ | divisionByZero: Error
| arrayOutOfBounds: Error
| maximumSizeExceeded: Error
| panic: Error
@@ -89,17 +112,13 @@ macro "let" e:term " <-- " f:term : doElem =>
-- MACHINE INTEGERS --
----------------------
--- NOTE: we reuse the fixed-width integer types from prelude.lean: UInt8, ...,
--- USize. They are generally defined in an idiomatic style, except that there is
--- not a single type class to rule them all (more on that below). The absence of
--- type class is intentional, and allows the Lean compiler to efficiently map
--- them to machine integers during compilation.
+-- We redefine our machine integer types.
--- USize is designed properly: you cannot reduce `getNumBits` using the
--- simplifier, meaning that proofs do not depend on the compile-time value of
--- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really
--- support, at least officially, 16-bit microcontrollers, so this seems like a
--- fine design decision for now.)
+-- For Isize/Usize, we reuse `getNumBits` from `USize`. You cannot reduce `getNumBits`
+-- using the simplifier, meaning that proofs do not depend on the compile-time value of
+-- USize.size. (Lean assumes 32 or 64-bit platforms, and Rust doesn't really support, at
+-- least officially, 16-bit microcontrollers, so this seems like a fine design decision
+-- for now.)
-- Note from Chris Bailey: "If there's more than one salient property of your
-- definition then the subtyping strategy might get messy, and the property part
@@ -111,236 +130,435 @@ macro "let" e:term " <-- " f:term : doElem =>
-- Machine integer constants, done via `ofNatCore`, which requires a proof that
-- the `Nat` fits within the desired integer type. We provide a custom tactic.
-syntax "intlit" : tactic
-
-macro_rules
- | `(tactic| intlit) => `(tactic|
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => decide
- | _, Or.inr rfl => decide)
-
--- This is how the macro is expected to be used
-#eval USize.ofNatCore 0 (by intlit)
-
--- Also works for other integer types (at the expense of a needless disjunction)
-#eval UInt32.ofNatCore 0 (by intlit)
-
--- The machine integer operations (e.g. sub) are always total, which is not what
--- we want. We therefore define "checked" variants, below. Note that we add a
--- tiny bit of complexity for the USize variant: we first check whether the
--- result is < 2^32; if it is, we can compute the definition, rather than
--- returning a term that is computationally stuck (the comparison to USize.size
--- cannot reduce at compile-time, per the remark about regarding `getNumBits`).
+open System.Platform.getNumBits
+
+-- TODO: is there a way of only importing System.Platform.getNumBits?
+--
+@[simp] def size_num_bits : Nat := (System.Platform.getNumBits ()).val
+
+-- Remark: Lean seems to use < for the comparisons with the upper bounds by convention.
+-- We keep the F* convention for now.
+@[simp] def Isize.min : Int := - (HPow.hPow 2 (size_num_bits - 1))
+@[simp] def Isize.max : Int := (HPow.hPow 2 (size_num_bits - 1)) - 1
+@[simp] def I8.min : Int := - (HPow.hPow 2 7)
+@[simp] def I8.max : Int := HPow.hPow 2 7 - 1
+@[simp] def I16.min : Int := - (HPow.hPow 2 15)
+@[simp] def I16.max : Int := HPow.hPow 2 15 - 1
+@[simp] def I32.min : Int := -(HPow.hPow 2 31)
+@[simp] def I32.max : Int := HPow.hPow 2 31 - 1
+@[simp] def I64.min : Int := -(HPow.hPow 2 63)
+@[simp] def I64.max : Int := HPow.hPow 2 63 - 1
+@[simp] def I128.min : Int := -(HPow.hPow 2 127)
+@[simp] def I128.max : Int := HPow.hPow 2 127 - 1
+@[simp] def Usize.min : Int := 0
+@[simp] def Usize.max : Int := HPow.hPow 2 size_num_bits - 1
+@[simp] def U8.min : Int := 0
+@[simp] def U8.max : Int := HPow.hPow 2 8 - 1
+@[simp] def U16.min : Int := 0
+@[simp] def U16.max : Int := HPow.hPow 2 16 - 1
+@[simp] def U32.min : Int := 0
+@[simp] def U32.max : Int := HPow.hPow 2 32 - 1
+@[simp] def U64.min : Int := 0
+@[simp] def U64.max : Int := HPow.hPow 2 64 - 1
+@[simp] def U128.min : Int := 0
+@[simp] def U128.max : Int := HPow.hPow 2 128 - 1
+
+#assert (I8.min == -128)
+#assert (I8.max == 127)
+#assert (I16.min == -32768)
+#assert (I16.max == 32767)
+#assert (I32.min == -2147483648)
+#assert (I32.max == 2147483647)
+#assert (I64.min == -9223372036854775808)
+#assert (I64.max == 9223372036854775807)
+#assert (I128.min == -170141183460469231731687303715884105728)
+#assert (I128.max == 170141183460469231731687303715884105727)
+#assert (U8.min == 0)
+#assert (U8.max == 255)
+#assert (U16.min == 0)
+#assert (U16.max == 65535)
+#assert (U32.min == 0)
+#assert (U32.max == 4294967295)
+#assert (U64.min == 0)
+#assert (U64.max == 18446744073709551615)
+#assert (U128.min == 0)
+#assert (U128.max == 340282366920938463463374607431768211455)
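+
+-- Illustrative remark (not in the original file): there are no assertions for the
+-- Isize/Usize bounds above because they depend on `size_num_bits`, which does not
+-- reduce; `#assert` evaluates its argument, however, so the following still checks:
+#assert (Usize.max == U32.max || Usize.max == U64.max)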
+
+inductive ScalarTy :=
+| Isize
+| I8
+| I16
+| I32
+| I64
+| I128
+| Usize
+| U8
+| U16
+| U32
+| U64
+| U128
+
+def Scalar.min (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.min
+ | .I8 => I8.min
+ | .I16 => I16.min
+ | .I32 => I32.min
+ | .I64 => I64.min
+ | .I128 => I128.min
+ | .Usize => Usize.min
+ | .U8 => U8.min
+ | .U16 => U16.min
+ | .U32 => U32.min
+ | .U64 => U64.min
+ | .U128 => U128.min
+
+def Scalar.max (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => Isize.max
+ | .I8 => I8.max
+ | .I16 => I16.max
+ | .I32 => I32.max
+ | .I64 => I64.max
+ | .I128 => I128.max
+ | .Usize => Usize.max
+ | .U8 => U8.max
+ | .U16 => U16.max
+ | .U32 => U32.max
+ | .U64 => U64.max
+ | .U128 => U128.max
+
+-- "Conservative" bounds
+-- We use those because we can't compare to the isize bounds (which can't
+-- reduce at compile-time). Whenever we perform an arithmetic operation like
+-- addition we need to check that the result is in bounds: we first compare
+-- to the conservative bounds, which reduce, then compare to the real bounds.
-- This is useful for the various #asserts that we want to reduce at
-- type-checking time.
+def Scalar.cMin (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.min
+ | _ => Scalar.min ty
+
+def Scalar.cMax (ty : ScalarTy) : Int :=
+ match ty with
+ | .Isize => I32.max
+ | .Usize => U32.max
+ | _ => Scalar.max ty
+
+theorem Scalar.cMin_bound ty : Scalar.min ty <= Scalar.cMin ty := by sorry
+theorem Scalar.cMax_bound ty : Scalar.cMax ty <= Scalar.max ty := by sorry
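+
+-- Illustrative checks (not in the original file): for the fixed-width types the
+-- conservative bounds coincide with the real bounds, while for Isize/Usize they
+-- fall back to the 32-bit bounds, which do reduce at type-checking time.
+#assert (Scalar.cMin .I8 == I8.min)
+#assert (Scalar.cMax .Isize == I32.max)
+#assert (Scalar.cMax .Usize == U32.max)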
+
+structure Scalar (ty : ScalarTy) where
+ val : Int
+ hmin : Scalar.min ty <= val
+ hmax : val <= Scalar.max ty
+
+theorem Scalar.bound_suffices (ty : ScalarTy) (x : Int) :
+ Scalar.cMin ty <= x && x <= Scalar.cMax ty ->
+ (decide (Scalar.min ty ≤ x) && decide (x ≤ Scalar.max ty)) = true
+ := by sorry
+
+def Scalar.ofIntCore {ty : ScalarTy} (x : Int)
+ (hmin : Scalar.min ty <= x) (hmax : x <= Scalar.max ty) : Scalar ty :=
+ { val := x, hmin := hmin, hmax := hmax }
+
+def Scalar.ofInt {ty : ScalarTy} (x : Int)
+ (h : Scalar.min ty <= x && x <= Scalar.max ty) : Scalar ty :=
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ Scalar.ofIntCore x hmin hmax
-- Further thoughts: look at what has been done here:
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/Fin/Basic.lean
-- and
-- https://github.com/leanprover-community/mathlib4/blob/master/Mathlib/Data/UInt.lean
-- which both contain a fair amount of reasoning already!
-def USize.checked_sub (n: USize) (m: USize): Result USize :=
- -- NOTE: the test USize.toNat n - m >= 0 seems to always succeed?
- if n >= m then
- let n' := USize.toNat n
- let m' := USize.toNat n
- let r := USize.ofNatCore (n' - m') (by
- have h: n' - m' <= n' := by
- apply Nat.sub_le_of_le_add
- case h => rewrite [ Nat.add_comm ]; apply Nat.le_add_left
- apply Nat.lt_of_le_of_lt h
- apply n.val.isLt
- )
- return r
- else
- fail integerOverflow
-
-@[simp]
-theorem usize_fits (n: Nat) (h: n <= 4294967295): n < USize.size :=
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => Nat.lt_of_le_of_lt h (by decide)
- | _, Or.inr rfl => Nat.lt_of_le_of_lt h (by decide)
-
-def USize.checked_add (n: USize) (m: USize): Result USize :=
- if h: n.val + m.val < USize.size then
- .ret ⟨ n.val + m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_rem (n: USize) (m: USize): Result USize :=
- if h: m > 0 then
- .ret ⟨ n.val % m.val, by
- have h1: ↑m.val < USize.size := m.val.isLt
- have h2: n.val.val % m.val.val < m.val.val := @Nat.mod_lt n.val m.val h
- apply Nat.lt_trans h2 h1
- ⟩
- else
- .fail integerOverflow
+def Scalar.tryMk (ty : ScalarTy) (x : Int) : Result (Scalar ty) :=
+ -- TODO: write this with only one if then else
+ if hmin_cons: Scalar.cMin ty <= x || Scalar.min ty <= x then
+ if hmax_cons: x <= Scalar.cMax ty || x <= Scalar.max ty then
+ let hmin: Scalar.min ty <= x := by sorry
+ let hmax: x <= Scalar.max ty := by sorry
+ return Scalar.ofIntCore x hmin hmax
+ else fail integerOverflow
+ else fail integerOverflow
+
+def Scalar.neg {ty : ScalarTy} (x : Scalar ty) : Result (Scalar ty) := Scalar.tryMk ty (- x.val)
+
+def Scalar.div {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val / y.val) else fail divisionByZero
+
+-- Checking that the % operation in Lean computes the same as the remainder operation in Rust
+#assert 1 % 2 = (1:Int)
+#assert (-1) % 2 = -1
+#assert 1 % (-2) = 1
+#assert (-1) % (-2) = -1
+
+def Scalar.rem {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ if y.val != 0 then Scalar.tryMk ty (x.val % y.val) else fail divisionByZero
+
+def Scalar.add {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val + y.val)
+
+def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val - y.val)
+
+def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
+ Scalar.tryMk ty (x.val * y.val)
+
+-- TODO: instances of +, -, * etc. for scalars
+
+-- Cast an integer from a [src_ty] to a [tgt_ty]
+-- TODO: check the semantics of casts in Rust
+def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
+ Scalar.tryMk tgt_ty x.val
+
+-- The scalar types
+-- We declare the definitions as reducible so that Lean can unfold them (useful,
+-- for instance, for type class resolution).
+@[reducible] def Isize := Scalar .Isize
+@[reducible] def I8 := Scalar .I8
+@[reducible] def I16 := Scalar .I16
+@[reducible] def I32 := Scalar .I32
+@[reducible] def I64 := Scalar .I64
+@[reducible] def I128 := Scalar .I128
+@[reducible] def Usize := Scalar .Usize
+@[reducible] def U8 := Scalar .U8
+@[reducible] def U16 := Scalar .U16
+@[reducible] def U32 := Scalar .U32
+@[reducible] def U64 := Scalar .U64
+@[reducible] def U128 := Scalar .U128
+
+-- TODO: below: not sure this is the best way.
+-- Should we rather overload operations like +, -, etc.?
+-- Also, it is possible to automate the generation of those definitions
+-- with macros (but would it be a good idea? It would be less easy to
+-- read the file, which is not supposed to change a lot)
+
+-- Negation
+
+/--
+Remark: there is no heterogeneous negation in the Lean prelude: we thus introduce
+one here.
+
+The notation typeclass for heterogeneous negation.
+This enables the notation `- a : β` where `a : α`.
+-/
+class HNeg (α : Type u) (β : outParam (Type v)) where
+ /-- `- a` computes the negation of `a`.
+ The meaning of this notation is type-dependent. -/
+ hNeg : α → β
+
+prefix:75 "-" => HNeg.hNeg
+
+instance : HNeg Isize (Result Isize) where hNeg x := Scalar.neg x
+instance : HNeg I8 (Result I8) where hNeg x := Scalar.neg x
+instance : HNeg I16 (Result I16) where hNeg x := Scalar.neg x
+instance : HNeg I32 (Result I32) where hNeg x := Scalar.neg x
+instance : HNeg I64 (Result I64) where hNeg x := Scalar.neg x
+instance : HNeg I128 (Result I128) where hNeg x := Scalar.neg x
+
+-- Addition
+instance {ty} : HAdd (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hAdd x y := Scalar.add x y
+
+-- Subtraction
+instance {ty} : HSub (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hSub x y := Scalar.sub x y
+
+-- Multiplication
+instance {ty} : HMul (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMul x y := Scalar.mul x y
+
+-- Division
+instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hDiv x y := Scalar.div x y
+
+-- Remainder
+instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
+ hMod x y := Scalar.rem x y
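+
+-- Illustrative sketch (not in the original file; the definition name is arbitrary):
+-- with the instances above, scalar arithmetic lives in `Result`, so it composes with
+-- do-notation exactly as in the generated code (e.g. `let i ← sum_fwd tl; x + i`).
+def scalar_add3_example {ty} (x y z : Scalar ty) : Result (Scalar ty) :=
+  do
+  let s ← x + y
+  s + z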
+
+-- ofIntCore
+-- TODO: typeclass?
+def Isize.ofIntCore := @Scalar.ofIntCore .Isize
+def I8.ofIntCore := @Scalar.ofIntCore .I8
+def I16.ofIntCore := @Scalar.ofIntCore .I16
+def I32.ofIntCore := @Scalar.ofIntCore .I32
+def I64.ofIntCore := @Scalar.ofIntCore .I64
+def I128.ofIntCore := @Scalar.ofIntCore .I128
+def Usize.ofIntCore := @Scalar.ofIntCore .Usize
+def U8.ofIntCore := @Scalar.ofIntCore .U8
+def U16.ofIntCore := @Scalar.ofIntCore .U16
+def U32.ofIntCore := @Scalar.ofIntCore .U32
+def U64.ofIntCore := @Scalar.ofIntCore .U64
+def U128.ofIntCore := @Scalar.ofIntCore .U128
+
+-- ofInt
+-- TODO: typeclass?
+def Isize.ofInt := @Scalar.ofInt .Isize
+def I8.ofInt := @Scalar.ofInt .I8
+def I16.ofInt := @Scalar.ofInt .I16
+def I32.ofInt := @Scalar.ofInt .I32
+def I64.ofInt := @Scalar.ofInt .I64
+def I128.ofInt := @Scalar.ofInt .I128
+def Usize.ofInt := @Scalar.ofInt .Usize
+def U8.ofInt := @Scalar.ofInt .U8
+def U16.ofInt := @Scalar.ofInt .U16
+def U32.ofInt := @Scalar.ofInt .U32
+def U64.ofInt := @Scalar.ofInt .U64
+def U128.ofInt := @Scalar.ofInt .U128
+
+-- Comparisons
+instance {ty} : LT (Scalar ty) where
+ lt a b := LT.lt a.val b.val
+
+instance {ty} : LE (Scalar ty) where le a b := LE.le a.val b.val
+
+instance Scalar.decLt {ty} (a b : Scalar ty) : Decidable (LT.lt a b) := Int.decLt ..
+instance Scalar.decLe {ty} (a b : Scalar ty) : Decidable (LE.le a b) := Int.decLe ..
+
+theorem Scalar.eq_of_val_eq {ty} : ∀ {i j : Scalar ty}, Eq i.val j.val → Eq i j
+ | ⟨_, _, _⟩, ⟨_, _, _⟩, rfl => rfl
+
+theorem Scalar.val_eq_of_eq {ty} {i j : Scalar ty} (h : Eq i j) : Eq i.val j.val :=
+ h ▸ rfl
+
+theorem Scalar.ne_of_val_ne {ty} {i j : Scalar ty} (h : Not (Eq i.val j.val)) : Not (Eq i j) :=
+ fun h' => absurd (val_eq_of_eq h') h
+
+instance (ty : ScalarTy) : DecidableEq (Scalar ty) :=
+ fun i j =>
+ match decEq i.val j.val with
+ | isTrue h => isTrue (Scalar.eq_of_val_eq h)
+ | isFalse h => isFalse (Scalar.ne_of_val_ne h)
+
+def Scalar.toInt {ty} (n : Scalar ty) : Int := n.val
+
+-- Tactic to prove that integers are in bounds
+syntax "intlit" : tactic
-def USize.checked_mul (n: USize) (m: USize): Result USize :=
- if h: n.val * m.val < USize.size then
- .ret ⟨ n.val * m.val, h ⟩
- else
- .fail integerOverflow
-
-def USize.checked_div (n: USize) (m: USize): Result USize :=
- if m > 0 then
- .ret ⟨ n.val / m.val, by
- have h1: ↑n.val < USize.size := n.val.isLt
- have h2: n.val.val / m.val.val <= n.val.val := @Nat.div_le_self n.val m.val
- apply Nat.lt_of_le_of_lt h2 h1
- ⟩
- else
- .fail integerOverflow
-
--- Test behavior...
-#eval assert! USize.checked_sub 10 20 == fail integerOverflow; 0
-
-#eval USize.checked_sub 20 10
--- NOTE: compare with concrete behavior here, which I do not think we want
-#eval USize.sub 0 1
-#eval UInt8.add 255 255
-
--- We now define a type class that subsumes the various machine integer types, so
--- as to write a concise definition for scalar_cast, rather than exhaustively
--- enumerating all of the possible pairs. We remark that Rust has sane semantics
--- and fails if a cast operation would involve a truncation or modulo.
-
-class MachineInteger (t: Type) where
- size: Nat
- val: t -> Fin size
- ofNatCore: (n:Nat) -> LT.lt n size -> t
-
-set_option hygiene false in
-run_cmd
- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
- Lean.Elab.Command.elabCommand (← `(
- namespace $typeName
- instance: MachineInteger $typeName where
- size := size
- val := val
- ofNatCore := ofNatCore
- end $typeName
- ))
-
--- Aeneas only instantiates the destination type (`src` is implicit). We rely on
--- Lean to infer `src`.
-
-def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
- if h: MachineInteger.val x < MachineInteger.size dst then
- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
- else
- .fail integerOverflow
+macro_rules
+ | `(tactic| intlit) => `(tactic| apply Scalar.bound_suffices ; decide)
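+
+-- Illustrative checks (not in the original file): `intlit` is how the generated code
+-- builds constants (e.g. `I32.ofInt 7 (by intlit)`); the checked operations and casts
+-- above report failures explicitly through `Result`.
+#assert ((I32.ofInt 7 (by intlit)).val == 7)
+#assert (Scalar.tryMk .U8 256 == Result.fail Error.integerOverflow)
+#assert (Scalar.cast .U8 (U16.ofInt 255 (by intlit)) == .ret (U8.ofInt 255 (by intlit)))
+#assert (Scalar.div (I32.ofInt 7 (by intlit)) (I32.ofInt 0 (by intlit)) == Result.fail Error.divisionByZero)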
+
+-- -- We now define a type class that subsumes the various machine integer types, so
+-- -- as to write a concise definition for scalar_cast, rather than exhaustively
+-- -- enumerating all of the possible pairs. We remark that Rust has sane semantics
+-- -- and fails if a cast operation would involve a truncation or modulo.
+
+-- class MachineInteger (t: Type) where
+-- size: Nat
+-- val: t -> Fin size
+-- ofNatCore: (n:Nat) -> LT.lt n size -> t
+
+-- set_option hygiene false in
+-- run_cmd
+-- for typeName in [`UInt8, `UInt16, `UInt32, `UInt64, `USize].map Lean.mkIdent do
+-- Lean.Elab.Command.elabCommand (← `(
+-- namespace $typeName
+-- instance: MachineInteger $typeName where
+-- size := size
+-- val := val
+-- ofNatCore := ofNatCore
+-- end $typeName
+-- ))
+
+-- -- Aeneas only instantiates the destination type (`src` is implicit). We rely on
+-- -- Lean to infer `src`.
+
+-- def scalar_cast { src: Type } (dst: Type) [ MachineInteger src ] [ MachineInteger dst ] (x: src): Result dst :=
+-- if h: MachineInteger.val x < MachineInteger.size dst then
+-- .ret (MachineInteger.ofNatCore (MachineInteger.val x).val h)
+-- else
+-- .fail integerOverflow
-------------
-- VECTORS --
-------------
--- Note: unlike F*, Lean seems to use strict upper bounds (e.g. USize.size)
--- rather than maximum values (usize_max).
-def Vec (α : Type u) := { l : List α // List.length l < USize.size }
-
-def vec_new (α : Type u): Vec α := ⟨ [], by {
- match USize.size, usize_size_eq with
- | _, Or.inl rfl => simp
- | _, Or.inr rfl => simp
- } ⟩
+def Vec (α : Type u) := { l : List α // List.length l <= Usize.max }
-#check vec_new
+def vec_new (α : Type u): Vec α := ⟨ [], by sorry ⟩
-def vec_len (α : Type u) (v : Vec α) : USize :=
+def vec_len (α : Type u) (v : Vec α) : Usize :=
let ⟨ v, l ⟩ := v
- USize.ofNatCore (List.length v) l
-
-#eval vec_len Nat (vec_new Nat)
+ Usize.ofIntCore (List.length v) (by sorry) l
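+
+-- Illustrative check (not in the original file): a freshly created vector has length zero.
+#assert ((vec_len Nat (vec_new Nat)).val == 0)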
def vec_push_fwd (α : Type u) (_ : Vec α) (_ : α) : Unit := ()
--- NOTE: old version trying to use a subtype notation, but probably better to
--- leave Result elimination to auxiliary lemmas with suitable preconditions
--- TODO: I originally wrote `List.length v.val < USize.size - 1`; how can one
--- make the proof work in that case? Probably need to import tactics from
--- mathlib to deal with inequalities... would love to see an example.
-def vec_push_back_old (α : Type u) (v : Vec α) (x : α) : { res: Result (Vec α) //
- match res with | fail _ => True | ret v' => List.length v'.val = List.length v.val + 1}
- :=
- if h : List.length v.val + 1 < USize.size then
- ⟨ return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩, by simp ⟩
- else
- ⟨ fail maximumSizeExceeded, by simp ⟩
-
-#eval do
- -- NOTE: the // notation is syntactic sugar for Subtype, a refinement with
- -- fields val and property. However, Lean's elaborator can automatically
- -- select the `val` field if the context provides a type annotation. We
- -- annotate `x`, which relieves us of having to write `.val` on the right-hand
- -- side of the monadic let.
- let v := vec_new Nat
- let x: Vec Nat ← (vec_push_back_old Nat v 1: Result (Vec Nat)) -- WHY do we need the type annotation here?
- -- TODO: strengthen post-condition above and do a demo to show that we can
- -- safely eliminate the `fail` case
- return (vec_len Nat x)
-
def vec_push_back (α : Type u) (v : Vec α) (x : α) : Result (Vec α)
:=
- if h : List.length v.val + 1 <= 4294967295 then
- return ⟨ List.concat v.val x,
- by
- rw [List.length_concat]
- have h': 4294967295 < USize.size := by intlit
- apply Nat.lt_of_le_of_lt h h'
- ⟩
- else if h: List.length v.val + 1 < USize.size then
- return ⟨List.concat v.val x,
- by
- rw [List.length_concat]
- assumption
- ⟩
+ if h : List.length v.val <= U32.max || List.length v.val <= Usize.max then
+ return ⟨ List.concat v.val x, by sorry ⟩
else
fail maximumSizeExceeded
-def vec_insert_fwd (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_insert_fwd (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_insert_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_insert_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ -- TODO: maybe we should redefine a list library which uses integers
+ -- (instead of natural numbers)
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
else
.fail arrayOutOfBounds
-def vec_index_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
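+
+-- Illustrative check (not in the original file): out-of-range indices are reported
+-- through `Result` rather than by a panic.
+#assert (vec_index_fwd Nat (vec_new Nat) (Usize.ofInt 0 (by intlit)) == Result.fail Error.arrayOutOfBounds)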
-def vec_index_back (α : Type u) (v: Vec α) (i: USize) (_: α): Result Unit :=
+def vec_index_back (α : Type u) (v: Vec α) (i: Usize) (_: α): Result Unit :=
if i.val < List.length v.val then
.ret ()
else
.fail arrayOutOfBounds
-def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: USize): Result α :=
- if h: i.val < List.length v.val then
+def vec_index_mut_fwd (α : Type u) (v: Vec α) (i: Usize): Result α :=
+ if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
+ let h: i < List.length v.val := by sorry
.ret (List.get v.val ⟨i.val, h⟩)
else
.fail arrayOutOfBounds
-def vec_index_mut_back (α : Type u) (v: Vec α) (i: USize) (x: α): Result (Vec α) :=
+def vec_index_mut_back (α : Type u) (v: Vec α) (i: Usize) (x: α): Result (Vec α) :=
if i.val < List.length v.val then
+ let i : Nat :=
+ match i.val with
+ | .ofNat n => n
+ | .negSucc n => by sorry -- TODO: we can't get here
+ let isLt: i < USize.size := by sorry
+ let i : Fin USize.size := { val := i, isLt := isLt }
.ret ⟨ List.set v.val i.val x, by
- have h: List.length v.val < USize.size := v.property
+ have h: List.length v.val <= Usize.max := v.property
rewrite [ List.length_set v.val i.val x ]
assumption
@@ -360,33 +578,3 @@ def mem_replace_back (a : Type) (_ : a) (y : a) : a :=
/-- Aeneas-translated function -- useful to reduce non-recursive definitions.
Use with `simp [ aeneas ]` -/
register_simp_attr aeneas
-
---------------------
--- ASSERT COMMAND --
---------------------
-
-open Lean Elab Command Term Meta
-
-syntax (name := assert) "#assert" term: command
-
-@[command_elab assert]
-unsafe
-def assertImpl : CommandElab := fun (_stx: Syntax) => do
- runTermElabM (fun _ => do
- let r ← evalTerm Bool (mkConst ``Bool) _stx[1]
- if not r then
- logInfo "Assertion failed for: "
- logInfo _stx[1]
- logError "Expression reduced to false"
- pure ())
-
-#eval 2 == 2
-#assert (2 == 2)
-
--------------------
--- SANITY CHECKS --
--------------------
-
--- TODO: add more once we have signed integers
-
-#assert (USize.checked_rem 1 2 == .ret 1)
diff --git a/tests/lean/misc-polonius_list/PoloniusList.lean b/tests/lean/misc-polonius_list/PoloniusList.lean
index a3bbfd0a..79696996 100644
--- a/tests/lean/misc-polonius_list/PoloniusList.lean
+++ b/tests/lean/misc-polonius_list/PoloniusList.lean
@@ -2,35 +2,30 @@
-- [polonius_list]
import Base.Primitives
-structure OpaqueDefs where
-
- /- [polonius_list::List] -/
- inductive list_t (T : Type) :=
- | Cons : T -> list_t T -> list_t T
- | Nil : list_t T
-
- /- [polonius_list::get_list_at_x] -/
- def get_list_at_x_fwd
- (ls : list_t UInt32) (x : UInt32) : Result (list_t UInt32) :=
- match h: ls with
- | list_t.Cons hd tl =>
- if h: hd = x
- then Result.ret (list_t.Cons hd tl)
- else get_list_at_x_fwd tl x
- | list_t.Nil => Result.ret list_t.Nil
-
- /- [polonius_list::get_list_at_x] -/
- def get_list_at_x_back
- (ls : list_t UInt32) (x : UInt32) (ret0 : list_t UInt32) :
- Result (list_t UInt32)
- :=
- match h: ls with
- | list_t.Cons hd tl =>
- if h: hd = x
- then Result.ret ret0
- else
- do
- let tl0 ← get_list_at_x_back tl x ret0
- Result.ret (list_t.Cons hd tl0)
- | list_t.Nil => Result.ret ret0
-
+/- [polonius_list::List] -/
+inductive list_t (T : Type) :=
+| Cons : T -> list_t T -> list_t T
+| Nil : list_t T
+
+/- [polonius_list::get_list_at_x] -/
+def get_list_at_x_fwd (ls : list_t U32) (x : U32) : Result (list_t U32) :=
+ match h: ls with
+ | list_t.Cons hd tl =>
+ if h: hd = x
+ then Result.ret (list_t.Cons hd tl)
+ else get_list_at_x_fwd tl x
+ | list_t.Nil => Result.ret list_t.Nil
+
+/- [polonius_list::get_list_at_x] -/
+def get_list_at_x_back
+ (ls : list_t U32) (x : U32) (ret0 : list_t U32) : Result (list_t U32) :=
+ match h: ls with
+ | list_t.Cons hd tl =>
+ if h: hd = x
+ then Result.ret ret0
+ else
+ do
+ let tl0 ← get_list_at_x_back tl x ret0
+ Result.ret (list_t.Cons hd tl0)
+ | list_t.Nil => Result.ret ret0
+
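+/- Illustrative extra check (not part of the generated output): a small sanity test for
+   [polonius_list::get_list_at_x_fwd], in the style of the unit tests in the other
+   generated files. -/
+def test_get_list_at_x_fwd : Result Unit :=
+  do
+  let l := list_t.Nil
+  let l0 := list_t.Cons (U32.ofInt 1 (by intlit)) l
+  let l1 := list_t.Cons (U32.ofInt 0 (by intlit)) l0
+  let ls ← get_list_at_x_fwd l1 (U32.ofInt 1 (by intlit))
+  match ls with
+  | list_t.Cons hd _ =>
+    if h: hd = (U32.ofInt 1 (by intlit))
+    then Result.ret ()
+    else Result.fail Error.panic
+  | list_t.Nil => Result.fail Error.panic
+
+/- Unit test for the sketch above -/
+#assert (test_get_list_at_x_fwd == .ret ())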