author     Son HO    2023-11-29 16:02:42 +0100
committer  GitHub    2023-11-29 16:02:42 +0100
commit     789ba1473acd287814a0d659b5f5a0e480e4e9d7 (patch)
tree       983ad685eb6b3c60b0baa3e3920dedbc6eaa0e57
parent     90e42e0e1c1889aabfa66283fb15b43a5852a02a (diff)
parent     e732f97d09179fae43fafcb244340f98e3ca9229 (diff)
Merge pull request #39 from AeneasVerif/afromher_shifts
Add support for bitshifts
Diffstat
-rw-r--r--  Makefile                                         |  19
-rw-r--r--  backends/coq/Primitives.v                        |  76
-rw-r--r--  backends/fstar/Primitives.fst                    | 167
-rw-r--r--  backends/lean/Base/Primitives/Scalar.lean        |  42
-rw-r--r--  compiler/ExtractBase.ml                          |  12
-rw-r--r--  compiler/ExtractTypes.ml                         |  17
-rw-r--r--  compiler/InterpreterExpressions.ml               |   5
-rw-r--r--  compiler/SymbolicToPure.ml                       |   6
-rw-r--r--  flake.lock                                       |  18
-rw-r--r--  tests/coq/array/Primitives.v                     |  76
-rw-r--r--  tests/coq/betree/Primitives.v                    |  76
-rw-r--r--  tests/coq/hashmap/Primitives.v                   |  76
-rw-r--r--  tests/coq/hashmap_on_disk/Primitives.v           |  76
-rw-r--r--  tests/coq/misc/Bitwise.v                         |  38
-rw-r--r--  tests/coq/misc/Primitives.v                      |  76
-rw-r--r--  tests/coq/misc/_CoqProject                       |   1
-rw-r--r--  tests/coq/traits/Primitives.v                    |  76
-rw-r--r--  tests/fstar/array/Primitives.fst                 | 167
-rw-r--r--  tests/fstar/betree/Primitives.fst                | 167
-rw-r--r--  tests/fstar/betree_back_stateful/Primitives.fst  | 167
-rw-r--r--  tests/fstar/hashmap/Primitives.fst               | 167
-rw-r--r--  tests/fstar/hashmap_on_disk/Primitives.fst       | 167
-rw-r--r--  tests/fstar/misc/Bitwise.fst                     |  32
-rw-r--r--  tests/fstar/misc/Primitives.fst                  | 167
-rw-r--r--  tests/fstar/traits/Primitives.fst                | 167
-rw-r--r--  tests/lean/Bitwise.lean                          |  37
26 files changed, 2000 insertions, 95 deletions
diff --git a/Makefile b/Makefile
index 88cb7d05..2cce30d9 100644
--- a/Makefile
+++ b/Makefile
@@ -91,7 +91,7 @@ tests: test-no_nested_borrows test-paper \
testp-polonius_list testp-betree_main \
ctest-testp-betree_main \
test-loops \
- test-array test-traits # TODO: generalize to all backends
+ test-array test-traits test-bitwise
# Verify the F* files generated by the translation
.PHONY: verify
@@ -139,14 +139,6 @@ tlean-traits: SUBDIR :=
tlean-traits: OPTIONS +=
thol4-traits: OPTIONS +=
-# TODO: activate the arrays for all the backends
-thol4-array:
- echo "Ignoring the array test for HOL4"
-
-# TODO: activate the traits for all the backends
-thol4-traits:
- echo "Ignoring the traits test for HOL4"
-
test-loops: OPTIONS +=
test-loops: SUBDIR := misc
tfstar-loops: OPTIONS += -decreases-clauses -template-clauses -split-files
@@ -198,6 +190,15 @@ tlean-external: OPTIONS +=
thol4-external: SUBDIR := misc-external
thol4-external: OPTIONS +=
+test-bitwise: OPTIONS += -test-trans-units
+test-bitwise: SUBDIR := misc
+tfstar-bitwise: OPTIONS +=
+tcoq-bitwise: OPTIONS +=
+tlean-bitwise: SUBDIR :=
+tlean-bitwise: OPTIONS +=
+thol4-bitwise: SUBDIR := misc-bitwise
+thol4-bitwise: OPTIONS +=
+
BETREE_FSTAR_OPTIONS = -decreases-clauses -template-clauses
testp-betree_main: OPTIONS += -backward-no-state-update -test-trans-units -state -split-files
testp-betree_main: SUBDIR:=betree
diff --git a/backends/coq/Primitives.v b/backends/coq/Primitives.v
index 83f860b6..99ffe070 100644
--- a/backends/coq/Primitives.v
+++ b/backends/coq/Primitives.v
@@ -255,6 +255,12 @@ Definition scalar_rem {ty} (x y: scalar ty) : result (scalar ty) := mk_scalar ty
Definition scalar_neg {ty} (x: scalar ty) : result (scalar ty) := mk_scalar ty (-(to_Z x)).
+Axiom scalar_xor : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_or : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_and : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_shl : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+Axiom scalar_shr : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
(* TODO: check the semantics of casts in Rust *)
Definition scalar_cast (src_ty tgt_ty : scalar_ty) (x : scalar src_ty) : result (scalar tgt_ty) :=
@@ -372,6 +378,76 @@ Definition u32_mul := @scalar_mul U32.
Definition u64_mul := @scalar_mul U64.
Definition u128_mul := @scalar_mul U128.
+(** Xor *)
+Definition u8_xor := @scalar_xor U8.
+Definition u16_xor := @scalar_xor U16.
+Definition u32_xor := @scalar_xor U32.
+Definition u64_xor := @scalar_xor U64.
+Definition u128_xor := @scalar_xor U128.
+Definition usize_xor := @scalar_xor Usize.
+Definition i8_xor := @scalar_xor I8.
+Definition i16_xor := @scalar_xor I16.
+Definition i32_xor := @scalar_xor I32.
+Definition i64_xor := @scalar_xor I64.
+Definition i128_xor := @scalar_xor I128.
+Definition isize_xor := @scalar_xor Isize.
+
+(** Or *)
+Definition u8_or := @scalar_or U8.
+Definition u16_or := @scalar_or U16.
+Definition u32_or := @scalar_or U32.
+Definition u64_or := @scalar_or U64.
+Definition u128_or := @scalar_or U128.
+Definition usize_or := @scalar_or Usize.
+Definition i8_or := @scalar_or I8.
+Definition i16_or := @scalar_or I16.
+Definition i32_or := @scalar_or I32.
+Definition i64_or := @scalar_or I64.
+Definition i128_or := @scalar_or I128.
+Definition isize_or := @scalar_or Isize.
+
+(** And *)
+Definition u8_and := @scalar_and U8.
+Definition u16_and := @scalar_and U16.
+Definition u32_and := @scalar_and U32.
+Definition u64_and := @scalar_and U64.
+Definition u128_and := @scalar_and U128.
+Definition usize_and := @scalar_and Usize.
+Definition i8_and := @scalar_and I8.
+Definition i16_and := @scalar_and I16.
+Definition i32_and := @scalar_and I32.
+Definition i64_and := @scalar_and I64.
+Definition i128_and := @scalar_and I128.
+Definition isize_and := @scalar_and Isize.
+
+(** Shift left *)
+Definition u8_shl {ty} := @scalar_shl U8 ty.
+Definition u16_shl {ty} := @scalar_shl U16 ty.
+Definition u32_shl {ty} := @scalar_shl U32 ty.
+Definition u64_shl {ty} := @scalar_shl U64 ty.
+Definition u128_shl {ty} := @scalar_shl U128 ty.
+Definition usize_shl {ty} := @scalar_shl Usize ty.
+Definition i8_shl {ty} := @scalar_shl I8 ty.
+Definition i16_shl {ty} := @scalar_shl I16 ty.
+Definition i32_shl {ty} := @scalar_shl I32 ty.
+Definition i64_shl {ty} := @scalar_shl I64 ty.
+Definition i128_shl {ty} := @scalar_shl I128 ty.
+Definition isize_shl {ty} := @scalar_shl Isize ty.
+
+(** Shift right *)
+Definition u8_shr {ty} := @scalar_shr U8 ty.
+Definition u16_shr {ty} := @scalar_shr U16 ty.
+Definition u32_shr {ty} := @scalar_shr U32 ty.
+Definition u64_shr {ty} := @scalar_shr U64 ty.
+Definition u128_shr {ty} := @scalar_shr U128 ty.
+Definition usize_shr {ty} := @scalar_shr Usize ty.
+Definition i8_shr {ty} := @scalar_shr I8 ty.
+Definition i16_shr {ty} := @scalar_shr I16 ty.
+Definition i32_shr {ty} := @scalar_shr I32 ty.
+Definition i64_shr {ty} := @scalar_shr I64 ty.
+Definition i128_shr {ty} := @scalar_shr I128 ty.
+Definition isize_shr {ty} := @scalar_shr Isize ty.
+
(** Small utility *)
Definition usize_to_nat (x: usize) : nat := Z.to_nat (to_Z x).
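
Note: the two type indices on the scalar_shl/scalar_shr axioms above (ty0 for the shifted value, ty1 for the shift amount) mirror Rust's surface language, where the two operands of a shift may have different integer types. A minimal Rust illustration (not part of this patch):

fn mixed_shift(x: u32, n: u8) -> u32 {
    // Accepted by rustc: core provides `impl Shl<u8> for u32`,
    // and the result keeps the left operand's type (u32).
    x << n
}
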
diff --git a/backends/fstar/Primitives.fst b/backends/fstar/Primitives.fst
index 94322ead..dd340c00 100644
--- a/backends/fstar/Primitives.fst
+++ b/backends/fstar/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
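
Note: scalar_shl/scalar_shr are left as admit() stubs, but they already return a result, unlike the total xor/or/and. This reflects that in Rust a shift by an amount greater than or equal to the bit width is an arithmetic overflow. A small Rust sketch of the behavior being modeled (illustrative, not from this patch):

fn shift_overflow() {
    let x: u32 = 1;
    assert_eq!(x.checked_shl(31), Some(1u32 << 31)); // in range: succeeds
    assert_eq!(x.checked_shl(32), None);             // amount == bit width: overflow,
                                                     // hence the Fail case in `result`
}
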
diff --git a/backends/lean/Base/Primitives/Scalar.lean b/backends/lean/Base/Primitives/Scalar.lean
index ec9665a5..cdd6d6f9 100644
--- a/backends/lean/Base/Primitives/Scalar.lean
+++ b/backends/lean/Base/Primitives/Scalar.lean
@@ -386,10 +386,28 @@ def Scalar.sub {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar
def Scalar.mul {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Result (Scalar ty) :=
Scalar.tryMk ty (x.val * y.val)
--- TODO: instances of +, -, * etc. for scalars
+-- TODO: shift left
+def Scalar.shiftl {ty0 ty1 : ScalarTy} (x : Scalar ty0) (y : Scalar ty1) : Result (Scalar ty0) :=
+ sorry
+
+-- TODO: shift right
+def Scalar.shiftr {ty0 ty1 : ScalarTy} (x : Scalar ty0) (y : Scalar ty1) : Result (Scalar ty0) :=
+ sorry
+
+-- TODO: xor
+def Scalar.xor {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Scalar ty :=
+ sorry
+
+-- TODO: and
+def Scalar.and {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Scalar ty :=
+ sorry
+
+-- TODO: or
+def Scalar.or {ty : ScalarTy} (x : Scalar ty) (y : Scalar ty) : Scalar ty :=
+ sorry
-- Cast an integer from a [src_ty] to a [tgt_ty]
--- TODO: check the semantics of casts in Rust
+-- TODO: double-check the semantics of casts in Rust
def Scalar.cast {src_ty : ScalarTy} (tgt_ty : ScalarTy) (x : Scalar src_ty) : Result (Scalar tgt_ty) :=
Scalar.tryMk tgt_ty x.val
@@ -486,6 +504,26 @@ instance {ty} : HDiv (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
instance {ty} : HMod (Scalar ty) (Scalar ty) (Result (Scalar ty)) where
hMod x y := Scalar.rem x y
+-- Shift left
+instance {ty0 ty1} : HShiftLeft (Scalar ty0) (Scalar ty1) (Result (Scalar ty0)) where
+ hShiftLeft x y := Scalar.shiftl x y
+
+-- Shift right
+instance {ty0 ty1} : HShiftRight (Scalar ty0) (Scalar ty1) (Result (Scalar ty0)) where
+ hShiftRight x y := Scalar.shiftr x y
+
+-- Xor
+instance {ty} : HXor (Scalar ty) (Scalar ty) (Scalar ty) where
+ hXor x y := Scalar.xor x y
+
+-- Or
+instance {ty} : HOr (Scalar ty) (Scalar ty) (Scalar ty) where
+ hOr x y := Scalar.or x y
+
+-- And
+instance {ty} : HAnd (Scalar ty) (Scalar ty) (Scalar ty) where
+ hAnd x y := Scalar.and x y
+
-- Generic theorem - shouldn't be used much
@[cpspec]
theorem Scalar.add_spec {ty} {x y : Scalar ty}
diff --git a/compiler/ExtractBase.ml b/compiler/ExtractBase.ml
index 85ab1112..73ccac44 100644
--- a/compiler/ExtractBase.ml
+++ b/compiler/ExtractBase.ml
@@ -786,7 +786,7 @@ let unop_name (unop : unop) : string =
like [<]).
*)
let named_binop_name (binop : E.binop) (int_ty : integer_type) : string =
- let binop =
+ let binop_s =
match binop with
| Div -> "div"
| Rem -> "rem"
@@ -800,16 +800,14 @@ let named_binop_name (binop : E.binop) (int_ty : integer_type) : string =
| BitXor -> "xor"
| BitAnd -> "and"
| BitOr -> "or"
- | Shl -> "lsl"
- | Shr ->
- "asr"
- (* NOTE: make sure arithmetic shift right is implemented, i.e. OCaml's asr operator, not lsr *)
+ | Shl -> "shl"
+ | Shr -> "shr"
| _ -> raise (Failure "Unreachable")
in
(* Remark: the Lean case is actually not used *)
match !backend with
- | Lean -> int_name int_ty ^ "." ^ binop
- | FStar | Coq | HOL4 -> int_name int_ty ^ "_" ^ binop
+ | Lean -> int_name int_ty ^ "." ^ binop_s
+ | FStar | Coq | HOL4 -> int_name int_ty ^ "_" ^ binop_s
(** A list of keywords/identifiers used by the backend and with which we
want to check collision.
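
Note: the removed remark about OCaml's asr vs lsr concerned the distinction between arithmetic and logical shift right, which the backend implementations must still preserve. In Rust the variant is chosen by the signedness of the left operand; a small illustration (not patch content):

fn shr_variants() {
    assert_eq!(-4i32 >> 1, -2);                    // signed: arithmetic shift, sign bit extends
    assert_eq!(0x8000_0000u32 >> 1, 0x4000_0000);  // unsigned: logical shift, zero fills
}
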
diff --git a/compiler/ExtractTypes.ml b/compiler/ExtractTypes.ml
index ca9984be..3657627b 100644
--- a/compiler/ExtractTypes.ml
+++ b/compiler/ExtractTypes.ml
@@ -163,7 +163,7 @@ let extract_binop (extract_expr : bool -> texpression -> unit)
(match (!backend, binop) with
| HOL4, (Eq | Ne)
| (FStar | Coq | Lean), (Eq | Lt | Le | Ne | Ge | Gt)
- | Lean, (Div | Rem | Add | Sub | Mul) ->
+ | Lean, (Div | Rem | Add | Sub | Mul | Shl | Shr | BitXor | BitOr | BitAnd) ->
let binop =
match binop with
| Eq -> "="
@@ -177,7 +177,11 @@ let extract_binop (extract_expr : bool -> texpression -> unit)
| Add -> "+"
| Sub -> "-"
| Mul -> "*"
- | _ -> raise (Failure "Unreachable")
+ | Shl -> "<<<"
+ | Shr -> ">>>"
+ | BitXor -> "^^^"
+ | BitOr -> "|||"
+ | BitAnd -> "&&&"
in
let binop =
match !backend with FStar | Lean | HOL4 -> binop | Coq -> "s" ^ binop
@@ -188,8 +192,17 @@ let extract_binop (extract_expr : bool -> texpression -> unit)
F.pp_print_space fmt ();
extract_expr false arg1
| _ ->
+ let binop_is_shift = match binop with Shl | Shr -> true | _ -> false in
let binop = named_binop_name binop int_ty in
F.pp_print_string fmt binop;
+ (* In the case of F*, for shift operations, because machine integers
+ are simply integers with a refinement, if the second argument is a
+ constant we need to provide the second implicit type argument *)
+ if binop_is_shift && !backend = FStar && is_const arg1 then (
+ F.pp_print_space fmt ();
+ let ty = ty_as_integer arg1.ty in
+ F.pp_print_string fmt
+ ("#" ^ StringUtils.capitalize_first_letter (int_name ty)));
F.pp_print_space fmt ();
extract_expr true arg0;
F.pp_print_space fmt ();
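
Note: for concreteness, the F*-specific branch above fires on Rust code like the following, where the shift amount is a constant literal whose integer type F* cannot infer from a variable; per the code in this hunk, the extraction would then print something like u32_shr #Usize a 16 (the exact output is not shown in this diff):

pub fn shift_amount_is_const(a: u32) -> u32 {
    // `16usize` is a constant, so the extracted F* call needs the
    // shift-amount type passed explicitly as an implicit argument.
    a >> 16usize
}
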
diff --git a/compiler/InterpreterExpressions.ml b/compiler/InterpreterExpressions.ml
index cc0580be..ac6c9ede 100644
--- a/compiler/InterpreterExpressions.ml
+++ b/compiler/InterpreterExpressions.ml
@@ -580,7 +580,10 @@ let eval_binary_op_symbolic (config : config) (binop : binop) (op1 : operand)
| Div | Rem | Add | Sub | Mul | BitXor | BitAnd | BitOr ->
assert (int_ty1 = int_ty2);
TLiteral (TInteger int_ty1)
- | Shl | Shr -> raise Unimplemented
+ | Shl | Shr ->
+ (* The number of bits can be of a different integer type
+ than the operand *)
+ TLiteral (TInteger int_ty1)
| Ne | Eq -> raise (Failure "Unreachable"))
| _ -> raise (Failure "Invalid inputs for binop")
in
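
Note: the rule implemented above, that a shift's result takes the type of the first operand (int_ty1) even when the amount has another type, matches rustc's typing. A minimal check (illustrative):

fn shift_result_type(x: u16, n: u64) -> u16 {
    let r: u16 = x << n; // u16 << u64 : u16 -- the amount's type is irrelevant
    r
}
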
diff --git a/compiler/SymbolicToPure.ml b/compiler/SymbolicToPure.ml
index 4df3ee73..f5b055fd 100644
--- a/compiler/SymbolicToPure.ml
+++ b/compiler/SymbolicToPure.ml
@@ -1755,7 +1755,11 @@ and translate_function_call (call : S.call) (e : S.expression) (ctx : bs_ctx) :
| [ arg0; arg1 ] ->
let int_ty0 = ty_as_integer arg0.ty in
let int_ty1 = ty_as_integer arg1.ty in
- assert (int_ty0 = int_ty1);
+ (match binop with
+ (* The Rust compiler accepts bitshifts for any integer type combination for ty0, ty1 *)
+ | E.Shl | E.Shr -> ()
+ | _ -> assert (int_ty0 = int_ty1)
+ );
let effect_info =
{
can_fail = ExpressionsUtils.binop_can_fail binop;
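
Note: the relaxed assertion is specific to shifts; for every other arithmetic binop rustc requires identical operand types, so keeping the equality check there remains sound. For example (not from this patch):

fn only_shifts_mix_types(x: u32, n: u8) -> u32 {
    // let _ = x + n; // rejected by rustc: no `impl Add<u8> for u32`
    x << n            // accepted: `impl Shl<u8> for u32` exists
}
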
diff --git a/flake.lock b/flake.lock
index eedf152d..55f38cfb 100644
--- a/flake.lock
+++ b/flake.lock
@@ -8,11 +8,11 @@
"rust-overlay": "rust-overlay"
},
"locked": {
- "lastModified": 1701263140,
- "narHash": "sha256-rxfTrlSCGZVwcHNV+zqRk8Wh92lojYfdXWmE9oUSp0w=",
+ "lastModified": 1701269567,
+ "narHash": "sha256-3UsioCtoMv+FUPPJaR+sXnKzEnyW45Rl9FrWcNLpDE8=",
"owner": "aeneasverif",
"repo": "charon",
- "rev": "e3718f07206e54458b3fcf0642089c32ee2381f0",
+ "rev": "013396662cbdb76a09c8b70cfd13606e6c6d428f",
"type": "github"
},
"original": {
@@ -265,11 +265,11 @@
"nixpkgs": "nixpkgs_4"
},
"locked": {
- "lastModified": 1701259400,
- "narHash": "sha256-BZxLzC6mapqUUpL4SUiEy4ZqszTehcTDQRAhThDCMAY=",
+ "lastModified": 1701264285,
+ "narHash": "sha256-zquq62OOZE5aySVkFi4NnSGVY+YSTzipxLwO8umA4zo=",
"owner": "leanprover",
"repo": "lean4",
- "rev": "4f2f704962f960c42ca2bc3ca81c4c43767b22f2",
+ "rev": "367ac01279e6a26e65fd8f5ca66ed43709c458ae",
"type": "github"
},
"original": {
@@ -318,11 +318,11 @@
"nixpkgs": "nixpkgs_7"
},
"locked": {
- "lastModified": 1701259400,
- "narHash": "sha256-BZxLzC6mapqUUpL4SUiEy4ZqszTehcTDQRAhThDCMAY=",
+ "lastModified": 1701264285,
+ "narHash": "sha256-zquq62OOZE5aySVkFi4NnSGVY+YSTzipxLwO8umA4zo=",
"owner": "leanprover",
"repo": "lean4",
- "rev": "4f2f704962f960c42ca2bc3ca81c4c43767b22f2",
+ "rev": "367ac01279e6a26e65fd8f5ca66ed43709c458ae",
"type": "github"
},
"original": {
diff --git a/tests/coq/array/Primitives.v b/tests/coq/array/Primitives.v
index 83f860b6..99ffe070 100644
--- a/tests/coq/array/Primitives.v
+++ b/tests/coq/array/Primitives.v
@@ -255,6 +255,12 @@ Definition scalar_rem {ty} (x y: scalar ty) : result (scalar ty) := mk_scalar ty
Definition scalar_neg {ty} (x: scalar ty) : result (scalar ty) := mk_scalar ty (-(to_Z x)).
+Axiom scalar_xor : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_or : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_and : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_shl : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+Axiom scalar_shr : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
(* TODO: check the semantics of casts in Rust *)
Definition scalar_cast (src_ty tgt_ty : scalar_ty) (x : scalar src_ty) : result (scalar tgt_ty) :=
@@ -372,6 +378,76 @@ Definition u32_mul := @scalar_mul U32.
Definition u64_mul := @scalar_mul U64.
Definition u128_mul := @scalar_mul U128.
+(** Xor *)
+Definition u8_xor := @scalar_xor U8.
+Definition u16_xor := @scalar_xor U16.
+Definition u32_xor := @scalar_xor U32.
+Definition u64_xor := @scalar_xor U64.
+Definition u128_xor := @scalar_xor U128.
+Definition usize_xor := @scalar_xor Usize.
+Definition i8_xor := @scalar_xor I8.
+Definition i16_xor := @scalar_xor I16.
+Definition i32_xor := @scalar_xor I32.
+Definition i64_xor := @scalar_xor I64.
+Definition i128_xor := @scalar_xor I128.
+Definition isize_xor := @scalar_xor Isize.
+
+(** Or *)
+Definition u8_or := @scalar_or U8.
+Definition u16_or := @scalar_or U16.
+Definition u32_or := @scalar_or U32.
+Definition u64_or := @scalar_or U64.
+Definition u128_or := @scalar_or U128.
+Definition usize_or := @scalar_or Usize.
+Definition i8_or := @scalar_or I8.
+Definition i16_or := @scalar_or I16.
+Definition i32_or := @scalar_or I32.
+Definition i64_or := @scalar_or I64.
+Definition i128_or := @scalar_or I128.
+Definition isize_or := @scalar_or Isize.
+
+(** And *)
+Definition u8_and := @scalar_and U8.
+Definition u16_and := @scalar_and U16.
+Definition u32_and := @scalar_and U32.
+Definition u64_and := @scalar_and U64.
+Definition u128_and := @scalar_and U128.
+Definition usize_and := @scalar_and Usize.
+Definition i8_and := @scalar_and I8.
+Definition i16_and := @scalar_and I16.
+Definition i32_and := @scalar_and I32.
+Definition i64_and := @scalar_and I64.
+Definition i128_and := @scalar_and I128.
+Definition isize_and := @scalar_and Isize.
+
+(** Shift left *)
+Definition u8_shl {ty} := @scalar_shl U8 ty.
+Definition u16_shl {ty} := @scalar_shl U16 ty.
+Definition u32_shl {ty} := @scalar_shl U32 ty.
+Definition u64_shl {ty} := @scalar_shl U64 ty.
+Definition u128_shl {ty} := @scalar_shl U128 ty.
+Definition usize_shl {ty} := @scalar_shl Usize ty.
+Definition i8_shl {ty} := @scalar_shl I8 ty.
+Definition i16_shl {ty} := @scalar_shl I16 ty.
+Definition i32_shl {ty} := @scalar_shl I32 ty.
+Definition i64_shl {ty} := @scalar_shl I64 ty.
+Definition i128_shl {ty} := @scalar_shl I128 ty.
+Definition isize_shl {ty} := @scalar_shl Isize ty.
+
+(** Shift right *)
+Definition u8_shr {ty} := @scalar_shr U8 ty.
+Definition u16_shr {ty} := @scalar_shr U16 ty.
+Definition u32_shr {ty} := @scalar_shr U32 ty.
+Definition u64_shr {ty} := @scalar_shr U64 ty.
+Definition u128_shr {ty} := @scalar_shr U128 ty.
+Definition usize_shr {ty} := @scalar_shr Usize ty.
+Definition i8_shr {ty} := @scalar_shr I8 ty.
+Definition i16_shr {ty} := @scalar_shr I16 ty.
+Definition i32_shr {ty} := @scalar_shr I32 ty.
+Definition i64_shr {ty} := @scalar_shr I64 ty.
+Definition i128_shr {ty} := @scalar_shr I128 ty.
+Definition isize_shr {ty} := @scalar_shr Isize ty.
+
(** Small utility *)
Definition usize_to_nat (x: usize) : nat := Z.to_nat (to_Z x).
diff --git a/tests/coq/betree/Primitives.v b/tests/coq/betree/Primitives.v
index 83f860b6..99ffe070 100644
--- a/tests/coq/betree/Primitives.v
+++ b/tests/coq/betree/Primitives.v
@@ -255,6 +255,12 @@ Definition scalar_rem {ty} (x y: scalar ty) : result (scalar ty) := mk_scalar ty
Definition scalar_neg {ty} (x: scalar ty) : result (scalar ty) := mk_scalar ty (-(to_Z x)).
+Axiom scalar_xor : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_or : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_and : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_shl : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+Axiom scalar_shr : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
(* TODO: check the semantics of casts in Rust *)
Definition scalar_cast (src_ty tgt_ty : scalar_ty) (x : scalar src_ty) : result (scalar tgt_ty) :=
@@ -372,6 +378,76 @@ Definition u32_mul := @scalar_mul U32.
Definition u64_mul := @scalar_mul U64.
Definition u128_mul := @scalar_mul U128.
+(** Xor *)
+Definition u8_xor := @scalar_xor U8.
+Definition u16_xor := @scalar_xor U16.
+Definition u32_xor := @scalar_xor U32.
+Definition u64_xor := @scalar_xor U64.
+Definition u128_xor := @scalar_xor U128.
+Definition usize_xor := @scalar_xor Usize.
+Definition i8_xor := @scalar_xor I8.
+Definition i16_xor := @scalar_xor I16.
+Definition i32_xor := @scalar_xor I32.
+Definition i64_xor := @scalar_xor I64.
+Definition i128_xor := @scalar_xor I128.
+Definition isize_xor := @scalar_xor Isize.
+
+(** Or *)
+Definition u8_or := @scalar_or U8.
+Definition u16_or := @scalar_or U16.
+Definition u32_or := @scalar_or U32.
+Definition u64_or := @scalar_or U64.
+Definition u128_or := @scalar_or U128.
+Definition usize_or := @scalar_or Usize.
+Definition i8_or := @scalar_or I8.
+Definition i16_or := @scalar_or I16.
+Definition i32_or := @scalar_or I32.
+Definition i64_or := @scalar_or I64.
+Definition i128_or := @scalar_or I128.
+Definition isize_or := @scalar_or Isize.
+
+(** And *)
+Definition u8_and := @scalar_and U8.
+Definition u16_and := @scalar_and U16.
+Definition u32_and := @scalar_and U32.
+Definition u64_and := @scalar_and U64.
+Definition u128_and := @scalar_and U128.
+Definition usize_and := @scalar_and Usize.
+Definition i8_and := @scalar_and I8.
+Definition i16_and := @scalar_and I16.
+Definition i32_and := @scalar_and I32.
+Definition i64_and := @scalar_and I64.
+Definition i128_and := @scalar_and I128.
+Definition isize_and := @scalar_and Isize.
+
+(** Shift left *)
+Definition u8_shl {ty} := @scalar_shl U8 ty.
+Definition u16_shl {ty} := @scalar_shl U16 ty.
+Definition u32_shl {ty} := @scalar_shl U32 ty.
+Definition u64_shl {ty} := @scalar_shl U64 ty.
+Definition u128_shl {ty} := @scalar_shl U128 ty.
+Definition usize_shl {ty} := @scalar_shl Usize ty.
+Definition i8_shl {ty} := @scalar_shl I8 ty.
+Definition i16_shl {ty} := @scalar_shl I16 ty.
+Definition i32_shl {ty} := @scalar_shl I32 ty.
+Definition i64_shl {ty} := @scalar_shl I64 ty.
+Definition i128_shl {ty} := @scalar_shl I128 ty.
+Definition isize_shl {ty} := @scalar_shl Isize ty.
+
+(** Shift right *)
+Definition u8_shr {ty} := @scalar_shr U8 ty.
+Definition u16_shr {ty} := @scalar_shr U16 ty.
+Definition u32_shr {ty} := @scalar_shr U32 ty.
+Definition u64_shr {ty} := @scalar_shr U64 ty.
+Definition u128_shr {ty} := @scalar_shr U128 ty.
+Definition usize_shr {ty} := @scalar_shr Usize ty.
+Definition i8_shr {ty} := @scalar_shr I8 ty.
+Definition i16_shr {ty} := @scalar_shr I16 ty.
+Definition i32_shr {ty} := @scalar_shr I32 ty.
+Definition i64_shr {ty} := @scalar_shr I64 ty.
+Definition i128_shr {ty} := @scalar_shr I128 ty.
+Definition isize_shr {ty} := @scalar_shr Isize ty.
+
(** Small utility *)
Definition usize_to_nat (x: usize) : nat := Z.to_nat (to_Z x).
diff --git a/tests/coq/hashmap/Primitives.v b/tests/coq/hashmap/Primitives.v
index 83f860b6..99ffe070 100644
--- a/tests/coq/hashmap/Primitives.v
+++ b/tests/coq/hashmap/Primitives.v
@@ -255,6 +255,12 @@ Definition scalar_rem {ty} (x y: scalar ty) : result (scalar ty) := mk_scalar ty
Definition scalar_neg {ty} (x: scalar ty) : result (scalar ty) := mk_scalar ty (-(to_Z x)).
+Axiom scalar_xor : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_or : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_and : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_shl : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+Axiom scalar_shr : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
(* TODO: check the semantics of casts in Rust *)
Definition scalar_cast (src_ty tgt_ty : scalar_ty) (x : scalar src_ty) : result (scalar tgt_ty) :=
@@ -372,6 +378,76 @@ Definition u32_mul := @scalar_mul U32.
Definition u64_mul := @scalar_mul U64.
Definition u128_mul := @scalar_mul U128.
+(** Xor *)
+Definition u8_xor := @scalar_xor U8.
+Definition u16_xor := @scalar_xor U16.
+Definition u32_xor := @scalar_xor U32.
+Definition u64_xor := @scalar_xor U64.
+Definition u128_xor := @scalar_xor U128.
+Definition usize_xor := @scalar_xor Usize.
+Definition i8_xor := @scalar_xor I8.
+Definition i16_xor := @scalar_xor I16.
+Definition i32_xor := @scalar_xor I32.
+Definition i64_xor := @scalar_xor I64.
+Definition i128_xor := @scalar_xor I128.
+Definition isize_xor := @scalar_xor Isize.
+
+(** Or *)
+Definition u8_or := @scalar_or U8.
+Definition u16_or := @scalar_or U16.
+Definition u32_or := @scalar_or U32.
+Definition u64_or := @scalar_or U64.
+Definition u128_or := @scalar_or U128.
+Definition usize_or := @scalar_or Usize.
+Definition i8_or := @scalar_or I8.
+Definition i16_or := @scalar_or I16.
+Definition i32_or := @scalar_or I32.
+Definition i64_or := @scalar_or I64.
+Definition i128_or := @scalar_or I128.
+Definition isize_or := @scalar_or Isize.
+
+(** And *)
+Definition u8_and := @scalar_and U8.
+Definition u16_and := @scalar_and U16.
+Definition u32_and := @scalar_and U32.
+Definition u64_and := @scalar_and U64.
+Definition u128_and := @scalar_and U128.
+Definition usize_and := @scalar_and Usize.
+Definition i8_and := @scalar_and I8.
+Definition i16_and := @scalar_and I16.
+Definition i32_and := @scalar_and I32.
+Definition i64_and := @scalar_and I64.
+Definition i128_and := @scalar_and I128.
+Definition isize_and := @scalar_and Isize.
+
+(** Shift left *)
+Definition u8_shl {ty} := @scalar_shl U8 ty.
+Definition u16_shl {ty} := @scalar_shl U16 ty.
+Definition u32_shl {ty} := @scalar_shl U32 ty.
+Definition u64_shl {ty} := @scalar_shl U64 ty.
+Definition u128_shl {ty} := @scalar_shl U128 ty.
+Definition usize_shl {ty} := @scalar_shl Usize ty.
+Definition i8_shl {ty} := @scalar_shl I8 ty.
+Definition i16_shl {ty} := @scalar_shl I16 ty.
+Definition i32_shl {ty} := @scalar_shl I32 ty.
+Definition i64_shl {ty} := @scalar_shl I64 ty.
+Definition i128_shl {ty} := @scalar_shl I128 ty.
+Definition isize_shl {ty} := @scalar_shl Isize ty.
+
+(** Shift right *)
+Definition u8_shr {ty} := @scalar_shr U8 ty.
+Definition u16_shr {ty} := @scalar_shr U16 ty.
+Definition u32_shr {ty} := @scalar_shr U32 ty.
+Definition u64_shr {ty} := @scalar_shr U64 ty.
+Definition u128_shr {ty} := @scalar_shr U128 ty.
+Definition usize_shr {ty} := @scalar_shr Usize ty.
+Definition i8_shr {ty} := @scalar_shr I8 ty.
+Definition i16_shr {ty} := @scalar_shr I16 ty.
+Definition i32_shr {ty} := @scalar_shr I32 ty.
+Definition i64_shr {ty} := @scalar_shr I64 ty.
+Definition i128_shr {ty} := @scalar_shr I128 ty.
+Definition isize_shr {ty} := @scalar_shr Isize ty.
+
(** Small utility *)
Definition usize_to_nat (x: usize) : nat := Z.to_nat (to_Z x).
diff --git a/tests/coq/hashmap_on_disk/Primitives.v b/tests/coq/hashmap_on_disk/Primitives.v
index 83f860b6..99ffe070 100644
--- a/tests/coq/hashmap_on_disk/Primitives.v
+++ b/tests/coq/hashmap_on_disk/Primitives.v
@@ -255,6 +255,12 @@ Definition scalar_rem {ty} (x y: scalar ty) : result (scalar ty) := mk_scalar ty
Definition scalar_neg {ty} (x: scalar ty) : result (scalar ty) := mk_scalar ty (-(to_Z x)).
+Axiom scalar_xor : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_or : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_and : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_shl : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+Axiom scalar_shr : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
(* TODO: check the semantics of casts in Rust *)
Definition scalar_cast (src_ty tgt_ty : scalar_ty) (x : scalar src_ty) : result (scalar tgt_ty) :=
@@ -372,6 +378,76 @@ Definition u32_mul := @scalar_mul U32.
Definition u64_mul := @scalar_mul U64.
Definition u128_mul := @scalar_mul U128.
+(** Xor *)
+Definition u8_xor := @scalar_xor U8.
+Definition u16_xor := @scalar_xor U16.
+Definition u32_xor := @scalar_xor U32.
+Definition u64_xor := @scalar_xor U64.
+Definition u128_xor := @scalar_xor U128.
+Definition usize_xor := @scalar_xor Usize.
+Definition i8_xor := @scalar_xor I8.
+Definition i16_xor := @scalar_xor I16.
+Definition i32_xor := @scalar_xor I32.
+Definition i64_xor := @scalar_xor I64.
+Definition i128_xor := @scalar_xor I128.
+Definition isize_xor := @scalar_xor Isize.
+
+(** Or *)
+Definition u8_or := @scalar_or U8.
+Definition u16_or := @scalar_or U16.
+Definition u32_or := @scalar_or U32.
+Definition u64_or := @scalar_or U64.
+Definition u128_or := @scalar_or U128.
+Definition usize_or := @scalar_or Usize.
+Definition i8_or := @scalar_or I8.
+Definition i16_or := @scalar_or I16.
+Definition i32_or := @scalar_or I32.
+Definition i64_or := @scalar_or I64.
+Definition i128_or := @scalar_or I128.
+Definition isize_or := @scalar_or Isize.
+
+(** And *)
+Definition u8_and := @scalar_and U8.
+Definition u16_and := @scalar_and U16.
+Definition u32_and := @scalar_and U32.
+Definition u64_and := @scalar_and U64.
+Definition u128_and := @scalar_and U128.
+Definition usize_and := @scalar_and Usize.
+Definition i8_and := @scalar_and I8.
+Definition i16_and := @scalar_and I16.
+Definition i32_and := @scalar_and I32.
+Definition i64_and := @scalar_and I64.
+Definition i128_and := @scalar_and I128.
+Definition isize_and := @scalar_and Isize.
+
+(** Shift left *)
+Definition u8_shl {ty} := @scalar_shl U8 ty.
+Definition u16_shl {ty} := @scalar_shl U16 ty.
+Definition u32_shl {ty} := @scalar_shl U32 ty.
+Definition u64_shl {ty} := @scalar_shl U64 ty.
+Definition u128_shl {ty} := @scalar_shl U128 ty.
+Definition usize_shl {ty} := @scalar_shl Usize ty.
+Definition i8_shl {ty} := @scalar_shl I8 ty.
+Definition i16_shl {ty} := @scalar_shl I16 ty.
+Definition i32_shl {ty} := @scalar_shl I32 ty.
+Definition i64_shl {ty} := @scalar_shl I64 ty.
+Definition i128_shl {ty} := @scalar_shl I128 ty.
+Definition isize_shl {ty} := @scalar_shl Isize ty.
+
+(** Shift right *)
+Definition u8_shr {ty} := @scalar_shr U8 ty.
+Definition u16_shr {ty} := @scalar_shr U16 ty.
+Definition u32_shr {ty} := @scalar_shr U32 ty.
+Definition u64_shr {ty} := @scalar_shr U64 ty.
+Definition u128_shr {ty} := @scalar_shr U128 ty.
+Definition usize_shr {ty} := @scalar_shr Usize ty.
+Definition i8_shr {ty} := @scalar_shr I8 ty.
+Definition i16_shr {ty} := @scalar_shr I16 ty.
+Definition i32_shr {ty} := @scalar_shr I32 ty.
+Definition i64_shr {ty} := @scalar_shr I64 ty.
+Definition i128_shr {ty} := @scalar_shr I128 ty.
+Definition isize_shr {ty} := @scalar_shr Isize ty.
+
(** Small utility *)
Definition usize_to_nat (x: usize) : nat := Z.to_nat (to_Z x).
diff --git a/tests/coq/misc/Bitwise.v b/tests/coq/misc/Bitwise.v
new file mode 100644
index 00000000..94771b37
--- /dev/null
+++ b/tests/coq/misc/Bitwise.v
@@ -0,0 +1,38 @@
+(** THIS FILE WAS AUTOMATICALLY GENERATED BY AENEAS *)
+(** [bitwise] *)
+Require Import Primitives.
+Import Primitives.
+Require Import Coq.ZArith.ZArith.
+Require Import List.
+Import ListNotations.
+Local Open Scope Primitives_scope.
+Module Bitwise.
+
+(** [bitwise::shift_u32]: forward function
+ Source: 'src/bitwise.rs', lines 3:0-3:31 *)
+Definition shift_u32 (a : u32) : result u32 :=
+ t <- u32_shr a 16%usize; u32_shl t 16%usize
+.
+
+(** [bitwise::shift_i32]: forward function
+ Source: 'src/bitwise.rs', lines 10:0-10:31 *)
+Definition shift_i32 (a : i32) : result i32 :=
+ t <- i32_shr a 16%isize; i32_shl t 16%isize
+.
+
+(** [bitwise::xor_u32]: forward function
+ Source: 'src/bitwise.rs', lines 17:0-17:37 *)
+Definition xor_u32 (a : u32) (b : u32) : result u32 :=
+ Return (u32_xor a b).
+
+(** [bitwise::or_u32]: forward function
+ Source: 'src/bitwise.rs', lines 21:0-21:36 *)
+Definition or_u32 (a : u32) (b : u32) : result u32 :=
+ Return (u32_or a b).
+
+(** [bitwise::and_u32]: forward function
+ Source: 'src/bitwise.rs', lines 25:0-25:37 *)
+Definition and_u32 (a : u32) (b : u32) : result u32 :=
+ Return (u32_and a b).
+
+End Bitwise.
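
Note: for reference, a plausible reconstruction of the Rust source this module was generated from (the actual src/bitwise.rs is not included in this excerpt; function names and line spans are inferred from the Source annotations above):

pub fn shift_u32(a: u32) -> u32 {
    let t = a >> 16usize; // extracted above as: u32_shr a 16%usize
    t << 16usize
}

pub fn shift_i32(a: i32) -> i32 {
    let t = a >> 16isize;
    t << 16isize
}

pub fn xor_u32(a: u32, b: u32) -> u32 { a ^ b }

pub fn or_u32(a: u32, b: u32) -> u32 { a | b }

pub fn and_u32(a: u32, b: u32) -> u32 { a & b }
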
diff --git a/tests/coq/misc/Primitives.v b/tests/coq/misc/Primitives.v
index 83f860b6..99ffe070 100644
--- a/tests/coq/misc/Primitives.v
+++ b/tests/coq/misc/Primitives.v
@@ -255,6 +255,12 @@ Definition scalar_rem {ty} (x y: scalar ty) : result (scalar ty) := mk_scalar ty
Definition scalar_neg {ty} (x: scalar ty) : result (scalar ty) := mk_scalar ty (-(to_Z x)).
+Axiom scalar_xor : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_or : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_and : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_shl : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+Axiom scalar_shr : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
(* TODO: check the semantics of casts in Rust *)
Definition scalar_cast (src_ty tgt_ty : scalar_ty) (x : scalar src_ty) : result (scalar tgt_ty) :=
@@ -372,6 +378,76 @@ Definition u32_mul := @scalar_mul U32.
Definition u64_mul := @scalar_mul U64.
Definition u128_mul := @scalar_mul U128.
+(** Xor *)
+Definition u8_xor := @scalar_xor U8.
+Definition u16_xor := @scalar_xor U16.
+Definition u32_xor := @scalar_xor U32.
+Definition u64_xor := @scalar_xor U64.
+Definition u128_xor := @scalar_xor U128.
+Definition usize_xor := @scalar_xor Usize.
+Definition i8_xor := @scalar_xor I8.
+Definition i16_xor := @scalar_xor I16.
+Definition i32_xor := @scalar_xor I32.
+Definition i64_xor := @scalar_xor I64.
+Definition i128_xor := @scalar_xor I128.
+Definition isize_xor := @scalar_xor Isize.
+
+(** Or *)
+Definition u8_or := @scalar_or U8.
+Definition u16_or := @scalar_or U16.
+Definition u32_or := @scalar_or U32.
+Definition u64_or := @scalar_or U64.
+Definition u128_or := @scalar_or U128.
+Definition usize_or := @scalar_or Usize.
+Definition i8_or := @scalar_or I8.
+Definition i16_or := @scalar_or I16.
+Definition i32_or := @scalar_or I32.
+Definition i64_or := @scalar_or I64.
+Definition i128_or := @scalar_or I128.
+Definition isize_or := @scalar_or Isize.
+
+(** And *)
+Definition u8_and := @scalar_and U8.
+Definition u16_and := @scalar_and U16.
+Definition u32_and := @scalar_and U32.
+Definition u64_and := @scalar_and U64.
+Definition u128_and := @scalar_and U128.
+Definition usize_and := @scalar_and Usize.
+Definition i8_and := @scalar_and I8.
+Definition i16_and := @scalar_and I16.
+Definition i32_and := @scalar_and I32.
+Definition i64_and := @scalar_and I64.
+Definition i128_and := @scalar_and I128.
+Definition isize_and := @scalar_and Isize.
+
+(** Shift left *)
+Definition u8_shl {ty} := @scalar_shl U8 ty.
+Definition u16_shl {ty} := @scalar_shl U16 ty.
+Definition u32_shl {ty} := @scalar_shl U32 ty.
+Definition u64_shl {ty} := @scalar_shl U64 ty.
+Definition u128_shl {ty} := @scalar_shl U128 ty.
+Definition usize_shl {ty} := @scalar_shl Usize ty.
+Definition i8_shl {ty} := @scalar_shl I8 ty.
+Definition i16_shl {ty} := @scalar_shl I16 ty.
+Definition i32_shl {ty} := @scalar_shl I32 ty.
+Definition i64_shl {ty} := @scalar_shl I64 ty.
+Definition i128_shl {ty} := @scalar_shl I128 ty.
+Definition isize_shl {ty} := @scalar_shl Isize ty.
+
+(** Shift right *)
+Definition u8_shr {ty} := @scalar_shr U8 ty.
+Definition u16_shr {ty} := @scalar_shr U16 ty.
+Definition u32_shr {ty} := @scalar_shr U32 ty.
+Definition u64_shr {ty} := @scalar_shr U64 ty.
+Definition u128_shr {ty} := @scalar_shr U128 ty.
+Definition usize_shr {ty} := @scalar_shr Usize ty.
+Definition i8_shr {ty} := @scalar_shr I8 ty.
+Definition i16_shr {ty} := @scalar_shr I16 ty.
+Definition i32_shr {ty} := @scalar_shr I32 ty.
+Definition i64_shr {ty} := @scalar_shr I64 ty.
+Definition i128_shr {ty} := @scalar_shr I128 ty.
+Definition isize_shr {ty} := @scalar_shr Isize ty.
+
(** Small utility *)
Definition usize_to_nat (x: usize) : nat := Z.to_nat (to_Z x).
diff --git a/tests/coq/misc/_CoqProject b/tests/coq/misc/_CoqProject
index 0828bced..abbf6aa8 100644
--- a/tests/coq/misc/_CoqProject
+++ b/tests/coq/misc/_CoqProject
@@ -13,5 +13,6 @@ Constants.v
PoloniusList.v
NoNestedBorrows.v
External_FunsExternal.v
+Bitwise.v
External_TypesExternal_Template.v
External_FunsExternal_Template.v
diff --git a/tests/coq/traits/Primitives.v b/tests/coq/traits/Primitives.v
index 83f860b6..99ffe070 100644
--- a/tests/coq/traits/Primitives.v
+++ b/tests/coq/traits/Primitives.v
@@ -255,6 +255,12 @@ Definition scalar_rem {ty} (x y: scalar ty) : result (scalar ty) := mk_scalar ty
Definition scalar_neg {ty} (x: scalar ty) : result (scalar ty) := mk_scalar ty (-(to_Z x)).
+Axiom scalar_xor : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_or : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_and : forall ty, scalar ty -> scalar ty -> scalar ty. (* TODO *)
+Axiom scalar_shl : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+Axiom scalar_shr : forall ty0 ty1, scalar ty0 -> scalar ty1 -> result (scalar ty0). (* TODO *)
+
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
(* TODO: check the semantics of casts in Rust *)
Definition scalar_cast (src_ty tgt_ty : scalar_ty) (x : scalar src_ty) : result (scalar tgt_ty) :=
@@ -372,6 +378,76 @@ Definition u32_mul := @scalar_mul U32.
Definition u64_mul := @scalar_mul U64.
Definition u128_mul := @scalar_mul U128.
+(** Xor *)
+Definition u8_xor := @scalar_xor U8.
+Definition u16_xor := @scalar_xor U16.
+Definition u32_xor := @scalar_xor U32.
+Definition u64_xor := @scalar_xor U64.
+Definition u128_xor := @scalar_xor U128.
+Definition usize_xor := @scalar_xor Usize.
+Definition i8_xor := @scalar_xor I8.
+Definition i16_xor := @scalar_xor I16.
+Definition i32_xor := @scalar_xor I32.
+Definition i64_xor := @scalar_xor I64.
+Definition i128_xor := @scalar_xor I128.
+Definition isize_xor := @scalar_xor Isize.
+
+(** Or *)
+Definition u8_or := @scalar_or U8.
+Definition u16_or := @scalar_or U16.
+Definition u32_or := @scalar_or U32.
+Definition u64_or := @scalar_or U64.
+Definition u128_or := @scalar_or U128.
+Definition usize_or := @scalar_or Usize.
+Definition i8_or := @scalar_or I8.
+Definition i16_or := @scalar_or I16.
+Definition i32_or := @scalar_or I32.
+Definition i64_or := @scalar_or I64.
+Definition i128_or := @scalar_or I128.
+Definition isize_or := @scalar_or Isize.
+
+(** And *)
+Definition u8_and := @scalar_and U8.
+Definition u16_and := @scalar_and U16.
+Definition u32_and := @scalar_and U32.
+Definition u64_and := @scalar_and U64.
+Definition u128_and := @scalar_and U128.
+Definition usize_and := @scalar_and Usize.
+Definition i8_and := @scalar_and I8.
+Definition i16_and := @scalar_and I16.
+Definition i32_and := @scalar_and I32.
+Definition i64_and := @scalar_and I64.
+Definition i128_and := @scalar_and I128.
+Definition isize_and := @scalar_and Isize.
+
+(** Shift left *)
+Definition u8_shl {ty} := @scalar_shl U8 ty.
+Definition u16_shl {ty} := @scalar_shl U16 ty.
+Definition u32_shl {ty} := @scalar_shl U32 ty.
+Definition u64_shl {ty} := @scalar_shl U64 ty.
+Definition u128_shl {ty} := @scalar_shl U128 ty.
+Definition usize_shl {ty} := @scalar_shl Usize ty.
+Definition i8_shl {ty} := @scalar_shl I8 ty.
+Definition i16_shl {ty} := @scalar_shl I16 ty.
+Definition i32_shl {ty} := @scalar_shl I32 ty.
+Definition i64_shl {ty} := @scalar_shl I64 ty.
+Definition i128_shl {ty} := @scalar_shl I128 ty.
+Definition isize_shl {ty} := @scalar_shl Isize ty.
+
+(** Shift right *)
+Definition u8_shr {ty} := @scalar_shr U8 ty.
+Definition u16_shr {ty} := @scalar_shr U16 ty.
+Definition u32_shr {ty} := @scalar_shr U32 ty.
+Definition u64_shr {ty} := @scalar_shr U64 ty.
+Definition u128_shr {ty} := @scalar_shr U128 ty.
+Definition usize_shr {ty} := @scalar_shr Usize ty.
+Definition i8_shr {ty} := @scalar_shr I8 ty.
+Definition i16_shr {ty} := @scalar_shr I16 ty.
+Definition i32_shr {ty} := @scalar_shr I32 ty.
+Definition i64_shr {ty} := @scalar_shr I64 ty.
+Definition i128_shr {ty} := @scalar_shr I128 ty.
+Definition isize_shr {ty} := @scalar_shr Isize ty.
+
(** Small utility *)
Definition usize_to_nat (x: usize) : nat := Z.to_nat (to_Z x).
diff --git a/tests/fstar/array/Primitives.fst b/tests/fstar/array/Primitives.fst
index 94322ead..dd340c00 100644
--- a/tests/fstar/array/Primitives.fst
+++ b/tests/fstar/array/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
diff --git a/tests/fstar/betree/Primitives.fst b/tests/fstar/betree/Primitives.fst
index 94322ead..dd340c00 100644
--- a/tests/fstar/betree/Primitives.fst
+++ b/tests/fstar/betree/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
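Two remarks on the hunk above. First, the signed cases of scalar_xor/or/and need the normalize_spec calls presumably to get the typechecker to identify the two refinements of int involved (scalar I8 versus FStar.Int.int_t 8); that is the "encoding issues" the comments allude to. Second, the two shift primitives are deliberately left as admit()s. For illustration only, here is a minimal sketch of how they could later be made concrete, assuming the usual arithmetic encoding of shifts; the names scalar_shl_sketch/scalar_shr_sketch, the 128-bit over-approximation of the width bound, and the choice to reuse mk_scalar (so an overflowing shift fails instead of discarding high bits as Rust does) are assumptions of the sketch, not part of this patch:

// Hypothetical sketch, not part of this patch: shift left as multiplication
// by a power of two, failing (like the arithmetic primitives above) on
// overflow or on a shift amount outside [0; 128).
let scalar_shl_sketch (#ty0 #ty1 : scalar_ty)
  (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
  if 0 <= y && y < 128 then mk_scalar ty0 (x * pow2 y)
  else Fail Failure

// Shift right as division by a (positive) power of two, which rounds toward
// negative infinity here and hence behaves as an arithmetic shift on signed
// values.
let scalar_shr_sketch (#ty0 #ty1 : scalar_ty)
  (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
  if 0 <= y && y < 128 then mk_scalar ty0 (x / pow2 y)
  else Fail Failure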
diff --git a/tests/fstar/betree_back_stateful/Primitives.fst b/tests/fstar/betree_back_stateful/Primitives.fst
index 94322ead..dd340c00 100644
--- a/tests/fstar/betree_back_stateful/Primitives.fst
+++ b/tests/fstar/betree_back_stateful/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
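This hunk is byte-for-byte the same as the previous one: each F* test directory vendors its own copy of Primitives.fst, so the change to the scalar primitives is replayed verbatim in the hashmap, hashmap_on_disk, misc, and traits copies below.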
diff --git a/tests/fstar/hashmap/Primitives.fst b/tests/fstar/hashmap/Primitives.fst
index 94322ead..dd340c00 100644
--- a/tests/fstar/hashmap/Primitives.fst
+++ b/tests/fstar/hashmap/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
diff --git a/tests/fstar/hashmap_on_disk/Primitives.fst b/tests/fstar/hashmap_on_disk/Primitives.fst
index 94322ead..dd340c00 100644
--- a/tests/fstar/hashmap_on_disk/Primitives.fst
+++ b/tests/fstar/hashmap_on_disk/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
diff --git a/tests/fstar/misc/Bitwise.fst b/tests/fstar/misc/Bitwise.fst
new file mode 100644
index 00000000..d7ba2c57
--- /dev/null
+++ b/tests/fstar/misc/Bitwise.fst
@@ -0,0 +1,32 @@
+(** THIS FILE WAS AUTOMATICALLY GENERATED BY AENEAS *)
+(** [bitwise] *)
+module Bitwise
+open Primitives
+
+#set-options "--z3rlimit 50 --fuel 1 --ifuel 1"
+
+(** [bitwise::shift_u32]: forward function
+ Source: 'src/bitwise.rs', lines 3:0-3:31 *)
+let shift_u32 (a : u32) : result u32 =
+ let* t = u32_shr #Usize a 16 in u32_shl #Usize t 16
+
+(** [bitwise::shift_i32]: forward function
+ Source: 'src/bitwise.rs', lines 10:0-10:31 *)
+let shift_i32 (a : i32) : result i32 =
+ let* t = i32_shr #Isize a 16 in i32_shl #Isize t 16
+
+(** [bitwise::xor_u32]: forward function
+ Source: 'src/bitwise.rs', lines 17:0-17:37 *)
+let xor_u32 (a : u32) (b : u32) : result u32 =
+ Return (u32_xor a b)
+
+(** [bitwise::or_u32]: forward function
+ Source: 'src/bitwise.rs', lines 21:0-21:36 *)
+let or_u32 (a : u32) (b : u32) : result u32 =
+ Return (u32_or a b)
+
+(** [bitwise::and_u32]: forward function
+ Source: 'src/bitwise.rs', lines 25:0-25:37 *)
+let and_u32 (a : u32) (b : u32) : result u32 =
+ Return (u32_and a b)
+
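The generated tests thread failure through the result monad: let* short-circuits on Fail, so if the u32_shr call fails, shift_u32 returns that failure without ever running u32_shl. Roughly (the name shift_u32' is made up for illustration), the first test desugars to:

// Illustration only: approximately what the let* sugar in shift_u32
// expands to.
let shift_u32' (a : u32) : result u32 =
  match u32_shr #Usize a 16 with
  | Return t -> u32_shl #Usize t 16
  | e -> e // propagate the failure unchanged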
diff --git a/tests/fstar/misc/Primitives.fst b/tests/fstar/misc/Primitives.fst
index 94322ead..dd340c00 100644
--- a/tests/fstar/misc/Primitives.fst
+++ b/tests/fstar/misc/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
diff --git a/tests/fstar/traits/Primitives.fst b/tests/fstar/traits/Primitives.fst
index 94322ead..dd340c00 100644
--- a/tests/fstar/traits/Primitives.fst
+++ b/tests/fstar/traits/Primitives.fst
@@ -65,6 +65,10 @@ type const_raw_ptr (t : Type0) = { v : t }
(*** Scalars *)
/// Rem.: most of the following code was partially generated
+assume val size_numbits : pos
+
+// TODO: we could use FStar.Int.int_t and FStar.UInt.int_t
+
let isize_min : int = -9223372036854775808 // TODO: should be opaque
let isize_max : int = 9223372036854775807 // TODO: should be opaque
let i8_min : int = -128
@@ -108,7 +112,6 @@ let is_unsigned = function
| Isize | I8 | I16 | I32 | I64 | I128 -> false
| Usize | U8 | U16 | U32 | U64 | U128 -> true
-
let scalar_min (ty : scalar_ty) : int =
match ty with
| Isize -> isize_min
@@ -171,7 +174,7 @@ let scalar_sub (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scala
let scalar_mul (#ty : scalar_ty) (x : scalar ty) (y : scalar ty) : result (scalar ty) =
mk_scalar ty (x * y)
-let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
+let scalar_xor (#ty : scalar_ty)
(x : scalar ty) (y : scalar ty) : scalar ty =
match ty with
| U8 -> FStar.UInt.logxor #8 x y
@@ -179,6 +182,91 @@ let scalar_lxor (#ty : scalar_ty { is_unsigned ty && ty <> Usize })
| U32 -> FStar.UInt.logxor #32 x y
| U64 -> FStar.UInt.logxor #64 x y
| U128 -> FStar.UInt.logxor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logxor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logxor #16 x y
+ | I32 -> FStar.Int.logxor #32 x y
+ | I64 -> FStar.Int.logxor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logxor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_or (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logor #8 x y
+ | U16 -> FStar.UInt.logor #16 x y
+ | U32 -> FStar.UInt.logor #32 x y
+ | U64 -> FStar.UInt.logor #64 x y
+ | U128 -> FStar.UInt.logor #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logor #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logor #16 x y
+ | I32 -> FStar.Int.logor #32 x y
+ | I64 -> FStar.Int.logor #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logor #128 x y
+ | Isize -> admit() // TODO
+
+let scalar_and (#ty : scalar_ty)
+ (x : scalar ty) (y : scalar ty) : scalar ty =
+ match ty with
+ | U8 -> FStar.UInt.logand #8 x y
+ | U16 -> FStar.UInt.logand #16 x y
+ | U32 -> FStar.UInt.logand #32 x y
+ | U64 -> FStar.UInt.logand #64 x y
+ | U128 -> FStar.UInt.logand #128 x y
+ | Usize -> admit() // TODO
+ | I8 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 8);
+ normalize_spec (scalar I8);
+ FStar.Int.logand #8 x y
+ | I16 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 16);
+ normalize_spec (scalar I16);
+ FStar.Int.logand #16 x y
+ | I32 -> FStar.Int.logand #32 x y
+ | I64 -> FStar.Int.logand #64 x y
+ | I128 ->
+ // Encoding issues...
+ normalize_spec (FStar.Int.int_t 128);
+ normalize_spec (scalar I128);
+ FStar.Int.logand #128 x y
+ | Isize -> admit() // TODO
+
+// Shift left
+let scalar_shl (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
+
+// Shift right
+let scalar_shr (#ty0 #ty1 : scalar_ty)
+ (x : scalar ty0) (y : scalar ty1) : result (scalar ty0) =
+ admit()
(** Cast an integer from a [src_ty] to a [tgt_ty] *)
// TODO: check the semantics of casts in Rust
@@ -304,12 +392,75 @@ let u32_mul = scalar_mul #U32
let u64_mul = scalar_mul #U64
let u128_mul = scalar_mul #U128
-/// Logical operators, defined for unsigned types only, so far
-let u8_xor = scalar_lxor #U8
-let u16_xor = scalar_lxor #U16
-let u32_xor = scalar_lxor #U32
-let u64_xor = scalar_lxor #U64
-let u128_xor = scalar_lxor #U128
+/// Xor
+let u8_xor = scalar_xor #U8
+let u16_xor = scalar_xor #U16
+let u32_xor = scalar_xor #U32
+let u64_xor = scalar_xor #U64
+let u128_xor = scalar_xor #U128
+let usize_xor = scalar_xor #Usize
+let i8_xor = scalar_xor #I8
+let i16_xor = scalar_xor #I16
+let i32_xor = scalar_xor #I32
+let i64_xor = scalar_xor #I64
+let i128_xor = scalar_xor #I128
+let isize_xor = scalar_xor #Isize
+
+/// Or
+let u8_or = scalar_or #U8
+let u16_or = scalar_or #U16
+let u32_or = scalar_or #U32
+let u64_or = scalar_or #U64
+let u128_or = scalar_or #U128
+let usize_or = scalar_or #Usize
+let i8_or = scalar_or #I8
+let i16_or = scalar_or #I16
+let i32_or = scalar_or #I32
+let i64_or = scalar_or #I64
+let i128_or = scalar_or #I128
+let isize_or = scalar_or #Isize
+
+/// And
+let u8_and = scalar_and #U8
+let u16_and = scalar_and #U16
+let u32_and = scalar_and #U32
+let u64_and = scalar_and #U64
+let u128_and = scalar_and #U128
+let usize_and = scalar_and #Usize
+let i8_and = scalar_and #I8
+let i16_and = scalar_and #I16
+let i32_and = scalar_and #I32
+let i64_and = scalar_and #I64
+let i128_and = scalar_and #I128
+let isize_and = scalar_and #Isize
+
+/// Shift left
+let u8_shl #ty = scalar_shl #U8 #ty
+let u16_shl #ty = scalar_shl #U16 #ty
+let u32_shl #ty = scalar_shl #U32 #ty
+let u64_shl #ty = scalar_shl #U64 #ty
+let u128_shl #ty = scalar_shl #U128 #ty
+let usize_shl #ty = scalar_shl #Usize #ty
+let i8_shl #ty = scalar_shl #I8 #ty
+let i16_shl #ty = scalar_shl #I16 #ty
+let i32_shl #ty = scalar_shl #I32 #ty
+let i64_shl #ty = scalar_shl #I64 #ty
+let i128_shl #ty = scalar_shl #I128 #ty
+let isize_shl #ty = scalar_shl #Isize #ty
+
+/// Shift right
+let u8_shr #ty = scalar_shr #U8 #ty
+let u16_shr #ty = scalar_shr #U16 #ty
+let u32_shr #ty = scalar_shr #U32 #ty
+let u64_shr #ty = scalar_shr #U64 #ty
+let u128_shr #ty = scalar_shr #U128 #ty
+let usize_shr #ty = scalar_shr #Usize #ty
+let i8_shr #ty = scalar_shr #I8 #ty
+let i16_shr #ty = scalar_shr #I16 #ty
+let i32_shr #ty = scalar_shr #I32 #ty
+let i64_shr #ty = scalar_shr #I64 #ty
+let i128_shr #ty = scalar_shr #I128 #ty
+let isize_shr #ty = scalar_shr #Isize #ty
(*** core::ops *)
diff --git a/tests/lean/Bitwise.lean b/tests/lean/Bitwise.lean
new file mode 100644
index 00000000..c8538aa2
--- /dev/null
+++ b/tests/lean/Bitwise.lean
@@ -0,0 +1,37 @@
+-- THIS FILE WAS AUTOMATICALLY GENERATED BY AENEAS
+-- [bitwise]
+import Base
+open Primitives
+
+namespace bitwise
+
+/- [bitwise::shift_u32]: forward function
+ Source: 'src/bitwise.rs', lines 3:0-3:31 -/
+def shift_u32 (a : U32) : Result U32 :=
+ do
+ let t ← a >>> 16#usize
+ t <<< 16#usize
+
+/- [bitwise::shift_i32]: forward function
+ Source: 'src/bitwise.rs', lines 10:0-10:31 -/
+def shift_i32 (a : I32) : Result I32 :=
+ do
+ let t ← a >>> 16#isize
+ t <<< 16#isize
+
+/- [bitwise::xor_u32]: forward function
+ Source: 'src/bitwise.rs', lines 17:0-17:37 -/
+def xor_u32 (a : U32) (b : U32) : Result U32 :=
+ Result.ret (a ^^^ b)
+
+/- [bitwise::or_u32]: forward function
+ Source: 'src/bitwise.rs', lines 21:0-21:36 -/
+def or_u32 (a : U32) (b : U32) : Result U32 :=
+ Result.ret (a ||| b)
+
+/- [bitwise::and_u32]: forward function
+ Source: 'src/bitwise.rs', lines 25:0-25:37 -/
+def and_u32 (a : U32) (b : U32) : Result U32 :=
+ Result.ret (a &&& b)
+
+end bitwise
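On the Lean side the same tests go through notations rather than named per-type functions: <<<, >>>, ^^^, ||| and &&& are the shift, xor, or and and operators, and literals such as 16#usize fix the scalar type of the shift amount. As in the F* version, only the shifts are fallible (hence the do block and the ← binds), while xor/or/and are pure and are simply wrapped with Result.ret.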