ECDH: Add Everest Curve25519 to 3rdparty/everest

These files are automatically generated by the Everest toolchain from F*
files. They do not follow the mbedTLS code style guidelines, because manual
modification would invalidate the verification guarantees. The files in
3rdparty/everest/include/kremli{n,b} are a customized (minimized) version of
the support headers expected by the code extracted using KreMLin.
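
For reference, the only public entry point added here is
Hacl_Curve25519_crypto_scalarmult(). The sketch below shows how a caller is
expected to use it; the wrapper function and its name are illustrative, not
part of this commit. The function clamps the secret scalar internally as per
RFC 7748, and it takes non-const uint8_t * arguments, so const key material
is copied to local buffers first:

    #include <stdint.h>
    #include <string.h>
    #include "Hacl_Curve25519.h"

    /* Illustrative only: compute an X25519 shared secret from a 32-byte
     * private key and a peer's 32-byte public value. */
    static void x25519_shared(uint8_t out[32],
                              const uint8_t secret[32],
                              const uint8_t peer_public[32])
    {
        uint8_t s[32];
        uint8_t p[32];
        memcpy(s, secret, 32);      /* crypto_scalarmult takes uint8_t *,  */
        memcpy(p, peer_public, 32); /* so work on local, writable copies.  */
        Hacl_Curve25519_crypto_scalarmult(out, s, p);
    }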
diff --git a/3rdparty/everest/library/Hacl_Curve25519.c b/3rdparty/everest/library/Hacl_Curve25519.c
new file mode 100644
index 0000000..450b9f8
--- /dev/null
+++ b/3rdparty/everest/library/Hacl_Curve25519.c
@@ -0,0 +1,760 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+   Licensed under the Apache 2.0 License. */
+
+/* This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
+ * KreMLin invocation: /mnt/e/everest/verify/kremlin/krml -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -fbuiltin-uint128 -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -I /mnt/e/everest/verify/hacl-star/code/lib/kremlin -I /mnt/e/everest/verify/kremlin/kremlib/compat -I /mnt/e/everest/verify/hacl-star/specs -I /mnt/e/everest/verify/hacl-star/specs/old -I . -ccopt -march=native -verbose -ldopt -flto -tmpdir x25519-c -I ../bignum -bundle Hacl.Curve25519=* -minimal -add-include "kremlib.h" -skip-compilation x25519-c/out.krml -o x25519-c/Hacl_Curve25519.c
+ * F* version: 059db0c8
+ * KreMLin version: 916c37ac
+ */
+
+
+#include "Hacl_Curve25519.h"
+
+extern uint64_t FStar_UInt64_eq_mask(uint64_t x0, uint64_t x1);
+
+extern uint64_t FStar_UInt64_gte_mask(uint64_t x0, uint64_t x1);
+
+extern uint128_t FStar_UInt128_add(uint128_t x0, uint128_t x1);
+
+extern uint128_t FStar_UInt128_add_mod(uint128_t x0, uint128_t x1);
+
+extern uint128_t FStar_UInt128_logand(uint128_t x0, uint128_t x1);
+
+extern uint128_t FStar_UInt128_shift_right(uint128_t x0, uint32_t x1);
+
+extern uint128_t FStar_UInt128_uint64_to_uint128(uint64_t x0);
+
+extern uint64_t FStar_UInt128_uint128_to_uint64(uint128_t x0);
+
+extern uint128_t FStar_UInt128_mul_wide(uint64_t x0, uint64_t x1);
+
+static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
+{
+  uint64_t b4 = b[4U];
+  uint64_t b0 = b[0U];
+  uint64_t b4_ = b4 & (uint64_t)0x7ffffffffffffU;
+  uint64_t b0_ = b0 + (uint64_t)19U * (b4 >> (uint32_t)51U);
+  b[4U] = b4_;
+  b[0U] = b0_;
+}
+
+inline static void Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, uint128_t *input)
+{
+  uint32_t i;
+  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
+  {
+    uint128_t xi = input[i];
+    output[i] = (uint64_t)xi;
+  }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_sum_scalar_multiplication_(uint128_t *output, uint64_t *input, uint64_t s)
+{
+  uint32_t i;
+  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
+  {
+    uint128_t xi = output[i];
+    uint64_t yi = input[i];
+    output[i] = xi + (uint128_t)yi * s;
+  }
+}
+
+inline static void Hacl_Bignum_Fproduct_carry_wide_(uint128_t *tmp)
+{
+  uint32_t i;
+  for (i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
+  {
+    uint32_t ctr = i;
+    uint128_t tctr = tmp[ctr];
+    uint128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+    uint64_t r0 = (uint64_t)tctr & (uint64_t)0x7ffffffffffffU;
+    uint128_t c = tctr >> (uint32_t)51U;
+    tmp[ctr] = (uint128_t)r0;
+    tmp[ctr + (uint32_t)1U] = tctrp1 + c;
+  }
+}
+
+inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
+{
+  uint64_t tmp = output[4U];
+  uint64_t b0;
+  {
+    uint32_t i;
+    for (i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
+    {
+      uint32_t ctr = (uint32_t)5U - i - (uint32_t)1U;
+      uint64_t z = output[ctr - (uint32_t)1U];
+      output[ctr] = z;
+    }
+  }
+  output[0U] = tmp;
+  b0 = output[0U];
+  output[0U] = (uint64_t)19U * b0;
+}
+
+static void
+Hacl_Bignum_Fmul_mul_shift_reduce_(uint128_t *output, uint64_t *input, uint64_t *input2)
+{
+  uint32_t i;
+  uint64_t input2i;
+  {
+    uint32_t i0;
+    for (i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U)
+    {
+      uint64_t input2i0 = input2[i0];
+      Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i0);
+      Hacl_Bignum_Fmul_shift_reduce(input);
+    }
+  }
+  i = (uint32_t)4U;
+  input2i = input2[i];
+  Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input2)
+{
+  uint64_t tmp[5U] = { 0U };
+  memcpy(tmp, input, (uint32_t)5U * sizeof input[0U]);
+  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
+  {
+    uint128_t t[5U];
+    {
+      uint32_t _i;
+      for (_i = 0U; _i < (uint32_t)5U; ++_i)
+        t[_i] = (uint128_t)(uint64_t)0U;
+    }
+    {
+      uint128_t b4;
+      uint128_t b0;
+      uint128_t b4_;
+      uint128_t b0_;
+      uint64_t i0;
+      uint64_t i1;
+      uint64_t i0_;
+      uint64_t i1_;
+      Hacl_Bignum_Fmul_mul_shift_reduce_(t, tmp, input2);
+      Hacl_Bignum_Fproduct_carry_wide_(t);
+      b4 = t[4U];
+      b0 = t[0U];
+      b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
+      b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
+      t[4U] = b4_;
+      t[0U] = b0_;
+      Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
+      i0 = output[0U];
+      i1 = output[1U];
+      i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+      i1_ = i1 + (i0 >> (uint32_t)51U);
+      output[0U] = i0_;
+      output[1U] = i1_;
+    }
+  }
+}
+
+inline static void Hacl_Bignum_Fsquare_fsquare__(uint128_t *tmp, uint64_t *output)
+{
+  uint64_t r0 = output[0U];
+  uint64_t r1 = output[1U];
+  uint64_t r2 = output[2U];
+  uint64_t r3 = output[3U];
+  uint64_t r4 = output[4U];
+  uint64_t d0 = r0 * (uint64_t)2U;
+  uint64_t d1 = r1 * (uint64_t)2U;
+  uint64_t d2 = r2 * (uint64_t)2U * (uint64_t)19U;
+  uint64_t d419 = r4 * (uint64_t)19U;
+  uint64_t d4 = d419 * (uint64_t)2U;
+  uint128_t s0 = (uint128_t)r0 * r0 + (uint128_t)d4 * r1 + (uint128_t)d2 * r3;
+  uint128_t s1 = (uint128_t)d0 * r1 + (uint128_t)d4 * r2 + (uint128_t)(r3 * (uint64_t)19U) * r3;
+  uint128_t s2 = (uint128_t)d0 * r2 + (uint128_t)r1 * r1 + (uint128_t)d4 * r3;
+  uint128_t s3 = (uint128_t)d0 * r3 + (uint128_t)d1 * r2 + (uint128_t)r4 * d419;
+  uint128_t s4 = (uint128_t)d0 * r4 + (uint128_t)d1 * r3 + (uint128_t)r2 * r2;
+  tmp[0U] = s0;
+  tmp[1U] = s1;
+  tmp[2U] = s2;
+  tmp[3U] = s3;
+  tmp[4U] = s4;
+}
+
+inline static void Hacl_Bignum_Fsquare_fsquare_(uint128_t *tmp, uint64_t *output)
+{
+  uint128_t b4;
+  uint128_t b0;
+  uint128_t b4_;
+  uint128_t b0_;
+  uint64_t i0;
+  uint64_t i1;
+  uint64_t i0_;
+  uint64_t i1_;
+  Hacl_Bignum_Fsquare_fsquare__(tmp, output);
+  Hacl_Bignum_Fproduct_carry_wide_(tmp);
+  b4 = tmp[4U];
+  b0 = tmp[0U];
+  b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
+  b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
+  tmp[4U] = b4_;
+  tmp[0U] = b0_;
+  Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+  i0 = output[0U];
+  i1 = output[1U];
+  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+  i1_ = i1 + (i0 >> (uint32_t)51U);
+  output[0U] = i0_;
+  output[1U] = i1_;
+}
+
+static void
+Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *input, uint128_t *tmp, uint32_t count1)
+{
+  uint32_t i;
+  Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+  for (i = (uint32_t)1U; i < count1; i = i + (uint32_t)1U)
+    Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
+{
+  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
+  {
+    uint128_t t[5U];
+    {
+      uint32_t _i;
+      for (_i = 0U; _i < (uint32_t)5U; ++_i)
+        t[_i] = (uint128_t)(uint64_t)0U;
+    }
+    memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
+    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+  }
+}
+
+inline static void Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
+{
+  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
+  {
+    uint128_t t[5U];
+    {
+      uint32_t _i;
+      for (_i = 0U; _i < (uint32_t)5U; ++_i)
+        t[_i] = (uint128_t)(uint64_t)0U;
+    }
+    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+  }
+}
+
+inline static void Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
+{
+  uint64_t buf[20U] = { 0U };
+  uint64_t *a0 = buf;
+  uint64_t *t00 = buf + (uint32_t)5U;
+  uint64_t *b0 = buf + (uint32_t)10U;
+  uint64_t *t01;
+  uint64_t *b1;
+  uint64_t *c0;
+  uint64_t *a;
+  uint64_t *t0;
+  uint64_t *b;
+  uint64_t *c;
+  Hacl_Bignum_Fsquare_fsquare_times(a0, z, (uint32_t)1U);
+  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)2U);
+  Hacl_Bignum_Fmul_fmul(b0, t00, z);
+  Hacl_Bignum_Fmul_fmul(a0, b0, a0);
+  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)1U);
+  Hacl_Bignum_Fmul_fmul(b0, t00, b0);
+  Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t)5U);
+  t01 = buf + (uint32_t)5U;
+  b1 = buf + (uint32_t)10U;
+  c0 = buf + (uint32_t)15U;
+  Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)10U);
+  Hacl_Bignum_Fmul_fmul(c0, t01, b1);
+  Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t)20U);
+  Hacl_Bignum_Fmul_fmul(t01, t01, c0);
+  Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t)10U);
+  Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)50U);
+  a = buf;
+  t0 = buf + (uint32_t)5U;
+  b = buf + (uint32_t)10U;
+  c = buf + (uint32_t)15U;
+  Hacl_Bignum_Fmul_fmul(c, t0, b);
+  Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t)100U);
+  Hacl_Bignum_Fmul_fmul(t0, t0, c);
+  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)50U);
+  Hacl_Bignum_Fmul_fmul(t0, t0, b);
+  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)5U);
+  Hacl_Bignum_Fmul_fmul(out, t0, a);
+}
+
+inline static void Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
+{
+  uint32_t i;
+  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
+  {
+    uint64_t xi = a[i];
+    uint64_t yi = b[i];
+    a[i] = xi + yi;
+  }
+}
+
+inline static void Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
+{
+  uint64_t tmp[5U] = { 0U };
+  uint64_t b0;
+  uint64_t b1;
+  uint64_t b2;
+  uint64_t b3;
+  uint64_t b4;
+  memcpy(tmp, b, (uint32_t)5U * sizeof b[0U]);
+  b0 = tmp[0U];
+  b1 = tmp[1U];
+  b2 = tmp[2U];
+  b3 = tmp[3U];
+  b4 = tmp[4U];
+  tmp[0U] = b0 + (uint64_t)0x3fffffffffff68U;
+  tmp[1U] = b1 + (uint64_t)0x3ffffffffffff8U;
+  tmp[2U] = b2 + (uint64_t)0x3ffffffffffff8U;
+  tmp[3U] = b3 + (uint64_t)0x3ffffffffffff8U;
+  tmp[4U] = b4 + (uint64_t)0x3ffffffffffff8U;
+  {
+    uint32_t i;
+    for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
+    {
+      uint64_t xi = a[i];
+      uint64_t yi = tmp[i];
+      a[i] = yi - xi;
+    }
+  }
+}
+
+inline static void Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
+{
+  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
+  {
+    uint128_t tmp[5U];
+    {
+      uint32_t _i;
+      for (_i = 0U; _i < (uint32_t)5U; ++_i)
+        tmp[_i] = (uint128_t)(uint64_t)0U;
+    }
+    {
+      uint128_t b4;
+      uint128_t b0;
+      uint128_t b4_;
+      uint128_t b0_;
+      {
+        uint32_t i;
+        for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
+        {
+          uint64_t xi = b[i];
+          tmp[i] = (uint128_t)xi * s;
+        }
+      }
+      Hacl_Bignum_Fproduct_carry_wide_(tmp);
+      b4 = tmp[4U];
+      b0 = tmp[0U];
+      b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
+      b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
+      tmp[4U] = b4_;
+      tmp[0U] = b0_;
+      Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+    }
+  }
+}
+
+inline static void Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
+{
+  Hacl_Bignum_Fmul_fmul(output, a, b);
+}
+
+inline static void Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
+{
+  Hacl_Bignum_Crecip_crecip(output, input);
+}
+
+static void
+Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+  uint32_t i = ctr - (uint32_t)1U;
+  uint64_t ai = a[i];
+  uint64_t bi = b[i];
+  uint64_t x = swap1 & (ai ^ bi);
+  uint64_t ai1 = ai ^ x;
+  uint64_t bi1 = bi ^ x;
+  a[i] = ai1;
+  b[i] = bi1;
+}
+
+static void
+Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+  if (!(ctr == (uint32_t)0U))
+  {
+    uint32_t i;
+    Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
+    i = ctr - (uint32_t)1U;
+    Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
+  }
+}
+
+static void Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
+{
+  uint64_t swap1 = (uint64_t)0U - iswap;
+  Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t)5U);
+  Hacl_EC_Point_swap_conditional_(a + (uint32_t)5U, b + (uint32_t)5U, swap1, (uint32_t)5U);
+}
+
+static void Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
+{
+  memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
+  memcpy(output + (uint32_t)5U,
+    input + (uint32_t)5U,
+    (uint32_t)5U * sizeof (input + (uint32_t)5U)[0U]);
+}
+
+static void Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
+{
+  uint64_t i0 = load64_le(input);
+  uint8_t *x00 = input + (uint32_t)6U;
+  uint64_t i1 = load64_le(x00);
+  uint8_t *x01 = input + (uint32_t)12U;
+  uint64_t i2 = load64_le(x01);
+  uint8_t *x02 = input + (uint32_t)19U;
+  uint64_t i3 = load64_le(x02);
+  uint8_t *x0 = input + (uint32_t)24U;
+  uint64_t i4 = load64_le(x0);
+  uint64_t output0 = i0 & (uint64_t)0x7ffffffffffffU;
+  uint64_t output1 = i1 >> (uint32_t)3U & (uint64_t)0x7ffffffffffffU;
+  uint64_t output2 = i2 >> (uint32_t)6U & (uint64_t)0x7ffffffffffffU;
+  uint64_t output3 = i3 >> (uint32_t)1U & (uint64_t)0x7ffffffffffffU;
+  uint64_t output4 = i4 >> (uint32_t)12U & (uint64_t)0x7ffffffffffffU;
+  output[0U] = output0;
+  output[1U] = output1;
+  output[2U] = output2;
+  output[3U] = output3;
+  output[4U] = output4;
+}
+
+static void Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
+{
+  uint64_t t0 = input[0U];
+  uint64_t t1 = input[1U];
+  uint64_t t2 = input[2U];
+  uint64_t t3 = input[3U];
+  uint64_t t4 = input[4U];
+  uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
+  uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
+  uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
+  uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
+  uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
+  uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
+  uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
+  uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
+  input[0U] = t0_;
+  input[1U] = t1__;
+  input[2U] = t2__;
+  input[3U] = t3__;
+  input[4U] = t4_;
+}
+
+static void Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
+{
+  Hacl_EC_Format_fcontract_first_carry_pass(input);
+  Hacl_Bignum_Modulo_carry_top(input);
+}
+
+static void Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
+{
+  uint64_t t0 = input[0U];
+  uint64_t t1 = input[1U];
+  uint64_t t2 = input[2U];
+  uint64_t t3 = input[3U];
+  uint64_t t4 = input[4U];
+  uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
+  uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
+  uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
+  uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
+  uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
+  uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
+  uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
+  uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
+  input[0U] = t0_;
+  input[1U] = t1__;
+  input[2U] = t2__;
+  input[3U] = t3__;
+  input[4U] = t4_;
+}
+
+static void Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
+{
+  uint64_t i0;
+  uint64_t i1;
+  uint64_t i0_;
+  uint64_t i1_;
+  Hacl_EC_Format_fcontract_second_carry_pass(input);
+  Hacl_Bignum_Modulo_carry_top(input);
+  i0 = input[0U];
+  i1 = input[1U];
+  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+  i1_ = i1 + (i0 >> (uint32_t)51U);
+  input[0U] = i0_;
+  input[1U] = i1_;
+}
+
+static void Hacl_EC_Format_fcontract_trim(uint64_t *input)
+{
+  uint64_t a0 = input[0U];
+  uint64_t a1 = input[1U];
+  uint64_t a2 = input[2U];
+  uint64_t a3 = input[3U];
+  uint64_t a4 = input[4U];
+  uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t)0x7ffffffffffedU);
+  uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0x7ffffffffffffU);
+  uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x7ffffffffffffU);
+  uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t)0x7ffffffffffffU);
+  uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t)0x7ffffffffffffU);
+  uint64_t mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
+  uint64_t a0_ = a0 - ((uint64_t)0x7ffffffffffedU & mask);
+  uint64_t a1_ = a1 - ((uint64_t)0x7ffffffffffffU & mask);
+  uint64_t a2_ = a2 - ((uint64_t)0x7ffffffffffffU & mask);
+  uint64_t a3_ = a3 - ((uint64_t)0x7ffffffffffffU & mask);
+  uint64_t a4_ = a4 - ((uint64_t)0x7ffffffffffffU & mask);
+  input[0U] = a0_;
+  input[1U] = a1_;
+  input[2U] = a2_;
+  input[3U] = a3_;
+  input[4U] = a4_;
+}
+
+static void Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
+{
+  uint64_t t0 = input[0U];
+  uint64_t t1 = input[1U];
+  uint64_t t2 = input[2U];
+  uint64_t t3 = input[3U];
+  uint64_t t4 = input[4U];
+  uint64_t o0 = t1 << (uint32_t)51U | t0;
+  uint64_t o1 = t2 << (uint32_t)38U | t1 >> (uint32_t)13U;
+  uint64_t o2 = t3 << (uint32_t)25U | t2 >> (uint32_t)26U;
+  uint64_t o3 = t4 << (uint32_t)12U | t3 >> (uint32_t)39U;
+  uint8_t *b0 = output;
+  uint8_t *b1 = output + (uint32_t)8U;
+  uint8_t *b2 = output + (uint32_t)16U;
+  uint8_t *b3 = output + (uint32_t)24U;
+  store64_le(b0, o0);
+  store64_le(b1, o1);
+  store64_le(b2, o2);
+  store64_le(b3, o3);
+}
+
+static void Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
+{
+  Hacl_EC_Format_fcontract_first_carry_full(input);
+  Hacl_EC_Format_fcontract_second_carry_full(input);
+  Hacl_EC_Format_fcontract_trim(input);
+  Hacl_EC_Format_fcontract_store(output, input);
+}
+
+static void Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
+{
+  uint64_t *x = point;
+  uint64_t *z = point + (uint32_t)5U;
+  uint64_t buf[10U] = { 0U };
+  uint64_t *zmone = buf;
+  uint64_t *sc = buf + (uint32_t)5U;
+  Hacl_Bignum_crecip(zmone, z);
+  Hacl_Bignum_fmul(sc, x, zmone);
+  Hacl_EC_Format_fcontract(scalar, sc);
+}
+
+static void
+Hacl_EC_AddAndDouble_fmonty(
+  uint64_t *pp,
+  uint64_t *ppq,
+  uint64_t *p,
+  uint64_t *pq,
+  uint64_t *qmqp
+)
+{
+  uint64_t *qx = qmqp;
+  uint64_t *x2 = pp;
+  uint64_t *z2 = pp + (uint32_t)5U;
+  uint64_t *x3 = ppq;
+  uint64_t *z3 = ppq + (uint32_t)5U;
+  uint64_t *x = p;
+  uint64_t *z = p + (uint32_t)5U;
+  uint64_t *xprime = pq;
+  uint64_t *zprime = pq + (uint32_t)5U;
+  uint64_t buf[40U] = { 0U };
+  uint64_t *origx = buf;
+  uint64_t *origxprime0 = buf + (uint32_t)5U;
+  uint64_t *xxprime0 = buf + (uint32_t)25U;
+  uint64_t *zzprime0 = buf + (uint32_t)30U;
+  uint64_t *origxprime;
+  uint64_t *xx0;
+  uint64_t *zz0;
+  uint64_t *xxprime;
+  uint64_t *zzprime;
+  uint64_t *zzzprime;
+  uint64_t *zzz;
+  uint64_t *xx;
+  uint64_t *zz;
+  uint64_t scalar;
+  memcpy(origx, x, (uint32_t)5U * sizeof x[0U]);
+  Hacl_Bignum_fsum(x, z);
+  Hacl_Bignum_fdifference(z, origx);
+  memcpy(origxprime0, xprime, (uint32_t)5U * sizeof xprime[0U]);
+  Hacl_Bignum_fsum(xprime, zprime);
+  Hacl_Bignum_fdifference(zprime, origxprime0);
+  Hacl_Bignum_fmul(xxprime0, xprime, z);
+  Hacl_Bignum_fmul(zzprime0, x, zprime);
+  origxprime = buf + (uint32_t)5U;
+  xx0 = buf + (uint32_t)15U;
+  zz0 = buf + (uint32_t)20U;
+  xxprime = buf + (uint32_t)25U;
+  zzprime = buf + (uint32_t)30U;
+  zzzprime = buf + (uint32_t)35U;
+  memcpy(origxprime, xxprime, (uint32_t)5U * sizeof xxprime[0U]);
+  Hacl_Bignum_fsum(xxprime, zzprime);
+  Hacl_Bignum_fdifference(zzprime, origxprime);
+  Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t)1U);
+  Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t)1U);
+  Hacl_Bignum_fmul(z3, zzzprime, qx);
+  Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t)1U);
+  Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t)1U);
+  zzz = buf + (uint32_t)10U;
+  xx = buf + (uint32_t)15U;
+  zz = buf + (uint32_t)20U;
+  Hacl_Bignum_fmul(x2, xx, zz);
+  Hacl_Bignum_fdifference(zz, xx);
+  scalar = (uint64_t)121665U;
+  Hacl_Bignum_fscalar(zzz, zz, scalar);
+  Hacl_Bignum_fsum(zzz, xx);
+  Hacl_Bignum_fmul(z2, zzz, zz);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
+  uint64_t *nq,
+  uint64_t *nqpq,
+  uint64_t *nq2,
+  uint64_t *nqpq2,
+  uint64_t *q,
+  uint8_t byt
+)
+{
+  uint64_t bit0 = (uint64_t)(byt >> (uint32_t)7U);
+  uint64_t bit;
+  Hacl_EC_Point_swap_conditional(nq, nqpq, bit0);
+  Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
+  bit = (uint64_t)(byt >> (uint32_t)7U);
+  Hacl_EC_Point_swap_conditional(nq2, nqpq2, bit);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
+  uint64_t *nq,
+  uint64_t *nqpq,
+  uint64_t *nq2,
+  uint64_t *nqpq2,
+  uint64_t *q,
+  uint8_t byt
+)
+{
+  uint8_t byt1;
+  Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
+  byt1 = byt << (uint32_t)1U;
+  Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
+  uint64_t *nq,
+  uint64_t *nqpq,
+  uint64_t *nq2,
+  uint64_t *nqpq2,
+  uint64_t *q,
+  uint8_t byt,
+  uint32_t i
+)
+{
+  if (!(i == (uint32_t)0U))
+  {
+    uint32_t i_ = i - (uint32_t)1U;
+    uint8_t byt_;
+    Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
+    byt_ = byt << (uint32_t)2U;
+    Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
+  }
+}
+
+static void
+Hacl_EC_Ladder_BigLoop_cmult_big_loop(
+  uint8_t *n1,
+  uint64_t *nq,
+  uint64_t *nqpq,
+  uint64_t *nq2,
+  uint64_t *nqpq2,
+  uint64_t *q,
+  uint32_t i
+)
+{
+  if (!(i == (uint32_t)0U))
+  {
+    uint32_t i1 = i - (uint32_t)1U;
+    uint8_t byte = n1[i1];
+    Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t)4U);
+    Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
+  }
+}
+
+static void Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
+{
+  uint64_t point_buf[40U] = { 0U };
+  uint64_t *nq = point_buf;
+  uint64_t *nqpq = point_buf + (uint32_t)10U;
+  uint64_t *nq2 = point_buf + (uint32_t)20U;
+  uint64_t *nqpq2 = point_buf + (uint32_t)30U;
+  Hacl_EC_Point_copy(nqpq, q);
+  nq[0U] = (uint64_t)1U;
+  Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t)32U);
+  Hacl_EC_Point_copy(result, nq);
+}
+
+void Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
+{
+  uint64_t buf0[10U] = { 0U };
+  uint64_t *x0 = buf0;
+  uint64_t *z = buf0 + (uint32_t)5U;
+  uint64_t *q;
+  Hacl_EC_Format_fexpand(x0, basepoint);
+  z[0U] = (uint64_t)1U;
+  q = buf0;
+  {
+    uint8_t e[32U] = { 0U };
+    uint8_t e0;
+    uint8_t e31;
+    uint8_t e01;
+    uint8_t e311;
+    uint8_t e312;
+    uint8_t *scalar;
+    memcpy(e, secret, (uint32_t)32U * sizeof secret[0U]);
+    e0 = e[0U];
+    e31 = e[31U];
+    e01 = e0 & (uint8_t)248U;
+    e311 = e31 & (uint8_t)127U;
+    e312 = e311 | (uint8_t)64U;
+    e[0U] = e01;
+    e[31U] = e312;
+    scalar = e;
+    {
+      uint64_t buf[15U] = { 0U };
+      uint64_t *nq = buf;
+      uint64_t *x = nq;
+      x[0U] = (uint64_t)1U;
+      Hacl_EC_Ladder_cmult(nq, scalar, q);
+      Hacl_EC_Format_scalar_of_point(mypublic, nq);
+    }
+  }
+}
+
diff --git a/3rdparty/everest/library/kremlib/FStar_UInt128_extracted.c b/3rdparty/everest/library/kremlib/FStar_UInt128_extracted.c
new file mode 100644
index 0000000..1060515
--- /dev/null
+++ b/3rdparty/everest/library/kremlib/FStar_UInt128_extracted.c
@@ -0,0 +1,413 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+   Licensed under the Apache 2.0 License. */
+
+/* This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
+ * KreMLin invocation: ../krml -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrB9w -minimal -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -tmpdir extracted -warn-error +9+11 -skip-compilation -extract-uints -add-include <inttypes.h> -add-include "kremlib.h" -add-include "kremlin/internal/compat.h" extracted/prims.krml extracted/FStar_Pervasives_Native.krml extracted/FStar_Pervasives.krml extracted/FStar_Mul.krml extracted/FStar_Squash.krml extracted/FStar_Classical.krml extracted/FStar_StrongExcludedMiddle.krml extracted/FStar_FunctionalExtensionality.krml extracted/FStar_List_Tot_Base.krml extracted/FStar_List_Tot_Properties.krml extracted/FStar_List_Tot.krml extracted/FStar_Seq_Base.krml extracted/FStar_Seq_Properties.krml extracted/FStar_Seq.krml extracted/FStar_Math_Lib.krml extracted/FStar_Math_Lemmas.krml extracted/FStar_BitVector.krml extracted/FStar_UInt.krml extracted/FStar_UInt32.krml extracted/FStar_Int.krml extracted/FStar_Int16.krml extracted/FStar_Preorder.krml extracted/FStar_Ghost.krml extracted/FStar_ErasedLogic.krml extracted/FStar_UInt64.krml extracted/FStar_Set.krml extracted/FStar_PropositionalExtensionality.krml extracted/FStar_PredicateExtensionality.krml extracted/FStar_TSet.krml extracted/FStar_Monotonic_Heap.krml extracted/FStar_Heap.krml extracted/FStar_Map.krml extracted/FStar_Monotonic_HyperHeap.krml extracted/FStar_Monotonic_HyperStack.krml extracted/FStar_HyperStack.krml extracted/FStar_Monotonic_Witnessed.krml extracted/FStar_HyperStack_ST.krml extracted/FStar_HyperStack_All.krml extracted/FStar_Date.krml extracted/FStar_Universe.krml extracted/FStar_GSet.krml extracted/FStar_ModifiesGen.krml extracted/LowStar_Monotonic_Buffer.krml extracted/LowStar_Buffer.krml extracted/Spec_Loops.krml extracted/LowStar_BufferOps.krml extracted/C_Loops.krml extracted/FStar_UInt8.krml extracted/FStar_Kremlin_Endianness.krml extracted/FStar_UInt63.krml extracted/FStar_Exn.krml extracted/FStar_ST.krml extracted/FStar_All.krml extracted/FStar_Dyn.krml extracted/FStar_Int63.krml extracted/FStar_Int64.krml extracted/FStar_Int32.krml extracted/FStar_Int8.krml extracted/FStar_UInt16.krml extracted/FStar_Int_Cast.krml extracted/FStar_UInt128.krml extracted/C_Endianness.krml extracted/FStar_List.krml extracted/FStar_Float.krml extracted/FStar_IO.krml extracted/C.krml extracted/FStar_Char.krml extracted/FStar_String.krml extracted/LowStar_Modifies.krml extracted/C_String.krml extracted/FStar_Bytes.krml extracted/FStar_HyperStack_IO.krml extracted/C_Failure.krml extracted/TestLib.krml extracted/FStar_Int_Cast_Full.krml
+ * F* version: 059db0c8
+ * KreMLin version: 916c37ac
+ */
+
+
+#include "FStar_UInt128.h"
+#include "kremlin/c_endianness.h"
+#include "FStar_UInt64_FStar_UInt32_FStar_UInt16_FStar_UInt8.h"
+
+uint64_t FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee)
+{
+  return projectee.low;
+}
+
+uint64_t FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee)
+{
+  return projectee.high;
+}
+
+static uint64_t FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
+{
+  return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
+}
+
+static uint64_t FStar_UInt128_carry(uint64_t a, uint64_t b)
+{
+  return FStar_UInt128_constant_time_carry(a, b);
+}
+
+FStar_UInt128_uint128 FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat = { a.low + b.low, a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) };
+  return flat;
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat = { a.low + b.low, a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat = { a.low + b.low, a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat = { a.low - b.low, a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) };
+  return flat;
+}
+
+FStar_UInt128_uint128
+FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat = { a.low - b.low, a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) };
+  return flat;
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat = { a.low - b.low, a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return FStar_UInt128_sub_mod_impl(a, b);
+}
+
+FStar_UInt128_uint128 FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128 flat = { a.low & b.low, a.high & b.high };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128 flat = { a.low ^ b.low, a.high ^ b.high };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128 flat = { a.low | b.low, a.high | b.high };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a)
+{
+  FStar_UInt128_uint128 flat = { ~a.low, ~a.high };
+  return flat;
+}
+
+static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
+
+static uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
+{
+  return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
+}
+
+static uint64_t FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+  return FStar_UInt128_add_u64_shift_left(hi, lo, s);
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+  if (s == (uint32_t)0U)
+  {
+    return a;
+  }
+  else
+  {
+    FStar_UInt128_uint128
+    flat = { a.low << s, FStar_UInt128_add_u64_shift_left_respec(a.high, a.low, s) };
+    return flat;
+  }
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+  FStar_UInt128_uint128 flat = { (uint64_t)0U, a.low << (s - FStar_UInt128_u32_64) };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
+{
+  if (s < FStar_UInt128_u32_64)
+  {
+    return FStar_UInt128_shift_left_small(a, s);
+  }
+  else
+  {
+    return FStar_UInt128_shift_left_large(a, s);
+  }
+}
+
+static uint64_t FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
+{
+  return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
+}
+
+static uint64_t FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
+{
+  return FStar_UInt128_add_u64_shift_right(hi, lo, s);
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
+{
+  if (s == (uint32_t)0U)
+  {
+    return a;
+  }
+  else
+  {
+    FStar_UInt128_uint128
+    flat = { FStar_UInt128_add_u64_shift_right_respec(a.high, a.low, s), a.high >> s };
+    return flat;
+  }
+}
+
+static FStar_UInt128_uint128
+FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
+{
+  FStar_UInt128_uint128 flat = { a.high >> (s - FStar_UInt128_u32_64), (uint64_t)0U };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
+{
+  if (s < FStar_UInt128_u32_64)
+  {
+    return FStar_UInt128_shift_right_small(a, s);
+  }
+  else
+  {
+    return FStar_UInt128_shift_right_large(a, s);
+  }
+}
+
+bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return a.low == b.low && a.high == b.high;
+}
+
+bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return a.high > b.high || (a.high == b.high && a.low > b.low);
+}
+
+bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return a.high < b.high || (a.high == b.high && a.low < b.low);
+}
+
+bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return a.high > b.high || (a.high == b.high && a.low >= b.low);
+}
+
+bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  return a.high < b.high || (a.high == b.high && a.low <= b.low);
+}
+
+FStar_UInt128_uint128 FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat =
+    {
+      FStar_UInt64_eq_mask(a.low,
+        b.low)
+      & FStar_UInt64_eq_mask(a.high, b.high),
+      FStar_UInt64_eq_mask(a.low,
+        b.low)
+      & FStar_UInt64_eq_mask(a.high, b.high)
+    };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
+{
+  FStar_UInt128_uint128
+  flat =
+    {
+      (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high))
+      | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)),
+      (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high))
+      | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low))
+    };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a)
+{
+  FStar_UInt128_uint128 flat = { a, (uint64_t)0U };
+  return flat;
+}
+
+uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
+{
+  return a.low;
+}
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Plus_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_add;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Plus_Question_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_add_underspec;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Plus_Percent_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_add_mod;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Subtraction_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_sub;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Subtraction_Question_Hat)(
+  FStar_UInt128_uint128 x0,
+  FStar_UInt128_uint128 x1
+) = FStar_UInt128_sub_underspec;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Subtraction_Percent_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_sub_mod;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Amp_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_logand;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Hat_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_logxor;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Bar_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_logor;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Less_Less_Hat)(FStar_UInt128_uint128 x0, uint32_t x1) =
+  FStar_UInt128_shift_left;
+
+FStar_UInt128_uint128
+(*FStar_UInt128_op_Greater_Greater_Hat)(FStar_UInt128_uint128 x0, uint32_t x1) =
+  FStar_UInt128_shift_right;
+
+bool
+(*FStar_UInt128_op_Equals_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_eq;
+
+bool
+(*FStar_UInt128_op_Greater_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_gt;
+
+bool
+(*FStar_UInt128_op_Less_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_lt;
+
+bool
+(*FStar_UInt128_op_Greater_Equals_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_gte;
+
+bool
+(*FStar_UInt128_op_Less_Equals_Hat)(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1) =
+  FStar_UInt128_lte;
+
+static uint64_t FStar_UInt128_u64_mod_32(uint64_t a)
+{
+  return a & (uint64_t)0xffffffffU;
+}
+
+static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
+
+static uint64_t FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo)
+{
+  return lo + (hi << FStar_UInt128_u32_32);
+}
+
+FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y)
+{
+  FStar_UInt128_uint128
+  flat =
+    {
+      FStar_UInt128_u32_combine((x >> FStar_UInt128_u32_32)
+        * (uint64_t)y
+        + (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >> FStar_UInt128_u32_32),
+        FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * (uint64_t)y)),
+      ((x >> FStar_UInt128_u32_32)
+      * (uint64_t)y
+      + (FStar_UInt128_u64_mod_32(x) * (uint64_t)y >> FStar_UInt128_u32_32))
+      >> FStar_UInt128_u32_32
+    };
+  return flat;
+}
+
+typedef struct K___uint64_t_uint64_t_uint64_t_uint64_t_s
+{
+  uint64_t fst;
+  uint64_t snd;
+  uint64_t thd;
+  uint64_t f3;
+}
+K___uint64_t_uint64_t_uint64_t_uint64_t;
+
+static K___uint64_t_uint64_t_uint64_t_uint64_t
+FStar_UInt128_mul_wide_impl_t_(uint64_t x, uint64_t y)
+{
+  K___uint64_t_uint64_t_uint64_t_uint64_t
+  flat =
+    {
+      FStar_UInt128_u64_mod_32(x),
+      FStar_UInt128_u64_mod_32(FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y)),
+      x
+      >> FStar_UInt128_u32_32,
+      (x >> FStar_UInt128_u32_32)
+      * FStar_UInt128_u64_mod_32(y)
+      + (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32)
+    };
+  return flat;
+}
+
+static uint64_t FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
+{
+  return lo + (hi << FStar_UInt128_u32_32);
+}
+
+static FStar_UInt128_uint128 FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y)
+{
+  K___uint64_t_uint64_t_uint64_t_uint64_t scrut = FStar_UInt128_mul_wide_impl_t_(x, y);
+  uint64_t u1 = scrut.fst;
+  uint64_t w3 = scrut.snd;
+  uint64_t x_ = scrut.thd;
+  uint64_t t_ = scrut.f3;
+  FStar_UInt128_uint128
+  flat =
+    {
+      FStar_UInt128_u32_combine_(u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_),
+        w3),
+      x_
+      * (y >> FStar_UInt128_u32_32)
+      + (t_ >> FStar_UInt128_u32_32)
+      + ((u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_)) >> FStar_UInt128_u32_32)
+    };
+  return flat;
+}
+
+FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
+{
+  return FStar_UInt128_mul_wide_impl(x, y);
+}
+
diff --git a/3rdparty/everest/library/kremlib/FStar_UInt64_FStar_UInt32_FStar_UInt16_FStar_UInt8.c b/3rdparty/everest/library/kremlib/FStar_UInt64_FStar_UInt32_FStar_UInt16_FStar_UInt8.c
new file mode 100644
index 0000000..0826524
--- /dev/null
+++ b/3rdparty/everest/library/kremlib/FStar_UInt64_FStar_UInt32_FStar_UInt16_FStar_UInt8.c
@@ -0,0 +1,100 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+   Licensed under the Apache 2.0 License. */
+
+/* This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
+ * KreMLin invocation: ../krml -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrB9w -minimal -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -tmpdir dist/minimal -skip-compilation -extract-uints -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/internal/types.h" -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=* extracted/prims.krml extracted/FStar_Pervasives_Native.krml extracted/FStar_Pervasives.krml extracted/FStar_Mul.krml extracted/FStar_Squash.krml extracted/FStar_Classical.krml extracted/FStar_StrongExcludedMiddle.krml extracted/FStar_FunctionalExtensionality.krml extracted/FStar_List_Tot_Base.krml extracted/FStar_List_Tot_Properties.krml extracted/FStar_List_Tot.krml extracted/FStar_Seq_Base.krml extracted/FStar_Seq_Properties.krml extracted/FStar_Seq.krml extracted/FStar_Math_Lib.krml extracted/FStar_Math_Lemmas.krml extracted/FStar_BitVector.krml extracted/FStar_UInt.krml extracted/FStar_UInt32.krml extracted/FStar_Int.krml extracted/FStar_Int16.krml extracted/FStar_Preorder.krml extracted/FStar_Ghost.krml extracted/FStar_ErasedLogic.krml extracted/FStar_UInt64.krml extracted/FStar_Set.krml extracted/FStar_PropositionalExtensionality.krml extracted/FStar_PredicateExtensionality.krml extracted/FStar_TSet.krml extracted/FStar_Monotonic_Heap.krml extracted/FStar_Heap.krml extracted/FStar_Map.krml extracted/FStar_Monotonic_HyperHeap.krml extracted/FStar_Monotonic_HyperStack.krml extracted/FStar_HyperStack.krml extracted/FStar_Monotonic_Witnessed.krml extracted/FStar_HyperStack_ST.krml extracted/FStar_HyperStack_All.krml extracted/FStar_Date.krml extracted/FStar_Universe.krml extracted/FStar_GSet.krml extracted/FStar_ModifiesGen.krml extracted/LowStar_Monotonic_Buffer.krml extracted/LowStar_Buffer.krml extracted/Spec_Loops.krml extracted/LowStar_BufferOps.krml extracted/C_Loops.krml extracted/FStar_UInt8.krml extracted/FStar_Kremlin_Endianness.krml extracted/FStar_UInt63.krml extracted/FStar_Exn.krml extracted/FStar_ST.krml extracted/FStar_All.krml extracted/FStar_Dyn.krml extracted/FStar_Int63.krml extracted/FStar_Int64.krml extracted/FStar_Int32.krml extracted/FStar_Int8.krml extracted/FStar_UInt16.krml extracted/FStar_Int_Cast.krml extracted/FStar_UInt128.krml extracted/C_Endianness.krml extracted/FStar_List.krml extracted/FStar_Float.krml extracted/FStar_IO.krml extracted/C.krml extracted/FStar_Char.krml extracted/FStar_String.krml extracted/LowStar_Modifies.krml extracted/C_String.krml extracted/FStar_Bytes.krml extracted/FStar_HyperStack_IO.krml extracted/C_Failure.krml extracted/TestLib.krml extracted/FStar_Int_Cast_Full.krml
+ * F* version: 059db0c8
+ * KreMLin version: 916c37ac
+ */
+
+
+#include "FStar_UInt64_FStar_UInt32_FStar_UInt16_FStar_UInt8.h"
+
+uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
+{
+  uint64_t x = a ^ b;
+  uint64_t minus_x = ~x + (uint64_t)1U;
+  uint64_t x_or_minus_x = x | minus_x;
+  uint64_t xnx = x_or_minus_x >> (uint32_t)63U;
+  return xnx - (uint64_t)1U;
+}
+
+uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
+{
+  uint64_t x = a;
+  uint64_t y = b;
+  uint64_t x_xor_y = x ^ y;
+  uint64_t x_sub_y = x - y;
+  uint64_t x_sub_y_xor_y = x_sub_y ^ y;
+  uint64_t q = x_xor_y | x_sub_y_xor_y;
+  uint64_t x_xor_q = x ^ q;
+  uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U;
+  return x_xor_q_ - (uint64_t)1U;
+}
+
+uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
+{
+  uint32_t x = a ^ b;
+  uint32_t minus_x = ~x + (uint32_t)1U;
+  uint32_t x_or_minus_x = x | minus_x;
+  uint32_t xnx = x_or_minus_x >> (uint32_t)31U;
+  return xnx - (uint32_t)1U;
+}
+
+uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
+{
+  uint32_t x = a;
+  uint32_t y = b;
+  uint32_t x_xor_y = x ^ y;
+  uint32_t x_sub_y = x - y;
+  uint32_t x_sub_y_xor_y = x_sub_y ^ y;
+  uint32_t q = x_xor_y | x_sub_y_xor_y;
+  uint32_t x_xor_q = x ^ q;
+  uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U;
+  return x_xor_q_ - (uint32_t)1U;
+}
+
+uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
+{
+  uint16_t x = a ^ b;
+  uint16_t minus_x = ~x + (uint16_t)1U;
+  uint16_t x_or_minus_x = x | minus_x;
+  uint16_t xnx = x_or_minus_x >> (uint32_t)15U;
+  return xnx - (uint16_t)1U;
+}
+
+uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
+{
+  uint16_t x = a;
+  uint16_t y = b;
+  uint16_t x_xor_y = x ^ y;
+  uint16_t x_sub_y = x - y;
+  uint16_t x_sub_y_xor_y = x_sub_y ^ y;
+  uint16_t q = x_xor_y | x_sub_y_xor_y;
+  uint16_t x_xor_q = x ^ q;
+  uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U;
+  return x_xor_q_ - (uint16_t)1U;
+}
+
+uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
+{
+  uint8_t x = a ^ b;
+  uint8_t minus_x = ~x + (uint8_t)1U;
+  uint8_t x_or_minus_x = x | minus_x;
+  uint8_t xnx = x_or_minus_x >> (uint32_t)7U;
+  return xnx - (uint8_t)1U;
+}
+
+uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
+{
+  uint8_t x = a;
+  uint8_t y = b;
+  uint8_t x_xor_y = x ^ y;
+  uint8_t x_sub_y = x - y;
+  uint8_t x_sub_y_xor_y = x_sub_y ^ y;
+  uint8_t q = x_xor_y | x_sub_y_xor_y;
+  uint8_t x_xor_q = x ^ q;
+  uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U;
+  return x_xor_q_ - (uint8_t)1U;
+}
+
diff --git a/3rdparty/everest/library/kremlib/fstar_uint128.c b/3rdparty/everest/library/kremlib/fstar_uint128.c
new file mode 100644
index 0000000..cadfbc7
--- /dev/null
+++ b/3rdparty/everest/library/kremlib/fstar_uint128.c
@@ -0,0 +1,216 @@
+/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
+   Licensed under the Apache 2.0 License. */
+
+/******************************************************************************/
+/* Machine integers (128-bit arithmetic)                                      */
+/******************************************************************************/
+
+/* This header makes KreMLin-generated C code work with:
+ * - the default setting where we assume the target compiler defines __int128
+ * - the setting where we use FStar.UInt128's implementation instead; in that
+ *   case, generated C files must be compiled with -DKRML_VERIFIED_UINT128
+ * - a refinement of the case above, wherein all structures are passed by
+ *   reference, a.k.a. "-fnostruct-passing", meaning that the KreMLin-generated
+ *   code must be compiled with -DKRML_NOSTRUCT_PASSING
+ * Note: no MSVC support in this file.
+ */
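+
+/* For illustration only (these commands are not part of the Everest build
+ * scripts), the three settings above roughly correspond to compiler
+ * invocations along these lines:
+ *
+ *   cc -c fstar_uint128.c                            (native __int128)
+ *   cc -DKRML_VERIFIED_UINT128 -c fstar_uint128.c    (verified uint128)
+ *   cc -DKRML_VERIFIED_UINT128 -DKRML_NOSTRUCT_PASSING -c fstar_uint128.c
+ */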
+
+#include "FStar_UInt128.h"
+#include "kremlin/c_endianness.h"
+#include "FStar_UInt64_FStar_UInt32_FStar_UInt16_FStar_UInt8.h"
+
+#if !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER)
+
+/* Default case: the target compiler provides native unsigned __int128 support */
+
+uint128_t load128_le(uint8_t *b) {
+  uint128_t l = (uint128_t)load64_le(b);
+  uint128_t h = (uint128_t)load64_le(b + 8);
+  return (h << 64 | l);
+}
+
+void store128_le(uint8_t *b, uint128_t n) {
+  store64_le(b, (uint64_t)n);
+  store64_le(b + 8, (uint64_t)(n >> 64));
+}
+
+uint128_t load128_be(uint8_t *b) {
+  uint128_t h = (uint128_t)load64_be(b);
+  uint128_t l = (uint128_t)load64_be(b + 8);
+  return (h << 64 | l);
+}
+
+void store128_be(uint8_t *b, uint128_t n) {
+  store64_be(b, (uint64_t)(n >> 64));
+  store64_be(b + 8, (uint64_t)n);
+}
+
+uint128_t FStar_UInt128_add(uint128_t x, uint128_t y) {
+  return x + y;
+}
+
+uint128_t FStar_UInt128_mul(uint128_t x, uint128_t y) {
+  return x * y;
+}
+
+uint128_t FStar_UInt128_add_mod(uint128_t x, uint128_t y) {
+  return x + y;
+}
+
+uint128_t FStar_UInt128_sub(uint128_t x, uint128_t y) {
+  return x - y;
+}
+
+uint128_t FStar_UInt128_sub_mod(uint128_t x, uint128_t y) {
+  return x - y;
+}
+
+uint128_t FStar_UInt128_logand(uint128_t x, uint128_t y) {
+  return x & y;
+}
+
+uint128_t FStar_UInt128_logor(uint128_t x, uint128_t y) {
+  return x | y;
+}
+
+uint128_t FStar_UInt128_logxor(uint128_t x, uint128_t y) {
+  return x ^ y;
+}
+
+uint128_t FStar_UInt128_lognot(uint128_t x) {
+  return ~x;
+}
+
+uint128_t FStar_UInt128_shift_left(uint128_t x, uint32_t y) {
+  return x << y;
+}
+
+uint128_t FStar_UInt128_shift_right(uint128_t x, uint32_t y) {
+  return x >> y;
+}
+
+uint128_t FStar_UInt128_uint64_to_uint128(uint64_t x) {
+  return (uint128_t)x;
+}
+
+uint64_t FStar_UInt128_uint128_to_uint64(uint128_t x) {
+  return (uint64_t)x;
+}
+
+uint128_t FStar_UInt128_mul_wide(uint64_t x, uint64_t y) {
+  return ((uint128_t) x) * y;
+}
+
+uint128_t FStar_UInt128_eq_mask(uint128_t x, uint128_t y) {
+  uint64_t mask =
+      FStar_UInt64_eq_mask((uint64_t)(x >> 64), (uint64_t)(y >> 64)) &
+      FStar_UInt64_eq_mask(x, y);
+  return ((uint128_t)mask) << 64 | mask;
+}
+
+uint128_t FStar_UInt128_gte_mask(uint128_t x, uint128_t y) {
+  uint64_t mask =
+      (FStar_UInt64_gte_mask(x >> 64, y >> 64) &
+       ~(FStar_UInt64_eq_mask(x >> 64, y >> 64))) |
+      (FStar_UInt64_eq_mask(x >> 64, y >> 64) & FStar_UInt64_gte_mask(x, y));
+  return ((uint128_t)mask) << 64 | mask;
+}
+
+uint128_t FStar_Int_Cast_Full_uint64_to_uint128(uint64_t x) {
+  return x;
+}
+
+uint64_t FStar_Int_Cast_Full_uint128_to_uint64(uint128_t x) {
+  return x;
+}
+
+#elif !defined(_MSC_VER) && defined(KRML_VERIFIED_UINT128)
+
+/* Verified uint128 implementation. */
+
+/* Access 64-bit fields within the int128. */
+#define HIGH64_OF(x) ((x)->high)
+#define LOW64_OF(x)  ((x)->low)
+
+typedef FStar_UInt128_uint128 FStar_UInt128_t_, uint128_t;
+
+/* A series of definitions written using pointers. */
+
+void load128_le_(uint8_t *b, uint128_t *r) {
+  LOW64_OF(r) = load64_le(b);
+  HIGH64_OF(r) = load64_le(b + 8);
+}
+
+void store128_le_(uint8_t *b, uint128_t *n) {
+  store64_le(b, LOW64_OF(n));
+  store64_le(b + 8, HIGH64_OF(n));
+}
+
+void load128_be_(uint8_t *b, uint128_t *r) {
+  HIGH64_OF(r) = load64_be(b);
+  LOW64_OF(r) = load64_be(b + 8);
+}
+
+void store128_be_(uint8_t *b, uint128_t *n) {
+  store64_be(b, HIGH64_OF(n));
+  store64_be(b + 8, LOW64_OF(n));
+}
+
+void
+FStar_Int_Cast_Full_uint64_to_uint128_(uint64_t x, uint128_t *dst) {
+  /* C89 */
+  LOW64_OF(dst) = x;
+  HIGH64_OF(dst) = 0;
+}
+
+uint64_t FStar_Int_Cast_Full_uint128_to_uint64_(uint128_t *x) {
+  return LOW64_OF(x);
+}
+
+#    ifndef KRML_NOSTRUCT_PASSING
+
+uint128_t load128_le(uint8_t *b) {
+  uint128_t r;
+  load128_le_(b, &r);
+  return r;
+}
+
+void store128_le(uint8_t *b, uint128_t n) {
+  store128_le_(b, &n);
+}
+
+uint128_t load128_be(uint8_t *b) {
+  uint128_t r;
+  load128_be_(b, &r);
+  return r;
+}
+
+void store128_be(uint8_t *b, uint128_t n) {
+  store128_be_(b, &n);
+}
+
+uint128_t FStar_Int_Cast_Full_uint64_to_uint128(uint64_t x) {
+  uint128_t dst;
+  FStar_Int_Cast_Full_uint64_to_uint128_(x, &dst);
+  return dst;
+}
+
+uint64_t FStar_Int_Cast_Full_uint128_to_uint64(uint128_t x) {
+  return FStar_Int_Cast_Full_uint128_to_uint64_(&x);
+}
+
+#    else /* defined(KRML_NOSTRUCT_PASSING) */
+
+#      define print128 print128_
+#      define load128_le load128_le_
+#      define store128_le store128_le_
+#      define load128_be load128_be_
+#      define store128_be store128_be_
+#      define FStar_Int_Cast_Full_uint128_to_uint64                            \
+        FStar_Int_Cast_Full_uint128_to_uint64_
+#      define FStar_Int_Cast_Full_uint64_to_uint128                            \
+        FStar_Int_Cast_Full_uint64_to_uint128_
+
+#    endif /* KRML_NOSTRUCT_PASSING */
+
+#endif