Add Intel Indirect Branch Tracking support.

This allows operating systems to insist on IBT
enforcement as an exploit mitigation mechanism without
needing to make an exception for anything using a
bundled boringssl, such as chrome, mono, and qtwebengine.

Change-Id: Iac28dd3d2af177b89ffde10ae97bce23739feb94
Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/60625
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: Bob Beck <bbe@google.com>
Reviewed-by: David Benjamin <davidben@google.com>
chromium-stable
Bob Beck 1 year ago committed by Boringssl LUCI CQ
parent 80dcb67d44
commit 9fc1c33e9c
  1. 1
      crypto/chacha/asm/chacha-x86_64.pl
  2. 17
      crypto/cipher_extra/asm/aes128gcmsiv-x86_64.pl
  3. 2
      crypto/cipher_extra/asm/chacha20_poly1305_x86_64.pl
  4. 9
      crypto/fipsmodule/aes/asm/aesni-x86_64.pl
  5. 6
      crypto/fipsmodule/aes/asm/vpaes-x86_64.pl
  6. 6
      crypto/fipsmodule/bn/asm/rsaz-avx2.pl
  7. 1
      crypto/fipsmodule/bn/asm/x86_64-mont.pl
  8. 6
      crypto/fipsmodule/bn/asm/x86_64-mont5.pl
  9. 12
      crypto/fipsmodule/ec/asm/p256-x86_64-asm.pl
  10. 1
      crypto/fipsmodule/ec/asm/p256_beeu-x86_64-asm.pl
  11. 1
      crypto/fipsmodule/md5/asm/md5-x86_64.pl
  12. 4
      crypto/fipsmodule/modes/asm/aesni-gcm-x86_64.pl
  13. 2
      crypto/fipsmodule/modes/asm/ghash-ssse3-x86_64.pl
  14. 6
      crypto/fipsmodule/modes/asm/ghash-x86_64.pl
  15. 2
      crypto/fipsmodule/rand/asm/rdrand-x86_64.pl
  16. 1
      crypto/fipsmodule/sha/asm/sha1-x86_64.pl
  17. 1
      crypto/fipsmodule/sha/asm/sha512-x86_64.pl
  18. 1
      crypto/hrss/asm/poly_rq_mul.S
  19. 1
      crypto/perlasm/x86_64-xlate.pl
  20. 7
      crypto/test/asm/trampoline-x86_64.pl
  21. 19
      include/openssl/asm_base.h
  22. 1
      third_party/fiat/asm/fiat_curve25519_adx_mul.S
  23. 1
      third_party/fiat/asm/fiat_curve25519_adx_square.S

@ -231,6 +231,7 @@ $code.=<<___;
.align 64
ChaCha20_ctr32:
.cfi_startproc
_CET_ENDBR
cmp \$0,$len
je .Lno_data
mov OPENSSL_ia32cap_P+4(%rip),%r10

@ -134,6 +134,7 @@ $code.=<<___;
.align 16
aesgcmsiv_htable_init:
.cfi_startproc
_CET_ENDBR
vmovdqa ($H), $T
vmovdqa $T, $TMP0
vmovdqa $T, ($Htbl) # H
@ -174,6 +175,7 @@ sub aesgcmsiv_htable6_init {
.align 16
aesgcmsiv_htable6_init:
.cfi_startproc
_CET_ENDBR
vmovdqa ($H), $T
vmovdqa $T, $TMP0
vmovdqa $T, ($Htbl) # H
@ -235,6 +237,7 @@ ___
.align 16
aesgcmsiv_htable_polyval:
.cfi_startproc
_CET_ENDBR
test $len, $len
jnz .Lhtable_polyval_start
ret
@ -420,6 +423,7 @@ sub aesgcmsiv_polyval_horner {
.align 16
aesgcmsiv_polyval_horner:
.cfi_startproc
_CET_ENDBR
test $L, $L
jnz .Lpolyval_horner_start
ret
@ -460,6 +464,7 @@ $code.=<<___;
.align 16
aes128gcmsiv_aes_ks:
.cfi_startproc
_CET_ENDBR
vmovdqu (%rdi), %xmm1 # xmm1 = user key
vmovdqa %xmm1, (%rsi) # rsi points to output
@ -521,6 +526,7 @@ $code.=<<___;
.align 16
aes256gcmsiv_aes_ks:
.cfi_startproc
_CET_ENDBR
vmovdqu (%rdi), %xmm1
vmovdqu 16(%rdi), %xmm3
vmovdqa %xmm1, (%rsi)
@ -614,6 +620,7 @@ ___
.align 16
aes128gcmsiv_aes_ks_enc_x1:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rcx), %xmm1 # xmm1 = first 16 bytes of random key
vmovdqa 0*16(%rdi), $BLOCK1
@ -687,6 +694,7 @@ ___
.align 16
aes128gcmsiv_kdf:
.cfi_startproc
_CET_ENDBR
# parameter 1: %rdi Pointer to NONCE
# parameter 2: %rsi Pointer to CT
# parameter 4: %rdx Pointer to keys
@ -787,6 +795,7 @@ ___
.align 16
aes128gcmsiv_enc_msg_x4:
.cfi_startproc
_CET_ENDBR
test $LEN, $LEN
jnz .L128_enc_msg_x4_start
ret
@ -984,6 +993,7 @@ ___
.align 16
aes128gcmsiv_enc_msg_x8:
.cfi_startproc
_CET_ENDBR
test $LEN, $LEN
jnz .L128_enc_msg_x8_start
ret
@ -1239,6 +1249,7 @@ ___
$code.=<<___;
.cfi_startproc
_CET_ENDBR
test \$~15, $LEN
jnz .L${labelPrefix}_dec_start
ret
@ -1578,6 +1589,7 @@ sub aes128gcmsiv_ecb_enc_block {
.align 16
aes128gcmsiv_ecb_enc_block:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rdi), $STATE_1
vpxor ($KSp), $STATE_1, $STATE_1
@ -1670,6 +1682,7 @@ ___
.align 16
aes256gcmsiv_aes_ks_enc_x1:
.cfi_startproc
_CET_ENDBR
vmovdqa con1(%rip), $CON_MASK # CON_MASK = 1,1,1,1
vmovdqa mask(%rip), $MASK_256 # MASK_256
vmovdqa ($PT), $BLOCK1
@ -1711,6 +1724,7 @@ sub aes256gcmsiv_ecb_enc_block {
.align 16
aes256gcmsiv_ecb_enc_block:
.cfi_startproc
_CET_ENDBR
vmovdqa (%rdi), $STATE_1
vpxor ($KSp), $STATE_1, $STATE_1
vaesenc 1*16($KSp), $STATE_1, $STATE_1
@ -1794,6 +1808,7 @@ ___
.align 16
aes256gcmsiv_enc_msg_x4:
.cfi_startproc
_CET_ENDBR
test $LEN, $LEN
jnz .L256_enc_msg_x4_start
ret
@ -1994,6 +2009,7 @@ ___
.align 16
aes256gcmsiv_enc_msg_x8:
.cfi_startproc
_CET_ENDBR
test $LEN, $LEN
jnz .L256_enc_msg_x8_start
ret
@ -2200,6 +2216,7 @@ ___
.align 16
aes256gcmsiv_kdf:
.cfi_startproc
_CET_ENDBR
# parameter 1: %rdi Pointer to NONCE
# parameter 2: %rsi Pointer to CT
# parameter 4: %rdx Pointer to keys

@ -449,6 +449,7 @@ $code.="
.align 64
chacha20_poly1305_open:
.cfi_startproc
_CET_ENDBR
push %rbp
.cfi_push %rbp
push %rbx
@ -871,6 +872,7 @@ $code.="
.align 64
chacha20_poly1305_seal:
.cfi_startproc
_CET_ENDBR
push %rbp
.cfi_push %rbp
push %rbx

@ -275,6 +275,7 @@ $code.=<<___;
.align 16
${PREFIX}_encrypt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
movb \$1,BORINGSSL_function_hit+1(%rip)
@ -297,6 +298,7 @@ $code.=<<___;
.align 16
${PREFIX}_decrypt:
.cfi_startproc
_CET_ENDBR
movups ($inp),$inout0 # load input
mov 240($key),$rounds # key->rounds
___
@ -617,6 +619,7 @@ $code.=<<___;
.align 16
${PREFIX}_ecb_encrypt:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($win64);
lea -0x58(%rsp),%rsp
@ -1203,6 +1206,7 @@ $code.=<<___;
.align 16
${PREFIX}_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb \$1,BORINGSSL_function_hit(%rip)
#endif
@ -1781,6 +1785,7 @@ $code.=<<___;
.align 16
${PREFIX}_xts_encrypt:
.cfi_startproc
_CET_ENDBR
lea (%rsp),%r11 # frame pointer
.cfi_def_cfa_register %r11
push %rbp
@ -2264,6 +2269,7 @@ $code.=<<___;
.align 16
${PREFIX}_xts_decrypt:
.cfi_startproc
_CET_ENDBR
lea (%rsp),%r11 # frame pointer
.cfi_def_cfa_register %r11
push %rbp
@ -2782,6 +2788,7 @@ $code.=<<___;
.align 16
${PREFIX}_cbc_encrypt:
.cfi_startproc
_CET_ENDBR
test $len,$len # check length
jz .Lcbc_ret
@ -3331,6 +3338,7 @@ $code.=<<___;
.align 16
${PREFIX}_set_decrypt_key:
.cfi_startproc
_CET_ENDBR
.byte 0x48,0x83,0xEC,0x08 # sub rsp,8
.cfi_adjust_cfa_offset 8
call __aesni_set_encrypt_key
@ -3403,6 +3411,7 @@ $code.=<<___;
${PREFIX}_set_encrypt_key:
__aesni_set_encrypt_key:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
movb \$1,BORINGSSL_function_hit+3(%rip)
#endif

@ -871,6 +871,7 @@ _vpaes_schedule_mangle:
.align 16
${PREFIX}_set_encrypt_key:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
movb \$1, BORINGSSL_function_hit+5(%rip)
@ -926,6 +927,7 @@ $code.=<<___;
.align 16
${PREFIX}_set_decrypt_key:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
@ -981,6 +983,7 @@ $code.=<<___;
.align 16
${PREFIX}_encrypt:
.cfi_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
movb \$1, BORINGSSL_function_hit+4(%rip)
@ -1030,6 +1033,7 @@ $code.=<<___;
.align 16
${PREFIX}_decrypt:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
@ -1081,6 +1085,7 @@ $code.=<<___;
.align 16
${PREFIX}_cbc_encrypt:
.cfi_startproc
_CET_ENDBR
xchg $key,$len
___
($len,$key)=($key,$len);
@ -1166,6 +1171,7 @@ $code.=<<___;
.align 16
${PREFIX}_ctr32_encrypt_blocks:
.cfi_startproc
_CET_ENDBR
# _vpaes_encrypt_core and _vpaes_encrypt_core_2x expect the key in %rdx.
xchg $key, $blocks
___

@ -112,6 +112,7 @@ $code.=<<___;
.align 64
rsaz_1024_sqr_avx2: # 702 cycles, 14% faster than rsaz_1024_mul_avx2
.cfi_startproc
_CET_ENDBR
lea (%rsp), %rax
.cfi_def_cfa_register %rax
push %rbx
@ -863,6 +864,7 @@ $code.=<<___;
.align 64
rsaz_1024_mul_avx2:
.cfi_startproc
_CET_ENDBR
lea (%rsp), %rax
.cfi_def_cfa_register %rax
push %rbx
@ -1474,6 +1476,7 @@ $code.=<<___;
.align 32
rsaz_1024_red2norm_avx2:
.cfi_startproc
_CET_ENDBR
sub \$-128,$inp # size optimization
xor %rax,%rax
___
@ -1515,6 +1518,7 @@ $code.=<<___;
.align 32
rsaz_1024_norm2red_avx2:
.cfi_startproc
_CET_ENDBR
sub \$-128,$out # size optimization
mov ($inp),@T[0]
mov \$0x1fffffff,%eax
@ -1559,6 +1563,7 @@ $code.=<<___;
.align 32
rsaz_1024_scatter5_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
vmovdqu .Lscatter_permd(%rip),%ymm5
shl \$4,$power
@ -1586,6 +1591,7 @@ rsaz_1024_scatter5_avx2:
.align 32
rsaz_1024_gather5_avx2:
.cfi_startproc
_CET_ENDBR
vzeroupper
mov %rsp,%r11
.cfi_def_cfa_register %r11

@ -92,6 +92,7 @@ $code=<<___;
.align 16
bn_mul_mont:
.cfi_startproc
_CET_ENDBR
mov ${num}d,${num}d
mov %rsp,%rax
.cfi_def_cfa_register %rax

@ -79,6 +79,7 @@ $code=<<___;
.align 64
bn_mul_mont_gather5:
.cfi_startproc
_CET_ENDBR
mov ${num}d,${num}d
mov %rsp,%rax
.cfi_def_cfa_register %rax
@ -1098,6 +1099,7 @@ $code.=<<___;
.align 32
bn_power5:
.cfi_startproc
_CET_ENDBR
mov %rsp,%rax
.cfi_def_cfa_register %rax
___
@ -1240,6 +1242,7 @@ $code.=<<___;
bn_sqr8x_internal:
__bn_sqr8x_internal:
.cfi_startproc
_CET_ENDBR
##############################################################
# Squaring part:
#
@ -2737,6 +2740,7 @@ bn_powerx5:
bn_sqrx8x_internal:
__bn_sqrx8x_internal:
.cfi_startproc
_CET_ENDBR
##################################################################
# Squaring part:
#
@ -3448,6 +3452,7 @@ $code.=<<___;
.align 16
bn_scatter5:
.cfi_startproc
_CET_ENDBR
cmp \$0, $num
jz .Lscatter_epilogue
@ -3478,6 +3483,7 @@ bn_scatter5:
bn_gather5:
.cfi_startproc
.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
_CET_ENDBR
# I can't trust assembler to use specific encoding:-(
.byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
.cfi_def_cfa_register %r10

@ -98,6 +98,7 @@ $code.=<<___;
.align 32
ecp_nistz256_neg:
.cfi_startproc
_CET_ENDBR
push %r12
.cfi_push %r12
push %r13
@ -166,6 +167,7 @@ $code.=<<___;
.align 32
ecp_nistz256_ord_mul_mont:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($addx);
leaq OPENSSL_ia32cap_P(%rip), %rcx
@ -497,6 +499,7 @@ $code.=<<___;
.align 32
ecp_nistz256_ord_sqr_mont:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($addx);
leaq OPENSSL_ia32cap_P(%rip), %rcx
@ -1247,6 +1250,7 @@ $code.=<<___;
.align 32
ecp_nistz256_mul_mont:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($addx);
leaq OPENSSL_ia32cap_P(%rip), %rcx
@ -1549,6 +1553,7 @@ __ecp_nistz256_mul_montq:
.align 32
ecp_nistz256_sqr_mont:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($addx);
leaq OPENSSL_ia32cap_P(%rip), %rcx
@ -2098,6 +2103,7 @@ $code.=<<___;
.align 32
ecp_nistz256_select_w5:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($avx>1);
leaq OPENSSL_ia32cap_P(%rip), %rax
@ -2198,6 +2204,7 @@ $code.=<<___;
.align 32
ecp_nistz256_select_w7:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($avx>1);
leaq OPENSSL_ia32cap_P(%rip), %rax
@ -2403,6 +2410,7 @@ $code.=<<___;
ecp_nistz256_avx2_select_w7:
.cfi_startproc
.Lavx2_select_w7:
_CET_ENDBR
vzeroupper
___
$code.=<<___ if ($win64);
@ -2514,6 +2522,7 @@ $code.=<<___;
.type ecp_nistz256_avx2_select_w7,\@function,3
.align 32
ecp_nistz256_avx2_select_w7:
_CET_ENDBR
.byte 0x0f,0x0b # ud2
ret
.size ecp_nistz256_avx2_select_w7,.-ecp_nistz256_avx2_select_w7
@ -2718,6 +2727,7 @@ $code.=<<___;
.align 32
ecp_nistz256_point_double:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($addx);
leaq OPENSSL_ia32cap_P(%rip), %rcx
@ -2970,6 +2980,7 @@ $code.=<<___;
.align 32
ecp_nistz256_point_add:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($addx);
leaq OPENSSL_ia32cap_P(%rip), %rcx
@ -3368,6 +3379,7 @@ $code.=<<___;
.align 32
ecp_nistz256_point_add_affine:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($addx);
leaq OPENSSL_ia32cap_P(%rip), %rcx

@ -154,6 +154,7 @@ $code.=<<___;
.align 32
beeu_mod_inverse_vartime:
.cfi_startproc
_CET_ENDBR
push %rbp
.cfi_push rbp
push %r12

@ -131,6 +131,7 @@ $code .= <<EOF;
.type md5_block_asm_data_order,\@function,3
md5_block_asm_data_order:
.cfi_startproc
_CET_ENDBR
push %rbp
.cfi_push rbp
push %rbx

@ -442,6 +442,7 @@ $code.=<<___;
aesni_gcm_decrypt:
.cfi_startproc
.seh_startproc
_CET_ENDBR
xor %rax,%rax
# We call |_aesni_ctr32_ghash_6x|, which requires at least 96 (0x60)
@ -715,6 +716,7 @@ _aesni_ctr32_6x:
aesni_gcm_encrypt:
.cfi_startproc
.seh_startproc
_CET_ENDBR
#ifdef BORINGSSL_DISPATCH_TEST
.extern BORINGSSL_function_hit
movb \$1,BORINGSSL_function_hit+2(%rip)
@ -1089,6 +1091,7 @@ $code=<<___; # assembler is too old
.globl aesni_gcm_encrypt
.type aesni_gcm_encrypt,\@abi-omnipotent
aesni_gcm_encrypt:
_CET_ENDBR
xor %eax,%eax
ret
.size aesni_gcm_encrypt,.-aesni_gcm_encrypt
@ -1096,6 +1099,7 @@ aesni_gcm_encrypt:
.globl aesni_gcm_decrypt
.type aesni_gcm_decrypt,\@abi-omnipotent
aesni_gcm_decrypt:
_CET_ENDBR
xor %eax,%eax
ret
.size aesni_gcm_decrypt,.-aesni_gcm_decrypt

@ -104,6 +104,7 @@ my $code = <<____;
gcm_gmult_ssse3:
.cfi_startproc
.seh_startproc
_CET_ENDBR
____
$code .= <<____ if ($win64);
subq \$40, %rsp
@ -246,6 +247,7 @@ $code .= <<____;
gcm_ghash_ssse3:
.cfi_startproc
.seh_startproc
_CET_ENDBR
____
$code .= <<____ if ($win64);
subq \$56, %rsp

@ -206,6 +206,7 @@ $code.=<<___;
gcm_init_clmul:
.cfi_startproc
.seh_startproc
_CET_ENDBR
.L_init_clmul:
___
$code.=<<___ if ($win64);
@ -288,6 +289,7 @@ $code.=<<___;
.align 16
gcm_gmult_clmul:
.cfi_startproc
_CET_ENDBR
.L_gmult_clmul:
movdqu ($Xip),$Xi
movdqa .Lbswap_mask(%rip),$T3
@ -340,6 +342,7 @@ $code.=<<___;
gcm_ghash_clmul:
.cfi_startproc
.seh_startproc
_CET_ENDBR
.L_ghash_clmul:
___
$code.=<<___ if ($win64);
@ -708,6 +711,7 @@ $code.=<<___;
.align 32
gcm_init_avx:
.cfi_startproc
_CET_ENDBR
___
if ($avx) {
my ($Htbl,$Xip)=@_4args;
@ -853,6 +857,7 @@ $code.=<<___;
.align 32
gcm_gmult_avx:
.cfi_startproc
_CET_ENDBR
jmp .L_gmult_clmul
.cfi_endproc
.size gcm_gmult_avx,.-gcm_gmult_avx
@ -864,6 +869,7 @@ $code.=<<___;
.align 32
gcm_ghash_avx:
.cfi_startproc
_CET_ENDBR
___
if ($avx) {
my ($Xip,$Htbl,$inp,$len)=@_4args;

@ -46,6 +46,7 @@ print<<___;
.align 16
CRYPTO_rdrand:
.cfi_startproc
_CET_ENDBR
xorq %rax, %rax
rdrand $tmp1
# An add-with-carry of zero effectively sets %rax to the carry flag.
@ -64,6 +65,7 @@ CRYPTO_rdrand:
.align 16
CRYPTO_rdrand_multiple8_buf:
.cfi_startproc
_CET_ENDBR
test $len, $len
jz .Lout
movq \$8, $tmp1

@ -244,6 +244,7 @@ $code.=<<___;
.align 16
sha1_block_data_order:
.cfi_startproc
_CET_ENDBR
leaq OPENSSL_ia32cap_P(%rip),%r10
mov 0(%r10),%r9d
mov 4(%r10),%r8d

@ -263,6 +263,7 @@ $code=<<___;
.align 16
$func:
.cfi_startproc
_CET_ENDBR
___
$code.=<<___ if ($SZ==4 || $avx);
leaq OPENSSL_ia32cap_P(%rip),%r11

@ -301,6 +301,7 @@ mask_mod8192:
.att_syntax prefix
poly_Rq_mul:
.cfi_startproc
_CET_ENDBR
push %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp, -16

@ -1499,6 +1499,7 @@ default rel
\%define XMMWORD
\%define YMMWORD
\%define ZMMWORD
\%define _CET_ENDBR
\%ifdef BORINGSSL_PREFIX
\%include "boringssl_prefix_symbols_nasm.inc"

@ -141,6 +141,7 @@ my $code = <<____;
abi_test_trampoline:
.cfi_startproc
.seh_startproc
_CET_ENDBR
# Stack layout:
# 8 bytes - align
# $caller_state_size bytes - saved caller registers
@ -307,6 +308,7 @@ foreach ("ax", "bx", "cx", "dx", "di", "si", "bp", 8..15) {
.globl abi_test_clobber_r$_
.align 16
abi_test_clobber_r$_:
_CET_ENDBR
xorq %r$_, %r$_
ret
.size abi_test_clobber_r$_,.-abi_test_clobber_r$_
@ -319,6 +321,7 @@ foreach (0..15) {
.globl abi_test_clobber_xmm$_
.align 16
abi_test_clobber_xmm$_:
_CET_ENDBR
pxor %xmm$_, %xmm$_
ret
.size abi_test_clobber_xmm$_,.-abi_test_clobber_xmm$_
@ -335,6 +338,7 @@ $code .= <<____;
abi_test_bad_unwind_wrong_register:
.cfi_startproc
.seh_startproc
_CET_ENDBR
pushq %r12
.cfi_push %r13 # This should be %r13
.seh_pushreg %r13 # This should be %r13
@ -358,6 +362,7 @@ abi_test_bad_unwind_wrong_register:
abi_test_bad_unwind_temporary:
.cfi_startproc
.seh_startproc
_CET_ENDBR
pushq %r12
.cfi_push %r12
.seh_pushreg %r12
@ -384,6 +389,7 @@ abi_test_bad_unwind_temporary:
.type abi_test_set_direction_flag, \@abi-omnipotent
.globl abi_test_get_and_clear_direction_flag
abi_test_get_and_clear_direction_flag:
_CET_ENDBR
pushfq
popq %rax
andq \$0x400, %rax
@ -397,6 +403,7 @@ abi_test_get_and_clear_direction_flag:
.type abi_test_set_direction_flag, \@abi-omnipotent
.globl abi_test_set_direction_flag
abi_test_set_direction_flag:
_CET_ENDBR
std
ret
.size abi_test_set_direction_flag,.-abi_test_set_direction_flag

@ -33,6 +33,9 @@
//
// - The file, on aarch64, uses the macros defined below to be compatible with
// BTI and PAC.
//
// - The file, on X86_64, requires the program to be compatible with Intel IBT
// and SHSTK.
#if defined(__ASSEMBLER__)
@ -47,6 +50,22 @@
.popsection
#endif
#if defined(__CET__) && defined(OPENSSL_X86_64)
// Clang and GCC define __CET__ and provide <cet.h> when they support Intel's
// Indirect Branch Tracking.
// https://lpc.events/event/7/contributions/729/attachments/496/903/CET-LPC-2020.pdf
//
// cet.h defines _CET_ENDBR, which is used to mark function entry points for
// IBT, and adds the assembly marker. The value of _CET_ENDBR is made dependent
// on whether '-fcf-protection' is passed to the compiler. _CET_ENDBR is only required when
// the function is the target of an indirect jump, but BoringSSL chooses to mark
// all assembly entry points because it is easier, and allows BoringSSL's ABI
// tester to call the assembly entry points via an indirect jump.
#include <cet.h>
#else
#define _CET_ENDBR
#endif
#if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
// We require the ARM assembler provide |__ARM_ARCH| from Arm C Language

@ -17,6 +17,7 @@ fiat_curve25519_adx_mul:
#endif
.cfi_startproc
_CET_ENDBR
mov [rsp - 0x08], rbp
.cfi_offset rbp, -8-0x08
mov rbp, rsp

@ -17,6 +17,7 @@ fiat_curve25519_adx_square:
#endif
.cfi_startproc
_CET_ENDBR
mov [rsp - 0x08], rbp
.cfi_offset rbp, -8-0x08
mov rbp, rsp

Loading…
Cancel
Save