From 27bf24a1a138f990736fc4823c3b5c9c102e59b6 Mon Sep 17 00:00:00 2001
From: Yuan Yao
Date: Fri, 11 Feb 2022 10:18:37 +0800
Subject: [PATCH] [HACK] KVM: TDX: Retry seamcall when TDX_OPERAND_BUSY with
 operand SEPT

Temporarily retry in the SEAMCALL wrappers when the TDX module returns
TDX_OPERAND_BUSY with operand SEPT.

The TDX module has many internal locks to protect its resources.  To
avoid staying in SEAM mode for too long, SEAMCALLs return a
TDX_OPERAND_BUSY error code to the kernel instead of spinning on the
locks.

Usually, callers of the SEAMCALL wrappers can avoid contention by
taking proper locks on their side.  For example, KVM can efficiently
avoid the TDX module's lock contention for resources like TDR, TDCS,
KOT, and TDVPR by taking locks within KVM or by making a resource
per-thread.

However, for performance reasons, callers like KVM may not want to use
exclusive locks to avoid contention on the SEPT tree inside the TDX
module.  For instance, KVM allows TDH.VP.ENTER to run concurrently
with TDH.MEM.SEPT.ADD, TDH.MEM.PAGE.AUG, and TDH.MEM.PAGE.REMOVE:

 Resources   SHARED users          EXCLUSIVE users
 ------------------------------------------------------------------------
 SEPT tree   TDH.MEM.SEPT.ADD      TDH.VP.ENTER
             TDH.MEM.PAGE.AUG      TDH.MEM.SEPT.REMOVE
             TDH.MEM.PAGE.REMOVE   TDH.MEM.RANGE.BLOCK

Inside the TDX module, TDH.VP.ENTER acquires an exclusive lock on the
SEPT tree only when zero-step mitigation is triggered, but KVM can
still encounter TDX_OPERAND_BUSY with operand SEPT.

Retry in the SEAMCALL wrappers temporarily, until KVM either retries on
the caller side or finds a way to avoid the contention.

Note: the wrappers retry at most 16 times on TDX_OPERAND_BUSY with
operand SEPT; needing more than 16 retries is rare.  SEAMCALLs TDH.MEM.*
can also contend with the TDCALL TDG.MEM.PAGE.ACCEPT and then return
TDX_OPERAND_BUSY without operand SEPT.  Do not retry in the SEAMCALL
wrappers for such rare errors; let the callers handle them.

Signed-off-by: Yuan Yao
Co-developed-by: Isaku Yamahata
Signed-off-by: Isaku Yamahata
Co-developed-by: Rick Edgecombe
Signed-off-by: Rick Edgecombe
Co-developed-by: Yan Zhao
Signed-off-by: Yan Zhao
---
TDX MMU part 2 v2:
- Update the patch log. (Yan)

TDX MMU part 2 v1:
- Updates from seamcall overhaul. (Kai)

v19:
- Fix typos: TDG.VP.ENTER => TDH.VP.ENTER, TDX_OPRRAN_BUSY => TDX_OPERAND_BUSY.
- Drop the description of TDH.VP.ENTER, as this patch doesn't touch
  TDH.VP.ENTER.
---
 arch/x86/virt/vmx/tdx/tdx.c | 47 +++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 5 deletions(-)

diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 8258289df9ec..d402329acd92 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -1585,6 +1585,43 @@ static void tdx_clflush_page(struct page *tdr)
 	clflush_cache_range(page_to_virt(tdr), PAGE_SIZE);
 }
 
+/*
+ * The TDX module acquires internal locks to protect its resources.  It does
+ * not spin on those locks, because of restrictions on its allowed execution
+ * time; instead, it returns TDX_OPERAND_BUSY with an operand id.
+ *
+ * Multiple vCPUs can operate on the SEPT.  Also, with zero-step attack
+ * mitigation, TDH.VP.ENTER may rarely take the SEPT lock and release it
+ * when a zero-step attack is suspected, so TDH.MEM.* operations can see
+ * TDX_OPERAND_BUSY | TDX_OPERAND_ID_SEPT.  Note: TDH.MEM.TRACK is an exception.
+ *
+ * Because the TDP MMU uses a read lock for scalability, a spin lock around
+ * the SEAMCALL would defeat that effort.
+ * Retry several times, assuming that SEPT lock contention is rare.  But
+ * don't loop forever to avoid lockup; let the TDP MMU retry instead.
+ */
+#define TDX_OPERAND_BUSY	0x8000020000000000ULL
+#define TDX_OPERAND_ID_SEPT	0x92
+
+#define TDX_ERROR_SEPT_BUSY	(TDX_OPERAND_BUSY | TDX_OPERAND_ID_SEPT)
+
+static inline u64 tdx_seamcall_sept(u64 op, struct tdx_module_args *in)
+{
+#define SEAMCALL_RETRY_MAX	16
+	struct tdx_module_args args_in;
+	int retry = SEAMCALL_RETRY_MAX;
+	u64 ret;
+
+	do {
+		args_in = *in;	/* each retry starts from the pristine inputs */
+		ret = seamcall_ret(op, &args_in);
+	} while (ret == TDX_ERROR_SEPT_BUSY && retry-- > 0);
+
+	*in = args_in;	/* hand the final attempt's outputs to the caller */
+
+	return ret;
+}
+
 u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page)
 {
 	struct tdx_module_args args = {
@@ -1608,7 +1645,7 @@ u64 tdh_mem_page_add(u64 tdr, u64 gpa, u64 hpa, u64 source, u64 *rcx, u64 *rdx)
 	u64 ret;
 
 	clflush_cache_range(__va(hpa), PAGE_SIZE);
-	ret = seamcall_ret(TDH_MEM_PAGE_ADD, &args);
+	ret = tdx_seamcall_sept(TDH_MEM_PAGE_ADD, &args);
 
 	*rcx = args.rcx;
 	*rdx = args.rdx;
@@ -1627,7 +1664,7 @@ u64 tdh_mem_sept_add(u64 tdr, u64 gpa, u64 level, u64 hpa, u64 *rcx, u64 *rdx)
 	u64 ret;
 
 	clflush_cache_range(__va(hpa), PAGE_SIZE);
-	ret = seamcall_ret(TDH_MEM_SEPT_ADD, &args);
+	ret = tdx_seamcall_sept(TDH_MEM_SEPT_ADD, &args);
 
 	*rcx = args.rcx;
 	*rdx = args.rdx;
@@ -1658,7 +1695,7 @@ u64 tdh_mem_page_aug(u64 tdr, u64 gpa, u64 hpa, u64 *rcx, u64 *rdx)
 	u64 ret;
 
 	clflush_cache_range(__va(hpa), PAGE_SIZE);
-	ret = seamcall_ret(TDH_MEM_PAGE_AUG, &args);
+	ret = tdx_seamcall_sept(TDH_MEM_PAGE_AUG, &args);
 
 	*rcx = args.rcx;
 	*rdx = args.rdx;
@@ -1675,7 +1712,7 @@ u64 tdh_mem_range_block(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx)
 	};
 	u64 ret;
 
-	ret = seamcall_ret(TDH_MEM_RANGE_BLOCK, &args);
+	ret = tdx_seamcall_sept(TDH_MEM_RANGE_BLOCK, &args);
 
 	*rcx = args.rcx;
 	*rdx = args.rdx;
@@ -1902,7 +1939,7 @@ u64 tdh_mem_page_remove(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx)
 	};
 	u64 ret;
 
-	ret = seamcall_ret(TDH_MEM_PAGE_REMOVE, &args);
+	ret = tdx_seamcall_sept(TDH_MEM_PAGE_REMOVE, &args);
 
 	*rcx = args.rcx;
 	*rdx = args.rdx;
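
For reference, below is a minimal, self-contained userspace sketch of the
retry pattern the wrapper implements: copy the arguments into a scratch
struct so every retry starts from the original inputs, and hand the final
attempt's outputs back to the caller.  Only the constants and the shape of
tdx_seamcall_sept() come from the patch; struct tdx_module_args here is a
plain stand-in, and seamcall_ret() is a stub invented for the sketch (the
real one is the kernel's SEAMCALL trampoline, which writes the SEAMCALL's
output registers back into the args struct).

/* retry_sketch.c - standalone illustration only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define TDX_OPERAND_BUSY	0x8000020000000000ULL
#define TDX_OPERAND_ID_SEPT	0x92ULL
#define TDX_ERROR_SEPT_BUSY	(TDX_OPERAND_BUSY | TDX_OPERAND_ID_SEPT)
#define SEAMCALL_RETRY_MAX	16

/* Stand-in for the kernel's struct tdx_module_args. */
struct tdx_module_args {
	uint64_t rcx, rdx, r8, r9;
};

/*
 * Stub standing in for the real seamcall_ret(): pretends the SEPT lock is
 * contended for the first two calls, and clobbers the register file with
 * "outputs" on every return, as the real SEAMCALL does.
 */
static uint64_t seamcall_ret(uint64_t op, struct tdx_module_args *args)
{
	static int busy_left = 2;

	(void)op;
	args->rcx = 0;	/* pretend extended error info, clobbering the inputs */
	args->rdx = 0;
	if (busy_left-- > 0)
		return TDX_ERROR_SEPT_BUSY;
	return 0;
}

/* Same shape as tdx_seamcall_sept() in the patch. */
static uint64_t tdx_seamcall_sept(uint64_t op, struct tdx_module_args *in)
{
	struct tdx_module_args args_in;
	int retry = SEAMCALL_RETRY_MAX;
	uint64_t ret;

	do {
		args_in = *in;	/* each retry starts from the pristine inputs */
		ret = seamcall_ret(op, &args_in);
	} while (ret == TDX_ERROR_SEPT_BUSY && retry-- > 0);

	*in = args_in;	/* hand the final attempt's outputs to the caller */

	return ret;
}

int main(void)
{
	/* Fake "gpa" and "hpa" inputs; 0 stands in for a SEAMCALL leaf number. */
	struct tdx_module_args args = { .rcx = 0x1000, .rdx = 0x2000 };
	uint64_t ret = tdx_seamcall_sept(0, &args);

	printf("ret=%#llx rcx=%#llx rdx=%#llx\n",
	       (unsigned long long)ret,
	       (unsigned long long)args.rcx,
	       (unsigned long long)args.rdx);
	return ret != 0;
}

The scratch copy is what makes the retry safe: because the SEAMCALL writes
its outputs over the input registers, retrying with the caller's struct
directly would re-issue the call with clobbered arguments.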