From da21fa41344fbed1b492995a85d80e09de396a3c Mon Sep 17 00:00:00 2001 From: Dong-Heon Jung Date: Wed, 26 Oct 2022 17:15:18 +0900 Subject: [PATCH 01/91] [WIP] RISCV64 REL BUILD --- src/coreclr/clrdefinitions.cmake | 12 +- .../debug/createdump/createdumpunix.cpp | 4 +- src/coreclr/debug/createdump/datatarget.cpp | 4 +- src/coreclr/debug/createdump/dumpwriterelf.h | 2 + src/coreclr/debug/createdump/memoryregion.h | 2 +- src/coreclr/debug/createdump/threadinfo.h | 18 + .../debug/createdump/threadinfounix.cpp | 4 + src/coreclr/debug/daccess/daccess.cpp | 2 + src/coreclr/debug/daccess/request.cpp | 12 + src/coreclr/debug/di/CMakeLists.txt | 2 +- src/coreclr/debug/di/module.cpp | 2 + src/coreclr/debug/di/platformspecific.cpp | 3 + .../debug/di/riscv64/cordbregisterset.cpp | 116 ++ .../debug/di/riscv64/floatconversion.S | 0 .../di/riscv64/primitives.cpp} | 5 +- src/coreclr/debug/di/rsthread.cpp | 7 + src/coreclr/debug/di/shimremotedatatarget.cpp | 2 + src/coreclr/debug/ee/riscv64/dbghelpers.S | 17 +- src/coreclr/debug/ee/riscv64/primitives.cpp | 7 +- src/coreclr/debug/ee/riscv64/walker.cpp | 2 +- src/coreclr/debug/inc/dbgipcevents.h | 7 + src/coreclr/debug/inc/dbgtargetcontext.h | 68 + src/coreclr/debug/inc/riscv64/primitives.h | 259 ++++ .../debug/shared/riscv64/primitives.cpp | 17 +- src/coreclr/dlls/mscordac/CMakeLists.txt | 2 + src/coreclr/gc/env/gcenv.base.h | 6 + src/coreclr/gc/env/volatile.h | 6 +- src/coreclr/gcdump/gcdumpnonx86.cpp | 3 + src/coreclr/gcinfo/CMakeLists.txt | 8 +- src/coreclr/gcinfo/gcinfodumper.cpp | 48 +- src/coreclr/inc/clrnt.h | 58 + src/coreclr/inc/cordebuginfo.h | 36 + src/coreclr/inc/eetwain.h | 8 +- src/coreclr/inc/eexcp.h | 4 +- src/coreclr/inc/gcinfodecoder.h | 10 +- src/coreclr/inc/gcinfotypes.h | 59 +- src/coreclr/inc/jithelpers.h | 2 +- src/coreclr/inc/pedecoder.h | 2 + src/coreclr/inc/regdisp.h | 27 + src/coreclr/inc/switches.h | 2 +- src/coreclr/inc/targetosarch.h | 13 + src/coreclr/inc/volatile.h | 4 +- src/coreclr/jit/CMakeLists.txt | 45 +- src/coreclr/jit/codegen.h | 10 +- src/coreclr/jit/codegencommon.cpp | 61 +- src/coreclr/jit/codegeninterface.h | 10 +- src/coreclr/jit/codegenlinear.cpp | 8 +- src/coreclr/jit/codegenriscv64.cpp | 1308 +++++++++++++++++ src/coreclr/jit/compiler.h | 24 +- src/coreclr/jit/emit.cpp | 31 +- src/coreclr/jit/emit.h | 84 +- src/coreclr/jit/emitdef.h | 2 + src/coreclr/jit/emitfmts.h | 2 + src/coreclr/jit/emitfmtsriscv64.h | 43 + src/coreclr/jit/emitjmps.h | 4 + src/coreclr/jit/emitpub.h | 4 +- src/coreclr/jit/emitriscv64.cpp | 612 ++++++++ src/coreclr/jit/emitriscv64.h | 244 +++ src/coreclr/jit/error.h | 15 +- src/coreclr/jit/gentree.cpp | 16 + src/coreclr/jit/instr.cpp | 16 +- src/coreclr/jit/instr.h | 19 +- src/coreclr/jit/instrs.h | 2 + src/coreclr/jit/instrsriscv64.h | 39 + src/coreclr/jit/jit.h | 71 +- src/coreclr/jit/lclvars.cpp | 2 + src/coreclr/jit/lower.cpp | 5 +- src/coreclr/jit/lowerriscv64.cpp | 439 ++++++ src/coreclr/jit/lsrariscv64.cpp | 183 +++ src/coreclr/jit/morph.cpp | 7 +- src/coreclr/jit/register.h | 3 + src/coreclr/jit/registerriscv64.h | 106 ++ src/coreclr/jit/target.h | 14 +- src/coreclr/jit/targetriscv64.cpp | 27 + src/coreclr/jit/targetriscv64.h | 306 ++++ src/coreclr/jit/unwind.cpp | 4 + src/coreclr/jit/unwind.h | 21 +- src/coreclr/jit/unwindriscv64.cpp | 515 +++++++ src/coreclr/jit/valuenum.cpp | 4 +- src/coreclr/jit/valuenumfuncs.h | 3 + src/coreclr/nativeaot/Runtime/threadstore.inl | 1 + src/coreclr/pal/inc/pal.h | 10 +- src/coreclr/pal/inc/rt/ntimage.h | 1 + 
src/coreclr/pal/inc/unixasmmacrosriscv64.inc | 28 +- src/coreclr/pal/prebuilt/inc/cordebug.h | 69 +- .../arch/riscv64/activationhandlerwrapper.S | 18 +- .../pal/src/arch/riscv64/asmconstants.h | 17 +- .../arch/riscv64/callsignalhandlerwrapper.S | 2 +- src/coreclr/pal/src/arch/riscv64/context2.S | 10 +- .../arch/riscv64/dispatchexceptionwrapper.S | 2 +- .../pal/src/arch/riscv64/exceptionhelper.S | 41 +- .../src/arch/riscv64/signalhandlerhelper.cpp | 2 +- .../pal/src/exception/remote-unwind.cpp | 48 +- src/coreclr/pal/src/exception/seh-unwind.cpp | 12 +- src/coreclr/pal/src/include/pal/context.h | 6 +- src/coreclr/pal/src/thread/context.cpp | 5 +- .../superpmi/superpmi-shared/spmiutil.cpp | 2 + .../tools/superpmi/superpmi-shared/spmiutil.h | 5 +- src/coreclr/unwinder/riscv64/unwinder.cpp | 116 +- src/coreclr/unwinder/riscv64/unwinder.h | 2 +- src/coreclr/utilcode/util.cpp | 14 +- src/coreclr/vm/CMakeLists.txt | 25 +- src/coreclr/vm/arm64/cgencpu.h | 1 - src/coreclr/vm/callcounting.h | 2 + src/coreclr/vm/callingconvention.h | 56 +- src/coreclr/vm/ceeload.h | 2 + src/coreclr/vm/codeman.cpp | 21 +- src/coreclr/vm/codeman.h | 4 +- src/coreclr/vm/dynamicmethod.cpp | 2 +- src/coreclr/vm/eetwain.cpp | 4 +- src/coreclr/vm/encee.cpp | 2 +- src/coreclr/vm/excep.cpp | 12 +- src/coreclr/vm/frames.h | 3 + src/coreclr/vm/gccover.cpp | 34 +- src/coreclr/vm/gcinfodecoder.cpp | 151 +- src/coreclr/vm/interpreter.cpp | 30 +- src/coreclr/vm/interpreter.h | 2 + src/coreclr/vm/jitinterface.cpp | 5 + src/coreclr/vm/jitinterface.h | 2 +- src/coreclr/vm/precode.h | 14 + src/coreclr/vm/prestub.cpp | 2 +- src/coreclr/vm/riscv64/asmconstants.h | 233 ++- src/coreclr/vm/riscv64/asmhelpers.S | 359 ++++- .../vm/riscv64/calldescrworkerriscv64.S | 11 + src/coreclr/vm/riscv64/cgencpu.h | 518 ++++++- src/coreclr/vm/riscv64/crthelpers.S | 10 +- src/coreclr/vm/riscv64/excepcpu.h | 45 +- src/coreclr/vm/riscv64/gmscpu.h | 100 +- src/coreclr/vm/riscv64/pinvokestubs.S | 62 +- src/coreclr/vm/riscv64/profiler.cpp | 308 +++- src/coreclr/vm/riscv64/stubs.cpp | 718 ++++++++- src/coreclr/vm/riscv64/thunktemplates.S | 14 +- src/coreclr/vm/riscv64/virtualcallstubcpu.hpp | 237 ++- src/coreclr/vm/stackwalk.cpp | 4 +- src/coreclr/vm/stackwalk.h | 4 +- src/coreclr/vm/stubmgr.cpp | 2 +- src/coreclr/vm/threadsuspend.cpp | 8 +- src/native/external/libunwind.cmake | 1 + src/native/external/libunwind/CMakeLists.txt | 5 + .../external/libunwind/src/CMakeLists.txt | 37 + .../external/libunwind/src/riscv/getcontext.S | 2 +- .../external/libunwind/src/riscv/setcontext.S | 2 +- 142 files changed, 8415 insertions(+), 231 deletions(-) create mode 100644 src/coreclr/debug/di/riscv64/cordbregisterset.cpp create mode 100644 src/coreclr/debug/di/riscv64/floatconversion.S rename src/coreclr/{vm/riscv64/calldescrworkerloongarch64.S => debug/di/riscv64/primitives.cpp} (57%) create mode 100644 src/coreclr/debug/inc/riscv64/primitives.h create mode 100644 src/coreclr/jit/codegenriscv64.cpp create mode 100644 src/coreclr/jit/emitfmtsriscv64.h create mode 100644 src/coreclr/jit/emitriscv64.cpp create mode 100644 src/coreclr/jit/emitriscv64.h create mode 100644 src/coreclr/jit/instrsriscv64.h create mode 100644 src/coreclr/jit/lowerriscv64.cpp create mode 100644 src/coreclr/jit/lsrariscv64.cpp create mode 100644 src/coreclr/jit/registerriscv64.h create mode 100644 src/coreclr/jit/targetriscv64.cpp create mode 100644 src/coreclr/jit/targetriscv64.h create mode 100644 src/coreclr/jit/unwindriscv64.cpp create mode 100644 src/coreclr/vm/riscv64/calldescrworkerriscv64.S 
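Reviewer note on the break instruction this series wires up (CORDbg_BREAK_INSTRUCTION in the new debug/inc/riscv64/primitives.h below): 0x00100073 is the base-ISA EBREAK encoding. A field-level sanity check of that constant, written as a standalone C++ translation unit for illustration only (not part of the patch):

#include <cstdint>

// EBREAK is a SYSTEM-opcode instruction: imm12 = 1, rs1 = x0, funct3 = 0, rd = x0.
constexpr uint32_t kEbreak = 0x00100073;

static_assert((kEbreak & 0x7f) == 0x73, "opcode[6:0] is SYSTEM (0b1110011)");
static_assert(((kEbreak >> 7) & 0x1f) == 0, "rd is x0");
static_assert(((kEbreak >> 12) & 0x7) == 0, "funct3 is 0");
static_assert(((kEbreak >> 15) & 0x1f) == 0, "rs1 is x0");
static_assert((kEbreak >> 20) == 0x001, "imm12 = 1 selects EBREAK (0 would be ECALL)");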
diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 9472c5f88ad720..b5a43f5057a47b 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -197,12 +197,12 @@ if (CLR_CMAKE_TARGET_ARCH_AMD64) add_definitions(-DUNIX_AMD64_ABI_ITF) endif (CLR_CMAKE_TARGET_ARCH_AMD64) add_definitions(-DFEATURE_USE_ASM_GC_WRITE_BARRIERS) -if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64) +if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64) add_definitions(-DFEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) -endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64) -if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64) +endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64) +if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64) add_definitions(-DFEATURE_MANUALLY_MANAGED_CARD_BUNDLES) -endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64) +endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64) if(NOT CLR_CMAKE_TARGET_UNIX) add_definitions(-DFEATURE_WIN32_REGISTRY) @@ -275,6 +275,10 @@ function(set_target_definitions_to_custom_os_and_arch) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_MULTIREG_RETURN) elseif((TARGETDETAILS_ARCH STREQUAL "arm") OR (TARGETDETAILS_ARCH STREQUAL "armel")) target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_ARM) + elseif((TARGETDETAILS_ARCH STREQUAL "riscv64")) + target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_64BIT) + target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_RISCV64) + target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_MULTIREG_RETURN) endif() if (TARGETDETAILS_ARCH STREQUAL "armel") diff --git a/src/coreclr/debug/createdump/createdumpunix.cpp b/src/coreclr/debug/createdump/createdumpunix.cpp index f86a283546a356..86c63e9482037a 100644 --- a/src/coreclr/debug/createdump/createdumpunix.cpp +++ b/src/coreclr/debug/createdump/createdumpunix.cpp @@ -3,7 +3,7 @@ #include "createdump.h" -#if defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) +#if defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) || defined(__riscv) long g_pageSize = 0; #endif @@ -19,7 +19,7 @@ CreateDump(const char* dumpPathTemplate, int pid, const char* dumpType, MINIDUMP bool result = false; // Initialize PAGE_SIZE -#if defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) +#if defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) || defined(__riscv) g_pageSize = sysconf(_SC_PAGESIZE); #endif TRACE("PAGE_SIZE %d\n", PAGE_SIZE); diff --git a/src/coreclr/debug/createdump/datatarget.cpp b/src/coreclr/debug/createdump/datatarget.cpp index 7ef154e034c8e2..0bf0ed23c66c5c 100644 --- a/src/coreclr/debug/createdump/datatarget.cpp +++ b/src/coreclr/debug/createdump/datatarget.cpp @@ -77,6 +77,8 @@ DumpDataTarget::GetMachineType( *machine = IMAGE_FILE_MACHINE_I386; #elif HOST_LOONGARCH64 *machine = IMAGE_FILE_MACHINE_LOONGARCH64; +#elif HOST_RISCV64 + *machine = IMAGE_FILE_MACHINE_RISCV64; #else #error Unsupported 
architecture
#endif

@@ -87,7 +89,7 @@ HRESULT STDMETHODCALLTYPE
 DumpDataTarget::GetPointerSize(
     /* [out] */ ULONG32 *size)
 {
-#if defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64)
+#if defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
     *size = 8;
 #elif defined(HOST_ARM) || defined(HOST_X86)
     *size = 4;
diff --git a/src/coreclr/debug/createdump/dumpwriterelf.h b/src/coreclr/debug/createdump/dumpwriterelf.h
index cb8731871de211..874df9cc791fcc 100644
--- a/src/coreclr/debug/createdump/dumpwriterelf.h
+++ b/src/coreclr/debug/createdump/dumpwriterelf.h
@@ -23,6 +23,8 @@
 #define ELF_ARCH EM_ARM
 #elif defined(__loongarch64)
 #define ELF_ARCH EM_LOONGARCH
+#elif defined(__riscv)
+#define ELF_ARCH EM_RISCV
 #endif

 #define PH_HDR_CANARY 0xFFFF
diff --git a/src/coreclr/debug/createdump/memoryregion.h b/src/coreclr/debug/createdump/memoryregion.h
index f4c115d539b476..e7b8f31a0b2692 100644
--- a/src/coreclr/debug/createdump/memoryregion.h
+++ b/src/coreclr/debug/createdump/memoryregion.h
@@ -1,7 +1,7 @@
 // Licensed to the .NET Foundation under one or more agreements.
 // The .NET Foundation licenses this file to you under the MIT license.

-#if !defined(PAGE_SIZE) && (defined(__arm__) || defined(__aarch64__) || defined(__loongarch64))
+#if !defined(PAGE_SIZE) && (defined(__arm__) || defined(__aarch64__) || defined(__loongarch64) || defined(__riscv))
 extern long g_pageSize;
 #define PAGE_SIZE g_pageSize
 #endif
diff --git a/src/coreclr/debug/createdump/threadinfo.h b/src/coreclr/debug/createdump/threadinfo.h
index ed82c1ec51a652..96fabbe6938dec 100644
--- a/src/coreclr/debug/createdump/threadinfo.h
+++ b/src/coreclr/debug/createdump/threadinfo.h
@@ -20,6 +20,14 @@ class CrashInfo;
 #define MCREG_Pc(mc) ((mc).pc)
 #endif

+#if defined(__riscv)
+// See src/coreclr/pal/src/include/pal/context.h
+#define MCREG_Ra(mc) ((mc).ra)
+#define MCREG_Fp(mc) ((mc).s0)
+#define MCREG_Sp(mc) ((mc).sp)
+#define MCREG_Pc(mc) ((mc).pc)
+#endif
+
 #define FPREG_ErrorOffset(fpregs) *(DWORD*)&((fpregs).rip)
 #define FPREG_ErrorSelector(fpregs) *(((WORD*)&((fpregs).rip)) + 2)
 #define FPREG_DataOffset(fpregs) *(DWORD*)&((fpregs).rdp)
@@ -30,6 +38,12 @@ class CrashInfo;
 #elif defined(__loongarch64)
 // struct user_regs_struct {} defined `/usr/include/loongarch64-linux-gnu/sys/user.h`
+struct user_fpregs_struct
+{
+    unsigned long long fpregs[32];
+    unsigned long fpscr;
+} __attribute__((__packed__));
+#elif defined(__riscv)
 struct user_fpregs_struct
 {
     unsigned long long fpregs[32];
@@ -154,6 +168,10 @@ class ThreadInfo
     inline const uint64_t GetInstructionPointer() const { return m_gpRegisters.ARM_pc; }
     inline const uint64_t GetStackPointer() const { return m_gpRegisters.ARM_sp; }
     inline const uint64_t GetFramePointer() const { return m_gpRegisters.ARM_fp; }
+#elif defined(__riscv)
+    inline const uint64_t GetInstructionPointer() const { return MCREG_Pc(m_gpRegisters); }
+    inline const uint64_t GetStackPointer() const { return MCREG_Sp(m_gpRegisters); }
+    inline const uint64_t GetFramePointer() const { return MCREG_Fp(m_gpRegisters); }
 #endif

 #endif // __APPLE__
diff --git a/src/coreclr/debug/createdump/threadinfounix.cpp b/src/coreclr/debug/createdump/threadinfounix.cpp
index c9ccc7f37d9a67..ca75a6128fee9e 100644
--- a/src/coreclr/debug/createdump/threadinfounix.cpp
+++ b/src/coreclr/debug/createdump/threadinfounix.cpp
@@ -59,6 +59,8 @@ ThreadInfo::Initialize()
     TRACE("Thread %04x RIP %016llx RSP %016llx\n", m_tid, (unsigned long long)m_gpRegisters.rip, (unsigned long
long)m_gpRegisters.rsp); #elif defined(__loongarch64) TRACE("Thread %04x PC %016llx SP %016llx\n", m_tid, (unsigned long long)m_gpRegisters.pc, (unsigned long long)m_gpRegisters.gpr[3]); +#elif defined(__riscv) + TRACE("Thread %04x PC %016llx SP %016llx\n", m_tid, (unsigned long long)m_gpRegisters.pc, (unsigned long long)m_gpRegisters.sp); #else #error "Unsupported architecture" #endif @@ -243,6 +245,8 @@ ThreadInfo::GetThreadContext(uint32_t flags, CONTEXT* context) const memcpy(context->F, m_fpRegisters.fpregs, sizeof(context->F)); context->Fcsr = m_fpRegisters.fpscr; } +#elif defined(__riscv) + _ASSERTE(!"TODO RISCV64 NYI"); #else #error Platform not supported #endif diff --git a/src/coreclr/debug/daccess/daccess.cpp b/src/coreclr/debug/daccess/daccess.cpp index 3bc22eaae78f29..4baa2c6fabbf5c 100644 --- a/src/coreclr/debug/daccess/daccess.cpp +++ b/src/coreclr/debug/daccess/daccess.cpp @@ -5446,6 +5446,8 @@ ClrDataAccess::Initialize(void) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_ARM64; #elif defined(TARGET_LOONGARCH64) CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_LOONGARCH64; + #elif defined(TARGET_RISCV64) + CorDebugPlatform hostPlatform = CORDB_PLATFORM_POSIX_RISCV64; #else #error Unknown Processor. #endif diff --git a/src/coreclr/debug/daccess/request.cpp b/src/coreclr/debug/daccess/request.cpp index 08e9b8265829bf..fb7955225587f0 100644 --- a/src/coreclr/debug/daccess/request.cpp +++ b/src/coreclr/debug/daccess/request.cpp @@ -559,6 +559,18 @@ ClrDataAccess::GetRegisterName(int regNum, unsigned int count, _Inout_updates_z_ W("S6"), W("S7"), W("K0"), W("K1"), W("GP"), W("SP"), W("FP"), W("RA") }; +#elif defined(TARGET_RISCV64) + static const WCHAR *regs[] = + { + W("R0"), W("RA"), W("SP"), W("GP"), + W("TP"), W("T0"), W("T1"), W("T2"), + W("FP"), W("S1"), W("A0"), W("A1"), + W("A2"), W("A3"), W("A4"), W("A5"), + W("A6"), W("A7"), W("S2"), W("S3"), + W("S4"), W("S5"), W("S6"), W("S7"), + W("S8"), W("S9"), W("S10"), W("S11"), + W("T3"), W("T4"), W("T5"), W("T6") + }; #endif // Caller frame registers are encoded as "-(reg+1)". 
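The "-(reg+1)" encoding noted in the context line above folds caller-frame register numbers into negative space so a single signed value can distinguish them from the callee's registers, which are all >= 0. A minimal sketch of the round-trip; the helper names are illustrative, not runtime APIs:

// Caller-frame registers are encoded as "-(reg+1)" so they cannot collide
// with callee register numbers, and even register 0 gets a distinct code.
constexpr int EncodeCallerFrameReg(int reg) { return -(reg + 1); }
constexpr int DecodeCallerFrameReg(int enc) { return -enc - 1; }

static_assert(EncodeCallerFrameReg(0) == -1, "register 0 encodes as -1, not 0");
static_assert(DecodeCallerFrameReg(EncodeCallerFrameReg(31)) == 31, "encoding round-trips across x0..x31");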
diff --git a/src/coreclr/debug/di/CMakeLists.txt b/src/coreclr/debug/di/CMakeLists.txt index 9d84f90b1a5c6a..b5f6872b8f690a 100644 --- a/src/coreclr/debug/di/CMakeLists.txt +++ b/src/coreclr/debug/di/CMakeLists.txt @@ -66,7 +66,7 @@ if(CLR_CMAKE_HOST_WIN32) endif() elseif(CLR_CMAKE_HOST_UNIX) - if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64) + if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64) set(CORDBDI_SOURCES_ASM_FILE ${ARCH_SOURCES_DIR}/floatconversion.S ) diff --git a/src/coreclr/debug/di/module.cpp b/src/coreclr/debug/di/module.cpp index d62659291ec7e1..15fae1581e2bff 100644 --- a/src/coreclr/debug/di/module.cpp +++ b/src/coreclr/debug/di/module.cpp @@ -4872,6 +4872,8 @@ int CordbNativeCode::GetCallInstructionLength(BYTE *ip, ULONG32 count) _ASSERTE(!"Invalid opcode!"); return -1; +#elif defined(TARGET_RISCV64) + return MAX_INSTRUCTION_LENGTH; #else #error Platform not implemented #endif diff --git a/src/coreclr/debug/di/platformspecific.cpp b/src/coreclr/debug/di/platformspecific.cpp index 2dbdbd2a407056..cd690dccc2fd25 100644 --- a/src/coreclr/debug/di/platformspecific.cpp +++ b/src/coreclr/debug/di/platformspecific.cpp @@ -36,6 +36,9 @@ #elif TARGET_LOONGARCH64 #include "loongarch64/cordbregisterset.cpp" #include "loongarch64/primitives.cpp" +#elif TARGET_RISCV64 +#include "riscv64/cordbregisterset.cpp" +#include "riscv64/primitives.cpp" #else #error Unsupported platform #endif diff --git a/src/coreclr/debug/di/riscv64/cordbregisterset.cpp b/src/coreclr/debug/di/riscv64/cordbregisterset.cpp new file mode 100644 index 00000000000000..7a80fd91dee26f --- /dev/null +++ b/src/coreclr/debug/di/riscv64/cordbregisterset.cpp @@ -0,0 +1,116 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +//***************************************************************************** +// File: CordbRegisterSet.cpp +// + +// +//***************************************************************************** +#include "primitives.h" + + +HRESULT CordbRegisterSet::GetRegistersAvailable(ULONG64* pAvailable) +{ + FAIL_IF_NEUTERED(this); + VALIDATE_POINTER_TO_OBJECT(pAvailable, ULONG64 *); + + *pAvailable = SETBITULONG64(REGISTER_RISCV64_PC) + | SETBITULONG64(REGISTER_RISCV64_RA) + | SETBITULONG64(REGISTER_RISCV64_SP) + | SETBITULONG64(REGISTER_RISCV64_GP) + | SETBITULONG64(REGISTER_RISCV64_TP) + | SETBITULONG64(REGISTER_RISCV64_T0) + | SETBITULONG64(REGISTER_RISCV64_T1) + | SETBITULONG64(REGISTER_RISCV64_T2) + | SETBITULONG64(REGISTER_RISCV64_FP) + | SETBITULONG64(REGISTER_RISCV64_S1) + | SETBITULONG64(REGISTER_RISCV64_A0) + | SETBITULONG64(REGISTER_RISCV64_A1) + | SETBITULONG64(REGISTER_RISCV64_A2) + | SETBITULONG64(REGISTER_RISCV64_A3) + | SETBITULONG64(REGISTER_RISCV64_A4) + | SETBITULONG64(REGISTER_RISCV64_A5) + | SETBITULONG64(REGISTER_RISCV64_A6) + | SETBITULONG64(REGISTER_RISCV64_A7) + | SETBITULONG64(REGISTER_RISCV64_S2) + | SETBITULONG64(REGISTER_RISCV64_S3) + | SETBITULONG64(REGISTER_RISCV64_S4) + | SETBITULONG64(REGISTER_RISCV64_S5) + | SETBITULONG64(REGISTER_RISCV64_S6) + | SETBITULONG64(REGISTER_RISCV64_S7) + | SETBITULONG64(REGISTER_RISCV64_S8) + | SETBITULONG64(REGISTER_RISCV64_S9) + | SETBITULONG64(REGISTER_RISCV64_S10) + | SETBITULONG64(REGISTER_RISCV64_S11) + | SETBITULONG64(REGISTER_RISCV64_T3) + | SETBITULONG64(REGISTER_RISCV64_T4) + | SETBITULONG64(REGISTER_RISCV64_T5) + | SETBITULONG64(REGISTER_RISCV64_T6) + | SETBITULONG64(REGISTER_RISCV64_F0) + | SETBITULONG64(REGISTER_RISCV64_F1) + | SETBITULONG64(REGISTER_RISCV64_F2) + | SETBITULONG64(REGISTER_RISCV64_F3) + | SETBITULONG64(REGISTER_RISCV64_F4) + | SETBITULONG64(REGISTER_RISCV64_F5) + | SETBITULONG64(REGISTER_RISCV64_F6) + | SETBITULONG64(REGISTER_RISCV64_F7) + | SETBITULONG64(REGISTER_RISCV64_F8) + | SETBITULONG64(REGISTER_RISCV64_F9) + | SETBITULONG64(REGISTER_RISCV64_F10) + | SETBITULONG64(REGISTER_RISCV64_F11) + | SETBITULONG64(REGISTER_RISCV64_F12) + | SETBITULONG64(REGISTER_RISCV64_F13) + | SETBITULONG64(REGISTER_RISCV64_F14) + | SETBITULONG64(REGISTER_RISCV64_F15) + | SETBITULONG64(REGISTER_RISCV64_F16) + | SETBITULONG64(REGISTER_RISCV64_F17) + | SETBITULONG64(REGISTER_RISCV64_F18) + | SETBITULONG64(REGISTER_RISCV64_F19) + | SETBITULONG64(REGISTER_RISCV64_F20) + | SETBITULONG64(REGISTER_RISCV64_F21) + | SETBITULONG64(REGISTER_RISCV64_F22) + | SETBITULONG64(REGISTER_RISCV64_F23) + | SETBITULONG64(REGISTER_RISCV64_F24) + | SETBITULONG64(REGISTER_RISCV64_F25) + | SETBITULONG64(REGISTER_RISCV64_F26) + | SETBITULONG64(REGISTER_RISCV64_F27) + | SETBITULONG64(REGISTER_RISCV64_F28) + | SETBITULONG64(REGISTER_RISCV64_F29) + | SETBITULONG64(REGISTER_RISCV64_F30) + | SETBITULONG64(REGISTER_RISCV64_F31); + + return S_OK; +} + +HRESULT CordbRegisterSet::GetRegisters(ULONG64 mask, ULONG32 regCount, + CORDB_REGISTER regBuffer[]) +{ + _ASSERTE(!"RISCV64:NYI"); + return S_OK; +} + + +HRESULT CordbRegisterSet::GetRegistersAvailable(ULONG32 regCount, + BYTE pAvailable[]) +{ + _ASSERTE(!"RISCV64:NYI"); + return S_OK; +} + + +HRESULT CordbRegisterSet::GetRegisters(ULONG32 maskCount, BYTE mask[], + ULONG32 regCount, CORDB_REGISTER regBuffer[]) +{ + _ASSERTE(!"RISCV64:NYI"); + return S_OK; +} + + +// This is just a convenience function to convert a regdisplay into a Context. 
+// Since a context has more info than a regdisplay, the conversion isn't perfect +// and the context can't be fully accurate. +void CordbRegisterSet::InternalCopyRDToContext(DT_CONTEXT *pInputContext) +{ + _ASSERTE(!"RISCV64:NYI"); +} diff --git a/src/coreclr/debug/di/riscv64/floatconversion.S b/src/coreclr/debug/di/riscv64/floatconversion.S new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/src/coreclr/vm/riscv64/calldescrworkerloongarch64.S b/src/coreclr/debug/di/riscv64/primitives.cpp similarity index 57% rename from src/coreclr/vm/riscv64/calldescrworkerloongarch64.S rename to src/coreclr/debug/di/riscv64/primitives.cpp index a7cd5b6c4d2403..97b053a8d3e9c6 100644 --- a/src/coreclr/vm/riscv64/calldescrworkerloongarch64.S +++ b/src/coreclr/debug/di/riscv64/primitives.cpp @@ -1,7 +1,6 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -#include "unixasmmacros.inc" -#include "asmconstants.h" +// -#error "TODO-RISCV64: missing implementation" +#include "../../shared/riscv64/primitives.cpp" diff --git a/src/coreclr/debug/di/rsthread.cpp b/src/coreclr/debug/di/rsthread.cpp index 075ed936fed530..5a2ea695ebc84c 100644 --- a/src/coreclr/debug/di/rsthread.cpp +++ b/src/coreclr/debug/di/rsthread.cpp @@ -8331,6 +8331,9 @@ HRESULT CordbJITILFrame::GetNativeVariable(CordbType *type, #elif defined(TARGET_LOONGARCH64) hr = m_nativeFrame->GetLocalFloatingPointValue(pNativeVarInfo->loc.vlReg.vlrReg + REGISTER_LOONGARCH64_F0, type, ppValue); +#elif defined(TARGET_RISCV64) + hr = m_nativeFrame->GetLocalFloatingPointValue(pNativeVarInfo->loc.vlReg.vlrReg + REGISTER_RISCV64_F0, + type, ppValue); #else #error Platform not implemented #endif // TARGET_ARM @ARMTODO @@ -8769,6 +8772,8 @@ HRESULT CordbJITILFrame::GetReturnValueForType(CordbType *pType, ICorDebugValue const CorDebugRegister floatRegister = REGISTER_ARM_D0; #elif defined(TARGET_LOONGARCH64) const CorDebugRegister floatRegister = REGISTER_LOONGARCH64_F0; +#elif defined(TARGET_RISCV64) + const CorDebugRegister floatRegister = REGISTER_RISCV64_F0; #endif #if defined(TARGET_X86) @@ -8783,6 +8788,8 @@ HRESULT CordbJITILFrame::GetReturnValueForType(CordbType *pType, ICorDebugValue const CorDebugRegister ptrHighWordRegister = REGISTER_ARM_R1; #elif defined(TARGET_LOONGARCH64) const CorDebugRegister ptrRegister = REGISTER_LOONGARCH64_A0; +#elif defined(TARGET_RISCV64) + const CorDebugRegister ptrRegister = REGISTER_RISCV64_A0; #endif CorElementType corReturnType = pType->GetElementType(); diff --git a/src/coreclr/debug/di/shimremotedatatarget.cpp b/src/coreclr/debug/di/shimremotedatatarget.cpp index 1a5fb8562a6be3..674d325560ff52 100644 --- a/src/coreclr/debug/di/shimremotedatatarget.cpp +++ b/src/coreclr/debug/di/shimremotedatatarget.cpp @@ -232,6 +232,8 @@ ShimRemoteDataTarget::GetPlatform( *pPlatform = CORDB_PLATFORM_POSIX_ARM64; #elif defined(TARGET_LOONGARCH64) *pPlatform = CORDB_PLATFORM_POSIX_LOONGARCH64; + #elif defined(TARGET_RISCV64) + *pPlatform = CORDB_PLATFORM_POSIX_RISCV64; #else #error Unknown Processor. 
#endif diff --git a/src/coreclr/debug/ee/riscv64/dbghelpers.S b/src/coreclr/debug/ee/riscv64/dbghelpers.S index 3515f38c8120d7..c53dd09fce8da2 100644 --- a/src/coreclr/debug/ee/riscv64/dbghelpers.S +++ b/src/coreclr/debug/ee/riscv64/dbghelpers.S @@ -4,4 +4,19 @@ #include "asmconstants.h" #include "unixasmmacros.inc" -#error "TODO-RISCV64: missing implementation" +// #error "TODO-RISCV64: missing implementation" + +// +// hijacking stub used to perform a func-eval, see Debugger::FuncEvalSetup() for use. +// + +// @dbgtodo- once we port Funceval, use the ExceptionHijack stub instead of this func-eval stub. +NESTED_ENTRY FuncEvalHijack, _TEXT, UnhandledExceptionHandlerUnix +// TODO RISCV64 NYI +NESTED_END FuncEvalHijack + +// This is the general purpose hijacking stub. The DacDbi Hijack primitive will +// set up the stack and then set the IP here, and so this just makes the call. +NESTED_ENTRY ExceptionHijack, _TEXT, UnhandledExceptionHandlerUnix +// TODO RISCV64 NYI +NESTED_END ExceptionHijack, _TEXT diff --git a/src/coreclr/debug/ee/riscv64/primitives.cpp b/src/coreclr/debug/ee/riscv64/primitives.cpp index c4b50b4c66ef08..60bf9806f27b16 100644 --- a/src/coreclr/debug/ee/riscv64/primitives.cpp +++ b/src/coreclr/debug/ee/riscv64/primitives.cpp @@ -7,4 +7,9 @@ #include "threads.h" #include "../../shared/riscv64/primitives.cpp" -#error "TODO-RISCV64: missing implementation" +// #error "TODO-RISCV64: missing implementation" +void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc) +{ + CONTEXT tmp; + CopyRegDisplay(pSrc, pDst, &tmp); +} diff --git a/src/coreclr/debug/ee/riscv64/walker.cpp b/src/coreclr/debug/ee/riscv64/walker.cpp index c428cd8f3dbd23..4bf789d5f5c6ce 100644 --- a/src/coreclr/debug/ee/riscv64/walker.cpp +++ b/src/coreclr/debug/ee/riscv64/walker.cpp @@ -14,6 +14,6 @@ #ifdef TARGET_RISCV64 -#error "TODO-RISCV64: missing implementation" +// #error "TODO-RISCV64: missing implementation" #endif diff --git a/src/coreclr/debug/inc/dbgipcevents.h b/src/coreclr/debug/inc/dbgipcevents.h index f40eae3c6a4276..76b228b6ebf1bc 100644 --- a/src/coreclr/debug/inc/dbgipcevents.h +++ b/src/coreclr/debug/inc/dbgipcevents.h @@ -1895,6 +1895,13 @@ C_ASSERT(DBG_TARGET_REGNUM_AMBIENT_SP == ICorDebugInfo::REGNUM_AMBIENT_SP); C_ASSERT(DBG_TARGET_REGNUM_SP == ICorDebugInfo::REGNUM_SP); C_ASSERT(DBG_TARGET_REGNUM_AMBIENT_SP == ICorDebugInfo::REGNUM_AMBIENT_SP); #endif +#elif defined(TARGET_RISCV64) +#define DBG_TARGET_REGNUM_SP 2 +#define DBG_TARGET_REGNUM_AMBIENT_SP 34 +#ifdef TARGET_RISCV64 +C_ASSERT(DBG_TARGET_REGNUM_SP == ICorDebugInfo::REGNUM_SP); +C_ASSERT(DBG_TARGET_REGNUM_AMBIENT_SP == ICorDebugInfo::REGNUM_AMBIENT_SP); +#endif #else #error Target registers are not defined for this platform #endif diff --git a/src/coreclr/debug/inc/dbgtargetcontext.h b/src/coreclr/debug/inc/dbgtargetcontext.h index 403039e24d6a56..dc3b853014dee2 100644 --- a/src/coreclr/debug/inc/dbgtargetcontext.h +++ b/src/coreclr/debug/inc/dbgtargetcontext.h @@ -50,6 +50,8 @@ #define DTCONTEXT_IS_ARM64 #elif defined (TARGET_LOONGARCH64) #define DTCONTEXT_IS_LOONGARCH64 +#elif defined (TARGET_RISCV64) +#define DTCONTEXT_IS_RISCV64 #endif #if defined(DTCONTEXT_IS_X86) @@ -293,6 +295,7 @@ typedef struct DECLSPEC_ALIGN(16) { #define DT_ARM_MAX_BREAKPOINTS 8 #define DT_ARM_MAX_WATCHPOINTS 1 + typedef struct { ULONGLONG Low; LONGLONG High; @@ -513,6 +516,71 @@ typedef DECLSPEC_ALIGN(16) struct { ULONGLONG F[32]; } DT_CONTEXT; +#elif defined(DTCONTEXT_IS_RISCV64) +#define DT_CONTEXT_RISCV64 0x01000000L + +#define DT_CONTEXT_CONTROL 
(DT_CONTEXT_RISCV64 | 0x1L) +#define DT_CONTEXT_INTEGER (DT_CONTEXT_RISCV64 | 0x2L) +#define DT_CONTEXT_FLOATING_POINT (DT_CONTEXT_RISCV64 | 0x4L) +#define DT_CONTEXT_DEBUG_REGISTERS (DT_CONTEXT_RISCV64 | 0x8L) + +#define DT_CONTEXT_FULL (DT_CONTEXT_CONTROL | DT_CONTEXT_INTEGER | DT_CONTEXT_FLOATING_POINT) +#define DT_CONTEXT_ALL (DT_CONTEXT_CONTROL | DT_CONTEXT_INTEGER | DT_CONTEXT_FLOATING_POINT | DT_CONTEXT_DEBUG_REGISTERS) + +#define DT_RISCV64_MAX_BREAKPOINTS 8 +#define DT_RISCV64_MAX_WATCHPOINTS 2 + +typedef DECLSPEC_ALIGN(16) struct { + // + // Control flags. + // + + /* +0x000 */ DWORD ContextFlags; + + // + // Integer registers + // + DWORD64 ZR; + DWORD64 RA; + DWORD64 SP; + DWORD64 GP; + DWORD64 TP; + DWORD64 T0; + DWORD64 T1; + DWORD64 T2; + DWORD64 FP; + DWORD64 S1; + DWORD64 A0; + DWORD64 A1; + DWORD64 A2; + DWORD64 A3; + DWORD64 A4; + DWORD64 A5; + DWORD64 A6; + DWORD64 A7; + DWORD64 S2; + DWORD64 S3; + DWORD64 S4; + DWORD64 S5; + DWORD64 S6; + DWORD64 S7; + DWORD64 S8; + DWORD64 S9; + DWORD64 S10; + DWORD64 S11; + DWORD64 T3; + DWORD64 T4; + DWORD64 T5; + DWORD64 T6; + DWORD64 PC; + + // + // Floating Point Registers + // + ULONGLONG F[32]; +} DT_CONTEXT; + + #else #error Unsupported platform #endif diff --git a/src/coreclr/debug/inc/riscv64/primitives.h b/src/coreclr/debug/inc/riscv64/primitives.h new file mode 100644 index 00000000000000..99a1e28ce801c8 --- /dev/null +++ b/src/coreclr/debug/inc/riscv64/primitives.h @@ -0,0 +1,259 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +//***************************************************************************** +// File: primitives.h +// + +// +// Platform-specific debugger primitives +// +//***************************************************************************** + +#ifndef PRIMITIVES_H_ +#define PRIMITIVES_H_ + +typedef const BYTE CORDB_ADDRESS_TYPE; +typedef DPTR(CORDB_ADDRESS_TYPE) PTR_CORDB_ADDRESS_TYPE; + +#define MAX_INSTRUCTION_LENGTH 4 + +// Given a return address retrieved during stackwalk, +// this is the offset by which it should be decremented to land at the call instruction. 
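+// (This assumes the port targets RV64G without the compressed-instruction (RVC)
+// extension: every instruction, including the JAL/JALR at a call site, is
+// 4 bytes -- consistent with MAX_INSTRUCTION_LENGTH above -- so
+// return address - 4 always lands on the call instruction itself.)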
+#define STACKWALK_CONTROLPC_ADJUST_OFFSET 4
+
+#define PRD_TYPE LONG
+#define CORDbg_BREAK_INSTRUCTION_SIZE 4
+#define CORDbg_BREAK_INSTRUCTION (LONG)0x00100073
+
+inline CORDB_ADDRESS GetPatchEndAddr(CORDB_ADDRESS patchAddr)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+    return patchAddr + CORDbg_BREAK_INSTRUCTION_SIZE;
+}
+
+#define InitializePRDToBreakInst(_pPRD) *(_pPRD) = CORDbg_BREAK_INSTRUCTION
+#define PRDIsBreakInst(_pPRD) (*(_pPRD) == CORDbg_BREAK_INSTRUCTION)
+
+
+#define CORDbgGetInstructionEx(_buffer, _requestedAddr, _patchAddr, _dummy1, _dummy2) \
+    CORDbgGetInstructionExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)));
+
+#define CORDbgSetInstructionEx(_buffer, _requestedAddr, _patchAddr, _opcode, _dummy2) \
+    CORDbgSetInstructionExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)), (_opcode));
+
+#define CORDbgInsertBreakpointEx(_buffer, _requestedAddr, _patchAddr, _dummy1, _dummy2) \
+    CORDbgInsertBreakpointExImpl((CORDB_ADDRESS_TYPE *)((_buffer) + (_patchAddr) - (_requestedAddr)));
+
+
+constexpr CorDebugRegister g_JITToCorDbgReg[] =
+{
+    REGISTER_RISCV64_RA,
+    REGISTER_RISCV64_SP,
+    REGISTER_RISCV64_GP,
+    REGISTER_RISCV64_TP,
+    REGISTER_RISCV64_T0,
+    REGISTER_RISCV64_T1,
+    REGISTER_RISCV64_T2,
+    REGISTER_RISCV64_FP,
+    REGISTER_RISCV64_S1,
+    REGISTER_RISCV64_A0,
+    REGISTER_RISCV64_A1,
+    REGISTER_RISCV64_A2,
+    REGISTER_RISCV64_A3,
+    REGISTER_RISCV64_A4,
+    REGISTER_RISCV64_A5,
+    REGISTER_RISCV64_A6,
+    REGISTER_RISCV64_A7,
+    REGISTER_RISCV64_S2,
+    REGISTER_RISCV64_S3,
+    REGISTER_RISCV64_S4,
+    REGISTER_RISCV64_S5,
+    REGISTER_RISCV64_S6,
+    REGISTER_RISCV64_S7,
+    REGISTER_RISCV64_S8,
+    REGISTER_RISCV64_S9,
+    REGISTER_RISCV64_S10,
+    REGISTER_RISCV64_S11,
+    REGISTER_RISCV64_T3,
+    REGISTER_RISCV64_T4,
+    REGISTER_RISCV64_T5,
+    REGISTER_RISCV64_T6,
+    REGISTER_RISCV64_PC
+};
+
+inline void CORDbgSetIP(DT_CONTEXT *context, LPVOID ip) {
+    LIMITED_METHOD_CONTRACT;
+
+    context->PC = (DWORD64)ip;
+}
+
+inline LPVOID CORDbgGetSP(const DT_CONTEXT * context) {
+    LIMITED_METHOD_CONTRACT;
+
+    return (LPVOID)(size_t)(context->SP);
+}
+
+inline void CORDbgSetSP(DT_CONTEXT *context, LPVOID esp) {
+    LIMITED_METHOD_CONTRACT;
+
+    context->SP = (DWORD64)esp;
+}
+
+inline LPVOID CORDbgGetFP(const DT_CONTEXT * context) {
+    LIMITED_METHOD_CONTRACT;
+
+    return (LPVOID)(size_t)(context->FP);
+}
+
+inline void CORDbgSetFP(DT_CONTEXT *context, LPVOID fp) {
+    LIMITED_METHOD_CONTRACT;
+
+    context->FP = (DWORD64)fp;
+}
+
+
+inline BOOL CompareControlRegisters(const DT_CONTEXT * pCtx1, const DT_CONTEXT * pCtx2)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+
+    // TODO-RISCV64: Sort out frame registers
+
+    if ((pCtx1->PC == pCtx2->PC) &&
+        (pCtx1->SP == pCtx2->SP) &&
+        (pCtx1->FP == pCtx2->FP))
+    {
+        return TRUE;
+    }
+
+    return FALSE;
+}
+
+inline void CORDbgSetInstruction(CORDB_ADDRESS_TYPE* address,
+                                 PRD_TYPE instruction)
+{
+    // In a DAC build, this function assumes the input is a host address.
+    LIMITED_METHOD_DAC_CONTRACT;
+
+    ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+    *(PRD_TYPE *)ptraddr = instruction;
+    FlushInstructionCache(GetCurrentProcess(),
+                          address,
+                          sizeof(PRD_TYPE));
+}
+
+inline PRD_TYPE CORDbgGetInstruction(UNALIGNED CORDB_ADDRESS_TYPE* address)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    ULONGLONG ptraddr = dac_cast<ULONGLONG>(address);
+    return *(PRD_TYPE *)ptraddr;
+}
+
+//
+// Mapping from ICorDebugInfo register numbers to CorDebugRegister
+// numbers. Note: this must match the order in corinfo.h.
+//
+inline CorDebugRegister ConvertRegNumToCorDebugRegister(ICorDebugInfo::RegNum reg)
+{
+    LIMITED_METHOD_CONTRACT;
+    _ASSERTE(reg >= 0);
+    _ASSERTE(static_cast<size_t>(reg) < ARRAY_SIZE(g_JITToCorDbgReg));
+    return g_JITToCorDbgReg[reg];
+}
+
+inline LPVOID CORDbgGetIP(DT_CONTEXT *context)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return (LPVOID)(size_t)(context->PC);
+}
+
+inline void CORDbgSetInstructionExImpl(CORDB_ADDRESS_TYPE* address,
+                                       PRD_TYPE instruction)
+{
+    LIMITED_METHOD_DAC_CONTRACT;
+
+    *(PRD_TYPE *)address = instruction;
+    FlushInstructionCache(GetCurrentProcess(),
+                          address,
+                          sizeof(PRD_TYPE));
+}
+
+inline PRD_TYPE CORDbgGetInstructionExImpl(UNALIGNED CORDB_ADDRESS_TYPE* address)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return *(PRD_TYPE *)address;
+}
+
+inline void CORDbgInsertBreakpoint(UNALIGNED CORDB_ADDRESS_TYPE *address)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    CORDbgSetInstruction(address, CORDbg_BREAK_INSTRUCTION);
+}
+
+inline void CORDbgInsertBreakpointExImpl(UNALIGNED CORDB_ADDRESS_TYPE *address)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    CORDbgSetInstruction(address, CORDbg_BREAK_INSTRUCTION);
+}
+
+// After a breakpoint exception, the CPU points to _after_ the break instruction.
+// Adjust the IP so that it points at the break instruction. This lets us patch that
+// opcode and re-execute what was underneath the bp.
+inline void CORDbgAdjustPCForBreakInstruction(DT_CONTEXT* pContext)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    // RISCV64, like LoongArch64, leaves the PC at the start of the break instruction; nothing to adjust.
+    return;
+}
+
+inline bool AddressIsBreakpoint(CORDB_ADDRESS_TYPE* address)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return CORDbgGetInstruction(address) == CORDbg_BREAK_INSTRUCTION;
+}
+
+inline void SetSSFlag(DT_CONTEXT *pContext)
+{
+    // TODO-RISCV64: RISC-V has no CPSR-style single-step flag.
+    _ASSERTE(!"unimplemented on RISCV64 yet");
+}
+
+inline void UnsetSSFlag(DT_CONTEXT *pContext)
+{
+    // TODO-RISCV64: RISC-V has no CPSR-style single-step flag.
+    _ASSERTE(!"unimplemented on RISCV64 yet");
+}
+
+inline bool IsSSFlagEnabled(DT_CONTEXT * pContext)
+{
+    // TODO-RISCV64: RISC-V has no CPSR-style single-step flag.
+    _ASSERTE(!"unimplemented on RISCV64 yet");
+    return false;
+}
+
+
+inline bool PRDIsEqual(PRD_TYPE p1, PRD_TYPE p2)
+{
+    return p1 == p2;
+}
+
+inline void InitializePRD(PRD_TYPE *p1)
+{
+    *p1 = 0;
+}
+
+inline bool PRDIsEmpty(PRD_TYPE p1)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return p1 == 0;
+}
+
+#endif // PRIMITIVES_H_
diff --git a/src/coreclr/debug/shared/riscv64/primitives.cpp b/src/coreclr/debug/shared/riscv64/primitives.cpp
index 50eae7be8a7ea8..1a65ce043deb34 100644
--- a/src/coreclr/debug/shared/riscv64/primitives.cpp
+++ b/src/coreclr/debug/shared/riscv64/primitives.cpp
@@ -12,4 +12,19 @@

 #include "primitives.h"

-#error "TODO-RISCV64: missing implementation"
+// #error "TODO-RISCV64: missing implementation"
+
+// CopyThreadContext() does an intelligent copy from pSrc to pDst,
+// respecting the ContextFlags of both contexts.
+//
+void CORDbgCopyThreadContext(DT_CONTEXT* pDst, const DT_CONTEXT* pSrc)
+{
+    _ASSERTE(!"RISCV64:NYI");
+}
+
+#if defined(ALLOW_VMPTR_ACCESS) || !defined(RIGHT_SIDE_COMPILE)
+void SetDebuggerREGDISPLAYFromREGDISPLAY(DebuggerREGDISPLAY* pDRD, REGDISPLAY* pRD)
+{
+    _ASSERTE(!"RISCV64:NYI");
+}
+#endif // ALLOW_VMPTR_ACCESS || !RIGHT_SIDE_COMPILE
diff --git a/src/coreclr/dlls/mscordac/CMakeLists.txt b/src/coreclr/dlls/mscordac/CMakeLists.txt
index 25c2532358774b..ee29ceb80b2d5b 100644
--- a/src/coreclr/dlls/mscordac/CMakeLists.txt
+++ b/src/coreclr/dlls/mscordac/CMakeLists.txt
@@ -49,6 +49,8 @@ else(CLR_CMAKE_HOST_WIN32)

   if (CLR_CMAKE_HOST_ARCH_ARM OR CLR_CMAKE_HOST_ARCH_ARM64 OR CLR_CMAKE_HOST_ARCH_LOONGARCH64)
     set(JUMP_INSTRUCTION b)
+  elseif (CLR_CMAKE_HOST_ARCH_RISCV64)
+    set(JUMP_INSTRUCTION tail)
   else()
     set(JUMP_INSTRUCTION jmp)
   endif()
diff --git a/src/coreclr/gc/env/gcenv.base.h b/src/coreclr/gc/env/gcenv.base.h
index c6a73eb6afea5b..eb41566139ad05 100644
--- a/src/coreclr/gc/env/gcenv.base.h
+++ b/src/coreclr/gc/env/gcenv.base.h
@@ -226,6 +226,11 @@ typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter);
     #define MemoryBarrier __sync_synchronize
 #endif // __loongarch64

+#ifdef __riscv // TODO RISCV64
+    #define YieldProcessor() asm volatile( "wfi");
+    #define MemoryBarrier __sync_synchronize
+#endif // __riscv
+
 #endif // _MSC_VER

 #ifdef _MSC_VER
diff --git a/src/coreclr/gc/env/volatile.h b/src/coreclr/gc/env/volatile.h
index bef01f680e5465..c40e5c2d60964e 100644
--- a/src/coreclr/gc/env/volatile.h
+++ b/src/coreclr/gc/env/volatile.h
@@ -66,8 +66,8 @@
 #error The Volatile type is currently only defined for Visual C++ and GNU C++
 #endif

-#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_LOONGARCH64) && !defined(HOST_WASM)
-#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, LOONGARCH64 or Wasm
+#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_LOONGARCH64) && !defined(HOST_WASM) && !defined(HOST_RISCV64)
+#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, LOONGARCH64, Wasm, or RISCV64
 #endif

 #if defined(__GNUC__)
@@ -76,6 +76,8 @@
 #define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb ish" : : : "memory")
 #elif defined(HOST_LOONGARCH64)
 #define VOLATILE_MEMORY_BARRIER() asm volatile ("dbar 0 " : : : "memory")
+#elif defined(HOST_RISCV64)
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("fence rw,rw" : : : "memory")
 #else
 //
 // For GCC, we prevent reordering by the compiler by inserting the following after a volatile
diff --git a/src/coreclr/gcdump/gcdumpnonx86.cpp b/src/coreclr/gcdump/gcdumpnonx86.cpp
index f93e9bd42ca3ac..336dede0d608b3 100644
--- a/src/coreclr/gcdump/gcdumpnonx86.cpp
+++ b/src/coreclr/gcdump/gcdumpnonx86.cpp
@@ -72,6 +72,9 @@ PCSTR GetRegName (UINT32 regnum)
 #elif defined(TARGET_LOONGARCH64)
     assert(!"unimplemented on LOONGARCH yet");
     return "???";
+#elif defined(TARGET_RISCV64)
+    assert(!"unimplemented on RISCV64 yet");
+    return "???";
 #endif
 }

diff --git a/src/coreclr/gcinfo/CMakeLists.txt b/src/coreclr/gcinfo/CMakeLists.txt
index a9d8a6f5848d0f..f1dcd7fc89d3a8 100644
--- 
a/src/coreclr/gcinfo/CMakeLists.txt +++ b/src/coreclr/gcinfo/CMakeLists.txt @@ -79,8 +79,12 @@ if (CLR_CMAKE_TARGET_ARCH_LOONGARCH64) create_gcinfo_lib(TARGET gcinfo_unix_loongarch64 OS unix ARCH loongarch64) endif (CLR_CMAKE_TARGET_ARCH_LOONGARCH64) -create_gcinfo_lib(TARGET gcinfo_universal_arm OS universal ARCH arm) -create_gcinfo_lib(TARGET gcinfo_win_x86 OS win ARCH x86) +if (CLR_CMAKE_TARGET_ARCH_RISCV64) + create_gcinfo_lib(TARGET gcinfo_unix_riscv64 OS unix ARCH riscv64) +else() + create_gcinfo_lib(TARGET gcinfo_universal_arm OS universal ARCH arm) + create_gcinfo_lib(TARGET gcinfo_win_x86 OS win ARCH x86) +endif (CLR_CMAKE_TARGET_ARCH_RISCV64) if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) create_gcinfo_lib(TARGET gcinfo_unix_x86 OS unix ARCH x86) diff --git a/src/coreclr/gcinfo/gcinfodumper.cpp b/src/coreclr/gcinfo/gcinfodumper.cpp index ba70d9c68c8578..f64e8a2e904ec1 100644 --- a/src/coreclr/gcinfo/gcinfodumper.cpp +++ b/src/coreclr/gcinfo/gcinfodumper.cpp @@ -224,6 +224,46 @@ BOOL GcInfoDumper::ReportPointerRecord ( REG(ra, Ra), { offsetof(T_CONTEXT, Sp) }, #undef REG +#elif defined(TARGET_RISCV64) +#undef REG +#define REG(reg, field) { offsetof(Riscv64VolatileContextPointer, field) } + REG(zero, R0), + REG(a0, A0), + REG(a1, A1), + REG(a2, A2), + REG(a3, A3), + REG(a4, A4), + REG(a5, A5), + REG(a6, A6), + REG(a7, A7), + REG(t0, T0), + REG(t1, T1), + REG(t2, T2), + REG(t3, T3), + REG(t4, T4), + REG(t5, T5), + REG(t6, T6), + REG(x0, X0), +#undef REG +#define REG(reg, field) { offsetof(T_KNONVOLATILE_CONTEXT_POINTERS, field) } + REG(s1, S1), + REG(s2, S2), + REG(s3, S3), + REG(s4, S4), + REG(s5, S5), + REG(s6, S6), + REG(s7, S7), + REG(s8, S8), + REG(s9, S9), + REG(s10, S10), + REG(s11, S11), + REG(ra, Ra), + REG(gp, Gp), + REG(tp, Tp), + REG(fp, Fp), + { offsetof(T_CONTEXT, Sp) }, +#undef REG + #else PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this platform.") #endif @@ -248,6 +288,9 @@ PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this #elif defined(TARGET_LOONGARCH64) assert(!"unimplemented on LOONGARCH yet"); iSPRegister = 0; +#elif defined(TARGET_RISCV64) + assert(!"unimplemented on RISCV64 yet"); + iSPRegister = 0; #endif #if defined(TARGET_ARM) || defined(TARGET_ARM64) @@ -660,8 +703,11 @@ GcInfoDumper::EnumerateStateChangesResults GcInfoDumper::EnumerateStateChanges ( #elif defined(TARGET_LOONGARCH64) #pragma message("Unimplemented for LOONGARCH64 yet.") assert(!"unimplemented on LOONGARCH yet"); +#elif defined(TARGET_RISCV64) +#pragma message("Unimplemented for RISCV64 yet.") + assert(!"unimplemented on RISCV64 yet"); // TODO RISCV64 #else -PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on this platform.") +PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on this platform."); #endif #undef FILL_REGS diff --git a/src/coreclr/inc/clrnt.h b/src/coreclr/inc/clrnt.h index 0bba92c7e3a2f9..3da9a6abf56787 100644 --- a/src/coreclr/inc/clrnt.h +++ b/src/coreclr/inc/clrnt.h @@ -1084,4 +1084,62 @@ RtlVirtualUnwind( #endif // TARGET_LOONGARCH64 +#ifdef TARGET_RISCV64 +#include "daccess.h" + +#define UNW_FLAG_NHANDLER 0x0 /* any handler */ +#define UNW_FLAG_EHANDLER 0x1 /* filter handler */ +#define UNW_FLAG_UHANDLER 0x2 /* unwind handler */ + +// This function returns the RVA of the end of the function (exclusive, so one byte after the actual end) +// using the unwind info on ARM64. 
(see ExternalAPIs\Win9CoreSystem\inc\winnt.h) +FORCEINLINE +ULONG64 +RtlpGetFunctionEndAddress ( + _In_ PT_RUNTIME_FUNCTION FunctionEntry, + _In_ ULONG64 ImageBase + ) +{ + // TODO RISCV64 + ULONG64 FunctionLength; + + FunctionLength = FunctionEntry->UnwindData; + if ((FunctionLength & 3) != 0) { + FunctionLength = (FunctionLength >> 2) & 0x7ff; + } else { + memcpy(&FunctionLength, (void*)(ImageBase + FunctionLength), sizeof(UINT32)); + FunctionLength &= 0x3ffff; + } + + return FunctionEntry->BeginAddress + 4 * FunctionLength; +} + +#define RUNTIME_FUNCTION__BeginAddress(FunctionEntry) ((FunctionEntry)->BeginAddress) +#define RUNTIME_FUNCTION__SetBeginAddress(FunctionEntry,address) ((FunctionEntry)->BeginAddress = (address)) + +#define RUNTIME_FUNCTION__EndAddress(FunctionEntry, ImageBase) (RtlpGetFunctionEndAddress(FunctionEntry, (ULONG64)(ImageBase))) + +#define RUNTIME_FUNCTION__SetUnwindInfoAddress(prf,address) do { (prf)->UnwindData = (address); } while (0) + +typedef struct _UNWIND_INFO { + // dummy +} UNWIND_INFO, *PUNWIND_INFO; + +EXTERN_C +NTSYSAPI +PEXCEPTION_ROUTINE +NTAPI +RtlVirtualUnwind( + IN ULONG HandlerType, + IN ULONG64 ImageBase, + IN ULONG64 ControlPc, + IN PRUNTIME_FUNCTION FunctionEntry, + IN OUT PCONTEXT ContextRecord, + OUT PVOID *HandlerData, + OUT PULONG64 EstablisherFrame, + IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL + ); + +#endif // TARGET_RISCV64 + #endif // CLRNT_H_ diff --git a/src/coreclr/inc/cordebuginfo.h b/src/coreclr/inc/cordebuginfo.h index 35e0eca6fd7894..01780570570e48 100644 --- a/src/coreclr/inc/cordebuginfo.h +++ b/src/coreclr/inc/cordebuginfo.h @@ -179,6 +179,40 @@ class ICorDebugInfo REGNUM_S7, REGNUM_S8, REGNUM_PC, +#elif TARGET_RISCV64 + REGNUM_R0, + REGNUM_RA, + REGNUM_SP, + REGNUM_GP, + REGNUM_TP, + REGNUM_T0, + REGNUM_T1, + REGNUM_T2, + REGNUM_S0, + REGNUM_S1, + REGNUM_A0, + REGNUM_A1, + REGNUM_A2, + REGNUM_A3, + REGNUM_A4, + REGNUM_A5, + REGNUM_A6, + REGNUM_A7, + REGNUM_S2, + REGNUM_S3, + REGNUM_S4, + REGNUM_S5, + REGNUM_S6, + REGNUM_S7, + REGNUM_S8, + REGNUM_S9, + REGNUM_S10, + REGNUM_S11, + REGNUM_T3, + REGNUM_T4, + REGNUM_T5, + REGNUM_T6, + REGNUM_PC, #else PORTABILITY_WARNING("Register numbers not defined on this platform") #endif @@ -201,6 +235,8 @@ class ICorDebugInfo //Nothing to do here. FP is already alloted. #elif TARGET_LOONGARCH64 //Nothing to do here. FP is already alloted. +#elif TARGET_RISCV64 + //Nothing to do here. FP is already alloted. 
#else // RegNum values should be properly defined for this platform REGNUM_FP = 0, diff --git a/src/coreclr/inc/eetwain.h b/src/coreclr/inc/eetwain.h index 5b20a4ef3a2365..bb5bf893b99ee7 100644 --- a/src/coreclr/inc/eetwain.h +++ b/src/coreclr/inc/eetwain.h @@ -211,9 +211,9 @@ virtual bool UnwindStackFrame(PREGDISPLAY pContext, virtual bool IsGcSafe(EECodeInfo *pCodeInfo, DWORD dwRelOffset) = 0; -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) virtual bool HasTailCalls(EECodeInfo *pCodeInfo) = 0; -#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 +#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 #if defined(TARGET_AMD64) && defined(_DEBUG) /* @@ -455,10 +455,10 @@ virtual bool IsGcSafe( EECodeInfo *pCodeInfo, DWORD dwRelOffset); -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) virtual bool HasTailCalls(EECodeInfo *pCodeInfo); -#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 +#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || defined(TARGET_RISCV64) #if defined(TARGET_AMD64) && defined(_DEBUG) /* diff --git a/src/coreclr/inc/eexcp.h b/src/coreclr/inc/eexcp.h index 2de046d70dec8a..fb7bccbe073469 100644 --- a/src/coreclr/inc/eexcp.h +++ b/src/coreclr/inc/eexcp.h @@ -117,7 +117,7 @@ inline BOOL IsDuplicateClause(EE_ILEXCEPTION_CLAUSE* pEHClause) return pEHClause->Flags & COR_ILEXCEPTION_CLAUSE_DUPLICATED; } -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // Finally is the only EH construct that can be part of the execution as being fall-through. 
// // "Cloned" finally is a construct that represents a finally block that is used as @@ -139,7 +139,7 @@ inline BOOL IsClonedFinally(EE_ILEXCEPTION_CLAUSE* pEHClause) (pEHClause->TryStartPC == pEHClause->HandlerStartPC) && IsFinally(pEHClause) && IsDuplicateClause(pEHClause)); } -#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) #endif // __eexcp_h__ diff --git a/src/coreclr/inc/gcinfodecoder.h b/src/coreclr/inc/gcinfodecoder.h index 93bdd33ed3973e..f65cb5846bfbf5 100644 --- a/src/coreclr/inc/gcinfodecoder.h +++ b/src/coreclr/inc/gcinfodecoder.h @@ -216,7 +216,7 @@ enum GcInfoDecoderFlags DECODE_EDIT_AND_CONTINUE = 0x800, DECODE_REVERSE_PINVOKE_VAR = 0x1000, DECODE_RETURN_KIND = 0x2000, -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) DECODE_HAS_TAILCALLS = 0x4000, #endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 }; @@ -235,7 +235,7 @@ enum GcInfoHeaderFlags GC_INFO_HAS_STACK_BASE_REGISTER = 0x40, #ifdef TARGET_AMD64 GC_INFO_WANTS_REPORT_ONLY_LEAF = 0x80, -#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) GC_INFO_HAS_TAILCALLS = 0x80, #endif // TARGET_AMD64 GC_INFO_HAS_EDIT_AND_CONTINUE_INFO = 0x100, @@ -539,9 +539,9 @@ class GcInfoDecoder bool HasMethodTableGenericsInstContext(); bool GetIsVarArg(); bool WantsReportOnlyLeaf(); -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool HasTailCalls(); -#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 +#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || defined(TARGET_RISCV64) ReturnKind GetReturnKind(); UINT32 GetCodeLength(); UINT32 GetStackBaseRegister(); @@ -567,7 +567,7 @@ class GcInfoDecoder bool m_GenericSecretParamIsMT; #ifdef TARGET_AMD64 bool m_WantsReportOnlyLeaf; -#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool m_HasTailCalls; #endif // TARGET_AMD64 INT32 m_GSCookieStackSlot; diff --git a/src/coreclr/inc/gcinfotypes.h b/src/coreclr/inc/gcinfotypes.h index 57cae4d264b719..33759d14d949ef 100644 --- a/src/coreclr/inc/gcinfotypes.h +++ b/src/coreclr/inc/gcinfotypes.h @@ -156,7 +156,7 @@ struct GcStackSlot // 10 RT_ByRef // 11 RT_Unset -#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // Slim Header: @@ -829,6 +829,63 @@ void FASTCALL decodeCallPattern(int pattern, #define LIVESTATE_RLE_RUN_ENCBASE 2 #define LIVESTATE_RLE_SKIP_ENCBASE 4 +#elif defined(TARGET_RISCV64) +#ifndef TARGET_POINTER_SIZE +#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target +#endif +#define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64) +#define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6) +#define NORMALIZE_STACK_SLOT(x) ((x)>>3) // GC Pointers are 8-bytes aligned +#define DENORMALIZE_STACK_SLOT(x) ((x)<<3) +#define 
NORMALIZE_CODE_LENGTH(x) ((x)>>2) // All Instructions are 4 bytes long +#define DENORMALIZE_CODE_LENGTH(x) ((x)<<2) +#define NORMALIZE_STACK_BASE_REGISTER(x) ((x)^8) // Encode Frame pointer X8 as zero +#define DENORMALIZE_STACK_BASE_REGISTER(x) ((x)^8) +#define NORMALIZE_SIZE_OF_STACK_AREA(x) ((x)>>3) +#define DENORMALIZE_SIZE_OF_STACK_AREA(x) ((x)<<3) +#define CODE_OFFSETS_NEED_NORMALIZATION 0 +#define NORMALIZE_CODE_OFFSET(x) (x) // Instructions are 4 bytes long, but the safe-point +#define DENORMALIZE_CODE_OFFSET(x) (x) // offsets are encoded with a -1 adjustment. +#define NORMALIZE_REGISTER(x) (x) +#define DENORMALIZE_REGISTER(x) (x) +#define NORMALIZE_NUM_SAFE_POINTS(x) (x) +#define DENORMALIZE_NUM_SAFE_POINTS(x) (x) +#define NORMALIZE_NUM_INTERRUPTIBLE_RANGES(x) (x) +#define DENORMALIZE_NUM_INTERRUPTIBLE_RANGES(x) (x) + +#define PSP_SYM_STACK_SLOT_ENCBASE 6 +#define GENERICS_INST_CONTEXT_STACK_SLOT_ENCBASE 6 +#define SECURITY_OBJECT_STACK_SLOT_ENCBASE 6 +#define GS_COOKIE_STACK_SLOT_ENCBASE 6 +#define CODE_LENGTH_ENCBASE 8 +#define SIZE_OF_RETURN_KIND_IN_SLIM_HEADER 2 +#define SIZE_OF_RETURN_KIND_IN_FAT_HEADER 4 +#define STACK_BASE_REGISTER_ENCBASE 2 // TODO for RISCV64 +// FP encoded as 0, SP as 2?? +#define SIZE_OF_STACK_AREA_ENCBASE 3 +#define SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA_ENCBASE 4 +#define SIZE_OF_EDIT_AND_CONTINUE_FIXED_STACK_FRAME_ENCBASE 4 +#define REVERSE_PINVOKE_FRAME_ENCBASE 6 +#define NUM_REGISTERS_ENCBASE 3 +#define NUM_STACK_SLOTS_ENCBASE 2 +#define NUM_UNTRACKED_SLOTS_ENCBASE 1 +#define NORM_PROLOG_SIZE_ENCBASE 5 +#define NORM_EPILOG_SIZE_ENCBASE 3 +#define NORM_CODE_OFFSET_DELTA_ENCBASE 3 +#define INTERRUPTIBLE_RANGE_DELTA1_ENCBASE 6 +#define INTERRUPTIBLE_RANGE_DELTA2_ENCBASE 6 +#define REGISTER_ENCBASE 3 +#define REGISTER_DELTA_ENCBASE 2 +#define STACK_SLOT_ENCBASE 6 +#define STACK_SLOT_DELTA_ENCBASE 4 +#define NUM_SAFE_POINTS_ENCBASE 3 +#define NUM_INTERRUPTIBLE_RANGES_ENCBASE 1 +#define NUM_EH_CLAUSES_ENCBASE 2 +#define POINTER_SIZE_ENCBASE 3 +#define LIVESTATE_RLE_RUN_ENCBASE 2 +#define LIVESTATE_RLE_SKIP_ENCBASE 4 + + #else #ifndef TARGET_X86 diff --git a/src/coreclr/inc/jithelpers.h b/src/coreclr/inc/jithelpers.h index 601588f98167e8..11cd8e36b8283b 100644 --- a/src/coreclr/inc/jithelpers.h +++ b/src/coreclr/inc/jithelpers.h @@ -325,7 +325,7 @@ JITHELPER(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, NULL, CORINFO_HELP_SIG_NO_ALIGN_STUB) -#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) +#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) JITHELPER(CORINFO_HELP_STACK_PROBE, JIT_StackProbe, CORINFO_HELP_SIG_REG_ONLY) #else JITHELPER(CORINFO_HELP_STACK_PROBE, NULL, CORINFO_HELP_SIG_UNDEF) diff --git a/src/coreclr/inc/pedecoder.h b/src/coreclr/inc/pedecoder.h index 17baa6483079cb..4ada5241163b7c 100644 --- a/src/coreclr/inc/pedecoder.h +++ b/src/coreclr/inc/pedecoder.h @@ -87,6 +87,8 @@ inline CHECK CheckOverflow(RVA value1, COUNT_T value2) #define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_POWERPC #elif defined(TARGET_S390X) #define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_UNKNOWN +#elif defined(TARGET_RISCV64) +#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_RISCV64 #else #error "port me" #endif diff --git a/src/coreclr/inc/regdisp.h b/src/coreclr/inc/regdisp.h index b6ccc87ff6d876..dfe3c908deb921 100644 --- a/src/coreclr/inc/regdisp.h +++ b/src/coreclr/inc/regdisp.h @@ -210,6 +210,29 @@ typedef struct _Loongarch64VolatileContextPointer } Loongarch64VolatileContextPointer; #endif +#if defined(TARGET_RISCV64) 
+typedef struct _Riscv64VolatileContextPointer +{ + PDWORD64 R0; + PDWORD64 A0; + PDWORD64 A1; + PDWORD64 A2; + PDWORD64 A3; + PDWORD64 A4; + PDWORD64 A5; + PDWORD64 A6; + PDWORD64 A7; + PDWORD64 T0; + PDWORD64 T1; + PDWORD64 T2; + PDWORD64 T3; + PDWORD64 T4; + PDWORD64 T5; + PDWORD64 T6; + PDWORD64 X0; +} Riscv64VolatileContextPointer; +#endif + struct REGDISPLAY : public REGDISPLAY_BASE { #ifdef TARGET_ARM64 Arm64VolatileContextPointer volatileCurrContextPointers; @@ -219,6 +242,10 @@ struct REGDISPLAY : public REGDISPLAY_BASE { Loongarch64VolatileContextPointer volatileCurrContextPointers; #endif +#ifdef TARGET_RISCV64 + Riscv64VolatileContextPointer volatileCurrContextPointers; +#endif + REGDISPLAY() { // Initialize diff --git a/src/coreclr/inc/switches.h b/src/coreclr/inc/switches.h index 65748b46099089..cb3d05b83d4c0b 100644 --- a/src/coreclr/inc/switches.h +++ b/src/coreclr/inc/switches.h @@ -53,7 +53,7 @@ #if defined(TARGET_X86) || defined(TARGET_ARM) #define USE_LAZY_PREFERRED_RANGE 0 -#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) +#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64) #if defined(HOST_UNIX) // In PAL we have a smechanism that reserves memory on start up that is diff --git a/src/coreclr/inc/targetosarch.h b/src/coreclr/inc/targetosarch.h index 9025a8608af0fc..fbe7806cf7d1c3 100644 --- a/src/coreclr/inc/targetosarch.h +++ b/src/coreclr/inc/targetosarch.h @@ -42,6 +42,7 @@ class TargetArchitecture static const bool IsArm32 = true; static const bool IsArmArch = true; static const bool IsLoongArch64 = false; + static const bool IsRiscv64 = false; #elif defined(TARGET_ARM64) static const bool IsX86 = false; static const bool IsX64 = false; @@ -49,6 +50,7 @@ class TargetArchitecture static const bool IsArm32 = false; static const bool IsArmArch = true; static const bool IsLoongArch64 = false; + static const bool IsRiscv64 = false; #elif defined(TARGET_AMD64) static const bool IsX86 = false; static const bool IsX64 = true; @@ -56,6 +58,7 @@ class TargetArchitecture static const bool IsArm32 = false; static const bool IsArmArch = false; static const bool IsLoongArch64 = false; + static const bool IsRiscv64 = false; #elif defined(TARGET_X86) static const bool IsX86 = true; static const bool IsX64 = false; @@ -63,6 +66,7 @@ class TargetArchitecture static const bool IsArm32 = false; static const bool IsArmArch = false; static const bool IsLoongArch64 = false; + static const bool IsRiscv64 = false; #elif defined(TARGET_LOONGARCH64) static const bool IsX86 = false; static const bool IsX64 = false; @@ -70,6 +74,15 @@ class TargetArchitecture static const bool IsArm32 = false; static const bool IsArmArch = false; static const bool IsLoongArch64 = true; + static const bool IsRiscv64 = false; +#elif defined(TARGET_RISCV64) + static const bool IsX86 = false; + static const bool IsX64 = false; + static const bool IsArm64 = false; + static const bool IsArm32 = false; + static const bool IsArmArch = false; + static const bool IsLoongArch64 = false; + static const bool IsRiscv64 = true; #else #error Unknown architecture #endif diff --git a/src/coreclr/inc/volatile.h b/src/coreclr/inc/volatile.h index 177c4932166c69..030638c2ef2a9a 100644 --- a/src/coreclr/inc/volatile.h +++ b/src/coreclr/inc/volatile.h @@ -68,8 +68,8 @@ #error The Volatile type is currently only defined for Visual C++ 
and GNU C++ #endif -#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_LOONGARCH64) && !defined(HOST_RISCV64) && !defined(HOST_S390X) && !defined(HOST_POWERPC64) -#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, LOONGARCH64, RISCV64, PPC64LE, or S390X CPUs +#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_LOONGARCH64) && !defined(HOST_RISCV64) && !defined(HOST_S390X) && !defined(HOST_POWERPC64) +#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, LOONGARCH64, RISCV64, PPC64LE, or S390X CPUs #endif #if defined(__GNUC__) diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt index a3d65680ba0651..e6ccc85cc5a889 100644 --- a/src/coreclr/jit/CMakeLists.txt +++ b/src/coreclr/jit/CMakeLists.txt @@ -54,6 +54,9 @@ function(create_standalone_jit) elseif(TARGETDETAILS_ARCH STREQUAL "loongarch64") set(JIT_ARCH_SOURCES ${JIT_LOONGARCH64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_LOONGARCH64_HEADERS}) + elseif(TARGETDETAILS_ARCH STREQUAL "riscv64") + set(JIT_ARCH_SOURCES ${JIT_RISCV64_SOURCES}) + set(JIT_ARCH_HEADERS ${JIT_RISCV64_HEADERS}) else() clr_unknown_arch() endif() @@ -258,6 +261,15 @@ set( JIT_LOONGARCH64_SOURCES unwindloongarch64.cpp ) +set( JIT_RISCV64_SOURCES + codegenriscv64.cpp + emitriscv64.cpp + lowerriscv64.cpp + lsrariscv64.cpp + targetriscv64.cpp + unwindriscv64.cpp +) + # We include the headers here for better experience in IDEs. set( JIT_HEADERS ../inc/corinfo.h @@ -415,6 +427,13 @@ set( JIT_LOONGARCH64_HEADERS registerloongarch64.h ) +set( JIT_RISCV64_HEADERS + emitriscv64.h + emitfmtsriscv64.h + instrsriscv64.h + registerriscv64.h +) + convert_to_absolute_path(JIT_SOURCES ${JIT_SOURCES}) convert_to_absolute_path(JIT_HEADERS ${JIT_HEADERS}) convert_to_absolute_path(JIT_RESOURCES ${JIT_RESOURCES}) @@ -435,6 +454,8 @@ convert_to_absolute_path(JIT_S390X_SOURCES ${JIT_S390X_SOURCES}) convert_to_absolute_path(JIT_S390X_HEADERS ${JIT_S390X_HEADERS}) convert_to_absolute_path(JIT_LOONGARCH64_SOURCES ${JIT_LOONGARCH64_SOURCES}) convert_to_absolute_path(JIT_LOONGARCH64_HEADERS ${JIT_LOONGARCH64_HEADERS}) +convert_to_absolute_path(JIT_RISCV64_SOURCES ${JIT_RISCV64_SOURCES}) +convert_to_absolute_path(JIT_RISCV64_HEADERS ${JIT_RISCV64_HEADERS}) if(CLR_CMAKE_TARGET_ARCH_AMD64) set(JIT_ARCH_SOURCES ${JIT_AMD64_SOURCES}) @@ -461,6 +482,8 @@ elseif(CLR_CMAKE_TARGET_ARCH_LOONGARCH64) set(JIT_ARCH_SOURCES ${JIT_LOONGARCH64_SOURCES}) set(JIT_ARCH_HEADERS ${JIT_LOONGARCH64_HEADERS}) elseif(CLR_CMAKE_TARGET_ARCH_RISCV64) + set(JIT_ARCH_SOURCES ${JIT_RISCV64_SOURCES}) + set(JIT_ARCH_HEADERS ${JIT_RISCV64_HEADERS}) else() clr_unknown_arch() endif() @@ -590,11 +613,6 @@ else() set(TARGET_OS_NAME win) endif() -if (NOT CLR_CMAKE_TARGET_ARCH_RISCV64) - create_standalone_jit(TARGET clrjit OS ${TARGET_OS_NAME} ARCH ${ARCH_TARGET_NAME} DESTINATIONS . sharedFramework) - install_clr(TARGETS clrjit DESTINATIONS . sharedFramework COMPONENT jit) -endif() - # Enable profile guided optimization add_pgo(clrjit) @@ -608,22 +626,26 @@ if (CLR_CMAKE_TARGET_ARCH_LOONGARCH64) create_standalone_jit(TARGET clrjit_unix_loongarch64_${ARCH_HOST_NAME} OS unix ARCH loongarch64 DESTINATIONS .)
endif (CLR_CMAKE_TARGET_ARCH_LOONGARCH64) -create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .) -target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI) -create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .) +if (CLR_CMAKE_TARGET_ARCH_RISCV64) + create_standalone_jit(TARGET clrjit_unix_riscv64_${ARCH_HOST_NAME} OS unix ARCH riscv64 DESTINATIONS .) +else() + create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .) + target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI) + create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .) +endif (CLR_CMAKE_TARGET_ARCH_RISCV64) if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) create_standalone_jit(TARGET clrjit_unix_x86_${ARCH_HOST_NAME} OS unix ARCH x86 DESTINATIONS .) endif (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX) -if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_RISCV64) - if (NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6 AND NOT ARCH_TARGET_NAME STREQUAL ppc64le) +if (CLR_CMAKE_TARGET_UNIX) + if (NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6 AND NOT ARCH_TARGET_NAME STREQUAL ppc64le AND NOT ARCH_TARGET_NAME STREQUAL riscv64) if(CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64) install_clr(TARGETS clrjit_universal_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit) else() install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit) endif() - endif(NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6 AND NOT ARCH_TARGET_NAME STREQUAL ppc64le) + endif(NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6 AND NOT ARCH_TARGET_NAME STREQUAL ppc64le AND NOT ARCH_TARGET_NAME STREQUAL riscv64) endif() if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_PGO_INSTRUMENT) diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index c75ae055e52d76..4a3135078a39af 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -273,7 +273,7 @@ class CodeGen final : public CodeGenInterface void genClearStackVec3ArgUpperBits(); #endif // UNIX_AMD64_ABI && FEATURE_SIMD -#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool genInstrWithConstant(instruction ins, emitAttr attr, regNumber reg1, @@ -780,7 +780,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genLeaInstruction(GenTreeAddrMode* lea); void genSetRegToCond(regNumber dstReg, GenTree* tree); -#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) void genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale); #endif // TARGET_ARMARCH || TARGET_LOONGARCH64 @@ -1229,9 +1229,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void genFloatReturn(GenTree* treeNode); #endif // TARGET_X86 -#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) void genSimpleReturn(GenTree* treeNode); -#endif // TARGET_ARM64 || TARGET_LOONGARCH64
+#endif // TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 void genReturn(GenTree* treeNode); @@ -1541,7 +1541,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX static instruction JumpKindToCmov(emitJumpKind condition); #endif -#ifndef TARGET_LOONGARCH64 +#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) // Maps a GenCondition code to a sequence of conditional jumps or other conditional instructions // such as X86's SETcc. A sequence of instructions rather than just a single one is required for // certain floating point conditions. diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 3c003a3e499818..3ccfb05bed66ce 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -135,9 +135,9 @@ CodeGen::CodeGen(Compiler* theCompiler) : CodeGenInterface(theCompiler) /* Assume that we not fully interruptible */ SetInterruptible(false); -#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) SetHasTailCalls(false); -#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 #ifdef DEBUG genInterruptibleUsed = false; genCurDispOffset = (unsigned)-1; @@ -1118,7 +1118,7 @@ bool CodeGen::genCreateAddrMode( cns += op2->AsIntConCommon()->IconValue(); -#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) if (cns == 0) #endif { @@ -1138,7 +1138,7 @@ bool CodeGen::genCreateAddrMode( goto AGAIN; -#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) +#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) // TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index. case GT_MUL: if (op1->gtOverflow()) @@ -1161,7 +1161,7 @@ bool CodeGen::genCreateAddrMode( goto FOUND_AM; } break; -#endif // !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) +#endif // !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) default: break; @@ -1182,7 +1182,7 @@ bool CodeGen::genCreateAddrMode( switch (op1->gtOper) { -#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) +#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) // TODO-ARM64-CQ, TODO-ARM-CQ: For now we don't try to create a scaled index. case GT_ADD: @@ -1244,7 +1244,7 @@ bool CodeGen::genCreateAddrMode( goto FOUND_AM; } break; -#endif // !TARGET_ARMARCH && !TARGET_LOONGARCH64 +#endif // !TARGET_ARMARCH && !TARGET_LOONGARCH64 && !TARGET_RISCV64 case GT_NOP: @@ -1263,7 +1263,7 @@ bool CodeGen::genCreateAddrMode( noway_assert(op2); switch (op2->gtOper) { -#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) +#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) // TODO-ARM64-CQ, TODO-ARM-CQ: For now we only handle MUL and LSH because // arm doesn't support both scale and offset at the same. Offset is handled // at the emitter as a peephole optimization. 
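The repeated !TARGET_ARMARCH && !TARGET_LOONGARCH64 && !TARGET_RISCV64 exclusions in the genCreateAddrMode hunks above all guard the same limitation: like LoongArch64, the RV64I base ISA has no [base + index*scale] addressing mode, so a scaled index cannot be folded into an address mode and must be materialized explicitly by genScaledAdd (declared in codegen.h above). A minimal C++ model of the two-instruction expansion this implies, with the corresponding RISC-V mnemonics in comments (a sketch of the intended lowering, not code from this patch):

    #include <cstdint>

    // dest = base + (index << scale), as RISC-V has to emit it:
    //   slli tmp, index, scale
    //   add  dest, base, tmp
    uint64_t scaledAdd(uint64_t base, uint64_t index, unsigned scale)
    {
        uint64_t tmp = index << scale; // slli: shift the index into place
        return base + tmp;             // add: combine with the base
    }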
@@ -1323,7 +1323,7 @@ bool CodeGen::genCreateAddrMode( goto FOUND_AM; } break; -#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 case GT_NOP: @@ -1592,6 +1592,9 @@ void CodeGen::genCheckOverflow(GenTree* tree) jumpKind = EJ_hs; } } +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV64"); + jumpKind = EJ_NONE; // TODO RISCV64 #endif // defined(TARGET_ARMARCH) } @@ -1762,6 +1765,10 @@ void CodeGen::genGenerateMachineCode() { printf("generic LOONGARCH64 CPU"); } + else if (compiler->info.genCPU == CPU_RISCV64) + { + printf("generic RISCV64 CPU"); + } else { printf("unknown architecture"); @@ -1968,7 +1975,7 @@ void CodeGen::genEmitMachineCode() bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ? -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) trackedStackPtrsContig = false; #elif defined(TARGET_ARM) // On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous @@ -4488,6 +4495,8 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& #elif defined(TARGET_LOONGARCH64) // We will just zero out the entire vector register. This sets it to a double/float zero value GetEmitter()->emitIns_R_R(INS_movgr2fr_d, EA_8BYTE, reg, REG_R0); +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV64 NYI"); #else // TARGET* #error Unsupported or unset target architecture #endif @@ -4525,6 +4534,8 @@ void CodeGen::genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, reg, 0x00, INS_OPTS_16B); #elif defined(TARGET_LOONGARCH64) GetEmitter()->emitIns_R_R(INS_movgr2fr_d, EA_8BYTE, reg, REG_R0); +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV64 NYI"); #else // TARGET* #error Unsupported or unset target architecture #endif @@ -4542,6 +4553,8 @@ regNumber CodeGen::genGetZeroReg(regNumber initReg, bool* pInitRegZeroed) return REG_ZR; #elif defined(TARGET_LOONGARCH64) return REG_R0; +#elif defined(TARGET_RISCV64) + return REG_R0; #else // !TARGET_ARM64 if (*pInitRegZeroed == false) { @@ -4952,11 +4965,13 @@ void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed #elif defined(TARGET_LOONGARCH64) genInstrWithConstant(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(), compiler->lvaCachedGenericContextArgOffset(), REG_R21); -#else // !ARM64 !ARM !LOONGARCH64 +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV64 NYI"); +#else // !ARM64 !ARM !LOONGARCH64 !RISCV64 // mov [ebp-lvaCachedGenericContextArgOffset()], reg GetEmitter()->emitIns_AR_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE, reg, genFramePointerReg(), compiler->lvaCachedGenericContextArgOffset()); -#endif // !ARM64 !ARM !LOONGARCH64 +#endif // !ARM64 !ARM !LOONGARCH64 !RISCV64 } /***************************************************************************** @@ -5878,16 +5893,16 @@ void CodeGen::genFnProlog() } #endif // TARGET_XARCH -#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) genPushCalleeSavedRegisters(initReg, &initRegZeroed); -#else // !TARGET_ARM64 || !TARGET_LOONGARCH64 +#else // !TARGET_ARM64 && !TARGET_LOONGARCH64 && !TARGET_RISCV64 if (!isOSRx64Root) { genPushCalleeSavedRegisters(); } -#endif // !TARGET_ARM64 || !TARGET_LOONGARCH64 +#endif // !TARGET_ARM64 && !TARGET_LOONGARCH64 && !TARGET_RISCV64 
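As on ARM64 and LoongArch64, genPushCalleeSavedRegisters(initReg, &initRegZeroed) on RISCV64 is expected to allocate the frame while saving registers, which is why the separate genAllocLclFrame path in the next hunk is compiled out for these targets. Large frame offsets are where genInstrWithConstant (declared in codegen.h earlier) comes in: RISC-V I-type and S-type immediates are only 12 bits, so offsets outside that range need a scratch register. A sketch of the range check involved, assuming the eventual implementation mirrors the LoongArch64 one:

    #include <cstdint>

    // RISC-V immediates for addi/ld/sd are 12-bit signed, i.e. [-2048, 2047].
    // A frame offset outside this range must first be materialized into a
    // temporary register before the load/store or addi can be emitted.
    bool fitsIn12BitSignedImm(int64_t imm)
    {
        return (imm >= -2048) && (imm <= 2047);
    }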
#ifdef TARGET_ARM bool needToEstablishFP = false; @@ -5918,7 +5933,7 @@ void CodeGen::genFnProlog() //------------------------------------------------------------------------- CLANG_FORMAT_COMMENT_ANCHOR; -#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) +#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) regMaskTP maskStackAlloc = RBM_NONE; #ifdef TARGET_ARM @@ -5931,7 +5946,7 @@ void CodeGen::genFnProlog() genAllocLclFrame(compiler->compLclFrameSize + extraFrameSize, initReg, &initRegZeroed, intRegState.rsCalleeRegArgMaskLiveIn); } -#endif // !TARGET_ARM64 && !TARGET_LOONGARCH64 +#endif // !TARGET_ARM64 && !TARGET_LOONGARCH64 && !TARGET_RISCV64 #ifdef TARGET_AMD64 // For x64 OSR we have to finish saving int callee saves. @@ -6142,7 +6157,7 @@ void CodeGen::genFnProlog() } }; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) || defined(TARGET_RISCV64) assignIncomingRegisterArgs(&intRegState); assignIncomingRegisterArgs(&floatRegState); #else @@ -6467,7 +6482,7 @@ bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallCo structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(hClass, callConv, &howToReturnStruct); -#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) return (varTypeIsStruct(returnType) && (howToReturnStruct != SPK_PrimitiveType)); #else return (varTypeIsStruct(returnType)); @@ -6546,7 +6561,7 @@ unsigned Compiler::GetHfaCount(CORINFO_CLASS_HANDLE hClass) // unsigned CodeGen::getFirstArgWithStackSlot() { -#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) unsigned baseVarNum = 0; // Iterate over all the lvParam variables in the Lcl var table until we find the first one // that's passed on the stack. @@ -7730,9 +7745,9 @@ void CodeGen::genReturn(GenTree* treeNode) // exit point where it is actually dead. genConsumeReg(op1); -#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) genSimpleReturn(treeNode); -#else // !TARGET_ARM64 || !TARGET_LOONGARCH64 +#else // !TARGET_ARM64 && !TARGET_LOONGARCH64 && !TARGET_RISCV64 #if defined(TARGET_X86) if (varTypeUsesFloatReg(treeNode)) { @@ -7760,7 +7775,7 @@ void CodeGen::genReturn(GenTree* treeNode) regNumber retReg = varTypeUsesFloatReg(treeNode) ?
REG_FLOATRET : REG_INTRET; inst_Mov_Extend(targetType, /* srcInReg */ true, retReg, op1->GetRegNum(), /* canSkip */ true); } -#endif // !TARGET_ARM64 || !TARGET_LOONGARCH64 +#endif // !TARGET_ARM64 && !TARGET_LOONGARCH64 && !TARGET_RISCV64 } } diff --git a/src/coreclr/jit/codegeninterface.h b/src/coreclr/jit/codegeninterface.h index 0563f4fc26edc9..86bc7a7573adf5 100644 --- a/src/coreclr/jit/codegeninterface.h +++ b/src/coreclr/jit/codegeninterface.h @@ -118,7 +118,7 @@ class CodeGenInterface private: #if defined(TARGET_XARCH) static const insFlags instInfo[INS_count]; -#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) static const BYTE instInfo[INS_count]; #else #error Unsupported target architecture @@ -364,7 +364,7 @@ class CodeGenInterface m_cgInterruptible = value; } -#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool GetHasTailCalls() { @@ -374,13 +374,13 @@ class CodeGenInterface { m_cgHasTailCalls = value; } -#endif // TARGET_ARMARCH +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 private: bool m_cgInterruptible; -#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool m_cgHasTailCalls; -#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 // The following will be set to true if we've determined that we need to // generate a full-blown pointer register map for the current method. diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 3051aec3b67f10..34987b65830640 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -1214,7 +1214,7 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) assert(spillType != TYP_UNDEF); // TODO-Cleanup: The following code could probably be further merged and cleaned up. -#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // Load local variable from its home location. // Never allow truncating the locals here, otherwise a subsequent // use of the local with a wider type would see the truncated @@ -2477,7 +2477,7 @@ CodeGen::GenIntCastDesc::GenIntCastDesc(GenTreeCast* cast) m_checkKind = CHECK_NONE; } -#ifdef TARGET_LOONGARCH64 -// For LoongArch64's ISA which is same with the MIPS64 ISA, even the instructions of 32bits operation need -// the upper 32bits be sign-extended to 64 bits. +#if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +// On LoongArch64 and RISC-V (as in the MIPS64 ISA), even 32-bit operations require +// the upper 32 bits of the result to be sign-extended to 64 bits. m_extendKind = SIGN_EXTEND_INT; @@ -2587,7 +2587,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) } #endif // !defined(TARGET_64BIT) -#ifndef TARGET_LOONGARCH64 +#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) //------------------------------------------------------------------------ // genCodeForJcc: Generate code for a GT_JCC node.
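The GenIntCastDesc hunk above extends the LoongArch64 rule to RISC-V: RV64's 'W'-suffixed instructions (addw, subw, and friends) compute a 32-bit result and define the upper 32 bits of the destination register as its sign extension, so the JIT may treat every 32-bit value as already sign-extended (SIGN_EXTEND_INT) in its 64-bit register. A small C++ model of that semantic (illustrative only):

    #include <cstdint>

    // Models RV64 ADDW: add in 32 bits (wrapping), then sign-extend the
    // 32-bit result into the 64-bit destination register.
    int64_t addw(int64_t rs1, int64_t rs2)
    {
        uint32_t sum = static_cast<uint32_t>(rs1) + static_cast<uint32_t>(rs2);
        return static_cast<int64_t>(static_cast<int32_t>(sum));
    }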
@@ -2645,4 +2645,4 @@ void CodeGen::genCodeForSetcc(GenTreeCC* setcc) inst_SETCC(setcc->gtCondition, setcc->TypeGet(), setcc->GetRegNum()); genProduceReg(setcc); } -#endif // !TARGET_LOONGARCH64 +#endif // !TARGET_LOONGARCH64 && !TARGET_RISCV64 diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp new file mode 100644 index 00000000000000..341b14f909c578 --- /dev/null +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -0,0 +1,1308 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX RISCV64 Code Generator XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ +#include "jitpch.h" +#ifdef _MSC_VER +#pragma hdrstop +#endif + +#ifdef TARGET_RISCV64 +#include "emit.h" +#include "codegen.h" +#include "lower.h" +#include "gcinfo.h" +#include "gcinfoencoder.h" + +bool CodeGen::genInstrWithConstant(instruction ins, + emitAttr attr, + regNumber reg1, + regNumber reg2, + ssize_t imm, + regNumber tmpReg, + bool inUnwindRegion /* = false */) +{ + NYI("unimplemented on RISCV64 yet"); + return false; +} + +void CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg, bool* pTmpRegIsZero, bool reportUnwindData) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genPrologSaveRegPair(regNumber reg1, + regNumber reg2, + int spOffset, + int spDelta, + bool useSaveNextPair, + regNumber tmpReg, + bool* pTmpRegIsZero) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genEpilogRestoreRegPair(regNumber reg1, + regNumber reg2, + int spOffset, + int spDelta, + bool useSaveNextPair, + regNumber tmpReg, + bool* pTmpRegIsZero) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// static +void CodeGen::genBuildRegPairsStack(regMaskTP regsMask, ArrayStack* regStack) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// static +void CodeGen::genSetUseSaveNextPairs(ArrayStack* regStack) +{ + NYI("unimplemented on RISCV64 yet"); +} + +int CodeGen::genGetSlotSizeForRegsInMask(regMaskTP regsMask) +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +void CodeGen::genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genRestoreCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// clang-format on + +void CodeGen::genFuncletProlog(BasicBlock* block) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genFuncletEpilog() +{ + NYI("unimplemented on RISCV64 yet"); +} + +void 
CodeGen::genCaptureFuncletPrologEpilogInfo() +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genFnEpilog(BasicBlock* block) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock) +{ + NYI("unimplemented on RISCV64 yet"); +} + +BasicBlock* CodeGen::genCallFinally(BasicBlock* block) +{ + NYI("unimplemented on RISCV64 yet"); + return NULL; +} + +void CodeGen::genEHCatchRet(BasicBlock* block) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// move an immediate value into an integer register +void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, + regNumber reg, + ssize_t imm, + insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags)) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// Produce code for a GT_INC_SATURATE node. +void CodeGen::genCodeForIncSaturate(GenTree* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// Generate code to get the high N bits of a N*N=2N bit multiplication result +void CodeGen::genCodeForMulHi(GenTreeOp* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// Generate code for ADD, SUB, MUL, AND, AND_NOT, OR and XOR +// This method is expected to have called genConsumeOperands() before calling it. +void CodeGen::genCodeForBinary(GenTreeOp* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForLclVar: Produce code for a GT_LCL_VAR node. +// +// Arguments: +// tree - the GT_LCL_VAR node +// +void CodeGen::genCodeForLclVar(GenTreeLclVar* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node. +// +// Arguments: +// tree - the GT_STORE_LCL_FLD node +// +void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node. +// +// Arguments: +// lclNode - the GT_STORE_LCL_VAR node +// +void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genSimpleReturn(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +/*********************************************************************************************** + * Generate code for localloc + */ +void CodeGen::genLclHeap(GenTree* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node. +// +// Arguments: +// tree - the node +// +void CodeGen::genCodeForNegNot(GenTree* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForBswap: Produce code for a GT_BSWAP / GT_BSWAP16 node. 
+// +// Arguments: +// tree - the node +// +void CodeGen::genCodeForBswap(GenTree* tree) +{ + NYI_RISCV64("genCodeForBswap not implemented yet"); +} + +//------------------------------------------------------------------------ +// genCodeForDivMod: Produce code for a GT_DIV/GT_UDIV node. +// (1) float/double MOD is morphed into a helper call by front-end. +// +// Arguments: +// tree - the node +// +void CodeGen::genCodeForDivMod(GenTreeOp* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// Generate code for InitBlk by performing a loop unroll +// Preconditions: +// a) Both the size and fill byte value are integer constants. +// b) The size of the struct to initialize is smaller than INITBLK_UNROLL_LIMIT bytes. +void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// generate code to do a switch statement based on a table of ip-relative offsets +void CodeGen::genTableBasedSwitch(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// emits the table and an instruction to get the address of the first element +void CodeGen::genJumpTable(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node. +// +// Arguments: +// treeNode - the GT_XADD/XCHG node +// +void CodeGen::genLockedInstructions(GenTreeOp* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node. +// +// Arguments: +// tree - the GT_CMPXCHG node +// +void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +static inline bool isImmed(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); + return false; +} + +instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type) +{ + NYI("unimplemented on RISCV64 yet"); + return INS_invalid; +} + +//------------------------------------------------------------------------ +// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node. +// +// Arguments: +// tree - the GT_RETURNTRAP node +// +void CodeGen::genCodeForReturnTrap(GenTreeOp* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForStoreInd: Produce code for a GT_STOREIND node. +// +// Arguments: +// tree - the GT_STOREIND node +// +void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForSwap: Produce code for a GT_SWAP node. +// +// Arguments: +// tree - the GT_SWAP node +// +void CodeGen::genCodeForSwap(GenTreeOp* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genIntToFloatCast: Generate code to cast an int/long to float/double +// +// Arguments: +// treeNode - The GT_CAST node +// +// Return Value: +// None. +// +// Assumptions: +// Cast is a non-overflow conversion. +// The treeNode must have an assigned register. +// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
+// +void CodeGen::genIntToFloatCast(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genFloatToIntCast: Generate code to cast float/double to int/long +// +// Arguments: +// treeNode - The GT_CAST node +// +// Return Value: +// None. +// +// Assumptions: +// Cast is a non-overflow conversion. +// The treeNode must have an assigned register. +// SrcType=float/double and DstType= int32/uint32/int64/uint64 +// +void CodeGen::genFloatToIntCast(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCkfinite: Generate code for ckfinite opcode. +// +// Arguments: +// treeNode - The GT_CKFINITE node +// +// Return Value: +// None. +// +// Assumptions: +// GT_CKFINITE node has reserved an internal register. +// +void CodeGen::genCkfinite(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT node. +// +// Arguments: +// tree - the node +// +void CodeGen::genCodeForCompare(GenTreeOp* jtree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//--------------------------------------------------------------------- +// genSPtoFPdelta - return offset from the stack pointer (Initial-SP) to the frame pointer. The frame pointer +// will point to the saved frame pointer slot (i.e., there will be frame pointer chaining). +// +int CodeGenInterface::genSPtoFPdelta() const +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +//--------------------------------------------------------------------- +// genTotalFrameSize - return the total size of the stack frame, including local size, +// callee-saved register size, etc. +// +// Return value: +// Total frame size +// + +int CodeGenInterface::genTotalFrameSize() const +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +//--------------------------------------------------------------------- +// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer. +// This number is going to be negative, since the Caller-SP is at a higher +// address than the frame pointer. +// +// There must be a frame pointer to call this function! + +int CodeGenInterface::genCallerSPtoFPdelta() const +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +//--------------------------------------------------------------------- +// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP. +// +// This number will be negative. + +int CodeGenInterface::genCallerSPtoInitialSPdelta() const +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +/***************************************************************************** + * Emit a call to a helper function. + */ + +void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */) +{ + NYI("unimplemented on RISCV64 yet"); +} + +#ifdef FEATURE_SIMD + +//------------------------------------------------------------------------ +// genSIMDIntrinsic: Generate code for a SIMD Intrinsic. This is the main +// routine which in turn calls appropriate genSIMDIntrinsicXXX() routine. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +// Notes: +// Currently, we only recognize SIMDVector<float> and SIMDVector<int>, and +// a limited set of methods.
+// +// TODO-CLEANUP Merge all versions of this function and move to new file simdcodegencommon.cpp. +void CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +insOpts CodeGen::genGetSimdInsOpt(emitAttr size, var_types elementType) +{ + NYI("unimplemented on RISCV64 yet"); + return INS_OPTS_NONE; +} + +// getOpForSIMDIntrinsic: return the opcode for the given SIMD Intrinsic +// +// Arguments: +// intrinsicId - SIMD intrinsic Id +// baseType - Base type of the SIMD vector +// immed - Out param. Any immediate byte operand that needs to be passed to SSE2 opcode +// +// +// Return Value: +// Instruction (op) to be used, and immed is set if instruction requires an immediate operand. +// +instruction CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival /*=nullptr*/) +{ + NYI("unimplemented on RISCV64 yet"); + return INS_invalid; +} + +//------------------------------------------------------------------------ +// genSIMDIntrinsicInit: Generate code for SIMD Intrinsic Initialize. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------------------------- +// genSIMDIntrinsicInitN: Generate code for SIMD Intrinsic Initialize for the form that takes +// a number of arguments equal to the length of the Vector. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//---------------------------------------------------------------------------------- +// genSIMDIntrinsicUnOp: Generate code for SIMD Intrinsic unary operations like sqrt. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +void CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//-------------------------------------------------------------------------------- +// genSIMDIntrinsicWiden: Generate code for SIMD Intrinsic Widen operations +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Notes: +// The Widen intrinsics are broken into separate intrinsics for the two results. +// +void CodeGen::genSIMDIntrinsicWiden(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//-------------------------------------------------------------------------------- +// genSIMDIntrinsicNarrow: Generate code for SIMD Intrinsic Narrow operations +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Notes: +// This intrinsic takes two arguments. The first operand is narrowed to produce the +// lower elements of the results, and the second operand produces the high elements. +// +void CodeGen::genSIMDIntrinsicNarrow(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//-------------------------------------------------------------------------------- +// genSIMDIntrinsicBinOp: Generate code for SIMD Intrinsic binary operations +// add, sub, mul, bit-wise And, AndNot and Or. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. 
+// +void CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//-------------------------------------------------------------------------------- +// genSIMDIntrinsicRelOp: Generate code for a SIMD Intrinsic relational operator +// == and != +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +void CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//-------------------------------------------------------------------------------- +// genSIMDIntrinsicDotProduct: Generate code for SIMD Intrinsic Dot Product. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +void CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------------------ +// genSIMDIntrinsicGetItem: Generate code for SIMD Intrinsic get element at index i. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------------------ +// genSIMDIntrinsicSetItem: Generate code for SIMD Intrinsic set element at index i. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +void CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------- +// genSIMDIntrinsicUpperSave: save the upper half of a TYP_SIMD16 vector to +// the given register, if any, or to memory. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +// Notes: +// The upper half of all SIMD registers are volatile, even the callee-save registers. +// When a 16-byte SIMD value is live across a call, the register allocator will use this intrinsic +// to cause the upper half to be saved. It will first attempt to find another, unused, callee-save +// register. If such a register cannot be found, it will save it to an available caller-save register. +// In that case, this node will be marked GTF_SPILL, which will cause this method to save +// the upper half to the lclVar's home location. +// +void CodeGen::genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------- +// genSIMDIntrinsicUpperRestore: Restore the upper half of a TYP_SIMD16 vector to +// the given register, if any, or to memory. +// +// Arguments: +// simdNode - The GT_SIMD node +// +// Return Value: +// None. +// +// Notes: +// For consistency with genSIMDIntrinsicUpperSave, and to ensure that lclVar nodes always +// have their home register, this node has its targetReg on the lclVar child, and its source +// on the simdNode. +// Regarding spill, please see the note above on genSIMDIntrinsicUpperSave. If we have spilled +// an upper-half to the lclVar's home location, this node will be marked GTF_SPILLED. +// +void CodeGen::genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------- +// genStoreIndTypeSIMD12: store indirect a TYP_SIMD12 (i.e. Vector3) to memory. 
+// Since Vector3 is not a hardware supported write size, it is performed +// as two writes: 8 byte followed by 4-byte. +// +// Arguments: +// treeNode - tree node that is attempting to store indirect +// +// +// Return Value: +// None. +// +void CodeGen::genStoreIndTypeSIMD12(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------- +// genLoadIndTypeSIMD12: load indirect a TYP_SIMD12 (i.e. Vector3) value. +// Since Vector3 is not a hardware supported write size, it is performed +// as two loads: 8 byte followed by 4-byte. +// +// Arguments: +// treeNode - tree node of GT_IND +// +// +// Return Value: +// None. +// +void CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------- +// genStoreLclTypeSIMD12: store a TYP_SIMD12 (i.e. Vector3) type field. +// Since Vector3 is not a hardware supported write size, it is performed +// as two stores: 8 byte followed by 4-byte. +// +// Arguments: +// treeNode - tree node that is attempting to store TYP_SIMD12 field +// +// Return Value: +// None. +// +void CodeGen::genStoreLclTypeSIMD12(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +#endif // FEATURE_SIMD + +void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer, +// and probe the stack as appropriate. Should only be called as a helper for +// genStackPointerConstantAdjustmentLoopWithProbe. +// +// Arguments: +// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens, +// but the stack pointer doesn't move. +// regTmp - temporary register to use as target for probe load instruction +// +// Return Value: +// None. +// +void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer, +// and probe the stack as appropriate. Generates one probe per page, up to the total amount required. +// This will generate a sequence of probes in-line. +// +// Arguments: +// spDelta - the value to add to SP. Must be negative. +// regTmp - temporary register to use as target for probe load instruction +// +// Return Value: +// Offset in bytes from SP to last probed address. +// +target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp) +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +//------------------------------------------------------------------------ +// genCodeForTreeNode Generate code for a single node in the tree. +// +// Preconditions: +// All operands have been evaluated. +// +void CodeGen::genCodeForTreeNode(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//--------------------------------------------------------------------- +// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog. +// +// Arguments: +// initReg - register to use as a scratch register +// pInitRegZeroed - OUT parameter. 
*pInitRegZeroed is set to 'false' if and only if +// this call sets 'initReg' to a non-zero value. +// +// Return Value: +// None +// +void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genEmitGSCookieCheck: Generate code to check that the GS cookie +// wasn't thrashed by a buffer overrun. +// +void CodeGen::genEmitGSCookieCheck(bool pushReg) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//--------------------------------------------------------------------- +// genIntrinsic - generate code for a given intrinsic +// +// Arguments +// treeNode - the GT_INTRINSIC node +// +// Return value: +// None +// +void CodeGen::genIntrinsic(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//--------------------------------------------------------------------- +// genPutArgStk - generate code for a GT_PUTARG_STK node +// +// Arguments +// treeNode - the GT_PUTARG_STK node +// +// Return value: +// None +// +void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + + +//--------------------------------------------------------------------- +// genPutArgReg - generate code for a GT_PUTARG_REG node +// +// Arguments +// tree - the GT_PUTARG_REG node +// +// Return value: +// None +// +void CodeGen::genPutArgReg(GenTreeOp* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//--------------------------------------------------------------------- +// genPutArgSplit - generate code for a GT_PUTARG_SPLIT node +// +// Arguments +// tree - the GT_PUTARG_SPLIT node +// +// Return value: +// None +// +void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genRangeCheck: generate code for GT_BOUNDS_CHECK node. +// +void CodeGen::genRangeCheck(GenTree* oper) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//--------------------------------------------------------------------- +// genCodeForPhysReg - generate code for a GT_PHYSREG node +// +// Arguments +// tree - the GT_PHYSREG node +// +// Return value: +// None +// +void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//--------------------------------------------------------------------- +// genCodeForNullCheck - generate code for a GT_NULLCHECK node +// +// Arguments +// tree - the GT_NULLCHECK node +// +// Return value: +// None +// +void CodeGen::genCodeForNullCheck(GenTreeIndir* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference, +// producing the effective index by subtracting the lower bound. +// +// Arguments: +// arrIndex - the node for which we're generating code +// +// Return Value: +// None. 
+// +void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForArrOffset: Generates code to compute the flattened array offset for +// one dimension of an array reference: +// result = (prevDimOffset * dimSize) + effectiveIndex +// where dimSize is obtained from the arrObj operand +// +// Arguments: +// arrOffset - the node for which we're generating code +// +// Return Value: +// None. +// +// Notes: +// dimSize and effectiveIndex are always non-negative, the former by design, +// and the latter because it has been normalized to be zero-based. + +void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForShift: Generates the code sequence for a GenTree node that +// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror). +// +// Arguments: +// tree - the bit shift node (that specifies the type of bit shift to perform). +// +// Assumptions: +// a) All GenTrees are register allocated. +// +void CodeGen::genCodeForShift(GenTree* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR. +// +// Arguments: +// tree - the node. +// +void CodeGen::genCodeForLclAddr(GenTreeLclVarCommon* lclAddrNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForLclFld: Produce code for a GT_LCL_FLD node. +// +// Arguments: +// tree - the GT_LCL_FLD node +// +void CodeGen::genCodeForLclFld(GenTreeLclFld* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genScaledAdd: A helper for `dest = base + (index << scale)` +// and maybe optimize the instruction(s) for this operation. +// +void CodeGen::genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node. +// +// Arguments: +// tree - the GT_INDEX_ADDR node +// +void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForIndir: Produce code for a GT_IND node. +// +// Arguments: +// tree - the GT_IND node +// +void CodeGen::genCodeForIndir(GenTreeIndir* tree) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//---------------------------------------------------------------------------------- +// genCodeForCpBlkHelper - Generate code for a CpBlk node by the means of the VM memcpy helper call +// +// Arguments: +// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK] +// +// Preconditions: +// The register assignments have been set appropriately. +// This is validated by genConsumeBlockOp(). 
+// +void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//---------------------------------------------------------------------------------- +// genCodeForCpBlkUnroll: Generates CpBlk code by performing a loop unroll +// +// Arguments: +// cpBlkNode - Copy block node +// +// Return Value: +// None +// +// Assumption: +// The size argument of the CpBlk node is a constant and <= CPBLK_UNROLL_LIMIT bytes. +// +void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForInitBlkHelper - Generate code for an InitBlk node by the means of the VM memcpy helper call +// +// Arguments: +// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK] +// +// Preconditions: +// The register assignments have been set appropriately. +// This is validated by genConsumeBlockOp(). +// +void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// Generate code for a load from some address + offset +// base: tree node which can be either a local address or arbitrary node +// offset: distance from the base from which to load +void CodeGen::genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCall: Produce code for a GT_CALL node +// +void CodeGen::genCall(GenTreeCall* call) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCallInstruction - Generate instructions necessary to transfer control to the call. +// +// Arguments: +// call - the GT_CALL node +// +// Remarks: +// For tailcalls this function will generate a jump. +// +void CodeGen::genCallInstruction(GenTreeCall* call) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// Produce code for a GT_JMP node. +// The arguments of the caller need to be transferred to the callee before exiting the caller. +// The actual jump to the callee is generated as part of the caller's epilog sequence. +// Therefore the codegen of GT_JMP is to ensure that the callee arguments are correctly set up. +void CodeGen::genJmpMethod(GenTree* jmp) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genIntCastOverflowCheck: Generate overflow checking code for an integer cast. +// +// Arguments: +// cast - The GT_CAST node +// desc - The cast description +// reg - The register containing the value to check +// +void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genIntToIntCast(GenTreeCast* cast) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genFloatToFloatCast: Generate code for a cast between float and double +// +// Arguments: +// treeNode - The GT_CAST node +// +// Return Value: +// None. +// +// Assumptions: +// Cast is a non-overflow conversion. +// The treeNode must have an assigned register. +// The cast is between float and double.
+// +void CodeGen::genFloatToFloatCast(GenTree* treeNode) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCreateAndStoreGCInfo: Create and record GC Info for the function. +// +void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize, + unsigned prologSize, + unsigned epilogSize DEBUGARG(void* codePtr)) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genCodeForStoreBlk: Produce code for a GT_STORE_OBJ/GT_STORE_DYN_BLK/GT_STORE_BLK node. +// +// Arguments: +// tree - the node +// +void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genLeaInstruction: Produce code for a GT_LEA node. +// +// Arguments: +// lea - the node +// +void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer. +// +// Arguments: +// delta - the offset to add to the current stack pointer to establish the frame pointer +// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data. + +void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//------------------------------------------------------------------------ +// genAllocLclFrame: Probe the stack and allocate the local stack frame: subtract from SP. +// +// Notes: +// On RISCV64, this only does the probing; allocating the frame is done when callee-saved registers are saved. +// This is done before anything has been pushed. The previous frame might have a large outgoing argument +// space that has been allocated, but the lowest addresses have not been touched. Our frame setup might +// not touch up to the first 504 bytes. This means we could miss a guard page. On Windows, however, +// there are always three guard pages, so we will not miss them all. On Linux, there is only one guard +// page by default, so we need to be more careful. We do an extra probe if we might not have probed +// recently enough. That is, if a call and prolog establishment might lead to missing a page. We do this +// on Windows as well just to be consistent, even though it should not be necessary. +// +void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------------- +// instGen_MemoryBarrier: Emit a MemoryBarrier instruction +// +// Arguments: +// barrierKind - kind of barrier to emit (only the full barrier is supported for now; this depends on the CPU). +// +// Notes: +// All MemoryBarrier instructions can be removed by DOTNET_JitNoMemoryBarriers=1 +// +void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------------- +// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback. +// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node. +// +// Arguments: +// helper - which helper to call.
Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL +// +// Return Value: +// None +// +void CodeGen::genProfilingLeaveCallback(unsigned helper /*= CORINFO_HELP_PROF_FCN_LEAVE*/) +{ + NYI("unimplemented on RISCV64 yet"); +} + +/*----------------------------------------------------------------------------- + * + * Push/Pop any callee-saved registers we have used + */ +void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed) +{ + NYI("unimplemented on RISCV64 yet"); +} + +void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) +{ + NYI("unimplemented on RISCV64 yet"); +} + +//----------------------------------------------------------------------------------- +// genProfilingEnterCallback: Generate the profiling function enter callback. +// +// Arguments: +// initReg - register to use as scratch register +// pInitRegZeroed - OUT parameter. *pInitRegZeroed set to 'false' if 'initReg' is +// set to non-zero value after this call. +// +// Return Value: +// None +// +void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed) +{ + NYI("unimplemented on RISCV64 yet"); +} + +// return size +// alignmentWB is out param +unsigned CodeGenInterface::InferOpSizeAlign(GenTree* op, unsigned* alignmentWB) +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +// return size +// alignmentWB is out param +unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignmentWB) +{ + NYI("unimplemented on RISCV64 yet"); + return 0; +} + +#endif // TARGET_RISCV64 diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index dd4b883679f8e7..b027cd40468fb8 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -1786,7 +1786,7 @@ struct FuncInfoDsc emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; -#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) UnwindInfo uwi; // Unwind information for this function/funclet's hot section UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section @@ -1801,7 +1801,7 @@ struct FuncInfoDsc emitLocation* coldStartLoc; // locations for the cold section, if there is one. emitLocation* coldEndLoc; -#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 #if defined(FEATURE_CFI_SUPPORT) jitstd::vector* cfiCodes; @@ -7839,6 +7839,10 @@ class Compiler #elif defined(TARGET_LOONGARCH64) reg = REG_T8; regMask = RBM_T8; +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV NYI"); + reg = REG_T6; + regMask = RBM_T6; #else #error Unsupported or unset target architecture #endif @@ -8196,6 +8200,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX void unwindReturn(regNumber reg); #endif // defined(TARGET_LOONGARCH64) +#if defined(TARGET_RISCV64) + void unwindNop(); + void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last + // instruction and the current location. + void unwindSaveReg(regNumber reg, int offset); + void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); + void unwindReturn(regNumber reg); +#endif // defined(TARGET_RISCV64) + + // // Private "helper" functions for the unwind implementation. 
//
@@ -9881,6 +9895,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 #define CPU_LOONGARCH64 0x0800 // The generic LOONGARCH64 CPU

+#define CPU_RISCV64 0x1000 // The generic RISCV64 CPU
+
     unsigned genCPU; // What CPU are we running on

     // Number of class profile probes in this method
@@ -11676,6 +11692,10 @@ const instruction INS_ABS = INS_fabs_d; // NOTE: default is double.
 const instruction INS_SQRT = INS_fsqrt_d; // NOTE: default is double.
 #endif // TARGET_LOONGARCH64

+#ifdef TARGET_RISCV64
+const instruction INS_BREAKPOINT = (instruction)0;
+#endif // TARGET_RISCV64
+
 /*****************************************************************************/

 extern const BYTE genTypeSizes[];
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 1749c6cc20afc0..4016ffa2665d96 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -1398,6 +1398,12 @@ void emitter::dispIns(instrDesc* id)
     // For LoongArch64 using the emitDisInsName().
     NYI_LOONGARCH64("Not used on LOONGARCH64.");
 }
+#elif defined(TARGET_RISCV64)
+void emitter::dispIns(instrDesc* id)
+{
+    // RISCV64 uses emitDisInsName() instead.
+    NYI_RISCV64("Not used on RISCV64.");
+}
 #else
 void emitter::dispIns(instrDesc* id)
 {
@@ -2887,7 +2893,7 @@ const char* emitter::emitLabelString(insGroup* ig)
     return retbuf;
 }

-#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)

 // Does the argument location point to an IG at the end of a function or funclet?
 // We can ignore the codePos part of the location, since it doesn't affect the
@@ -3250,7 +3256,9 @@ void emitter::emitGenerateUnwindNop(instrDesc* id, void* context)
     comp->unwindNop(id->idCodeSize());
 #elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
     comp->unwindNop();
-#endif // defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_RISCV64)
+    NYI_RISCV64("Not used on RISCV64.");
+#endif // defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
 }

 /*****************************************************************************
@@ -3264,7 +3272,7 @@ void emitter::emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp)
     emitWalkIDs(locFrom, emitGenerateUnwindNop, comp);
 }

-#endif // TARGET_ARMARCH || TARGET_LOONGARCH64
+#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64

 #if EMIT_BACKWARDS_NAVIGATION
@@ -3691,6 +3699,10 @@ const size_t hexEncodingSize = 11;
 #elif defined(TARGET_LOONGARCH64)
 const size_t basicIndent     = 12;
 const size_t hexEncodingSize = 19;
+#elif defined(TARGET_RISCV64)
+// TODO RISCV64
+const size_t basicIndent     = 12;
+const size_t hexEncodingSize = 19;
 #endif

 #ifdef DEBUG
@@ -4665,7 +4677,7 @@ void emitter::emitRemoveJumpToNextInst()
  *  LoongArch64 has an individual implementation for emitJumpDistBind().
  */

-#ifndef TARGET_LOONGARCH64
+#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
 void emitter::emitJumpDistBind()
 {
 #ifdef DEBUG
@@ -6598,7 +6610,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
     }
 #endif

-#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)

     // For arm64/LoongArch64, we're going to put the data in the code section. So make sure the code section has
     // adequate alignment.
if (emitConsDsc.dsdOffs > 0) @@ -7167,6 +7179,7 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, #elif defined(TARGET_LOONGARCH64) isJccAffectedIns = true; +#elif defined(TARGET_RISCV64) #endif // TARGET_LOONGARCH64 @@ -7347,8 +7360,8 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, #elif defined(TARGET_ARM64) assert(!jmp->idAddr()->iiaHasInstrCount()); emitOutputLJ(NULL, adr, jmp); -#elif defined(TARGET_LOONGARCH64) - // For LoongArch64 `emitFwdJumps` is always false. +#elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) + // For LoongArch64 and Riscv64 `emitFwdJumps` is always false. unreached(); #else #error Unsupported or unset target architecture @@ -7363,8 +7376,8 @@ unsigned emitter::emitEndCodeGen(Compiler* comp, #elif defined(TARGET_ARMARCH) assert(!jmp->idAddr()->iiaHasInstrCount()); emitOutputLJ(NULL, adr, jmp); -#elif defined(TARGET_LOONGARCH64) - // For LoongArch64 `emitFwdJumps` is always false. +#elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) + // For LoongArch64 and Riscv64 `emitFwdJumps` is always false. unreached(); #else #error Unsupported or unset target architecture diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h index 6fdbd8bf5abd9f..e76dff9f57321e 100644 --- a/src/coreclr/jit/emit.h +++ b/src/coreclr/jit/emit.h @@ -650,6 +650,7 @@ class emitter insFormat _idInsFmt : 7; #elif defined(TARGET_LOONGARCH64) unsigned _idCodeSize : 5; // the instruction(s) size of this instrDesc described. +#elif defined(TARGET_RISCV64) #else static_assert_no_msg(IF_COUNT <= 256); insFormat _idInsFmt : 8; @@ -685,6 +686,16 @@ class emitter void idInsFmt(insFormat insFmt) { } +#elif defined(TARGET_RISCV64) + insFormat idInsFmt() const + { + _ASSERTE(!"TODO RISCV64 NYI"); + return (insFormat)0; + } + void idInsFmt(insFormat insFmt) + { + _ASSERTE(!"TODO RISCV64 NYI"); + } #else insFormat idInsFmt() const { @@ -714,11 +725,11 @@ class emitter opSize _idOpSize : 3; // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16, 5=32 // At this point we have fully consumed first DWORD so that next field // doesn't cross a byte boundary. -#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) /* _idOpSize defined below. */ #else opSize _idOpSize : 2; // operand size: 0=1 , 1=2 , 2=4 , 3=8 -#endif // ARM || TARGET_LOONGARCH64 +#endif // TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 // On Amd64, this is where the second DWORD begins // On System V a call could return a struct in 2 registers. The instrDescCGCA struct below has member that @@ -773,6 +784,13 @@ class emitter unsigned _idLclVar : 1; // access a local on stack. 
#endif

+#ifdef TARGET_RISCV64
+    // TODO RISCV64
+    opSize   _idOpSize : 3;   // operand size: 0=1 , 1=2 , 2=4 , 3=8, 4=16
+    insOpts  _idInsOpt : 6;   // options for instructions
+    unsigned _idLclVar : 1;   // access a local on stack
+#endif
+
 #ifdef TARGET_ARM
     insSize  _idInsSize : 2;  // size of instruction: 16, 32 or 48 bits
     insFlags _idInsFlags : 1; // will this instruction set the flags
@@ -799,7 +817,7 @@ class emitter
 #define ID_EXTRA_BITFIELD_BITS (16)
 #elif defined(TARGET_ARM64)
 #define ID_EXTRA_BITFIELD_BITS (17)
-#elif defined(TARGET_XARCH) || defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_XARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // TODO RISCV64
 #define ID_EXTRA_BITFIELD_BITS (14)
 #else
 #error Unsupported or unset target architecture
@@ -1097,6 +1115,16 @@ class emitter
         assert(sz <= 16);
         _idCodeSize = sz;
     }
+#elif defined(TARGET_RISCV64)
+    unsigned idCodeSize() const
+    {
+        _ASSERTE(!"TODO RISCV64 NYI");
+        return 0;
+    }
+    void idCodeSize(unsigned sz)
+    {
+        _ASSERTE(!"TODO RISCV64 NYI");
+    }
 #endif // TARGET_LOONGARCH64

     emitAttr idOpSize()
@@ -1280,6 +1308,37 @@ class emitter

 #endif // TARGET_LOONGARCH64

+#ifdef TARGET_RISCV64
+    insOpts idInsOpt() const
+    {
+        _ASSERTE(!"RISCV64: NYI");
+        return INS_OPTS_NONE;
+    }
+    void idInsOpt(insOpts opt)
+    {
+        _ASSERTE(!"RISCV64: NYI");
+    }
+
+    regNumber idReg3() const
+    {
+        _ASSERTE(!"RISCV64: NYI");
+        return REG_NA;
+    }
+    void idReg3(regNumber reg)
+    {
+        _ASSERTE(!"RISCV64: NYI");
+    }
+    regNumber idReg4() const
+    {
+        _ASSERTE(!"RISCV64: NYI");
+        return REG_NA;
+    }
+    void idReg4(regNumber reg)
+    {
+        _ASSERTE(!"RISCV64: NYI");
+    }
+
+#endif // TARGET_RISCV64
+
     inline static bool fitsInSmallCns(ssize_t val)
     {
         return ((val >= ID_MIN_SMALL_CNS) && (val <= ID_MAX_SMALL_CNS));
@@ -1379,6 +1438,18 @@ class emitter
     }
 #endif // TARGET_LOONGARCH64

+#ifdef TARGET_RISCV64
+    bool idIsLclVar() const
+    {
+        _ASSERTE(!"RISCV64: NYI");
+        return true;
+    }
+    void idSetIsLclVar()
+    {
+        _ASSERTE(!"RISCV64: NYI");
+    }
+#endif // TARGET_RISCV64
+
     bool idIsCnsReloc() const
     {
         return _idCnsReloc != 0;
@@ -2341,7 +2412,7 @@ class emitter
     void emitPrintLabel(insGroup* ig);
     const char* emitLabelString(insGroup* ig);

-#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)

     void emitGetInstrDescs(insGroup* ig, instrDesc** id, int* insCnt);

@@ -2561,6 +2632,9 @@ class emitter
 #elif defined(TARGET_LOONGARCH64)
     bool emitInsMayWriteToGCReg(instruction ins);
     bool emitInsWritesToLclVarStackLoc(instrDesc* id);
+#elif defined(TARGET_RISCV64)
+    bool emitInsMayWriteToGCReg(instruction ins);
+    bool emitInsWritesToLclVarStackLoc(instrDesc* id);
 #endif // TARGET_LOONGARCH64

 /************************************************************************/
@@ -2867,7 +2941,9 @@ class emitter
 inline void emitter::instrDesc::checkSizes()
 {
+#ifndef TARGET_RISCV64 // TODO RISCV64
     C_ASSERT(SMALL_IDSC_SIZE == offsetof(instrDesc, _idAddrUnion));
+#endif // !TARGET_RISCV64
 }

 /*****************************************************************************
diff --git a/src/coreclr/jit/emitdef.h b/src/coreclr/jit/emitdef.h
index 35b46314a1225a..1d261919e7e51d 100644
--- a/src/coreclr/jit/emitdef.h
+++ b/src/coreclr/jit/emitdef.h
@@ -14,6 +14,8 @@
 #include "emitarm64.h"
 #elif defined(TARGET_LOONGARCH64)
 #include "emitloongarch64.h"
+#elif defined(TARGET_RISCV64)
+#include "emitriscv64.h"
 #else
 #error Unsupported or unset target architecture
 #endif
diff --git a/src/coreclr/jit/emitfmts.h b/src/coreclr/jit/emitfmts.h
index
77712ed95cce3d..883b4458d94290 100644 --- a/src/coreclr/jit/emitfmts.h +++ b/src/coreclr/jit/emitfmts.h @@ -10,6 +10,8 @@ #include "emitfmtsarm64.h" #elif defined(TARGET_LOONGARCH64) #include "emitfmtsloongarch64.h" +#elif defined(TARGET_RISCV64) +#include "emitfmtsriscv64.h" #else #error Unsupported or unset target architecture #endif // target type diff --git a/src/coreclr/jit/emitfmtsriscv64.h b/src/coreclr/jit/emitfmtsriscv64.h new file mode 100644 index 00000000000000..31bf86c7f14f9a --- /dev/null +++ b/src/coreclr/jit/emitfmtsriscv64.h @@ -0,0 +1,43 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +////////////////////////////////////////////////////////////////////////////// + +// clang-format off +#if !defined(TARGET_RISCV64) +#error Unexpected target type +#endif + +#ifdef DEFINE_ID_OPS +////////////////////////////////////////////////////////////////////////////// + +enum ID_OPS +{ + ID_OP_NONE, // no additional arguments +}; + +#undef DEFINE_ID_OPS + +////////////////////////////////////////////////////////////////////////////// +#else // !DEFINE_ID_OPS +////////////////////////////////////////////////////////////////////////////// + +#ifndef IF_DEF +#error Must define IF_DEF macro before including this file +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// enum insFormat instruction enum ID_OPS +// scheduling +// (unused) +////////////////////////////////////////////////////////////////////////////// + +IF_DEF(NONE, IS_NONE, NONE) + +///////////////////////////////////////////////////////////////////////////////////////////////////////// +#undef IF_DEF +////////////////////////////////////////////////////////////////////////////// + +#endif // !DEFINE_ID_OPS +////////////////////////////////////////////////////////////////////////////// +// clang-format on diff --git a/src/coreclr/jit/emitjmps.h b/src/coreclr/jit/emitjmps.h index cd10727f6eec32..ef6edf973496ac 100644 --- a/src/coreclr/jit/emitjmps.h +++ b/src/coreclr/jit/emitjmps.h @@ -53,6 +53,10 @@ JMP_SMALL(jmp , jmp , b ) JMP_SMALL(eq , ne , beq ) // EQ JMP_SMALL(ne , eq , bne ) // NE +#elif defined(TARGET_RISCV64) +// TODO RISCV64: adding other condition branches +JMP_SMALL(jmp , jmp , j ) + #else #error Unsupported or unset target architecture #endif // target type diff --git a/src/coreclr/jit/emitpub.h b/src/coreclr/jit/emitpub.h index 0133fb19f0212d..9fcb29f42b7dbc 100644 --- a/src/coreclr/jit/emitpub.h +++ b/src/coreclr/jit/emitpub.h @@ -133,7 +133,7 @@ static void InitTranslator(PDBRewriter* pPDB, int* rgSecMap, IMAGE_SECTION_HEADE /* Interface for generating unwind information */ /************************************************************************/ -#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool emitIsFuncEnd(emitLocation* emitLoc, emitLocation* emitLocNextFragment = NULL); @@ -145,7 +145,7 @@ void emitSplit(emitLocation* startLoc, void emitUnwindNopPadding(emitLocation* locFrom, Compiler* comp); -#endif // TARGET_ARMARCH || defined(TARGET_LOONGARCH64) +#endif // TARGET_ARMARCH || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) #if defined(TARGET_ARM) diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp new file mode 100644 index 00000000000000..3b23786c464923 --- /dev/null +++ b/src/coreclr/jit/emitriscv64.cpp @@ -0,0 +1,612 @@ +// 
Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XX                                                                           XX
+XX                            emitriscv64.cpp                                XX
+XX                                                                           XX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+*/
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#if defined(TARGET_RISCV64)
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+#include "instr.h"
+#include "emit.h"
+#include "codegen.h"
+
+/*****************************************************************************/
+
+const instruction emitJumpKindInstructions[] = {
+    INS_nop,
+
+#define JMP_SMALL(en, rev, ins) INS_##ins,
+#include "emitjmps.h"
+};
+
+const emitJumpKind emitReverseJumpKinds[] = {
+    EJ_NONE,
+
+#define JMP_SMALL(en, rev, ins) EJ_##rev,
+#include "emitjmps.h"
+};
+
+/*****************************************************************************
+ * Look up the instruction for a jump kind
+ */
+
+/*static*/ instruction emitter::emitJumpKindToIns(emitJumpKind jumpKind)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    assert((unsigned)jumpKind < ArrLen(emitJumpKindInstructions));
+    return emitJumpKindInstructions[jumpKind];
+}
+
+/*****************************************************************************
+* Look up the jump kind for an instruction. It better be a conditional
+* branch instruction with a jump kind!
+*/
+
+/*static*/ emitJumpKind emitter::emitInsToJumpKind(instruction ins)
+{
+    NYI_RISCV64("emitInsToJumpKind-----unimplemented on RISCV64 yet----");
+    return EJ_NONE;
+}
+
+/*****************************************************************************
+ * Reverse the conditional jump
+ */
+
+/*static*/ emitJumpKind emitter::emitReverseJumpKind(emitJumpKind jumpKind)
+{
+    assert(jumpKind < EJ_COUNT);
+    return emitReverseJumpKinds[jumpKind];
+}
+
+/*****************************************************************************
+ *
+ *  Return the allocated size (in bytes) of the given instruction descriptor.
+ */
+
+size_t emitter::emitSizeOfInsDsc(instrDesc* id)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return 0;
+}
+
+inline bool emitter::emitInsMayWriteToGCReg(instruction ins)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+
+bool emitter::emitInsWritesToLclVarStackLoc(instrDesc* id)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+
+// clang-format off
+/*static*/ const BYTE CodeGenInterface::instInfo[] =
+{
+    #define INST(id, nm, info, e1) info,
+    #include "instrs.h"
+};
+// clang-format on
+
+//------------------------------------------------------------------------
+// emitInsIsLoad: Returns true if the instruction is some kind of load instruction.
+//
+bool emitter::emitInsIsLoad(instruction ins)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+
+//------------------------------------------------------------------------
+// emitInsIsStore: Returns true if the instruction is some kind of store instruction.
+//
+bool emitter::emitInsIsStore(instruction ins)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+
+//-------------------------------------------------------------------------
+// emitInsIsLoadOrStore: Returns true if the instruction is some kind of load/store instruction.
+//
+bool emitter::emitInsIsLoadOrStore(instruction ins)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+
+/*****************************************************************************
+ *
+ *  Returns the specific encoding of the given CPU instruction.
+ */
+
+inline emitter::code_t emitter::emitInsCode(instruction ins /*, insFormat fmt*/)
+{
+    code_t code = BAD_CODE;
+
+    // clang-format off
+    const static code_t insCode[] =
+    {
+        #define INST(id, nm, info, e1) e1,
+        #include "instrs.h"
+    };
+    // clang-format on
+
+    code = insCode[ins];
+
+    assert((code != BAD_CODE));
+
+    return code;
+}
+
+/****************************************************************************
+ *
+ *  Add an instruction with no operands.
+ */
+
+void emitter::emitIns(instruction ins)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *  emitter::emitIns_S_R() and emitter::emitIns_R_S():
+ *
+ *  Add Load/Store instructions: base+offset and base-address-computing if needed.
+ *  For referencing a stack-based local variable and a register
+ *
+ */
+void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*
+ *  For special notes on `offs`, please see the comment for `emitter::emitIns_S_R`.
+ */
+void emitter::emitIns_R_S(instruction ins, emitAttr attr, regNumber reg1, int varx, int offs)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction with a single immediate value.
+ */
+
+void emitter::emitIns_I(instruction ins, emitAttr attr, ssize_t imm)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+void emitter::emitIns_I_I(instruction ins, emitAttr attr, ssize_t cc, ssize_t offs)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction referencing a register and a constant.
+ */
+
+void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm, insOpts opt /* = INS_OPTS_NONE */)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+//------------------------------------------------------------------------
+// emitIns_Mov: Emits a move instruction
+//
+// Arguments:
+//    ins      -- The instruction being emitted
+//    attr     -- The emit attribute
+//    dstReg   -- The destination register
+//    srcReg   -- The source register
+//    canSkip  -- true if the move can be elided when dstReg == srcReg, otherwise false
+//    insOpts  -- The instruction options
+//
+void emitter::emitIns_Mov(
+    instruction ins, emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip, insOpts opt /* = INS_OPTS_NONE */)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction referencing two registers
+ */
+
+void emitter::emitIns_R_R(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insOpts opt /* = INS_OPTS_NONE */)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction referencing two registers and a constant.
+ */
+
+void emitter::emitIns_R_R_I(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insOpts opt /* = INS_OPTS_NONE */)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction referencing three registers.
+ */
+
+void emitter::emitIns_R_R_R(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, insOpts opt) /* = INS_OPTS_NONE */
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction referencing three registers and a constant.
+ */
+
+void emitter::emitIns_R_R_R_I(instruction ins,
+                              emitAttr    attr,
+                              regNumber   reg1,
+                              regNumber   reg2,
+                              regNumber   reg3,
+                              ssize_t     imm,
+                              insOpts     opt /* = INS_OPTS_NONE */,
+                              emitAttr    attrReg2 /* = EA_UNKNOWN */)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction referencing two registers and two constants.
+ */
+
+void emitter::emitIns_R_R_I_I(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int imm1, int imm2, insOpts opt)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction referencing four registers.
+ */
+
+void emitter::emitIns_R_R_R_R(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, regNumber reg4)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Add an instruction with a register + static member operands.
+ *  Constant is stored into JIT data which is adjacent to code.
+ *
+ */
+void emitter::emitIns_R_C(
+    instruction ins, emitAttr attr, regNumber reg, regNumber addrReg, CORINFO_FIELD_HANDLE fldHnd, int offs)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+void emitter::emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs)
+{
+    NYI_RISCV64("emitIns_R_AR-----unimplemented/unused on RISCV64 yet----");
+}
+
+// This computes address from the immediate which is relocatable.
+void emitter::emitIns_R_AI(instruction ins,
+                           emitAttr    attr,
+                           regNumber   reg,
+                           ssize_t addr DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Record that a jump instruction uses the short encoding
+ *
+ */
+void emitter::emitSetShortJump(instrDescJmp* id)
+{
+    // TODO-RISCV64: maybe delete it in the future.
+    NYI_RISCV64("emitSetShortJump-----unimplemented/unused on RISCV64 yet----");
+}
+
+/*****************************************************************************
+ *
+ *  Add a label instruction.
+ */ + +void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void emitter::emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg) +{ + NYI_RISCV64("emitIns_J_R-----unimplemented/unused on RISCV64 yet----"); +} + +void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void emitter::emitIns_J_cond_la(instruction ins, BasicBlock* dst, regNumber reg1, regNumber reg2) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void emitter::emitIns_I_la(emitAttr size, regNumber reg, ssize_t imm) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +/***************************************************************************** + * + * Add a call instruction (direct or indirect). + * argSize<0 means that the caller will pop the arguments + * + * The other arguments are interpreted depending on callType as shown: + * Unless otherwise specified, ireg,xreg,xmul,disp should have default values. + * + * EC_FUNC_TOKEN : addr is the method address + * + * If callType is one of these emitCallTypes, addr has to be NULL. + * EC_INDIR_R : "call ireg". + * + */ + +void emitter::emitIns_Call(EmitCallType callType, + CORINFO_METHOD_HANDLE methHnd, + INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE + void* addr, + ssize_t argSize, + emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize), + VARSET_VALARG_TP ptrVars, + regMaskTP gcrefRegs, + regMaskTP byrefRegs, + const DebugInfo& di /* = DebugInfo() */, + regNumber ireg /* = REG_NA */, + regNumber xreg /* = REG_NA */, + unsigned xmul /* = 0 */, + ssize_t disp /* = 0 */, + bool isJump /* = false */) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +/***************************************************************************** + * + * Output a call instruction. + */ + +unsigned emitter::emitOutputCall(insGroup* ig, BYTE* dst, instrDesc* id, code_t code) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +void emitter::emitJumpDistBind() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +/***************************************************************************** + * + * Emit a 32-bit RISCV64 instruction + */ + +/*static*/ unsigned emitter::emitOutput_Instr(BYTE* dst, code_t code) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +/***************************************************************************** +* + * Append the machine code corresponding to the given instruction descriptor + * to the code block at '*dp'; the base of the code block is 'bp', and 'ig' + * is the instruction group that contains the instruction. Updates '*dp' to + * point past the generated code, and returns the size of the instruction + * descriptor in bytes. + */ + +size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +/*****************************************************************************/ +/*****************************************************************************/ + +#ifdef DEBUG + +// clang-format off +static const char* const RegNames[] = +{ + #define REGDEF(name, rnum, mask, sname) sname, + #include "register.h" +}; +// clang-format on + +//---------------------------------------------------------------------------------------- +// Disassemble the given instruction. +// The `emitter::emitDisInsName` is focused on the most important for debugging. 
+// It is implemented as simply and independently as possible, which makes it
+// easy to port to release builds.
+//
+// Arguments:
+//    code - The instruction's encoding.
+//    addr - The address of the code.
+//    id   - The instrDesc of the code if needed.
+//
+// Note:
+//    The instruction name, including its alignment padding, is 13 characters long.
+//
+
+void emitter::emitDisInsName(code_t code, const BYTE* addr, instrDesc* id)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+/*****************************************************************************
+ *
+ *  Display (optionally) the instruction encoding in hex
+ */
+
+void emitter::emitDispInsHex(instrDesc* id, BYTE* code, size_t sz)
+{
+    // We do not display the instruction hex if we want diff-able disassembly
+    if (!emitComp->opts.disDiffable)
+    {
+        if (sz == 4)
+        {
+            printf("  %08X    ", (*((code_t*)code)));
+        }
+        else
+        {
+            assert(sz == 0);
+            printf("              ");
+        }
+    }
+}
+
+void emitter::emitDispIns(
+    instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* pCode, size_t sz, insGroup* ig)
+{
+    // LoongArch64 implements this via `emitter::emitDisInsName`, and this port
+    // follows the same approach; `emitDispIns` is likely more complex than is needed,
+    // since `emitter::emitDisInsName` focuses on what matters most for debugging.
+    NYI_RISCV64("LA not used the emitter::emitDispIns");
+}
+
+/*****************************************************************************
+ *
+ *  Display a stack frame reference.
+ */
+
+void emitter::emitDispFrameRef(int varx, int disp, int offs, bool asmfm)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+#endif // DEBUG
+
+// Generate code for a load or store operation with a potentially complex addressing mode
+// This method handles the case of a GT_IND with contained GT_LEA op1 of the x86 form [base + index*scale + offset]
+//
+void emitter::emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTreeIndir* indir)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+}
+
+// The callee must call genConsumeReg() for any non-contained srcs
+// and genProduceReg() for any non-contained dsts.
+
+regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src)
+{
+    NYI_RISCV64("emitInsBinary-----unused");
+    return REG_R0;
+}
+
+// The callee must call genConsumeReg() for any non-contained srcs
+// and genProduceReg() for any non-contained dsts.
+regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst, GenTree* src1, GenTree* src2)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return REG_R0;
+    // dst can only be a reg
+}
+
+unsigned emitter::get_curTotalCodeSize()
+{
+    return emitTotalCodeSize;
+}
+
+#if defined(DEBUG) || defined(LATE_DISASM)
+
+//----------------------------------------------------------------------------------------
+// getInsExecutionCharacteristics:
+//    Returns the current instruction execution characteristics
+//
+// Arguments:
+//    id  - The current instruction descriptor to be evaluated
+//
+// Return Value:
+//    A struct containing the current instruction execution characteristics
+//
+// Notes:
+//    The instruction latencies and throughput values returned by this function
+//    are NOT accurate; they are placeholders.
+emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(instrDesc* id)
+{
+    insExecutionCharacteristics result;
+
+    // TODO-RISCV64: support this function.
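+    // Hypothetical sketch (not part of this change) of what a fuller
+    // implementation might do once real instruction descriptors exist:
+    // switch on the instruction and return per-class estimates. INS_ld is an
+    // assumed future instruction name; the PERFSCORE_* constants are the ones
+    // used by the other targets in emit.h:
+    //
+    //   switch (id->idIns())
+    //   {
+    //       case INS_ld: // a load: memory read with multi-cycle latency
+    //           result.insThroughput       = PERFSCORE_THROUGHPUT_1C;
+    //           result.insLatency          = PERFSCORE_LATENCY_4C;
+    //           result.insMemoryAccessKind = PERFSCORE_MEMORY_READ;
+    //           return result;
+    //       default: // simple ALU operations
+    //           break;
+    //   }
+    //
+    // For now, everything reports zero cost: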
+    result.insThroughput       = PERFSCORE_THROUGHPUT_ZERO;
+    result.insLatency          = PERFSCORE_LATENCY_ZERO;
+    result.insMemoryAccessKind = PERFSCORE_MEMORY_NONE;
+
+    return result;
+}
+
+#endif // defined(DEBUG) || defined(LATE_DISASM)
+
+#ifdef DEBUG
+//------------------------------------------------------------------------
+// emitRegName: Returns a general-purpose register name or SIMD and floating-point scalar register name.
+//
+// TODO-RISCV64: supporting SIMD.
+// Arguments:
+//    reg - A general-purpose register or floating-point register.
+//    size - unused parameter.
+//    varName - unused parameter.
+//
+// Return value:
+//    A string that represents a general-purpose register name or floating-point scalar register name.
+//
+const char* emitter::emitRegName(regNumber reg, emitAttr size, bool varName)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return nullptr;
+}
+#endif
+
+//------------------------------------------------------------------------
+// IsMovInstruction: Determines whether a given instruction is a move instruction
+//
+// Arguments:
+//    ins -- The instruction being checked
+//
+bool emitter::IsMovInstruction(instruction ins)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+
+#endif // defined(TARGET_RISCV64)
diff --git a/src/coreclr/jit/emitriscv64.h b/src/coreclr/jit/emitriscv64.h
new file mode 100644
index 00000000000000..7e640d9b106a09
--- /dev/null
+++ b/src/coreclr/jit/emitriscv64.h
@@ -0,0 +1,244 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#if defined(TARGET_RISCV64)
+
+// The RISCV64 instructions are all 32 bits in size.
+// We use an unsigned int to hold the encoded instructions.
+// This typedef defines the type that we use to hold encoded instructions.
+//
+typedef unsigned int code_t;
+
+/************************************************************************/
+/*     Routines that compute the size of / encode instructions         */
+/************************************************************************/
+
+struct CnsVal
+{
+    ssize_t cnsVal;
+    bool    cnsReloc;
+};
+
+#ifdef DEBUG
+
+/************************************************************************/
+/*             Debug-only routines to display instructions             */
+/************************************************************************/
+
+const char* emitFPregName(unsigned reg, bool varName = true);
+const char* emitVectorRegName(regNumber reg);
+
+void emitDisInsName(code_t code, const BYTE* addr, instrDesc* id);
+#endif // DEBUG
+
+void emitIns_J_cond_la(instruction ins, BasicBlock* dst, regNumber reg1 = REG_R0, regNumber reg2 = REG_R0);
+void emitIns_I_la(emitAttr attr, regNumber reg, ssize_t imm);
+
+/************************************************************************/
+/*  Private members that deal with target-dependent instr.
descriptors */
+/************************************************************************/
+
+private:
+instrDesc* emitNewInstrCallDir(int              argCnt,
+                               VARSET_VALARG_TP GCvars,
+                               regMaskTP        gcrefRegs,
+                               regMaskTP        byrefRegs,
+                               emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
+
+instrDesc* emitNewInstrCallInd(int              argCnt,
+                               ssize_t          disp,
+                               VARSET_VALARG_TP GCvars,
+                               regMaskTP        gcrefRegs,
+                               regMaskTP        byrefRegs,
+                               emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize));
+
+/************************************************************************/
+/*               Private helpers for instruction output                */
+/************************************************************************/
+
+private:
+bool emitInsIsLoad(instruction ins);
+bool emitInsIsStore(instruction ins);
+bool emitInsIsLoadOrStore(instruction ins);
+
+emitter::code_t emitInsCode(instruction ins /*, insFormat fmt*/);
+
+// Generate code for a load or store operation and handle the case of contained GT_LEA op1 with [base + offset]
+void emitInsLoadStoreOp(instruction ins, emitAttr attr, regNumber dataReg, GenTreeIndir* indir);
+
+// Emit the 32-bit RISCV64 instruction 'code' into the 'dst' buffer
+unsigned emitOutput_Instr(BYTE* dst, code_t code);
+
+// Method to check whether a mov is redundant with respect to the last instruction.
+// If yes, the caller of this method can choose to omit current mov instruction.
+static bool IsMovInstruction(instruction ins);
+bool IsRedundantMov(instruction ins, emitAttr size, regNumber dst, regNumber src, bool canSkip);
+bool IsRedundantLdStr(
+    instruction ins, regNumber reg1, regNumber reg2, ssize_t imm, emitAttr size, insFormat fmt); // New functions end.
+
+/************************************************************************/
+/*           Public inline informational methods                       */
+/************************************************************************/
+
+public:
+// Returns true if 'value' is a legal signed immediate 12 bit encoding (i.e. in [-2048, 2047]).
+static bool isValidSimm12(ssize_t value)
+{
+    return -(((int)1) << 11) <= value && value < (((int)1) << 11);
+};
+
+// Returns true if 'value' is a legal unsigned immediate 12 bit encoding (i.e. in [0, 4095]).
+static bool isValidUimm12(ssize_t value)
+{
+    return (0 == (value >> 12));
+}
+
+// Returns true if 'value' is a legal unsigned immediate 11 bit encoding (i.e. in [0, 2047]).
+static bool isValidUimm11(ssize_t value)
+{
+    return (0 == (value >> 11));
+}
+
+// Returns true if 'value' is a legal signed immediate 20 bit encoding (i.e. in [-524288, 524287]).
+static bool isValidSimm20(ssize_t value)
+{
+    return -(((int)1) << 19) <= value && value < (((int)1) << 19);
+};
+
+// Returns true if 'value' is a legal signed immediate 38 bit encoding (i.e. in [-2^37, 2^37 - 1]).
+static bool isValidSimm38(ssize_t value)
+{
+    return -(((ssize_t)1) << 37) <= value && value < (((ssize_t)1) << 37);
+};
+
+// Returns the number of bits used by the given 'size'.
+inline static unsigned getBitWidth(emitAttr size)
+{
+    assert(size <= EA_8BYTE);
+    return (unsigned)size * BITS_PER_BYTE;
+}
+
+inline static bool isGeneralRegister(regNumber reg)
+{
+    return (reg >= REG_INT_FIRST) && (reg <= REG_INT_LAST);
+}
+
+inline static bool isGeneralRegisterOrR0(regNumber reg)
+{
+    return (reg >= REG_FIRST) && (reg <= REG_INT_LAST);
+} // Includes REG_R0
+
+inline static bool isFloatReg(regNumber reg)
+{
+    return (reg >= REG_FP_FIRST && reg <= REG_FP_LAST);
+}
+
+/************************************************************************/
+/*         Output target-independent instructions                      */
+/************************************************************************/
+
+void emitIns_J(instruction ins, BasicBlock* dst, int instrCount = 0);
+
+/************************************************************************/
+/*           The public entry points to output instructions            */
+/************************************************************************/
+
+public:
+void emitIns(instruction ins);
+
+void emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+void emitIns_R_S(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+void emitIns_I(instruction ins, emitAttr attr, ssize_t imm);
+void emitIns_I_I(instruction ins, emitAttr attr, ssize_t cc, ssize_t offs);
+
+void emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_Mov(
+    instruction ins, emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insFlags flags)
+{
+    _ASSERTE(!"RISCV64: NYI");
+}
+
+void emitIns_R_R_I(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_R(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_R_I(instruction ins,
+                     emitAttr    attr,
+                     regNumber   reg1,
+                     regNumber   reg2,
+                     regNumber   reg3,
+                     ssize_t     imm,
+                     insOpts     opt      = INS_OPTS_NONE,
+                     emitAttr    attrReg2 = EA_UNKNOWN);
+
+void emitIns_R_R_I_I(
+    instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, int imm1, int imm2, insOpts opt = INS_OPTS_NONE);
+
+void emitIns_R_R_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, regNumber reg3, regNumber reg4);
+
+void emitIns_R_C(
+    instruction ins, emitAttr attr, regNumber reg, regNumber tmpReg, CORINFO_FIELD_HANDLE fldHnd, int offs);
+
+void emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg);
+
+void emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg);
+
+void emitIns_R_AR(instruction ins, emitAttr attr, regNumber ireg, regNumber reg, int offs);
+
+void emitIns_R_AI(instruction ins,
+                  emitAttr    attr,
+                  regNumber   reg,
+                  ssize_t disp DEBUGARG(size_t targetHandle = 0) DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY));
+
+enum EmitCallType
+{
+
+    // I have included here, but commented out, all the values used by the x86 emitter.
+    // However, RISCV64 has a much reduced instruction set, and so the RISCV64 emitter only
+    // supports a subset of the x86 variants.
By leaving them commented out, it becomes
+    // a compile time error if code tries to use them (and hopefully see this comment
+    // and know why they are unavailable on RISCV64), while making it easier to stay
+    // in-sync with x86 and possibly add them back in if needed.
+
+    EC_FUNC_TOKEN, // Direct call to a helper/static/nonvirtual/global method
+    // EC_FUNC_TOKEN_INDIR, // Indirect call to a helper/static/nonvirtual/global method
+    // EC_FUNC_ADDR,  // Direct call to an absolute address
+
+    // EC_FUNC_VIRTUAL, // Call to a virtual method (using the vtable)
+    EC_INDIR_R, // Indirect call via register
+    // EC_INDIR_SR,  // Indirect call via stack-reference (local var)
+    // EC_INDIR_C,   // Indirect call via static class var
+    // EC_INDIR_ARD, // Indirect call via an addressing mode
+
+    EC_COUNT
+};
+
+void emitIns_Call(EmitCallType          callType,
+                  CORINFO_METHOD_HANDLE methHnd,
+                  INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo) // used to report call sites to the EE
+                  void*    addr,
+                  ssize_t  argSize,
+                  emitAttr retSize MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
+                  VARSET_VALARG_TP ptrVars,
+                  regMaskTP        gcrefRegs,
+                  regMaskTP        byrefRegs,
+                  const DebugInfo& di,
+                  regNumber        ireg   = REG_NA,
+                  regNumber        xreg   = REG_NA,
+                  unsigned         xmul   = 0,
+                  ssize_t          disp   = 0,
+                  bool             isJump = false);
+
+unsigned emitOutputCall(insGroup* ig, BYTE* dst, instrDesc* id, code_t code);
+
+unsigned get_curTotalCodeSize(); // bytes of code
+
+#endif // TARGET_RISCV64
diff --git a/src/coreclr/jit/error.h b/src/coreclr/jit/error.h
index 3ce4df25eef963..f31732e28c9f56 100644
--- a/src/coreclr/jit/error.h
+++ b/src/coreclr/jit/error.h
@@ -174,6 +174,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
 #define NYI_ARM(msg) do { } while (0)
 #define NYI_ARM64(msg) do { } while (0)
 #define NYI_LOONGARCH64(msg) do { } while (0)
+#define NYI_RISCV64(msg) do { } while (0)

 #elif defined(TARGET_X86)

@@ -182,6 +183,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
 #define NYI_ARM(msg) do { } while (0)
 #define NYI_ARM64(msg) do { } while (0)
 #define NYI_LOONGARCH64(msg) do { } while (0)
+#define NYI_RISCV64(msg) do { } while (0)

 #elif defined(TARGET_ARM)

@@ -190,6 +192,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
 #define NYI_ARM(msg) NYIRAW("NYI_ARM: " msg)
 #define NYI_ARM64(msg) do { } while (0)
 #define NYI_LOONGARCH64(msg) do { } while (0)
+#define NYI_RISCV64(msg) do { } while (0)

 #elif defined(TARGET_ARM64)

@@ -198,6 +201,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
 #define NYI_ARM(msg) do { } while (0)
 #define NYI_ARM64(msg) NYIRAW("NYI_ARM64: " msg)
 #define NYI_LOONGARCH64(msg) do { } while (0)
+#define NYI_RISCV64(msg) do { } while (0)

 #elif defined(TARGET_LOONGARCH64)
 #define NYI_AMD64(msg) do { } while (0)
@@ -205,10 +209,19 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
 #define NYI_ARM(msg) do { } while (0)
 #define NYI_ARM64(msg) do { } while (0)
 #define NYI_LOONGARCH64(msg) NYIRAW("NYI_LOONGARCH64: " msg)
+#define NYI_RISCV64(msg) do { } while (0)
+
+#elif defined(TARGET_RISCV64)
+#define NYI_AMD64(msg) do { } while (0)
+#define NYI_X86(msg) do { } while (0)
+#define NYI_ARM(msg) do { } while (0)
+#define NYI_ARM64(msg) do { } while (0)
+#define NYI_LOONGARCH64(msg) do { } while (0)
+#define NYI_RISCV64(msg) NYIRAW("NYI_RISCV64: " msg)

 #else

-#error "Unknown platform, not x86, ARM, LOONGARCH64 or AMD64?"
+#error "Unknown platform, not x86, ARM, LOONGARCH64, AMD64, or RISCV64?" #endif diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 16fe9b2f19cc05..5f8df72a14c77d 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -4311,6 +4311,8 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ addrModeCostSz += 4; } } +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV64 NYI"); #else #error "Unknown TARGET" #endif @@ -4760,6 +4762,14 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costEx = 1; costSz = 4; goto COMMON_CNS; +#elif defined(TARGET_RISCV64) + case GT_CNS_STR: + case GT_CNS_LNG: + case GT_CNS_INT: + costEx = 1; + costSz = 4; + _ASSERTE(!"TODO RISCV64 NYI"); + goto COMMON_CNS; #else case GT_CNS_STR: case GT_CNS_LNG: @@ -4833,6 +4843,10 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) // TODO-LoongArch64-CQ: tune the costs. costEx = 2; costSz = 8; +#elif defined(TARGET_RISCV64) + costEx = 2; + costSz = 8; + _ASSERTE(!"TODO RISCV64 NYI"); #else #error "Unknown TARGET" #endif @@ -5036,6 +5050,8 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) // TODO-LoongArch64-CQ: tune the costs. costEx = 1; costSz = 4; +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV64 NYI"); #else #error "Unknown TARGET" #endif diff --git a/src/coreclr/jit/instr.cpp b/src/coreclr/jit/instr.cpp index 91e018cbd8ac8b..156324f11257e9 100644 --- a/src/coreclr/jit/instr.cpp +++ b/src/coreclr/jit/instr.cpp @@ -69,6 +69,10 @@ const char* CodeGen::genInsName(instruction ins) #define INST(id, nm, ldst, e1) nm, #include "instrs.h" +#elif defined(TARGET_RISCV64) + #define INST(id, nm, ldst, e1) nm, + #include "instrs.h" + #else #error "Unknown TARGET" #endif @@ -340,6 +344,8 @@ void CodeGen::inst_RV(instruction ins, regNumber reg, var_types type, emitAttr s #ifdef TARGET_LOONGARCH64 // inst_RV is not used for LoongArch64, so there is no need to define `emitIns_R`. NYI_LOONGARCH64("inst_RV-----unused on LOONGARCH64----"); +#elif defined(TARGET_RISCV64) + NYI_RISCV64("inst_RV-----unused on RISCV64----"); #else GetEmitter()->emitIns_R(ins, size, reg); #endif @@ -1571,8 +1577,11 @@ instruction CodeGen::ins_Copy(var_types dstType) { return INS_mov; } +#elif defined(TARGET_RISCV64) + NYI_RISCV64("TODO RISCV64"); + return INS_invalid; #else // TARGET_* -#error "Unknown TARGET_" +#error "Unknown TARGET" #endif } @@ -1635,6 +1644,9 @@ instruction CodeGen::ins_Copy(regNumber srcReg, var_types dstType) assert(genIsValidFloatReg(srcReg)); return EA_SIZE(emitActualTypeSize(dstType)) == EA_4BYTE ? INS_movfr2gr_s : INS_movfr2gr_d; } +#elif defined(TARGET_RISCV64) + NYI_RISCV64("TODO RISCV64"); + return INS_invalid; #else // TARGET* #error "Unknown TARGET" #endif @@ -2042,6 +2054,8 @@ void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags fla GetEmitter()->emitIns_Mov(INS_mov, size, reg, REG_ZR, /* canSkip */ true); #elif defined(TARGET_LOONGARCH64) GetEmitter()->emitIns_R_R_I(INS_ori, size, reg, REG_R0, 0); +#elif defined(TARGET_RISCV64) + NYI_RISCV64("TODO RISCV64"); #else #error "Unknown TARGET" #endif diff --git a/src/coreclr/jit/instr.h b/src/coreclr/jit/instr.h index 180ad19ad3a96e..0b9cd8362d419f 100644 --- a/src/coreclr/jit/instr.h +++ b/src/coreclr/jit/instr.h @@ -55,6 +55,12 @@ enum instruction : unsigned #define INST(id, nm, ldst, e1) INS_##id, #include "instrs.h" + INS_lea, // Not a real instruction. 
It is used for load the address of stack locals
+#elif defined(TARGET_RISCV64)
+    // TODO RISCV64
+    #define INST(id, nm, ldst, e1) INS_##id,
+    #include "instrs.h"
+    INS_lea, // Not a real instruction. It is used to load the address of stack locals
+
 #else
 #error Unsupported target architecture
@@ -158,7 +164,7 @@ enum insFlags : uint64_t
     INS_FLAGS_DONT_CARE = 0x00ULL,
 };

-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
 // TODO-Cleanup: Move 'insFlags' under TARGET_ARM
 enum insFlags: unsigned
 {
@@ -337,6 +343,17 @@ enum insBarrier : unsigned
     INS_BARRIER_REL = INS_BARRIER_FULL,//18,
     INS_BARRIER_RMB = INS_BARRIER_FULL,//19,
 };
+#elif defined(TARGET_RISCV64)
+enum insOpts : unsigned
+{
+    INS_OPTS_NONE,
+};
+
+enum insBarrier : unsigned
+{
+    INS_BARRIER_FULL = 0,
+};
+
 #endif

 #if defined(TARGET_XARCH)
diff --git a/src/coreclr/jit/instrs.h b/src/coreclr/jit/instrs.h
index aa16547f44be73..1bbbd3f2367e2b 100644
--- a/src/coreclr/jit/instrs.h
+++ b/src/coreclr/jit/instrs.h
@@ -9,6 +9,8 @@
 #include "instrsarm64.h"
 #elif defined(TARGET_LOONGARCH64)
 #include "instrsloongarch64.h"
+#elif defined(TARGET_RISCV64)
+#include "instrsriscv64.h"
 #else
 #error Unsupported or unset target architecture
 #endif // target type
diff --git a/src/coreclr/jit/instrsriscv64.h b/src/coreclr/jit/instrsriscv64.h
new file mode 100644
index 00000000000000..87729fac46b6ff
--- /dev/null
+++ b/src/coreclr/jit/instrsriscv64.h
@@ -0,0 +1,39 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/*****************************************************************************
+ *  RISCV64 instructions for JIT compiler
+ *
+ *          id      -- the enum name for the instruction
+ *          nm      -- textual name (for assembly display)
+ *          ld/st/cmp  -- load/store/compare instruction
+ *          encode  -- encoding 1
+ *
+******************************************************************************/
+
+#if !defined(TARGET_RISCV64)
+#error Unexpected target type
+#endif
+
+#ifndef INST
+#error INST must be defined before including this file.
+#endif
+
+/*****************************************************************************/
+/*               The following is RISCV64-specific                          */
+/*****************************************************************************/
+
+// If you're adding a new instruction:
+// You need not only to fill in one of these macros describing the instruction, but also:
+//   * If the instruction writes to more than one destination register, update the function
+//     emitInsMayWriteMultipleRegs in emitriscv64.cpp.
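+
+// A worked example of the 'encode' column (a sketch based on the RISC-V base
+// encoding, not text from this patch): the canonical NOP is "addi x0, x0, 0",
+// i.e. imm[11:0]=0, rs1=x0, funct3=000, rd=x0 over the OP-IMM opcode
+// 0b0010011, which assembles to 0x00000013. Likewise "j 0" is "jal x0, 0"
+// over the JAL opcode 0b1101111, i.e. 0x0000006f.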
+
+// clang-format off
+INST(invalid, "INVALID", 0, BAD_CODE)
+INST(nop,     "addi",    0, 0x00000013)
+INST(mov,     "addi",    0, 0x00000013)
+INST(j,       "j",       0, 0x0000006f)
+// clang-format on
+/*****************************************************************************/
+#undef INST
+/*****************************************************************************/
diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h
index eef88bee6c977b..e1aabfba6d10b3 100644
--- a/src/coreclr/jit/jit.h
+++ b/src/coreclr/jit/jit.h
@@ -45,6 +45,9 @@
 #if defined(HOST_LOONGARCH64)
 #error Cannot define both HOST_X86 and HOST_LOONGARCH64
 #endif
+#if defined(HOST_RISCV64)
+#error Cannot define both HOST_X86 and HOST_RISCV64
+#endif
 #elif defined(HOST_AMD64)
 #if defined(HOST_X86)
 #error Cannot define both HOST_AMD64 and HOST_X86
 #endif
@@ -58,6 +61,9 @@
 #if defined(HOST_LOONGARCH64)
 #error Cannot define both HOST_AMD64 and HOST_LOONGARCH64
 #endif
+#if defined(HOST_RISCV64)
+#error Cannot define both HOST_AMD64 and HOST_RISCV64
+#endif
 #elif defined(HOST_ARM)
 #if defined(HOST_X86)
 #error Cannot define both HOST_ARM and HOST_X86
 #endif
@@ -71,6 +77,9 @@
 #if defined(HOST_LOONGARCH64)
 #error Cannot define both HOST_ARM and HOST_LOONGARCH64
 #endif
+#if defined(HOST_RISCV64)
+#error Cannot define both HOST_ARM and HOST_RISCV64
+#endif
 #elif defined(HOST_ARM64)
 #if defined(HOST_X86)
 #error Cannot define both HOST_ARM64 and HOST_X86
 #endif
@@ -84,6 +93,9 @@
 #if defined(HOST_LOONGARCH64)
 #error Cannot define both HOST_ARM64 and HOST_LOONGARCH64
 #endif
+#if defined(HOST_RISCV64)
+#error Cannot define both HOST_ARM64 and HOST_RISCV64
+#endif
 #elif defined(HOST_LOONGARCH64)
 #if defined(HOST_X86)
 #error Cannot define both HOST_LOONGARCH64 and HOST_X86
 #endif
@@ -97,6 +109,25 @@
 #if defined(HOST_ARM64)
 #error Cannot define both HOST_LOONGARCH64 and HOST_ARM64
 #endif
+#if defined(HOST_RISCV64)
+#error Cannot define both HOST_LOONGARCH64 and HOST_RISCV64
+#endif
+#elif defined(HOST_RISCV64)
+#if defined(HOST_X86)
+#error Cannot define both HOST_RISCV64 and HOST_X86
+#endif
+#if defined(HOST_AMD64)
+#error Cannot define both HOST_RISCV64 and HOST_AMD64
+#endif
+#if defined(HOST_ARM)
+#error Cannot define both HOST_RISCV64 and HOST_ARM
+#endif
+#if defined(HOST_ARM64)
+#error Cannot define both HOST_RISCV64 and HOST_ARM64
+#endif
+#if defined(HOST_LOONGARCH64)
+#error Cannot define both HOST_RISCV64 and HOST_LOONGARCH64
+#endif
 #else
 #error Unsupported or unset host architecture
 #endif
@@ -114,6 +145,9 @@
 #if defined(TARGET_LOONGARCH64)
 #error Cannot define both TARGET_X86 and TARGET_LOONGARCH64
 #endif
+#if defined(TARGET_RISCV64)
+#error Cannot define both TARGET_X86 and TARGET_RISCV64
+#endif
 #elif defined(TARGET_AMD64)
 #if defined(TARGET_X86)
 #error Cannot define both TARGET_AMD64 and TARGET_X86
 #endif
@@ -127,6 +161,9 @@
 #if defined(TARGET_LOONGARCH64)
 #error Cannot define both TARGET_AMD64 and TARGET_LOONGARCH64
 #endif
+#if defined(TARGET_RISCV64)
+#error Cannot define both TARGET_AMD64 and TARGET_RISCV64
+#endif
 #elif defined(TARGET_ARM)
 #if defined(TARGET_X86)
 #error Cannot define both TARGET_ARM and TARGET_X86
 #endif
@@ -140,6 +177,9 @@
 #if defined(TARGET_LOONGARCH64)
 #error Cannot define both TARGET_ARM and TARGET_LOONGARCH64
 #endif
+#if defined(TARGET_RISCV64)
+#error Cannot define both TARGET_ARM and TARGET_RISCV64
+#endif
 #elif defined(TARGET_ARM64)
 #if defined(TARGET_X86)
 #error Cannot define both TARGET_ARM64 and TARGET_X86
 #endif
@@ -153,6 +193,9 @@
 #if defined(TARGET_LOONGARCH64)
 #error Cannot define both TARGET_ARM64 and TARGET_LOONGARCH64
 #endif
+#if
defined(TARGET_RISCV64)
+#error Cannot define both TARGET_ARM64 and TARGET_RISCV64
+#endif
 #elif defined(TARGET_LOONGARCH64)
 #if defined(TARGET_X86)
 #error Cannot define both TARGET_LOONGARCH64 and TARGET_X86
 #endif
@@ -166,6 +209,26 @@
 #if defined(TARGET_ARM64)
 #error Cannot define both TARGET_LOONGARCH64 and TARGET_ARM64
 #endif
+#if defined(TARGET_RISCV64)
+#error Cannot define both TARGET_LOONGARCH64 and TARGET_RISCV64
+#endif
+#elif defined(TARGET_RISCV64)
+#if defined(TARGET_X86)
+#error Cannot define both TARGET_RISCV64 and TARGET_X86
+#endif
+#if defined(TARGET_AMD64)
+#error Cannot define both TARGET_RISCV64 and TARGET_AMD64
+#endif
+#if defined(TARGET_ARM)
+#error Cannot define both TARGET_RISCV64 and TARGET_ARM
+#endif
+#if defined(TARGET_ARM64)
+#error Cannot define both TARGET_RISCV64 and TARGET_ARM64
+#endif
+#if defined(TARGET_LOONGARCH64)
+#error Cannot define both TARGET_RISCV64 and TARGET_LOONGARCH64
+#endif
+
 #else
 #error Unsupported or unset target architecture
 #endif
@@ -215,6 +278,8 @@
 #define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_ARM64 // 0xAA64
 #elif defined(TARGET_LOONGARCH64)
 #define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_LOONGARCH64 // 0x6264
+#elif defined(TARGET_RISCV64)
+#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_RISCV64 // 0x5064
 #else
 #error Unsupported or unset target architecture
 #endif
@@ -269,7 +334,7 @@ typedef ptrdiff_t ssize_t;
 #define UNIX_LOONGARCH64_ONLY(x)
 #endif // TARGET_LOONGARCH64

-#if defined(UNIX_AMD64_ABI) || !defined(TARGET_64BIT) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(UNIX_AMD64_ABI) || !defined(TARGET_64BIT) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
 #define FEATURE_PUT_STRUCT_ARG_STK 1
 #endif
@@ -281,7 +346,7 @@ typedef ptrdiff_t ssize_t;
 #define UNIX_AMD64_ABI_ONLY(x)
 #endif // defined(UNIX_AMD64_ABI)

-#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
 #define MULTIREG_HAS_SECOND_GC_RET 1
 #define MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(x) , x
 #define MULTIREG_HAS_SECOND_GC_RET_ONLY(x) x
@@ -295,7 +360,7 @@ typedef ptrdiff_t ssize_t;
 // the official Arm64 ABI.
 // Case: splitting 16 byte struct between x7 and stack
 // LoongArch64's ABI supports FEATURE_ARG_SPLIT which splitting 16 byte struct between a7 and stack.
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) #define FEATURE_ARG_SPLIT 1 #else #define FEATURE_ARG_SPLIT 0 diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 7cd22c1d9ed6da..3630396040e060 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -6130,6 +6130,8 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, argOffs += TARGET_POINTER_SIZE; } +#elif defined(TARGET_RISCV64) + _ASSERTE(!"TODO RISCV64 NYI"); #else // TARGET* #error Unsupported or unset target architecture #endif // TARGET* diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index c5a895a4ebccea..47a4c87d54150d 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -6218,6 +6218,9 @@ GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node) #elif defined(TARGET_ARM) // Currently there's no GT_MULHI for ARM32 return nullptr; +#elif defined(TARGET_RISCV64) + NYI_RISCV64("TODO RISCV64"); + return nullptr; #else #error Unsupported or unset target architecture #endif @@ -7439,7 +7442,7 @@ void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, Bas ind->ChangeType(comp->gtTypeForNullCheck(ind)); -#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool useNullCheck = true; #elif defined(TARGET_ARM) bool useNullCheck = false; diff --git a/src/coreclr/jit/lowerriscv64.cpp b/src/coreclr/jit/lowerriscv64.cpp new file mode 100644 index 00000000000000..0c1f69263afcaf --- /dev/null +++ b/src/coreclr/jit/lowerriscv64.cpp @@ -0,0 +1,439 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX Lowering for RISCV64 common code XX +XX XX +XX This encapsulates common logic for lowering trees for the RISCV64 XX +XX architectures. For a more detailed view of what is lowering, please XX +XX take a look at Lower.cpp XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ + +#include "jitpch.h" +#ifdef _MSC_VER +#pragma hdrstop +#endif + +#ifdef TARGET_RISCV64 // This file is ONLY used for RISCV64 architectures + +#include "jit.h" +#include "sideeffects.h" +#include "lower.h" +#include "lsra.h" + +#ifdef FEATURE_HW_INTRINSICS +#include "hwintrinsic.h" +#endif + +//------------------------------------------------------------------------ +// IsCallTargetInRange: Can a call target address be encoded in-place? +// +// Return Value: +// True if the addr fits into the range. +// +bool Lowering::IsCallTargetInRange(void* addr) +{ + // TODO-RISCV64-CQ: using B/BL for optimization. + _ASSERTE(!"TODO RISCV64 NYI"); + return false; +} + +//------------------------------------------------------------------------ +// IsContainableImmed: Is an immediate encodable in-place? +// +// Return Value: +// True if the immediate can be folded into an instruction, +// for example small enough and non-relocatable. 
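+//
+// A minimal sketch (an assumption, not part of this change) of what this
+// might check once implemented, using the emitter::isValidSimm12 helper
+// declared in emitriscv64.h:
+//
+//   if (!varTypeIsFloating(parentNode->TypeGet()) && childNode->IsCnsIntOrI())
+//   {
+//       ssize_t immVal = childNode->AsIntCon()->IconValue();
+//       return emitter::isValidSimm12(immVal); // fits an I-type immediate
+//   }
+//   return false;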
+// +bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return false; +} + +//------------------------------------------------------------------------ +// LowerMul: Lower a GT_MUL/GT_MULHI/GT_MUL_LONG node. +// +// Performs containment checks. +// +// TODO-RISCV64-CQ: recognize GT_MULs that can be turned into MUL_LONGs, +// as those are cheaper. +// +// Arguments: +// mul - The node to lower +// +// Return Value: +// The next node to lower. +// +GenTree* Lowering::LowerMul(GenTreeOp* mul) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return nullptr; +} + +//------------------------------------------------------------------------ +// LowerBinaryArithmetic: lowers the given binary arithmetic node. +// +// Arguments: +// node - the arithmetic node to lower +// +// Returns: +// The next node to lower. +// +GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return nullptr; +} + +//------------------------------------------------------------------------ +// LowerStoreLoc: Lower a store of a lclVar +// +// Arguments: +// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR) +// +// Notes: +// This involves: +// - Widening operations of unsigneds. +// +void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// LowerStoreIndir: Determine addressing mode for an indirection, and whether operands are contained. +// +// Arguments: +// node - The indirect store node (GT_STORE_IND) of interest +// +// Return Value: +// None. +// +void Lowering::LowerStoreIndir(GenTreeStoreInd* node) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// LowerBlockStore: Set block store type +// +// Arguments: +// blkNode - The block store node of interest +// +// Return Value: +// None. +// +void Lowering::LowerBlockStore(GenTreeBlk* blkNode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainBlockStoreAddress: Attempt to contain an address used by an unrolled block store. +// +// Arguments: +// blkNode - the block store node +// size - the block size +// addr - the address node to try to contain +// +void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// LowerPutArgStkOrSplit: Lower a GT_PUTARG_STK/GT_PUTARG_SPLIT. +// +// Arguments: +// putArgNode - The node to lower +// +void Lowering::LowerPutArgStkOrSplit(GenTreePutArgStk* putArgNode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// LowerCast: Lower GT_CAST(srcType, DstType) nodes. +// +// Arguments: +// tree - GT_CAST node to be lowered +// +// Return Value: +// None.
+// +// Notes: +// Casts from float/double to a smaller int type are transformed as follows: +// GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte) +// GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte) +// GT_CAST(float/double, int16) = GT_CAST(GT_CAST(float/double, int32), int16) +// GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(float/double, int32), uint16) +// +// Note that for the overflow conversions we still depend on helper calls and +// don't expect to see them here. +// i) GT_CAST(float/double, int type with overflow detection) +// + +void Lowering::LowerCast(GenTree* tree) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// LowerRotate: Lower GT_ROL and GT_ROR nodes. +// +// Arguments: +// tree - the node to lower +// +// Return Value: +// None. +// +void Lowering::LowerRotate(GenTree* tree) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#ifdef FEATURE_SIMD +//---------------------------------------------------------------------------------------------- +// Lowering::LowerSIMD: Perform containment analysis for a SIMD intrinsic node. +// +// Arguments: +// simdNode - The SIMD intrinsic node. +// +void Lowering::LowerSIMD(GenTreeSIMD* simdNode) +{ + NYI_RISCV64("LowerSIMD"); +} +#endif // FEATURE_SIMD + +#ifdef FEATURE_HW_INTRINSICS +//---------------------------------------------------------------------------------------------- +// Lowering::LowerHWIntrinsic: Perform containment analysis for a hardware intrinsic node. +// +// Arguments: +// node - The hardware intrinsic node. +// +void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node) +{ + NYI_RISCV64("LowerHWIntrinsic"); +} + +//---------------------------------------------------------------------------------------------- +// Lowering::IsValidConstForMovImm: Determines if the given node can be replaced by a mov/fmov immediate instruction +// +// Arguments: +// node - The hardware intrinsic node. +// +// Returns: +// true if the node can be replaced by a mov/fmov immediate instruction; otherwise, false +// +// IMPORTANT: +// This check may end up modifying node->gtOp1 if it is a cast node that can be removed +bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node) +{ + NYI_RISCV64("IsValidConstForMovImm"); + return false; +} + +//---------------------------------------------------------------------------------------------- +// Lowering::LowerHWIntrinsicCmpOp: Lowers a Vector128 or Vector256 comparison intrinsic +// +// Arguments: +// node - The hardware intrinsic node. +// cmpOp - The comparison operation, currently must be GT_EQ or GT_NE +// +void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp) +{ + NYI_RISCV64("LowerHWIntrinsicCmpOp"); +} + +//---------------------------------------------------------------------------------------------- +// Lowering::LowerHWIntrinsicCreate: Lowers a Vector64 or Vector128 Create call +// +// Arguments: +// node - The hardware intrinsic node. +// +void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node) +{ + NYI_RISCV64("LowerHWIntrinsicCreate"); +} + +//---------------------------------------------------------------------------------------------- +// Lowering::LowerHWIntrinsicDot: Lowers a Vector64 or Vector128 Dot call +// +// Arguments: +// node - The hardware intrinsic node.
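+// (Note: this change brings RISCV64 up without SIMD support; targetriscv64.h rejects +// FEATURE_SIMD outright, so these FEATURE_HW_INTRINSICS stubs exist only to satisfy the +// shared Lowering interface.)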
+// +void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node) +{ + NYI_RISCV64("LowerHWIntrinsicDot"); +} + +#endif // FEATURE_HW_INTRINSICS + +//------------------------------------------------------------------------ +// Containment analysis +//------------------------------------------------------------------------ + +//------------------------------------------------------------------------ +// ContainCheckCallOperands: Determine whether operands of a call should be contained. +// +// Arguments: +// call - The call node of interest +// +// Return Value: +// None. +// +void Lowering::ContainCheckCallOperands(GenTreeCall* call) +{ + // There are no contained operands for RISCV64. +} + +//------------------------------------------------------------------------ +// ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained. +// +// Arguments: +// node - pointer to the node +// +void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckIndir: Determine whether operands of an indir should be contained. +// +// Arguments: +// indirNode - The indirection node of interest +// +// Notes: +// This is called for both store and load indirections. +// +// Return Value: +// None. +// +void Lowering::ContainCheckIndir(GenTreeIndir* indirNode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckBinary: Determine whether a binary op's operands should be contained. +// +// Arguments: +// node - the node we care about +// +void Lowering::ContainCheckBinary(GenTreeOp* node) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckMul: Determine whether a mul op's operands should be contained. +// +// Arguments: +// node - the node we care about +// +void Lowering::ContainCheckMul(GenTreeOp* node) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckDivOrMod: determine which operands of a div/mod should be contained. +// +// Arguments: +// node - the node we care about +// +void Lowering::ContainCheckDivOrMod(GenTreeOp* node) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckShiftRotate: Determine whether a shift/rotate op's operands should be contained. +// +// Arguments: +// node - the node we care about +// +void Lowering::ContainCheckShiftRotate(GenTreeOp* node) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained. +// +// Arguments: +// node - pointer to the node +// +void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckCast: determine whether the source of a CAST node should be contained. +// +// Arguments: +// node - pointer to the node +// +void Lowering::ContainCheckCast(GenTreeCast* node) +{ + // There are no contained operands for RISCV64. +} + +//------------------------------------------------------------------------ +// ContainCheckCompare: determine whether the sources of a compare node should be contained.
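+// (Illustration, an assumption about the eventual implementation: an operand could be +// marked contained when it is an integer constant fitting the signed 12-bit immediate of +// slti/sltiu, e.g. +// +// GenTree* op2 = cmp->gtGetOp2(); +// if (op2->IsCnsIntOrI() && isValidSimm12(op2->AsIntCon()->IconValue())) // isValidSimm12 is hypothetical +// MakeSrcContained(cmp, op2); +// )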
+// +// Arguments: +// node - pointer to the node +// +void Lowering::ContainCheckCompare(GenTreeOp* cmp) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +//------------------------------------------------------------------------ +// ContainCheckBoundsChk: determine whether any source of a bounds check node should be contained. +// +// Arguments: +// node - pointer to the node +// +void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#ifdef FEATURE_SIMD +//---------------------------------------------------------------------------------------------- +// ContainCheckSIMD: Perform containment analysis for a SIMD intrinsic node. +// +// Arguments: +// simdNode - The SIMD intrinsic node. +// +void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode) +{ + NYI_RISCV64("ContainCheckSIMD"); +} +#endif // FEATURE_SIMD + +#ifdef FEATURE_HW_INTRINSICS +//---------------------------------------------------------------------------------------------- +// ContainCheckHWIntrinsic: Perform containment analysis for a hardware intrinsic node. +// +// Arguments: +// node - The hardware intrinsic node. +// +void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) +{ + NYI_RISCV64("ContainCheckHWIntrinsic"); +} +#endif // FEATURE_HW_INTRINSICS + +#endif // TARGET_RISCV64 diff --git a/src/coreclr/jit/lsrariscv64.cpp b/src/coreclr/jit/lsrariscv64.cpp new file mode 100644 index 00000000000000..26e8e49c523757 --- /dev/null +++ b/src/coreclr/jit/lsrariscv64.cpp @@ -0,0 +1,183 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX Register Requirements for RISCV64 XX +XX XX +XX This encapsulates all the logic for setting register requirements for XX +XX the RISCV64 architecture. XX +XX XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ + +#include "jitpch.h" +#ifdef _MSC_VER +#pragma hdrstop +#endif + +#ifdef TARGET_RISCV64 + +#include "jit.h" +#include "sideeffects.h" +#include "lower.h" + +//------------------------------------------------------------------------ +// BuildNode: Build the RefPositions for a node +// +// Arguments: +// treeNode - the node of interest +// +// Return Value: +// The number of sources consumed by this node. +// +// Notes: +// Preconditions: +// LSRA Has been initialized. +// +// Postconditions: +// RefPositions have been built for all the register defs and uses required +// for this node. +// +int LinearScan::BuildNode(GenTree* tree) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +#ifdef FEATURE_SIMD +//------------------------------------------------------------------------ +// BuildSIMD: Set the NodeInfo for a GT_SIMD tree. +// +// Arguments: +// tree - The GT_SIMD node of interest +// +// Return Value: +// The number of sources consumed by this node. +// +int LinearScan::BuildSIMD(GenTreeSIMD* simdTree) +{ + NYI_RISCV64("-----unimplemented on RISCV64 yet----"); + return 0; +} +#endif // FEATURE_SIMD + +#ifdef FEATURE_HW_INTRINSICS +#include "hwintrinsic.h" +//------------------------------------------------------------------------ +// BuildHWIntrinsic: Set the NodeInfo for a GT_HWINTRINSIC tree. 
+// +// Arguments: +// tree - The GT_HWINTRINSIC node of interest +// +// Return Value: +// The number of sources consumed by this node. +// +int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree) +{ + NYI_RISCV64("-----unimplemented on RISCV64 yet----"); + return 0; +} +#endif + +//------------------------------------------------------------------------ +// BuildIndir: Specify register requirements for address expression +// of an indirection operation. +// +// Arguments: +// indirTree - GT_IND, GT_STOREIND or block gentree node +// +// Return Value: +// The number of sources consumed by this node. +// +int LinearScan::BuildIndir(GenTreeIndir* indirTree) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +//------------------------------------------------------------------------ +// BuildCall: Set the NodeInfo for a call. +// +// Arguments: +// call - The call node of interest +// +// Return Value: +// The number of sources consumed by this node. +// +int LinearScan::BuildCall(GenTreeCall* call) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +//------------------------------------------------------------------------ +// BuildPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node +// +// Arguments: +// argNode - a GT_PUTARG_STK node +// +// Return Value: +// The number of sources consumed by this node. +// +// Notes: +// Set the child node(s) to be contained when we have a multireg arg +// +int LinearScan::BuildPutArgStk(GenTreePutArgStk* argNode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +//------------------------------------------------------------------------ +// BuildPutArgSplit: Set the NodeInfo for a GT_PUTARG_SPLIT node +// +// Arguments: +// argNode - a GT_PUTARG_SPLIT node +// +// Return Value: +// The number of sources consumed by this node. +// +// Notes: +// Set the child node(s) to be contained +// +int LinearScan::BuildPutArgSplit(GenTreePutArgSplit* argNode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +//------------------------------------------------------------------------ +// BuildBlockStore: Build the RefPositions for a block store node. +// +// Arguments: +// blkNode - The block store node of interest +// +// Return Value: +// The number of sources consumed by this node. +// +int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +//------------------------------------------------------------------------ +// BuildCast: Set the NodeInfo for a GT_CAST. +// +// Arguments: +// cast - The GT_CAST node +// +// Return Value: +// The number of sources consumed by this node. 
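+// (Illustration, assumption: an int-to-double cast would typically build one use of the +// integer source and one floating-point def, e.g. +// +// int srcCount = BuildOperandUses(cast->CastOp()); +// BuildDef(cast); +// return srcCount; // 1 +// )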
+// +int LinearScan::BuildCast(GenTreeCast* cast) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +#endif // TARGET_RISCV64 diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index a34552aa1f1972..dff021911ff1e0 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -2348,6 +2348,11 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call passUsingFloatRegs = varTypeUsesFloatReg(argSigType); DWORD floatFieldFlags = STRUCT_NO_FLOAT_FIELD; +#elif defined(TARGET_RISCV64) + assert(!callIsVararg && !isHfaArg); + passUsingFloatRegs = varTypeUsesFloatReg(argSigType); + NYI_RISCV64("TODO RISCV64"); + #else #error Unsupported or unset target architecture #endif // TARGET* @@ -2395,7 +2400,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call } #endif // UNIX_AMD64_ABI -#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) if (isStructArg) { if (isHfaArg) diff --git a/src/coreclr/jit/register.h b/src/coreclr/jit/register.h index ca90673e85adfe..e8a5afeed08ede 100644 --- a/src/coreclr/jit/register.h +++ b/src/coreclr/jit/register.h @@ -126,6 +126,9 @@ REGDEF(STK, 32+XMMBASE, 0x0000, "STK" ) #elif defined(TARGET_LOONGARCH64) #include "registerloongarch64.h" +#elif defined(TARGET_RISCV64) + #include "registerriscv64.h" + #else #error Unsupported or unset target architecture #endif // target type diff --git a/src/coreclr/jit/registerriscv64.h b/src/coreclr/jit/registerriscv64.h new file mode 100644 index 00000000000000..fea2e3cf5e1a3a --- /dev/null +++ b/src/coreclr/jit/registerriscv64.h @@ -0,0 +1,106 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
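+ +// (Reviewer note, illustration: each includer supplies its own REGDEF expansion; target.h, +// for example, builds the regNumber enum with +// +// #define REGDEF(name, rnum, mask, sname) REG_##name = rnum, +// +// while the mask column feeds the RBM_* tables: 1ULL << rnum via RMASK for integer +// registers and 1ULL << (32 + n) via FMASK for FP registers.)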
+ +// clang-format off + +/*****************************************************************************/ +/*****************************************************************************/ +#ifndef REGDEF +#error Must define REGDEF macro before including this file +#endif +#ifndef REGALIAS +#define REGALIAS(alias, realname) +#endif + +#define RMASK(x) (1ULL << (x)) + +/* +REGDEF(name, rnum, mask, sname) */ +REGDEF(R0, 0, 0x0001, "zero") +REGDEF(RA, 1, 0x0002, "ra" ) +REGDEF(SP, 2, 0x0004, "sp" ) +REGDEF(GP, 3, 0x0008, "gp" ) +REGDEF(TP, 4, 0x0010, "tp" ) +REGDEF(T0, 5, 0x0020, "t0" ) +REGDEF(T1, 6, 0x0040, "t1" ) +REGDEF(T2, 7, 0x0080, "t2" ) +REGDEF(FP, 8, 0x0100, "fp" ) +REGDEF(S1, 9, 0x0200, "s1" ) +REGDEF(A0, 10, 0x0400, "a0" ) +REGDEF(A1, 11, 0x0800, "a1" ) +REGDEF(A2, 12, 0x1000, "a2" ) +REGDEF(A3, 13, 0x2000, "a3" ) +REGDEF(A4, 14, 0x4000, "a4" ) +REGDEF(A5, 15, 0x8000, "a5" ) +REGDEF(A6, 16, 0x10000, "a6" ) +REGDEF(A7, 17, 0x20000, "a7" ) +REGDEF(S2, 18, 0x40000, "s2" ) +REGDEF(S3, 19, 0x80000, "s3" ) +REGDEF(S4, 20, 0x100000, "s4" ) +REGDEF(S5, 21, 0x200000, "s5" ) +REGDEF(S6, 22, 0x400000, "s6" ) +REGDEF(S7, 23, 0x800000, "s7" ) +REGDEF(S8, 24, 0x1000000, "s8" ) +REGDEF(S9, 25, 0x2000000, "s9" ) +REGDEF(S10, 26, 0x4000000, "s10" ) +REGDEF(S11, 27, 0x8000000, "s11" ) +REGDEF(T3, 28, 0x10000000, "t3" ) +REGDEF(T4, 29, 0x20000000, "t4" ) +REGDEF(T5, 30, 0x40000000, "t5" ) +REGDEF(T6, 31, 0x80000000, "t6" ) + +REGALIAS(R8, FP) + +#define FBASE 32 +#define FMASK(x) (1ULL << (FBASE+(x))) + +/* +REGDEF(name, rnum, mask, sname) */ +REGDEF(F0, 0+FBASE, FMASK(0), "f0") +REGDEF(F1, 1+FBASE, FMASK(1), "f1") +REGDEF(F2, 2+FBASE, FMASK(2), "f2") +REGDEF(F3, 3+FBASE, FMASK(3), "f3") +REGDEF(F4, 4+FBASE, FMASK(4), "f4") +REGDEF(F5, 5+FBASE, FMASK(5), "f5") +REGDEF(F6, 6+FBASE, FMASK(6), "f6") +REGDEF(F7, 7+FBASE, FMASK(7), "f7") +REGDEF(F8, 8+FBASE, FMASK(8), "f8") +REGDEF(F9, 9+FBASE, FMASK(9), "f9") +REGDEF(F10, 10+FBASE, FMASK(10), "f10") +REGDEF(F11, 11+FBASE, FMASK(11), "f11") +REGDEF(F12, 12+FBASE, FMASK(12), "f12") +REGDEF(F13, 13+FBASE, FMASK(13), "f13") +REGDEF(F14, 14+FBASE, FMASK(14), "f14") +REGDEF(F15, 15+FBASE, FMASK(15), "f15") +REGDEF(F16, 16+FBASE, FMASK(16), "f16") +REGDEF(F17, 17+FBASE, FMASK(17), "f17") +REGDEF(F18, 18+FBASE, FMASK(18), "f18") +REGDEF(F19, 19+FBASE, FMASK(19), "f19") +REGDEF(F20, 20+FBASE, FMASK(20), "f20") +REGDEF(F21, 21+FBASE, FMASK(21), "f21") +REGDEF(F22, 22+FBASE, FMASK(22), "f22") +REGDEF(F23, 23+FBASE, FMASK(23), "f23") +REGDEF(F24, 24+FBASE, FMASK(24), "f24") +REGDEF(F25, 25+FBASE, FMASK(25), "f25") +REGDEF(F26, 26+FBASE, FMASK(26), "f26") +REGDEF(F27, 27+FBASE, FMASK(27), "f27") +REGDEF(F28, 28+FBASE, FMASK(28), "f28") +REGDEF(F29, 29+FBASE, FMASK(29), "f29") +REGDEF(F30, 30+FBASE, FMASK(30), "f30") +REGDEF(F31, 31+FBASE, FMASK(31), "f31") + +// The registers with values 64 (NBASE) and above are not real register numbers +#define NBASE 64 + +REGDEF(STK, 0+NBASE, 0x0000, "STK") + +/*****************************************************************************/ +#undef RMASK +#undef FMASK +#undef FBASE +#undef NBASE +#undef REGDEF +#undef REGALIAS +/*****************************************************************************/ + +// clang-format on diff --git a/src/coreclr/jit/target.h b/src/coreclr/jit/target.h index 8baf645453adf5..7225b29b00cc43 100644 --- a/src/coreclr/jit/target.h +++ b/src/coreclr/jit/target.h @@ -58,6 +58,8 @@ inline bool compUnixX86Abi() #define TARGET_READABLE_NAME "ARM64" #elif defined(TARGET_LOONGARCH64) #define
TARGET_READABLE_NAME "LOONGARCH64" +#elif defined(TARGET_RISCV64) +#define TARGET_READABLE_NAME "RISCV64" #else #error Unsupported or unset target architecture #endif @@ -85,6 +87,10 @@ inline bool compUnixX86Abi() #define REGMASK_BITS 64 #define CSE_CONST_SHARED_LOW_BITS 12 +#elif defined(TARGET_RISCV64) +#define REGMASK_BITS 64 +#define CSE_CONST_SHARED_LOW_BITS 12 + #else #error Unsupported or unset target architecture #endif @@ -100,7 +106,7 @@ inline bool compUnixX86Abi() // be assigned during register allocation. // REG_NA - Used to indicate that a register is either not yet assigned or not required. // -#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) enum _regNumber_enum : unsigned { #define REGDEF(name, rnum, mask, sname) REG_##name = rnum, @@ -208,7 +214,7 @@ enum _regMask_enum : unsigned // In any case, we believe that is OK to freely cast between these types; no information will // be lost. -#if defined(TARGET_AMD64) || defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) typedef unsigned __int64 regMaskTP; #else typedef unsigned regMaskTP; @@ -262,6 +268,8 @@ typedef unsigned char regNumberSmall; #include "targetarm64.h" #elif defined(TARGET_LOONGARCH64) #include "targetloongarch64.h" +#elif defined(TARGET_RISCV64) +#include "targetriscv64.h" #else #error Unsupported or unset target architecture #endif @@ -559,7 +567,7 @@ inline regMaskTP genRegMask(regNumber reg) inline regMaskTP genRegMaskFloat(regNumber reg, var_types type /* = TYP_DOUBLE */) { -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_X86) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) assert(genIsValidFloatReg(reg)); assert((unsigned)reg < ArrLen(regMasks)); return regMasks[reg]; diff --git a/src/coreclr/jit/targetriscv64.cpp b/src/coreclr/jit/targetriscv64.cpp new file mode 100644 index 00000000000000..c2fc1b1af23cdf --- /dev/null +++ b/src/coreclr/jit/targetriscv64.cpp @@ -0,0 +1,27 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +/*****************************************************************************/ + +#include "jitpch.h" +#ifdef _MSC_VER +#pragma hdrstop +#endif + +#if defined(TARGET_RISCV64) + +#include "target.h" + +const char* Target::g_tgtCPUName = "riscv64"; +const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; +const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L; + +// clang-format off +const regNumber intArgRegs [] = {REG_A0, REG_A1, REG_A2, REG_A3, REG_A4, REG_A5, REG_A6, REG_A7}; +const regMaskTP intArgMasks[] = {RBM_A0, RBM_A1, RBM_A2, RBM_A3, RBM_A4, RBM_A5, RBM_A6, RBM_A7}; + +const regNumber fltArgRegs [] = {REG_F0, REG_F1, REG_F2, REG_F3, REG_F4, REG_F5, REG_F6, REG_F7 }; +const regMaskTP fltArgMasks[] = {RBM_F0, RBM_F1, RBM_F2, RBM_F3, RBM_F4, RBM_F5, RBM_F6, RBM_F7 }; +// clang-format on + +#endif // TARGET_RISCV64 diff --git a/src/coreclr/jit/targetriscv64.h b/src/coreclr/jit/targetriscv64.h new file mode 100644 index 00000000000000..e16ec574cede67 --- /dev/null +++ b/src/coreclr/jit/targetriscv64.h @@ -0,0 +1,306 @@ +// Licensed to the .NET Foundation under one or more agreements. 
+// The .NET Foundation licenses this file to you under the MIT license. +#pragma once + +#if !defined(TARGET_RISCV64) +#error The file should not be included for this platform. +#endif + +// clang-format off + #define CPU_LOAD_STORE_ARCH 1 + #define CPU_HAS_FP_SUPPORT 1 + #define ROUND_FLOAT 0 // Do not round intermediate float expression results + #define CPU_HAS_BYTE_REGS 0 + + #define CPBLK_UNROLL_LIMIT 64 // Upper bound to let the code generator loop unroll CpBlk + #define INITBLK_UNROLL_LIMIT 64 // Upper bound to let the code generator loop unroll InitBlk + +#ifdef FEATURE_SIMD +#pragma error("SIMD Unimplemented yet RISCV64") +#endif // FEATURE_SIMD + + #define FEATURE_FIXED_OUT_ARGS 1 // Preallocate the outgoing arg area in the prolog + #define FEATURE_STRUCTPROMOTE 1 // JIT Optimization to promote fields of structs into registers + #define FEATURE_MULTIREG_STRUCT_PROMOTE 1 // True when we want to promote fields of a multireg struct into registers + #define FEATURE_FASTTAILCALL 1 // Tail calls made as epilog+jmp + #define FEATURE_TAILCALL_OPT 1 // opportunistic Tail calls (i.e. without ".tail" prefix) made as fast tail calls. + #define FEATURE_SET_FLAGS 0 // Set to true to force the JIT to mark the trees with GTF_SET_FLAGS when the flags need to be set + #define FEATURE_IMPLICIT_BYREFS 1 // Support for struct parameters passed via pointers to shadow copies + #define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register + #define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register + #define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register + #define FEATURE_STRUCT_CLASSIFIER 0 // Uses a classifier function to determine if structs are passed/returned in more than one register + #define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (8-byte vector). + #define MAX_PASS_MULTIREG_BYTES 16 // Maximum size of a struct that could be passed in more than one register + #define MAX_RET_MULTIREG_BYTES 16 // Maximum size of a struct that could be returned in more than one register (Max is an HFA or 2 doubles) + #define MAX_ARG_REG_COUNT 2 // Maximum registers used to pass a single argument in multiple registers. + #define MAX_RET_REG_COUNT 2 // Maximum registers used to return a value. + #define MAX_MULTIREG_COUNT 2 // Maximum number of registers defined by a single instruction (including calls). + // This is also the maximum number of registers for a MultiReg node. + + #define NOGC_WRITE_BARRIERS 1 // We have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers + #define USER_ARGS_COME_LAST 1 + #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really + // need to track stack depth, but this is currently necessary to get GC information reported at call sites. + #define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target + #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. + #define FEATURE_EH_CALLFINALLY_THUNKS 1 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses.
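+ + // (Reviewer note: MAX_PASS_MULTIREG_BYTES / MAX_ARG_REG_COUNT above encode the RISC-V + // LP64 psABI rule that an aggregate of up to 2*XLEN = 16 bytes may be passed in at most + // two registers; larger aggregates are passed by reference, which is also why + // FEATURE_IMPLICIT_BYREFS is set.)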
+ #define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods + #define CSE_CONSTS 1 // Enable if we want to CSE constants + + #define REG_FP_FIRST REG_F0 + #define REG_FP_LAST REG_F31 + #define FIRST_FP_ARGREG REG_F10 + #define LAST_FP_ARGREG REG_F17 + + #define REGNUM_BITS 6 // number of bits in a REG_* + #define REGSIZE_BYTES 8 // number of bytes in one general purpose register + #define FP_REGSIZE_BYTES 8 // number of bytes in one FP/SIMD register + #define FPSAVE_REGSIZE_BYTES 8 // number of bytes in one FP/SIMD register that are saved/restored, for callee-saved registers + + #define MIN_ARG_AREA_FOR_CALL 0 // Minimum required outgoing argument space for a call. + + #define CODE_ALIGN 4 // code alignment requirement + #define STACK_ALIGN 16 // stack alignment requirement + + #define RBM_INT_CALLEE_SAVED (RBM_S1|RBM_S2|RBM_S3|RBM_S4|RBM_S5|RBM_S6|RBM_S7|RBM_S8|RBM_S9|RBM_S10|RBM_S11) + #define RBM_INT_CALLEE_TRASH (RBM_A0|RBM_A1|RBM_A2|RBM_A3|RBM_A4|RBM_A5|RBM_A6|RBM_A7|RBM_T0|RBM_T1|RBM_T2|RBM_T3|RBM_T4|RBM_T5|RBM_T6) + #define RBM_FLT_CALLEE_SAVED (RBM_F8|RBM_F9|RBM_F18|RBM_F19|RBM_F20|RBM_F21|RBM_F22|RBM_F23|RBM_F24|RBM_F25|RBM_F26|RBM_F27) + #define RBM_FLT_CALLEE_TRASH (RBM_F0|RBM_F1|RBM_F2|RBM_F3|RBM_F4|RBM_F5|RBM_F6|RBM_F7|RBM_F10|RBM_F11|RBM_F12|RBM_F13|RBM_F14|RBM_F15|RBM_F16|RBM_F17|RBM_F28|RBM_F29|RBM_F30|RBM_F31) + + #define RBM_CALLEE_SAVED (RBM_INT_CALLEE_SAVED | RBM_FLT_CALLEE_SAVED) + #define RBM_CALLEE_TRASH (RBM_INT_CALLEE_TRASH | RBM_FLT_CALLEE_TRASH) + + #define REG_DEFAULT_HELPER_CALL_TARGET REG_T2 + #define RBM_DEFAULT_HELPER_CALL_TARGET RBM_T2 + + #define RBM_ALLINT (RBM_INT_CALLEE_SAVED | RBM_INT_CALLEE_TRASH) + #define RBM_ALLFLOAT (RBM_FLT_CALLEE_SAVED | RBM_FLT_CALLEE_TRASH) + #define RBM_ALLDOUBLE RBM_ALLFLOAT + + // REG_VAR_ORDER is: (CALLEE_TRASH & ~CALLEE_TRASH_NOGC), CALLEE_TRASH_NOGC, CALLEE_SAVED + #define REG_VAR_ORDER REG_A0,REG_A1,REG_A2,REG_A3,REG_A4,REG_A5,REG_A6,REG_A7, \ + REG_T0,REG_T1,REG_T2,REG_T3,REG_T4,REG_T5,REG_T6, \ + REG_CALLEE_SAVED_ORDER + + #define REG_VAR_ORDER_FLT REG_F12,REG_F13,REG_F14,REG_F15,REG_F16,REG_F17,REG_F18,REG_F19, \ + REG_F2,REG_F3,REG_F4,REG_F5,REG_F6,REG_F7,REG_F8,REG_F9,REG_F10,REG_F11, \ + REG_F20,REG_F21,REG_F22,REG_F23, \ + REG_F24,REG_F25,REG_F26,REG_F27,REG_F28,REG_F29,REG_F30,REG_F31, \ + REG_F1,REG_F0 + + #define REG_CALLEE_SAVED_ORDER REG_S1,REG_S2,REG_S3,REG_S4,REG_S5,REG_S6,REG_S7,REG_S8,REG_S9,REG_S10,REG_S11 + #define RBM_CALLEE_SAVED_ORDER RBM_S1,RBM_S2,RBM_S3,RBM_S4,RBM_S5,RBM_S6,RBM_S7,RBM_S8,RBM_S9,RBM_S10,RBM_S11 + + #define CNT_CALLEE_SAVED (11) + #define CNT_CALLEE_TRASH (15) + #define CNT_CALLEE_ENREG (CNT_CALLEE_SAVED-1) + + #define CNT_CALLEE_SAVED_FLOAT (12) + #define CNT_CALLEE_TRASH_FLOAT (20) + + #define CALLEE_SAVED_REG_MAXSZ (CNT_CALLEE_SAVED * REGSIZE_BYTES) + #define CALLEE_SAVED_FLOAT_MAXSZ (CNT_CALLEE_SAVED_FLOAT * FPSAVE_REGSIZE_BYTES) + + #define REG_TMP_0 REG_T0 + + // Temporary registers used for the GS cookie check. + #define REG_GSCOOKIE_TMP_0 REG_T0 + #define REG_GSCOOKIE_TMP_1 REG_T1 + + // register to hold shift amount; no special register is required on RISCV64.
+ #define REG_SHIFT REG_NA + #define RBM_SHIFT RBM_ALLINT + + // This is a general scratch register that does not conflict with the argument registers + #define REG_SCRATCH REG_T0 + + // This is a float scratch register that does not conflict with the argument registers + #define REG_SCRATCH_FLT REG_F28 + + // This is a general register that can be optionally reserved for other purposes during codegen + #define REG_OPT_RSVD REG_T1 + #define RBM_OPT_RSVD RBM_T1 + + // Where is the exception object on entry to the handler block? + #define REG_EXCEPTION_OBJECT REG_A0 + #define RBM_EXCEPTION_OBJECT RBM_A0 + + #define REG_JUMP_THUNK_PARAM REG_T2 + #define RBM_JUMP_THUNK_PARAM RBM_T2 + + #define REG_WRITE_BARRIER_DST REG_T3 + #define RBM_WRITE_BARRIER_DST RBM_T3 + + #define REG_WRITE_BARRIER_SRC REG_T4 + #define RBM_WRITE_BARRIER_SRC RBM_T4 + + #define REG_WRITE_BARRIER_DST_BYREF REG_T5 + #define RBM_WRITE_BARRIER_DST_BYREF RBM_T5 + + #define REG_WRITE_BARRIER_SRC_BYREF REG_T6 + #define RBM_WRITE_BARRIER_SRC_BYREF RBM_T6 + + #define RBM_CALLEE_TRASH_NOGC (RBM_T0|RBM_T1|RBM_T2|RBM_T3|RBM_T4|RBM_T5|RBM_T6|RBM_DEFAULT_HELPER_CALL_TARGET) + + // Registers killed by CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF. + #define RBM_CALLEE_TRASH_WRITEBARRIER (RBM_WRITE_BARRIER_DST|RBM_CALLEE_TRASH_NOGC) + + // Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF. + #define RBM_CALLEE_GCTRASH_WRITEBARRIER RBM_CALLEE_TRASH_NOGC + + // Registers killed by CORINFO_HELP_ASSIGN_BYREF. + #define RBM_CALLEE_TRASH_WRITEBARRIER_BYREF (RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF | RBM_CALLEE_TRASH_NOGC) + + // Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_BYREF. + // Note that t5 and t6 are still valid byref pointers after this helper call, despite their value being changed. + #define RBM_CALLEE_GCTRASH_WRITEBARRIER_BYREF (RBM_CALLEE_TRASH_NOGC & ~(RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF)) + + // GenericPInvokeCalliHelper VASigCookie Parameter + #define REG_PINVOKE_COOKIE_PARAM REG_T0 + #define RBM_PINVOKE_COOKIE_PARAM RBM_T0 + + // GenericPInvokeCalliHelper unmanaged target Parameter + #define REG_PINVOKE_TARGET_PARAM REG_T2 + #define RBM_PINVOKE_TARGET_PARAM RBM_T2 + + // IL stub's secret MethodDesc parameter (JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM) + #define REG_SECRET_STUB_PARAM REG_T2 + #define RBM_SECRET_STUB_PARAM RBM_T2 + + // R2R indirect call.
Use the same registers as VSD + #define REG_R2R_INDIRECT_PARAM REG_T6 + #define RBM_R2R_INDIRECT_PARAM RBM_T6 + + // JMP Indirect call register + #define REG_INDIRECT_CALL_TARGET_REG REG_T5 + + // Registers used by PInvoke frame setup + #define REG_PINVOKE_FRAME REG_T0 + #define RBM_PINVOKE_FRAME RBM_T0 + #define REG_PINVOKE_TCB REG_T1 + #define RBM_PINVOKE_TCB RBM_T1 + #define REG_PINVOKE_SCRATCH REG_T1 + #define RBM_PINVOKE_SCRATCH RBM_T1 + + // The following defines are useful for iterating a regNumber + #define REG_FIRST REG_R0 + #define REG_INT_FIRST REG_R0 + #define REG_INT_LAST REG_T6 + #define REG_INT_COUNT (REG_INT_LAST - REG_INT_FIRST + 1) + #define REG_NEXT(reg) ((regNumber)((unsigned)(reg) + 1)) + #define REG_PREV(reg) ((regNumber)((unsigned)(reg) - 1)) + + // The following registers are used in emitting Enter/Leave/Tailcall profiler callbacks + #define REG_PROFILER_ENTER_ARG_FUNC_ID REG_R10 + #define RBM_PROFILER_ENTER_ARG_FUNC_ID RBM_R10 + #define REG_PROFILER_ENTER_ARG_CALLER_SP REG_R11 + #define RBM_PROFILER_ENTER_ARG_CALLER_SP RBM_R11 + #define REG_PROFILER_LEAVE_ARG_FUNC_ID REG_R10 + #define RBM_PROFILER_LEAVE_ARG_FUNC_ID RBM_R10 + #define REG_PROFILER_LEAVE_ARG_CALLER_SP REG_R11 + #define RBM_PROFILER_LEAVE_ARG_CALLER_SP RBM_R11 + + // The registers trashed by profiler enter/leave/tailcall hook + #define RBM_PROFILER_ENTER_TRASH (RBM_CALLEE_TRASH & ~(RBM_ARG_REGS|RBM_FLTARG_REGS|RBM_FP)) + #define RBM_PROFILER_LEAVE_TRASH (RBM_CALLEE_TRASH & ~(RBM_ARG_REGS|RBM_FLTARG_REGS|RBM_FP)) + #define RBM_PROFILER_TAILCALL_TRASH RBM_PROFILER_LEAVE_TRASH + + // Which register are int and long values returned in ? + #define REG_INTRET REG_A0 + #define RBM_INTRET RBM_A0 + #define REG_LNGRET REG_A0 + #define RBM_LNGRET RBM_A0 + // second return register for 16-byte structs + #define REG_INTRET_1 REG_A1 + #define RBM_INTRET_1 RBM_A1 + + #define REG_FLOATRET REG_F10 + #define RBM_FLOATRET RBM_F10 + #define RBM_DOUBLERET RBM_F10 + #define REG_FLOATRET_1 REG_F11 + #define RBM_FLOATRET_1 RBM_F11 + #define RBM_DOUBLERET_1 RBM_F11 + + // The registers trashed by the CORINFO_HELP_STOP_FOR_GC helper + #define RBM_STOP_FOR_GC_TRASH RBM_CALLEE_TRASH + + // The registers trashed by the CORINFO_HELP_INIT_PINVOKE_FRAME helper. 
+ #define RBM_INIT_PINVOKE_FRAME_TRASH RBM_CALLEE_TRASH + + #define RBM_VALIDATE_INDIRECT_CALL_TRASH (RBM_INT_CALLEE_TRASH & ~(RBM_A0 | RBM_A1 | RBM_A2 | RBM_A3 | RBM_A4 | RBM_A5 | RBM_A6 | RBM_A7 | RBM_T3)) + #define REG_VALIDATE_INDIRECT_CALL_ADDR REG_T3 + #define REG_DISPATCH_INDIRECT_CALL_ADDR REG_T0 + + #define REG_FPBASE REG_FP + #define RBM_FPBASE RBM_FP + #define STR_FPBASE "fp" + #define REG_SPBASE REG_SP + #define RBM_SPBASE RBM_SP // on RISC-V, SP has its own encoding; no mask reuse with a zero register as on ARM64 + #define STR_SPBASE "sp" + + #define FIRST_ARG_STACK_OFFS (2*REGSIZE_BYTES) // Caller's saved FP and return address + + #define MAX_REG_ARG 8 + #define MAX_FLOAT_REG_ARG 8 + + #define REG_ARG_FIRST REG_A0 + #define REG_ARG_LAST REG_A7 + #define REG_ARG_FP_FIRST REG_F10 + #define REG_ARG_FP_LAST REG_F17 + #define INIT_ARG_STACK_SLOT 0 // No outgoing reserved stack slots + + #define REG_ARG_0 REG_A0 + #define REG_ARG_1 REG_A1 + #define REG_ARG_2 REG_A2 + #define REG_ARG_3 REG_A3 + #define REG_ARG_4 REG_A4 + #define REG_ARG_5 REG_A5 + #define REG_ARG_6 REG_A6 + #define REG_ARG_7 REG_A7 + + extern const regNumber intArgRegs [MAX_REG_ARG]; + extern const regMaskTP intArgMasks[MAX_REG_ARG]; + + #define RBM_ARG_0 RBM_A0 + #define RBM_ARG_1 RBM_A1 + #define RBM_ARG_2 RBM_A2 + #define RBM_ARG_3 RBM_A3 + #define RBM_ARG_4 RBM_A4 + #define RBM_ARG_5 RBM_A5 + #define RBM_ARG_6 RBM_A6 + #define RBM_ARG_7 RBM_A7 + + #define REG_FLTARG_0 REG_F10 + #define REG_FLTARG_1 REG_F11 + #define REG_FLTARG_2 REG_F12 + #define REG_FLTARG_3 REG_F13 + #define REG_FLTARG_4 REG_F14 + #define REG_FLTARG_5 REG_F15 + #define REG_FLTARG_6 REG_F16 + #define REG_FLTARG_7 REG_F17 + + #define RBM_FLTARG_0 RBM_F10 + #define RBM_FLTARG_1 RBM_F11 + #define RBM_FLTARG_2 RBM_F12 + #define RBM_FLTARG_3 RBM_F13 + #define RBM_FLTARG_4 RBM_F14 + #define RBM_FLTARG_5 RBM_F15 + #define RBM_FLTARG_6 RBM_F16 + #define RBM_FLTARG_7 RBM_F17 + + #define RBM_ARG_REGS (RBM_ARG_0|RBM_ARG_1|RBM_ARG_2|RBM_ARG_3|RBM_ARG_4|RBM_ARG_5|RBM_ARG_6|RBM_ARG_7) + #define RBM_FLTARG_REGS (RBM_FLTARG_0|RBM_FLTARG_1|RBM_FLTARG_2|RBM_FLTARG_3|RBM_FLTARG_4|RBM_FLTARG_5|RBM_FLTARG_6|RBM_FLTARG_7) + + extern const regNumber fltArgRegs [MAX_FLOAT_REG_ARG]; + extern const regMaskTP fltArgMasks[MAX_FLOAT_REG_ARG]; + + #define B_DIST_SMALL_MAX_NEG (-4096) // RISC-V conditional branches (B-type) reach +/-4KiB + #define B_DIST_SMALL_MAX_POS (+4095) + + #define OFFSET_DIST_SMALL_MAX_NEG (-2048) + #define OFFSET_DIST_SMALL_MAX_POS (+2047) + + #define STACK_PROBE_BOUNDARY_THRESHOLD_BYTES 0 + +// clang-format on diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index 12114916f5362c..27047d50a19bee 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -465,6 +465,10 @@ UNATIVE_OFFSET Compiler::unwindGetCurrentOffset(FuncInfoDsc* func) // See unwindLoongarch64.cpp +#elif defined(TARGET_RISCV64) + +// See unwindRiscv64.cpp + #else // TARGET* #error Unsupported or unset target architecture diff --git a/src/coreclr/jit/unwind.h b/src/coreclr/jit/unwind.h index 46485e0eb11c6a..badfe369d191f4 100644 --- a/src/coreclr/jit/unwind.h +++ b/src/coreclr/jit/unwind.h @@ -10,7 +10,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // Windows no longer imposes a maximum prolog size.
However, we still have an // assert here just to inform us if we increase the size of the prolog @@ -42,7 +42,16 @@ const unsigned MAX_EPILOG_SIZE_BYTES = 200; #define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) #define UW_MAX_CODE_WORDS_COUNT 31 #define UW_MAX_EPILOG_START_INDEX 0x3FFU -#endif // TARGET_LOONGARCH64 +#elif defined(TARGET_RISCV64) // TODO RISCV64 +const unsigned MAX_PROLOG_SIZE_BYTES = 200; +const unsigned MAX_EPILOG_SIZE_BYTES = 200; +#define UWC_END 0xE4 // "end" unwind code +#define UWC_END_C 0xE5 // "end_c" unwind code +#define UW_MAX_FRAGMENT_SIZE_BYTES (1U << 20) +#define UW_MAX_CODE_WORDS_COUNT 31 +#define UW_MAX_EPILOG_START_INDEX 0x3FFU + +#endif // TARGET_RISCV64 #define UW_MAX_EPILOG_COUNT 31 // Max number that can be encoded in the "Epilog count" field // of the .pdata record @@ -111,9 +120,9 @@ class UnwindCodesBase { #if defined(TARGET_ARM) return b >= 0xFD; -#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) return (b == UWC_END); // TODO-ARM64-Bug?: what about the "end_c" code? -#endif // TARGET_ARM64 || TARGET_LOONGARCH64 +#endif // TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 } #ifdef DEBUG @@ -795,7 +804,7 @@ class UnwindInfo : public UnwindBase // Given the first byte of the unwind code, check that its opsize matches // the last instruction added in the emitter. void CheckOpsize(BYTE b1); -#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) void CheckOpsize(BYTE b1) { } // nothing to do; all instructions are 4 bytes @@ -846,4 +855,4 @@ void DumpUnwindInfo(Compiler* comp, #endif // DEBUG -#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 +#endif // TARGET_ARMARCH || TARGET_LOONGARCH64 || TARGET_RISCV64 diff --git a/src/coreclr/jit/unwindriscv64.cpp b/src/coreclr/jit/unwindriscv64.cpp new file mode 100644 index 00000000000000..d8f4e15c9f0772 --- /dev/null +++ b/src/coreclr/jit/unwindriscv64.cpp @@ -0,0 +1,515 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX UnwindInfo XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ + +#include "jitpch.h" +#ifdef _MSC_VER +#pragma hdrstop +#endif + +#if defined(TARGET_RISCV64) + +#if defined(FEATURE_CFI_SUPPORT) +short Compiler::mapRegNumToDwarfReg(regNumber reg) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} +#endif // FEATURE_CFI_SUPPORT + +void Compiler::unwindPush(regNumber reg) +{ + unreached(); // use one of the unwindSaveReg* functions instead. 
+} + +void Compiler::unwindAllocStack(unsigned size) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindSaveReg(regNumber reg, unsigned offset) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindNop() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindSaveReg(regNumber reg, int offset) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset) +{ + assert(!"unused on RISCV64 yet"); +} + +void Compiler::unwindReturn(regNumber reg) +{ + // Nothing to do; we will always have at least one trailing "end" opcode in our padding. +} + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX Unwind Info Debug helpers XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ + +#ifdef DEBUG + +// Return the size of the unwind code (from 1 to 4 bytes), given the first byte of the unwind bytes + +unsigned GetUnwindSizeFromUnwindHeader(BYTE b1) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +#endif // DEBUG + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX Unwind Info Support Classes XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ + +/////////////////////////////////////////////////////////////////////////////// +// +// UnwindCodesBase +// +/////////////////////////////////////////////////////////////////////////////// + +#ifdef DEBUG + +// Walk the prolog codes and calculate the size of the prolog or epilog, in bytes. +unsigned UnwindCodesBase::GetCodeSizeFromUnwindCodes(bool isProlog) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +#endif // DEBUG + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX Debug dumpers XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ + +#ifdef DEBUG + +// start is 0-based index from LSB, length is number of bits +DWORD ExtractBits(DWORD dw, DWORD start, DWORD length) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return 0; +} + +// Dump the unwind data. 
+// Arguments: +// isHotCode: true if this unwind data is for the hot section +// startOffset: byte offset of the code start that this unwind data represents +// endOffset: byte offset of the code end that this unwind data represents +// pHeader: pointer to the unwind data blob +// unwindBlockSize: size in bytes of the unwind data blob + +void DumpUnwindInfo(Compiler* comp, + bool isHotCode, + UNATIVE_OFFSET startOffset, + UNATIVE_OFFSET endOffset, + const BYTE* const pHeader, + ULONG unwindBlockSize) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#endif // DEBUG + +/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XX XX +XX Unwind APIs XX +XX XX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +*/ + +void Compiler::unwindBegProlog() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindEndProlog() +{ + assert(compGeneratingProlog); +} + +void Compiler::unwindBegEpilog() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindEndEpilog() +{ + assert(compGeneratingEpilog); +} + +// The instructions between the last captured "current state" and the current instruction +// are in the prolog but have no effect for unwinding. Emit the appropriate NOP unwind codes +// for them. +void Compiler::unwindPadding() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Ask the VM to reserve space for the unwind information for the function and +// all its funclets. +void Compiler::unwindReserve() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindReserveFunc(FuncInfoDsc* func) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// unwindEmit: Report all the unwind information to the VM. +// Arguments: +// pHotCode: Pointer to the beginning of the memory with the function and funclet hot code +// pColdCode: Pointer to the beginning of the memory with the function and funclet cold code. + +void Compiler::unwindEmit(void* pHotCode, void* pColdCode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +/////////////////////////////////////////////////////////////////////////////// +// +// UnwindPrologCodes +// +/////////////////////////////////////////////////////////////////////////////// + +// We're going to use the prolog codes memory to store the final unwind data. +// Ensure we have enough memory to store everything. If 'epilogBytes' > 0, then +// move the prolog codes so there are 'epilogBytes' bytes after the prolog codes. +// Set the header pointer for future use, adding the header bytes (this pointer +// is updated when a header byte is added), and remember the index that points +// to the beginning of the header. + +void UnwindPrologCodes::SetFinalSize(int headerBytes, int epilogBytes) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Add a header word. Header words are added starting at the beginning, in order: first to last. +// This is in contrast to the prolog unwind codes, which are added in reverse order. 
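+// (Illustration, assuming the ARM64-style .xdata layout this unwinder mirrors: the first +// header word packs the Function Length, flag bits, epilog count and code-words fields, so +// AddHeaderWord is invoked once per 32-bit header word, first to last.)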
+void UnwindPrologCodes::AddHeaderWord(DWORD d) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// AppendEpilog: copy the epilog bytes to the next epilog bytes slot +void UnwindPrologCodes::AppendEpilog(UnwindEpilogInfo* pEpi) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// GetFinalInfo: return a pointer to the final unwind info to hand to the VM, and the size of this info in bytes +void UnwindPrologCodes::GetFinalInfo(/* OUT */ BYTE** ppUnwindBlock, /* OUT */ ULONG* pUnwindBlockSize) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +int UnwindPrologCodes::Match(UnwindEpilogInfo* pEpi) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return -1; +} + +// Copy the prolog codes from another prolog. The only time this is legal is +// if we are at the initial state and no prolog codes have been added. +// This is used to create the 'phantom' prolog for non-first fragments. + +void UnwindPrologCodes::CopyFrom(UnwindPrologCodes* pCopyFrom) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindPrologCodes::EnsureSize(int requiredSize) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#ifdef DEBUG +void UnwindPrologCodes::Dump(int indent) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} +#endif // DEBUG + +/////////////////////////////////////////////////////////////////////////////// +// +// UnwindEpilogCodes +// +/////////////////////////////////////////////////////////////////////////////// + +void UnwindEpilogCodes::EnsureSize(int requiredSize) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#ifdef DEBUG +void UnwindEpilogCodes::Dump(int indent) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} +#endif // DEBUG + +/////////////////////////////////////////////////////////////////////////////// +// +// UnwindEpilogInfo +// +/////////////////////////////////////////////////////////////////////////////// + +// Do the current unwind codes match those of the argument epilog? +// If they don't match, return -1. If they do, return the offset into +// our codes at which the argument codes match. Note that this means that +// the argument codes can match a subset of our codes. The subset needs to be at +// the end, for the "end" code to match. +// +// Note that if we wanted to handle 0xFD and 0xFE codes, by converting +// an existing 0xFF code to one of those, we might do that here. + +int UnwindEpilogInfo::Match(UnwindEpilogInfo* pEpi) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return -1; +} + +void UnwindEpilogInfo::CaptureEmitLocation() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindEpilogInfo::FinalizeOffset() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#ifdef DEBUG +void UnwindEpilogInfo::Dump(int indent) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} +#endif // DEBUG + +/////////////////////////////////////////////////////////////////////////////// +// +// UnwindFragmentInfo +// +/////////////////////////////////////////////////////////////////////////////// + +UnwindFragmentInfo::UnwindFragmentInfo(Compiler* comp, emitLocation* emitLoc, bool hasPhantomProlog) + : UnwindBase(comp) + , ufiNext(NULL) + , ufiEmitLoc(emitLoc) + , ufiHasPhantomProlog(hasPhantomProlog) + , ufiPrologCodes(comp) + , ufiEpilogFirst(comp) + , ufiEpilogList(NULL) + , ufiEpilogLast(NULL) + , ufiCurCodes(&ufiPrologCodes) + , ufiSize(0) + , ufiStartOffset(UFI_ILLEGAL_OFFSET) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindFragmentInfo::FinalizeOffset() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindFragmentInfo::AddEpilog() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Copy the prolog codes from the 'pCopyFrom' fragment. 
These prolog codes will +// become 'phantom' prolog codes in this fragment. Note that this fragment should +// not have any prolog codes currently; it is at the initial state. + +void UnwindFragmentInfo::CopyPrologCodes(UnwindFragmentInfo* pCopyFrom) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Split the epilog codes that currently exist in 'pSplitFrom'. The ones that represent +// epilogs that start at or after the location represented by 'emitLoc' are removed +// from 'pSplitFrom' and moved to this fragment. Note that this fragment should not have +// any epilog codes currently; it is at the initial state. + +void UnwindFragmentInfo::SplitEpilogCodes(emitLocation* emitLoc, UnwindFragmentInfo* pSplitFrom) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Is this epilog at the end of an unwind fragment? Ask the emitter. +// Note that we need to know this before all code offsets are finalized, +// so we can determine whether we can omit an epilog scope word for a +// single matching epilog. + +bool UnwindFragmentInfo::IsAtFragmentEnd(UnwindEpilogInfo* pEpi) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + return false; +} + +// Merge the unwind codes as much as possible. +// This function is called before all offsets are final. +// Also, compute the size of the final unwind block. Store this +// and some other data for later, when we actually emit the +// unwind block. + +void UnwindFragmentInfo::MergeCodes() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Finalize: Prepare the unwind information for the VM. Compute and prepend the unwind header. + +void UnwindFragmentInfo::Finalize(UNATIVE_OFFSET functionLength) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindFragmentInfo::Reserve(bool isFunclet, bool isHotCode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Allocate the unwind info for a fragment with the VM. +// Arguments: +// funKind: funclet kind +// pHotCode: hot section code buffer +// pColdCode: cold section code buffer +// funcEndOffset: offset of the end of this function/funclet. Used if this fragment is the last one for a +// function/funclet. +// isHotCode: are we allocating the unwind info for the hot code section? + +void UnwindFragmentInfo::Allocate( + CorJitFuncKind funKind, void* pHotCode, void* pColdCode, UNATIVE_OFFSET funcEndOffset, bool isHotCode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#ifdef DEBUG +void UnwindFragmentInfo::Dump(int indent) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} +#endif // DEBUG + +/////////////////////////////////////////////////////////////////////////////// +// +// UnwindInfo +// +/////////////////////////////////////////////////////////////////////////////// + +void UnwindInfo::InitUnwindInfo(Compiler* comp, emitLocation* startLoc, emitLocation* endLoc) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Split the unwind codes in 'puwi' into those that are in the hot section (leave them in 'puwi') +// and those that are in the cold section (move them to 'this'). There is exactly one fragment +// in each UnwindInfo; the fragments haven't been split for size, yet. + +void UnwindInfo::HotColdSplitCodes(UnwindInfo* puwi) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Split the function or funclet into fragments that are no larger than 512K, +// so the fragment size will fit in the unwind data "Function Length" field. 
+ +void UnwindInfo::Split() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +/*static*/ void UnwindInfo::EmitSplitCallback(void* context, emitLocation* emitLoc) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Reserve space for the unwind info for all fragments + +void UnwindInfo::Reserve(bool isFunclet, bool isHotCode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +// Allocate and populate VM unwind info for all fragments + +void UnwindInfo::Allocate(CorJitFuncKind funKind, void* pHotCode, void* pColdCode, bool isHotCode) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindInfo::AddEpilog() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindInfo::CaptureLocation() +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +void UnwindInfo::AddFragment(emitLocation* emitLoc) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#ifdef DEBUG + +void UnwindInfo::Dump(bool isHotCode, int indent) +{ + _ASSERTE(!"TODO RISCV64 NYI"); +} + +#endif // DEBUG + +#endif // TARGET_RISCV64 diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index c43bb47a36f8d4..c7c4fcfe969590 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -55,7 +55,7 @@ struct FloatTraits { #if defined(TARGET_XARCH) unsigned bits = 0xFFC00000u; -#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) unsigned bits = 0x7FC00000u; #else #error Unsupported or unset target architecture @@ -81,7 +81,7 @@ struct DoubleTraits { #if defined(TARGET_XARCH) unsigned long long bits = 0xFFF8000000000000ull; -#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) unsigned long long bits = 0x7FF8000000000000ull; #else #error Unsupported or unset target architecture diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h index 8dcb66c5f5669b..6146a5d070d183 100644 --- a/src/coreclr/jit/valuenumfuncs.h +++ b/src/coreclr/jit/valuenumfuncs.h @@ -186,6 +186,9 @@ ValueNumFuncDef(HWI_##isa##_##name, argCount, false, false, false) // All of t #elif defined (TARGET_LOONGARCH64) //TODO-LOONGARCH64-CQ: add LoongArch64's Hardware Intrinsics Instructions if supported. +#elif defined (TARGET_RISCV64) + // TODO RISCV64 + #else #error Unsupported platform #endif diff --git a/src/coreclr/nativeaot/Runtime/threadstore.inl b/src/coreclr/nativeaot/Runtime/threadstore.inl index 29495046a98272..90a8c96747709e 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.inl +++ b/src/coreclr/nativeaot/Runtime/threadstore.inl @@ -11,6 +11,7 @@ EXTERN_C __thread ThreadBuffer tls_CurrentThread; // static inline Thread * ThreadStore::RawGetCurrentThread() { + fprintf(stderr, "[CLAMP] tls_CurrentThread.m_pTransitionFrame\n"); return (Thread *) &tls_CurrentThread; } diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 88dcd4e8d77f7a..3d76c7505772d1 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -2100,7 +2100,7 @@ typedef struct _KNONVOLATILE_CONTEXT_POINTERS { #elif defined(HOST_RISCV64) -#error "TODO-RISCV64: review this when src/coreclr/pal/src/arch/riscv64/asmconstants.h is ported" +// #error "TODO-RISCV64: review this when src/coreclr/pal/src/arch/riscv64/asmconstants.h is ported" // Please refer to src/coreclr/pal/src/arch/riscv64/asmconstants.h #define CONTEXT_RISCV64 0x04000000L @@ -2150,6 +2150,7 @@ typedef struct DECLSPEC_ALIGN(16) _CONTEXT { // // Integer registers. 
 //
+    DWORD64 R0;
     DWORD64 Ra;
     DWORD64 Sp;
     DWORD64 Gp;
@@ -2157,7 +2158,7 @@ typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
     DWORD64 T0;
     DWORD64 T1;
     DWORD64 T2;
-    DWORD64 S0;
+    DWORD64 Fp;
     DWORD64 S1;
     DWORD64 A0;
     DWORD64 A1;
@@ -2198,10 +2199,13 @@ typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
 typedef struct _KNONVOLATILE_CONTEXT_POINTERS {

     PDWORD64 Ra;
+    PDWORD64 Sp;
+    PDWORD64 Gp;
     PDWORD64 Tp;
     PDWORD64 T0;
     PDWORD64 T1;
-    PDWORD64 S0;
+    PDWORD64 T2;
+    PDWORD64 Fp;
     PDWORD64 S1;
     PDWORD64 A0;
     PDWORD64 A1;
diff --git a/src/coreclr/pal/inc/rt/ntimage.h b/src/coreclr/pal/inc/rt/ntimage.h
index 2cf95e40721ab2..a7bac8cbc6c47a 100644
--- a/src/coreclr/pal/inc/rt/ntimage.h
+++ b/src/coreclr/pal/inc/rt/ntimage.h
@@ -244,6 +244,7 @@ typedef struct _IMAGE_FILE_HEADER {
 #define IMAGE_FILE_MACHINE_ARM64        0xAA64  // ARM64 Little-Endian
 #define IMAGE_FILE_MACHINE_CEE          0xC0EE
 #define IMAGE_FILE_MACHINE_LOONGARCH64  0x6264  // LOONGARCH64.
+#define IMAGE_FILE_MACHINE_RISCV64      0x5641  // RISCV64

 //
 // Directory format.
diff --git a/src/coreclr/pal/inc/unixasmmacrosriscv64.inc b/src/coreclr/pal/inc/unixasmmacrosriscv64.inc
index 683ae88415a907..8a2727ab7c1fd7 100644
--- a/src/coreclr/pal/inc/unixasmmacrosriscv64.inc
+++ b/src/coreclr/pal/inc/unixasmmacrosriscv64.inc
@@ -1,12 +1,12 @@
 // Licensed to the .NET Foundation under one or more agreements.
 // The .NET Foundation licenses this file to you under the MIT license.

-#error "TODO-RISCV64: review this; missing many macros for VM"
+//#error "TODO-RISCV64: review this; missing many macros for VM"

 .macro NESTED_ENTRY Name, Section, Handler
         LEAF_ENTRY \Name, \Section
         .ifnc \Handler, NoHandler
-        .personality C_FUNC(\Handler)
+        .cfi_personality 0x1c, C_FUNC(\Handler) // TODO RISCV NYI
         .endif
 .endm

@@ -37,6 +37,34 @@ C_FUNC(\Name\()_End):
         LEAF_END \Name, \Section
 .endm

+// Allocate \ofs bytes of stack and save the register pair \reg1/\reg2 at its
+// base; when \reg1 is fp, also establish the frame pointer.
+.macro PROLOG_SAVE_REG_PAIR_INDEXED reg1, reg2, ofs
+        addi sp, sp, -\ofs
+        .cfi_adjust_cfa_offset \ofs
+        sd \reg1, 0(sp)
+        sd \reg2, 8(sp)
+        .cfi_rel_offset \reg1, 0
+        .cfi_rel_offset \reg2, 8
+        .ifc \reg1, fp
+        mv fp, sp
+        .cfi_def_cfa_register fp
+        .endif
+.endm
+
+// Restore the register pair saved by PROLOG_SAVE_REG_PAIR_INDEXED and release
+// the \ofs bytes of stack it allocated.
+.macro EPILOG_RESTORE_REG_PAIR_INDEXED reg1, reg2, ofs
+        ld \reg2, 8(sp)
+        ld \reg1, 0(sp)
+        .cfi_restore \reg1
+        .cfi_restore \reg2
+
+        addi sp, sp, \ofs
+        .cfi_adjust_cfa_offset -\ofs
+.endm
+
 .macro EMIT_BREAKPOINT
         ebreak
 .endm
diff --git a/src/coreclr/pal/prebuilt/inc/cordebug.h b/src/coreclr/pal/prebuilt/inc/cordebug.h
index fa4b434eccf554..54bde6b445ea84 100644
--- a/src/coreclr/pal/prebuilt/inc/cordebug.h
+++ b/src/coreclr/pal/prebuilt/inc/cordebug.h
@@ -1476,7 +1476,8 @@ enum CorDebugPlatform
         CORDB_PLATFORM_POSIX_X86 = ( CORDB_PLATFORM_POSIX_AMD64 + 1 ) ,
         CORDB_PLATFORM_POSIX_ARM = ( CORDB_PLATFORM_POSIX_X86 + 1 ) ,
         CORDB_PLATFORM_POSIX_ARM64 = ( CORDB_PLATFORM_POSIX_ARM + 1 ) ,
-        CORDB_PLATFORM_POSIX_LOONGARCH64 = ( CORDB_PLATFORM_POSIX_ARM64 + 1 )
+        CORDB_PLATFORM_POSIX_LOONGARCH64 = ( CORDB_PLATFORM_POSIX_ARM64 + 1 ) ,
+        CORDB_PLATFORM_POSIX_RISCV64 = ( CORDB_PLATFORM_POSIX_LOONGARCH64 + 1 )
     } CorDebugPlatform;

@@ -9256,7 +9257,71 @@ enum CorDebugRegister
     REGISTER_LOONGARCH64_F28 = ( REGISTER_LOONGARCH64_F27 + 1 ) ,
     REGISTER_LOONGARCH64_F29 = ( REGISTER_LOONGARCH64_F28 + 1 ) ,
     REGISTER_LOONGARCH64_F30 = ( REGISTER_LOONGARCH64_F29 + 1 ) ,
-    REGISTER_LOONGARCH64_F31 = ( REGISTER_LOONGARCH64_F30 + 1 )
+    REGISTER_LOONGARCH64_F31 = ( REGISTER_LOONGARCH64_F30 + 1 ),
+    REGISTER_RISCV64_PC = 0,
+    REGISTER_RISCV64_RA = ( REGISTER_RISCV64_PC + 1),
+    REGISTER_RISCV64_SP = ( REGISTER_RISCV64_RA + 1),
+    REGISTER_RISCV64_GP = ( REGISTER_RISCV64_SP
+ 1), + REGISTER_RISCV64_TP = ( REGISTER_RISCV64_GP + 1 ), + REGISTER_RISCV64_T0 = ( REGISTER_RISCV64_TP + 1 ), + REGISTER_RISCV64_T1 = ( REGISTER_RISCV64_T0 + 1 ), + REGISTER_RISCV64_T2 = ( REGISTER_RISCV64_T1 + 1 ), + REGISTER_RISCV64_FP = ( REGISTER_RISCV64_T2 + 1 ), + REGISTER_RISCV64_S1 = ( REGISTER_RISCV64_FP + 1 ), + REGISTER_RISCV64_A0 = ( REGISTER_RISCV64_S1 + 1 ), + REGISTER_RISCV64_A1 = ( REGISTER_RISCV64_A0 + 1 ), + REGISTER_RISCV64_A2 = ( REGISTER_RISCV64_A1 + 1 ), + REGISTER_RISCV64_A3 = ( REGISTER_RISCV64_A2 + 1 ), + REGISTER_RISCV64_A4 = ( REGISTER_RISCV64_A3 + 1 ), + REGISTER_RISCV64_A5 = ( REGISTER_RISCV64_A4 + 1 ), + REGISTER_RISCV64_A6 = ( REGISTER_RISCV64_A5 + 1 ), + REGISTER_RISCV64_A7 = ( REGISTER_RISCV64_A6 + 1 ), + REGISTER_RISCV64_S2 = ( REGISTER_RISCV64_A7 + 1 ), + REGISTER_RISCV64_S3 = ( REGISTER_RISCV64_S2 + 1 ), + REGISTER_RISCV64_S4 = ( REGISTER_RISCV64_S3 + 1 ), + REGISTER_RISCV64_S5 = ( REGISTER_RISCV64_S4 + 1 ), + REGISTER_RISCV64_S6 = ( REGISTER_RISCV64_S5 + 1 ), + REGISTER_RISCV64_S7 = ( REGISTER_RISCV64_S6 + 1 ), + REGISTER_RISCV64_S8 = ( REGISTER_RISCV64_S7 + 1 ), + REGISTER_RISCV64_S9 = ( REGISTER_RISCV64_S8 + 1 ), + REGISTER_RISCV64_S10 = ( REGISTER_RISCV64_S9 + 1 ), + REGISTER_RISCV64_S11 = ( REGISTER_RISCV64_S10 + 1 ), + REGISTER_RISCV64_T3 = ( REGISTER_RISCV64_S11 + 1 ), + REGISTER_RISCV64_T4 = ( REGISTER_RISCV64_T3 + 1 ), + REGISTER_RISCV64_T5 = ( REGISTER_RISCV64_T4 + 1 ), + REGISTER_RISCV64_T6 = ( REGISTER_RISCV64_T5 + 1 ), + REGISTER_RISCV64_F0 = ( REGISTER_RISCV64_T6 + 1 ), + REGISTER_RISCV64_F1 = ( REGISTER_RISCV64_F0 + 1 ), + REGISTER_RISCV64_F2 = ( REGISTER_RISCV64_F1 + 1 ), + REGISTER_RISCV64_F3 = ( REGISTER_RISCV64_F2 + 1 ), + REGISTER_RISCV64_F4 = ( REGISTER_RISCV64_F3 + 1 ), + REGISTER_RISCV64_F5 = ( REGISTER_RISCV64_F4 + 1 ), + REGISTER_RISCV64_F6 = ( REGISTER_RISCV64_F5 + 1 ), + REGISTER_RISCV64_F7 = ( REGISTER_RISCV64_F6 + 1 ), + REGISTER_RISCV64_F8 = ( REGISTER_RISCV64_F7 + 1 ), + REGISTER_RISCV64_F9 = ( REGISTER_RISCV64_F8 + 1 ), + REGISTER_RISCV64_F10 = ( REGISTER_RISCV64_F9 + 1 ), + REGISTER_RISCV64_F11 = ( REGISTER_RISCV64_F10 + 1 ), + REGISTER_RISCV64_F12 = ( REGISTER_RISCV64_F11 + 1 ), + REGISTER_RISCV64_F13 = ( REGISTER_RISCV64_F12 + 1 ), + REGISTER_RISCV64_F14 = ( REGISTER_RISCV64_F13 + 1 ), + REGISTER_RISCV64_F15 = ( REGISTER_RISCV64_F14 + 1 ), + REGISTER_RISCV64_F16 = ( REGISTER_RISCV64_F15 + 1 ), + REGISTER_RISCV64_F17 = ( REGISTER_RISCV64_F16 + 1 ), + REGISTER_RISCV64_F18 = ( REGISTER_RISCV64_F17 + 1 ), + REGISTER_RISCV64_F19 = ( REGISTER_RISCV64_F18 + 1 ), + REGISTER_RISCV64_F20 = ( REGISTER_RISCV64_F19 + 1 ), + REGISTER_RISCV64_F21 = ( REGISTER_RISCV64_F20 + 1 ), + REGISTER_RISCV64_F22 = ( REGISTER_RISCV64_F21 + 1 ), + REGISTER_RISCV64_F23 = ( REGISTER_RISCV64_F22 + 1 ), + REGISTER_RISCV64_F24 = ( REGISTER_RISCV64_F23 + 1 ), + REGISTER_RISCV64_F25 = ( REGISTER_RISCV64_F24 + 1 ), + REGISTER_RISCV64_F26 = ( REGISTER_RISCV64_F25 + 1 ), + REGISTER_RISCV64_F27 = ( REGISTER_RISCV64_F26 + 1 ), + REGISTER_RISCV64_F28 = ( REGISTER_RISCV64_F27 + 1 ), + REGISTER_RISCV64_F29 = ( REGISTER_RISCV64_F28 + 1 ), + REGISTER_RISCV64_F30 = ( REGISTER_RISCV64_F29 + 1 ), + REGISTER_RISCV64_F31 = ( REGISTER_RISCV64_F30 + 1 ), } CorDebugRegister; diff --git a/src/coreclr/pal/src/arch/riscv64/activationhandlerwrapper.S b/src/coreclr/pal/src/arch/riscv64/activationhandlerwrapper.S index a7cd5b6c4d2403..03602c1c1bb1e5 100644 --- a/src/coreclr/pal/src/arch/riscv64/activationhandlerwrapper.S +++ 
b/src/coreclr/pal/src/arch/riscv64/activationhandlerwrapper.S
@@ -4,4 +4,20 @@
 #include "unixasmmacros.inc"
 #include "asmconstants.h"

-#error "TODO-RISCV64: missing implementation"
+// Offset of the return address from the ActivationHandler in the ActivationHandlerWrapper
+.global C_FUNC(ActivationHandlerReturnOffset)
+C_FUNC(ActivationHandlerReturnOffset):
+    .int LOCAL_LABEL(ActivationHandlerReturn)-C_FUNC(ActivationHandlerWrapper)
+
+NESTED_ENTRY ActivationHandlerWrapper, _TEXT, NoHandler
+    PROLOG_SAVE_REG_PAIR_INDEXED fp, ra, (16 + CONTEXT_Size)
+
+    // Should never actually run
+    EMIT_BREAKPOINT
+LOCAL_LABEL(ActivationHandlerReturn):
+    // Should never return
+    EMIT_BREAKPOINT
+    EPILOG_RESTORE_REG_PAIR_INDEXED fp, ra, (16 + CONTEXT_Size)
+    ret
+NESTED_END ActivationHandlerWrapper, _TEXT
+
diff --git a/src/coreclr/pal/src/arch/riscv64/asmconstants.h b/src/coreclr/pal/src/arch/riscv64/asmconstants.h
index 015ac39c13fc9f..138e3e5d542899 100644
--- a/src/coreclr/pal/src/arch/riscv64/asmconstants.h
+++ b/src/coreclr/pal/src/arch/riscv64/asmconstants.h
@@ -7,7 +7,7 @@
 // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-dwarf.adoc
 // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-cc.adoc

-#error "TODO-RISCV64: review this when other files are ported in this directory"
+// #error "TODO-RISCV64: review this when other files are ported in this directory"

 #define CONTEXT_RISCV64 0x04000000L

@@ -61,10 +61,25 @@
 #define CONTEXT_Pc  CONTEXT_X31 + SIZEOF_RISCV64_INTR
 #define CONTEXT_FPU_OFFSET  CONTEXT_Pc + SIZEOF_RISCV64_INTR

+#define CONTEXT_Ra  CONTEXT_X1
 #define CONTEXT_Sp  CONTEXT_X2
+#define CONTEXT_Gp  CONTEXT_X3
 #define CONTEXT_Tp  CONTEXT_X4
 #define CONTEXT_Fp  CONTEXT_X8
+#define CONTEXT_S0  CONTEXT_X8
+#define CONTEXT_S1  CONTEXT_X9
+#define CONTEXT_S2  CONTEXT_X18
+#define CONTEXT_S3  CONTEXT_X19
+#define CONTEXT_S4  CONTEXT_X20
+#define CONTEXT_S5  CONTEXT_X21
+#define CONTEXT_S6  CONTEXT_X22
+#define CONTEXT_S7  CONTEXT_X23
+#define CONTEXT_S8  CONTEXT_X24
+#define CONTEXT_S9  CONTEXT_X25
+#define CONTEXT_S10 CONTEXT_X26
+#define CONTEXT_S11 CONTEXT_X27
+
 #define CONTEXT_F0 0
 #define CONTEXT_F1 CONTEXT_F1 + SIZEOF_RISCV64_FPR
 #define CONTEXT_F2 CONTEXT_F2 + SIZEOF_RISCV64_FPR
diff --git a/src/coreclr/pal/src/arch/riscv64/callsignalhandlerwrapper.S b/src/coreclr/pal/src/arch/riscv64/callsignalhandlerwrapper.S
index a7cd5b6c4d2403..94f1b52e3bb0da 100644
--- a/src/coreclr/pal/src/arch/riscv64/callsignalhandlerwrapper.S
+++ b/src/coreclr/pal/src/arch/riscv64/callsignalhandlerwrapper.S
@@ -4,4 +4,4 @@
 #include "unixasmmacros.inc"
 #include "asmconstants.h"

-#error "TODO-RISCV64: missing implementation"
+// #error "TODO-RISCV64: missing implementation"
diff --git a/src/coreclr/pal/src/arch/riscv64/context2.S b/src/coreclr/pal/src/arch/riscv64/context2.S
index 8e3a01ac4f608a..e5033fd9d0db97 100644
--- a/src/coreclr/pal/src/arch/riscv64/context2.S
+++ b/src/coreclr/pal/src/arch/riscv64/context2.S
@@ -9,4 +9,12 @@
 #include "unixasmmacros.inc"
 #include "asmconstants.h"

-#error "TODO-RISCV64: missing implementation"
+// #error "TODO-RISCV64: missing implementation"
+LEAF_ENTRY RtlRestoreContext, _TEXT
+LEAF_END RtlRestoreContext, _TEXT
+
+LEAF_ENTRY RtlCaptureContext, _TEXT
+LEAF_END RtlCaptureContext, _TEXT
+
+LEAF_ENTRY CONTEXT_CaptureContext, _TEXT
+LEAF_END CONTEXT_CaptureContext, _TEXT
diff --git a/src/coreclr/pal/src/arch/riscv64/dispatchexceptionwrapper.S 
b/src/coreclr/pal/src/arch/riscv64/dispatchexceptionwrapper.S
index 41f5e08472dfff..3ebace416d14a1 100644
--- a/src/coreclr/pal/src/arch/riscv64/dispatchexceptionwrapper.S
+++ b/src/coreclr/pal/src/arch/riscv64/dispatchexceptionwrapper.S
@@ -10,4 +10,4 @@

 #include "unixasmmacros.inc"

-#error "TODO-RISCV64: missing implementation"
+// #error "TODO-RISCV64: missing implementation"
diff --git a/src/coreclr/pal/src/arch/riscv64/exceptionhelper.S b/src/coreclr/pal/src/arch/riscv64/exceptionhelper.S
index a7cd5b6c4d2403..733d5cea66116d 100644
--- a/src/coreclr/pal/src/arch/riscv64/exceptionhelper.S
+++ b/src/coreclr/pal/src/arch/riscv64/exceptionhelper.S
@@ -4,4 +4,43 @@
 #include "unixasmmacros.inc"
 #include "asmconstants.h"

-#error "TODO-RISCV64: missing implementation"
+//////////////////////////////////////////////////////////////////////////
+//
+// This function creates a stack frame right below the target frame, restores all callee
+// saved registers, SP, and RA from the passed in context.
+// Then it uses ThrowExceptionHelper to throw the passed in exception from that context.
+// EXTERN_C void ThrowExceptionFromContextInternal(CONTEXT* context, PAL_SEHException* ex);
+LEAF_ENTRY ThrowExceptionFromContextInternal, _TEXT
+#ifdef HAS_ASAN
+#error "LLVM v3.9 ASAN is not yet implemented on RISCV64"
+#endif
+    addi sp, sp, -16
+    .cfi_adjust_cfa_offset 16
+
+    // Save the FP & RA to the stack so that the unwind can work at the instruction after
+    // loading the FP from the context, but before loading the SP from the context.
+    sd fp, 0(sp)
+    sd ra, 8(sp)
+
+    ld tp, CONTEXT_Tp(a0)
+    ld gp, CONTEXT_Gp(a0)
+    ld s1, CONTEXT_S1(a0)
+    ld s2, CONTEXT_S2(a0)
+    ld s3, CONTEXT_S3(a0)
+    ld s4, CONTEXT_S4(a0)
+    ld s5, CONTEXT_S5(a0)
+    ld s6, CONTEXT_S6(a0)
+    ld s7, CONTEXT_S7(a0)
+    ld s8, CONTEXT_S8(a0)
+    ld s9, CONTEXT_S9(a0)
+    ld s10, CONTEXT_S10(a0)
+    ld s11, CONTEXT_S11(a0)
+    ld ra, CONTEXT_Ra(a0)
+
+    ld fp, CONTEXT_Fp(a0)
+    ld sp, CONTEXT_Sp(a0)
+
+    // The PAL_SEHException pointer
+    mv a0, a1
+    tail C_FUNC(ThrowExceptionHelper)
+LEAF_END ThrowExceptionFromContextInternal, _TEXT
diff --git a/src/coreclr/pal/src/arch/riscv64/signalhandlerhelper.cpp b/src/coreclr/pal/src/arch/riscv64/signalhandlerhelper.cpp
index a4ce803b47afdb..903d6bb5497219 100644
--- a/src/coreclr/pal/src/arch/riscv64/signalhandlerhelper.cpp
+++ b/src/coreclr/pal/src/arch/riscv64/signalhandlerhelper.cpp
@@ -26,5 +26,5 @@ Parameters :
 --*/
 void ExecuteHandlerOnCustomStack(int code, siginfo_t *siginfo, void *context, size_t customSp, SignalHandlerWorkerReturnPoint* returnPoint)
 {
-#error "TODO-RISCV64: missing implementation"
+// #error "TODO-RISCV64: missing implementation"
 }
diff --git a/src/coreclr/pal/src/exception/remote-unwind.cpp b/src/coreclr/pal/src/exception/remote-unwind.cpp
index 22c72eeb709f08..076d1b51558287 100644
--- a/src/coreclr/pal/src/exception/remote-unwind.cpp
+++ b/src/coreclr/pal/src/exception/remote-unwind.cpp
@@ -124,7 +124,7 @@ typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size
 #define PRId PRId32
 #define PRIA "08"
 #define PRIxA PRIA PRIx
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64)
+#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
 #define PRIx PRIx64
 #define PRIu PRIu64
 #define PRId PRId64
@@ -1895,6 +1895,20 @@ static void GetContextPointers(unw_cursor_t *cursor, 
unw_context_t *unwContext,
     GetContextPointer(cursor, unwContext, UNW_PPC64_R29, (SIZE_T **)&contextPointers->R29);
     GetContextPointer(cursor, unwContext, UNW_PPC64_R30, (SIZE_T **)&contextPointers->R30);
     GetContextPointer(cursor, unwContext, UNW_PPC64_R31, (SIZE_T **)&contextPointers->R31);
+#elif defined(TARGET_RISCV64)
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X2, (SIZE_T **)&contextPointers->Sp);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X8, (SIZE_T **)&contextPointers->Fp);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X9, (SIZE_T **)&contextPointers->S1);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X18, (SIZE_T **)&contextPointers->S2);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X19, (SIZE_T **)&contextPointers->S3);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X20, (SIZE_T **)&contextPointers->S4);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X21, (SIZE_T **)&contextPointers->S5);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X22, (SIZE_T **)&contextPointers->S6);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X23, (SIZE_T **)&contextPointers->S7);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X24, (SIZE_T **)&contextPointers->S8);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X25, (SIZE_T **)&contextPointers->S9);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X26, (SIZE_T **)&contextPointers->S10);
+    GetContextPointer(cursor, unwContext, UNW_RISCV_X27, (SIZE_T **)&contextPointers->S11);
 #else
 #error unsupported architecture
 #endif
@@ -2003,6 +2017,24 @@ static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext)
     unw_get_reg(cursor, UNW_PPC64_R28, (unw_word_t *) &winContext->R28);
     unw_get_reg(cursor, UNW_PPC64_R29, (unw_word_t *) &winContext->R29);
     unw_get_reg(cursor, UNW_PPC64_R30, (unw_word_t *) &winContext->R30);
+#elif defined(TARGET_RISCV64)
+    unw_get_reg(cursor, UNW_RISCV_X1, (unw_word_t *) &winContext->Ra);
+    unw_get_reg(cursor, UNW_RISCV_X2, (unw_word_t *) &winContext->Sp);
+    unw_get_reg(cursor, UNW_RISCV_X3, (unw_word_t *) &winContext->Gp);
+    unw_get_reg(cursor, UNW_RISCV_X4, (unw_word_t *) &winContext->Tp);
+    unw_get_reg(cursor, UNW_RISCV_X8, (unw_word_t *) &winContext->Fp);
+    unw_get_reg(cursor, UNW_RISCV_X9, (unw_word_t *) &winContext->S1);
+    unw_get_reg(cursor, UNW_RISCV_X18, (unw_word_t *) &winContext->S2);
+    unw_get_reg(cursor, UNW_RISCV_X19, (unw_word_t *) &winContext->S3);
+    unw_get_reg(cursor, UNW_RISCV_X20, (unw_word_t *) &winContext->S4);
+    unw_get_reg(cursor, UNW_RISCV_X21, (unw_word_t *) &winContext->S5);
+    unw_get_reg(cursor, UNW_RISCV_X22, (unw_word_t *) &winContext->S6);
+    unw_get_reg(cursor, UNW_RISCV_X23, (unw_word_t *) &winContext->S7);
+    unw_get_reg(cursor, UNW_RISCV_X24, (unw_word_t *) &winContext->S8);
+    unw_get_reg(cursor, UNW_RISCV_X25, (unw_word_t *) &winContext->S9);
+    unw_get_reg(cursor, UNW_RISCV_X26, (unw_word_t *) &winContext->S10);
+    unw_get_reg(cursor, UNW_RISCV_X27, (unw_word_t *) &winContext->S11);
+    TRACE("sp %p gp %p fp %p tp %p ra %p\n", winContext->Sp, winContext->Gp, winContext->Fp, winContext->Tp, winContext->Ra);
 #else
 #error unsupported architecture
 #endif
@@ -2138,6 +2170,20 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write
     case UNW_PPC64_R30: *valp = (unw_word_t)winContext->R30; break;
     case UNW_PPC64_R31: *valp = (unw_word_t)winContext->R31; break;
     case UNW_PPC64_NIP: *valp = (unw_word_t)winContext->Nip; break;
+#elif defined(TARGET_RISCV64)
+    case UNW_RISCV_X2: *valp = (unw_word_t)winContext->Sp; break;
+    case 
UNW_RISCV_X8: *valp = (unw_word_t)winContext->Fp; break; + case UNW_RISCV_X9: *valp = (unw_word_t)winContext->S1; break; + case UNW_RISCV_X18: *valp = (unw_word_t)winContext->S2; break; + case UNW_RISCV_X19: *valp = (unw_word_t)winContext->S3; break; + case UNW_RISCV_X20: *valp = (unw_word_t)winContext->S4; break; + case UNW_RISCV_X21: *valp = (unw_word_t)winContext->S5; break; + case UNW_RISCV_X22: *valp = (unw_word_t)winContext->S6; break; + case UNW_RISCV_X23: *valp = (unw_word_t)winContext->S7; break; + case UNW_RISCV_X24: *valp = (unw_word_t)winContext->S8; break; + case UNW_RISCV_X25: *valp = (unw_word_t)winContext->S9; break; + case UNW_RISCV_X26: *valp = (unw_word_t)winContext->S10; break; + case UNW_RISCV_X27: *valp = (unw_word_t)winContext->S11; break; #else #error unsupported architecture #endif diff --git a/src/coreclr/pal/src/exception/seh-unwind.cpp b/src/coreclr/pal/src/exception/seh-unwind.cpp index b3a4ca23726bb2..8c7f5c05b43795 100644 --- a/src/coreclr/pal/src/exception/seh-unwind.cpp +++ b/src/coreclr/pal/src/exception/seh-unwind.cpp @@ -154,7 +154,7 @@ enum ASSIGN_REG(S8) #elif (defined(HOST_UNIX) && defined(HOST_RISCV64)) -#error "TODO-RISCV64: review this" +// #error "TODO-RISCV64: review this" // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-cc.adoc @@ -165,7 +165,7 @@ enum ASSIGN_REG(Gp) \ ASSIGN_REG(Tp) \ ASSIGN_REG(Pc) \ - ASSIGN_REG(S0) \ + ASSIGN_REG(Fp) \ ASSIGN_REG(S1) \ ASSIGN_REG(S2) \ ASSIGN_REG(S3) \ @@ -473,7 +473,7 @@ void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext) unw_get_reg(cursor, UNW_LOONGARCH64_R30, (unw_word_t *) &winContext->S7); unw_get_reg(cursor, UNW_LOONGARCH64_R31, (unw_word_t *) &winContext->S8); #elif (defined(HOST_UNIX) && defined(HOST_RISCV64)) -#error "TODO-RISCV64: review this" +// #error "TODO-RISCV64: review this" // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-cc.adoc @@ -484,7 +484,7 @@ void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext) unw_get_reg(cursor, UNW_RISCV_X5, (unw_word_t *) &winContext->T0); unw_get_reg(cursor, UNW_RISCV_X6, (unw_word_t *) &winContext->T1); unw_get_reg(cursor, UNW_RISCV_X7, (unw_word_t *) &winContext->T2); - unw_get_reg(cursor, UNW_RISCV_X8, (unw_word_t *) &winContext->S0); + unw_get_reg(cursor, UNW_RISCV_X8, (unw_word_t *) &winContext->Fp); unw_get_reg(cursor, UNW_RISCV_X9, (unw_word_t *) &winContext->S1); unw_get_reg(cursor, UNW_RISCV_X10, (unw_word_t *) &winContext->A0); unw_get_reg(cursor, UNW_RISCV_X11, (unw_word_t *) &winContext->A1); @@ -627,7 +627,7 @@ void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext, KNONVOL GetContextPointer(cursor, unwContext, UNW_LOONGARCH64_R30, (SIZE_T **)&contextPointers->S7); GetContextPointer(cursor, unwContext, UNW_LOONGARCH64_R31, (SIZE_T **)&contextPointers->S8); #elif (defined(HOST_UNIX) && defined(HOST_RISCV64)) -#error "TODO-RISCV64: review this" +// #error "TODO-RISCV64: review this" // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-cc.adoc @@ -635,7 +635,7 @@ void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext, KNONVOL GetContextPointer(cursor, unwContext, UNW_RISCV_X4, (SIZE_T **)&contextPointers->Tp); GetContextPointer(cursor, unwContext, UNW_RISCV_X5, (SIZE_T **)&contextPointers->T0); GetContextPointer(cursor, unwContext, UNW_RISCV_X7, (SIZE_T **)&contextPointers->T1); - GetContextPointer(cursor, 
unwContext, UNW_RISCV_X8, (SIZE_T **)&contextPointers->S0); + GetContextPointer(cursor, unwContext, UNW_RISCV_X8, (SIZE_T **)&contextPointers->Fp); GetContextPointer(cursor, unwContext, UNW_RISCV_X9, (SIZE_T **)&contextPointers->S1); GetContextPointer(cursor, unwContext, UNW_RISCV_X10, (SIZE_T **)&contextPointers->A0); GetContextPointer(cursor, unwContext, UNW_RISCV_X11, (SIZE_T **)&contextPointers->A1); diff --git a/src/coreclr/pal/src/include/pal/context.h b/src/coreclr/pal/src/include/pal/context.h index c702ae272a76bd..5fab30aca173c5 100644 --- a/src/coreclr/pal/src/include/pal/context.h +++ b/src/coreclr/pal/src/include/pal/context.h @@ -157,7 +157,7 @@ using asm_sigcontext::_xstate; #define MCREG_Pc(mc) ((mc).__pc) #elif defined(HOST_RISCV64) -#error "TODO-RISCV64: review this" +// #error "TODO-RISCV64: review this" #define MCREG_Ra(mc) ((mc).__gregs[1]) #define MCREG_Sp(mc) ((mc).__gregs[2]) @@ -166,7 +166,7 @@ using asm_sigcontext::_xstate; #define MCREG_T0(mc) ((mc).__gregs[5]) #define MCREG_T1(mc) ((mc).__gregs[6]) #define MCREG_T2(mc) ((mc).__gregs[7]) -#define MCREG_S0(mc) ((mc).__gregs[8]) +#define MCREG_Fp(mc) ((mc).__gregs[8]) #define MCREG_S1(mc) ((mc).__gregs[9]) #define MCREG_A0(mc) ((mc).__gregs[10]) #define MCREG_A1(mc) ((mc).__gregs[11]) @@ -1130,8 +1130,6 @@ inline static DWORD64 CONTEXTGetFP(LPCONTEXT pContext) return pContext->R11; #elif defined(HOST_POWERPC64) return pContext->R31; -#elif defined(HOST_RISCV64) - return pContext->S0; #else return pContext->Fp; #endif diff --git a/src/coreclr/pal/src/thread/context.cpp b/src/coreclr/pal/src/thread/context.cpp index a17c6c077da3b7..c0da070adc39d7 100644 --- a/src/coreclr/pal/src/thread/context.cpp +++ b/src/coreclr/pal/src/thread/context.cpp @@ -199,23 +199,22 @@ typedef int __ptrace_request; #elif defined(HOST_RISCV64) -#error "TODO-RISCV64: review this" +// #error "TODO-RISCV64: review this" // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-cc.adoc #define ASSIGN_CONTROL_REGS \ ASSIGN_REG(Ra) \ ASSIGN_REG(Sp) \ - ASSIGN_REG(Sp) \ ASSIGN_REG(Gp) \ ASSIGN_REG(Tp) \ + ASSIGN_REG(Fp) \ ASSIGN_REG(Pc) #define ASSIGN_INTEGER_REGS \ ASSIGN_REG(T0) \ ASSIGN_REG(T1) \ ASSIGN_REG(T2) \ - ASSIGN_REG(S0) \ ASSIGN_REG(S1) \ ASSIGN_REG(A0) \ ASSIGN_REG(A1) \ diff --git a/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.cpp b/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.cpp index ff4dd582f3c1bf..676c490753924f 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.cpp @@ -253,6 +253,8 @@ static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTUR static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_ARM64; #elif defined(TARGET_LOONGARCH64) static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_LOONGARCH64; +#elif defined(TARGET_RISCV64) +static SPMI_TARGET_ARCHITECTURE SpmiTargetArchitecture = SPMI_TARGET_ARCHITECTURE_RISCV64; #else #error Unsupported architecture #endif diff --git a/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.h b/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.h index a97e8fab4b32e6..4b13202f5e2933 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/spmiutil.h @@ -54,7 +54,8 @@ enum SPMI_TARGET_ARCHITECTURE SPMI_TARGET_ARCHITECTURE_AMD64, SPMI_TARGET_ARCHITECTURE_ARM64, SPMI_TARGET_ARCHITECTURE_ARM, - 
SPMI_TARGET_ARCHITECTURE_LOONGARCH64
+    SPMI_TARGET_ARCHITECTURE_LOONGARCH64,
+    SPMI_TARGET_ARCHITECTURE_RISCV64
 };

 SPMI_TARGET_ARCHITECTURE GetSpmiTargetArchitecture();
@@ -67,7 +68,7 @@ inline bool IsSpmiTarget32Bit()

 inline bool IsSpmiTarget64Bit()
 {
-    return (GetSpmiTargetArchitecture() == SPMI_TARGET_ARCHITECTURE_AMD64) || (GetSpmiTargetArchitecture() == SPMI_TARGET_ARCHITECTURE_ARM64) || (GetSpmiTargetArchitecture() == SPMI_TARGET_ARCHITECTURE_LOONGARCH64);
+    return (GetSpmiTargetArchitecture() == SPMI_TARGET_ARCHITECTURE_AMD64) || (GetSpmiTargetArchitecture() == SPMI_TARGET_ARCHITECTURE_ARM64) || (GetSpmiTargetArchitecture() == SPMI_TARGET_ARCHITECTURE_LOONGARCH64) || (GetSpmiTargetArchitecture() == SPMI_TARGET_ARCHITECTURE_RISCV64);
 }

 inline size_t SpmiTargetPointerSize()
diff --git a/src/coreclr/unwinder/riscv64/unwinder.cpp b/src/coreclr/unwinder/riscv64/unwinder.cpp
index bec3a8da31b44d..12f2bea751a259 100644
--- a/src/coreclr/unwinder/riscv64/unwinder.cpp
+++ b/src/coreclr/unwinder/riscv64/unwinder.cpp
@@ -9,4 +9,118 @@

 #include "unwinder.h"

-#error "TODO-RISCV64: missing implementation"
+// #error "TODO-RISCV64: missing implementation"
+//
+#if 0
+NTSTATUS
+RtlpUnwindCustom(
+    __inout PT_CONTEXT ContextRecord,
+    _In_ BYTE Opcode,
+    _In_ PLOONGARCH64_UNWIND_PARAMS UnwindParams
+    )
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return STATUS_SUCCESS;
+}
+
+ULONG
+RtlpComputeScopeSize(
+    _In_ ULONG_PTR UnwindCodePtr,
+    _In_ ULONG_PTR UnwindCodesEndPtr,
+    _In_ BOOLEAN IsEpilog,
+    _In_ PLOONGARCH64_UNWIND_PARAMS UnwindParams
+    )
+
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return 0;
+}
+
+NTSTATUS
+RtlpUnwindRestoreRegisterRange(
+    __inout PT_CONTEXT ContextRecord,
+    _In_ LONG SpOffset,
+    _In_ ULONG FirstRegister,
+    _In_ ULONG RegisterCount,
+    _In_ PLOONGARCH64_UNWIND_PARAMS UnwindParams
+    )
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return STATUS_SUCCESS;
+}
+
+NTSTATUS
+RtlpUnwindRestoreFpRegisterRange(
+    __inout PT_CONTEXT ContextRecord,
+    _In_ LONG SpOffset,
+    _In_ ULONG FirstRegister,
+    _In_ ULONG RegisterCount,
+    _In_ PLOONGARCH64_UNWIND_PARAMS UnwindParams
+    )
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return STATUS_SUCCESS;
+}
+
+NTSTATUS
+RtlpUnwindFunctionFull(
+    _In_ DWORD64 ControlPcRva,
+    _In_ ULONG_PTR ImageBase,
+    _In_ PT_RUNTIME_FUNCTION FunctionEntry,
+    __inout T_CONTEXT *ContextRecord,
+    _Out_ PDWORD64 EstablisherFrame,
+    __deref_opt_out_opt PEXCEPTION_ROUTINE *HandlerRoutine,
+    _Out_ PVOID *HandlerData,
+    _In_ PLOONGARCH64_UNWIND_PARAMS UnwindParams
+    )
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return STATUS_SUCCESS;
+}
+
+NTSTATUS
+RtlpUnwindFunctionCompact(
+    _In_ DWORD64 ControlPcRva,
+    _In_ PT_RUNTIME_FUNCTION FunctionEntry,
+    __inout T_CONTEXT *ContextRecord,
+    _Out_ PDWORD64 EstablisherFrame,
+    __deref_opt_out_opt PEXCEPTION_ROUTINE *HandlerRoutine,
+    _Out_ PVOID *HandlerData,
+    _In_ PLOONGARCH64_UNWIND_PARAMS UnwindParams
+    )
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return STATUS_SUCCESS;
+}
+
+BOOL OOPStackUnwinderRiscv64::Unwind(T_CONTEXT * pContext)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+#endif
+
+BOOL DacUnwindStackFrame(T_CONTEXT *pContext, T_KNONVOLATILE_CONTEXT_POINTERS* pContextPointers)
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return false;
+}
+
+#if defined(HOST_UNIX)
+PEXCEPTION_ROUTINE
+RtlVirtualUnwind(
+    IN ULONG HandlerType,
+    IN ULONG64 ImageBase,
+    IN ULONG64 ControlPc,
+    IN PT_RUNTIME_FUNCTION FunctionEntry,
+    IN OUT PCONTEXT ContextRecord,
+    OUT PVOID *HandlerData,
+    OUT PULONG64 EstablisherFrame,
+    IN OUT PT_KNONVOLATILE_CONTEXT_POINTERS 
ContextPointers OPTIONAL
+    )
+{
+    _ASSERTE(!"TODO RISCV64 NYI");
+    return NULL;
+}
+#endif
diff --git a/src/coreclr/unwinder/riscv64/unwinder.h b/src/coreclr/unwinder/riscv64/unwinder.h
index efcd109cceb5dc..fe9412ff3f8968 100644
--- a/src/coreclr/unwinder/riscv64/unwinder.h
+++ b/src/coreclr/unwinder/riscv64/unwinder.h
@@ -8,6 +8,6 @@

 #include "baseunwinder.h"

-#error "TODO-RISCV64: missing implementation"
+// #error "TODO-RISCV64: missing implementation"

 #endif // __unwinder_riscv64__
diff --git a/src/coreclr/utilcode/util.cpp b/src/coreclr/utilcode/util.cpp
index 0901e06d1f3028..76f07e451c47c2 100644
--- a/src/coreclr/utilcode/util.cpp
+++ b/src/coreclr/utilcode/util.cpp
@@ -561,7 +561,7 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr,
 /*static*/ CPU_Group_Info *CPUGroupInfo::m_CPUGroupInfoArray = NULL;
 /*static*/ LONG CPUGroupInfo::m_initialization = 0;

-#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64))
+#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64))
 // Calculate greatest common divisor
 DWORD GCD(DWORD u, DWORD v)
 {
@@ -591,7 +591,7 @@ DWORD LCM(DWORD u, DWORD v)
     }
     CONTRACTL_END;

-#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64))
+#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64))
     BYTE *bBuffer = NULL;
     SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pSLPIEx = NULL;
     SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pRecord = NULL;
@@ -672,7 +672,7 @@ DWORD LCM(DWORD u, DWORD v)
     }
     CONTRACTL_END;

-#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64))
+#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64))
     USHORT groupCount = 0;

     // On Windows 11+ and Windows Server 2022+, a process is no longer restricted to a single processor group by default.
@@ -758,7 +758,7 @@ DWORD LCM(DWORD u, DWORD v)
 {
     LIMITED_METHOD_CONTRACT;

-#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64))
+#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64))
     WORD bTemp = 0;
     WORD bDiff = processor_number - bTemp;

@@ -789,7 +789,7 @@ DWORD LCM(DWORD u, DWORD v)
     }
     CONTRACTL_END;

-#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64))
+#if !defined(FEATURE_NATIVEAOT) && (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64))
     _ASSERTE(m_enableGCCPUGroups && m_threadUseAllCpuGroups);

     PROCESSOR_NUMBER proc_no;

@@ -838,7 +838,7 @@ DWORD LCM(DWORD u, DWORD v)
     }
     CONTRACTL_END;

-#if (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64))
+#if (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64))
     WORD i, minGroup = 0;
     DWORD minWeight = 0;

@@ -880,7 +880,7 @@ DWORD LCM(DWORD u, DWORD v)
 /*static*/ void CPUGroupInfo::ClearCPUGroupAffinity(GROUP_AFFINITY *gf)
 {
     LIMITED_METHOD_CONTRACT;
-#if (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64))
+#if (defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64))
     _ASSERTE(m_enableGCCPUGroups && m_threadUseAllCpuGroups && m_threadAssignCpuGroups);

     WORD group = gf->Group;
diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt
index 7d7826b5a0d70a..f09f43c432d664 100644
--- a/src/coreclr/vm/CMakeLists.txt
+++ b/src/coreclr/vm/CMakeLists.txt
@@ -758,6 +758,14 @@ else(CLR_CMAKE_TARGET_WIN32)
       ${ARCH_SOURCES_DIR}/pinvokestubs.S
       ${ARCH_SOURCES_DIR}/thunktemplates.S
     )
+  elseif(CLR_CMAKE_TARGET_ARCH_RISCV64)
+    set(VM_SOURCES_WKS_ARCH_ASM
+      ${ARCH_SOURCES_DIR}/asmhelpers.S
+      ${ARCH_SOURCES_DIR}/calldescrworkerriscv64.S
+      ${ARCH_SOURCES_DIR}/crthelpers.S
+      ${ARCH_SOURCES_DIR}/pinvokestubs.S
+      ${ARCH_SOURCES_DIR}/thunktemplates.S
+    )
   endif()
 endif(CLR_CMAKE_TARGET_WIN32)

@@ -860,7 +868,22 @@ elseif(CLR_CMAKE_TARGET_ARCH_ARM64)
       ${ARCH_SOURCES_DIR}/singlestepper.cpp
     )
   endif(CLR_CMAKE_HOST_UNIX)
-elseif(CLR_CMAKE_TARGET_ARCH_LOONGARCH64)
+elseif(CLR_CMAKE_TARGET_ARCH_LOONGARCH64)
+  set(VM_SOURCES_DAC_AND_WKS_ARCH
+    ${ARCH_SOURCES_DIR}/stubs.cpp
+    exceptionhandling.cpp
+  )
+
+  set(VM_HEADERS_DAC_AND_WKS_ARCH
+    ${ARCH_SOURCES_DIR}/virtualcallstubcpu.hpp
+    exceptionhandling.h
+  )
+
+  set(VM_SOURCES_WKS_ARCH
+    ${ARCH_SOURCES_DIR}/profiler.cpp
+    gcinfodecoder.cpp
+  )
+elseif(CLR_CMAKE_TARGET_ARCH_RISCV64)
   set(VM_SOURCES_DAC_AND_WKS_ARCH
     ${ARCH_SOURCES_DIR}/stubs.cpp
     exceptionhandling.cpp
diff --git a/src/coreclr/vm/arm64/cgencpu.h b/src/coreclr/vm/arm64/cgencpu.h
index ea29ec2bdce028..39c3184aaea07f 100644
--- a/src/coreclr/vm/arm64/cgencpu.h
+++ b/src/coreclr/vm/arm64/cgencpu.h
@@ -2,7 +2,6 @@
 // The .NET Foundation licenses this file to you under the MIT license.
// - #ifndef TARGET_ARM64 #error Should only include "cGenCpu.h" for ARM64 builds #endif diff --git a/src/coreclr/vm/callcounting.h b/src/coreclr/vm/callcounting.h index 3d25e1c2826267..f47bfc05c16757 100644 --- a/src/coreclr/vm/callcounting.h +++ b/src/coreclr/vm/callcounting.h @@ -99,6 +99,8 @@ class CallCountingStub static const int CodeSize = 32; #elif defined(TARGET_LOONGARCH64) static const int CodeSize = 40; +#elif defined(TARGET_RISCV64) + static const int CodeSize = 40; // TODO RISCV64 #endif private: diff --git a/src/coreclr/vm/callingconvention.h b/src/coreclr/vm/callingconvention.h index 364c850fca49b3..51ada4199a29ab 100644 --- a/src/coreclr/vm/callingconvention.h +++ b/src/coreclr/vm/callingconvention.h @@ -164,6 +164,25 @@ struct TransitionBlock }; //TADDR padding; // Keep size of TransitionBlock as multiple of 16-byte. Simplifies code in PROLOG_WITH_TRANSITION_BLOCK ArgumentRegisters m_argumentRegisters; +#elif defined(TARGET_RISCV64) + union { + CalleeSavedRegisters m_calleeSavedRegisters; + struct { + TADDR m_ReturnAddress; + INT64 s0; // frame pointer + INT64 s1; + INT64 s2; + INT64 s3; + INT64 s4; + INT64 s5; + INT64 s6; + INT64 s7; + INT64 s8; + INT64 tp; + }; + }; + //TADDR padding; // Keep size of TransitionBlock as multiple of 16-byte. Simplifies code in PROLOG_WITH_TRANSITION_BLOCK + ArgumentRegisters m_argumentRegisters; #else PORTABILITY_ASSERT("TransitionBlock"); #endif @@ -505,6 +524,8 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE #elif defined(TARGET_LOONGARCH64) // Composites greater than 16 bytes are passed by reference return (size > ENREGISTERED_PARAMTYPE_MAXSIZE); +#elif defined(TARGET_RISCV64) + return (size > ENREGISTERED_PARAMTYPE_MAXSIZE); #else PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef"); return FALSE; @@ -566,6 +587,13 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE return (m_argSize > ENREGISTERED_PARAMTYPE_MAXSIZE); } return FALSE; +#elif defined(TARGET_RISCV64) + if (m_argType == ELEMENT_TYPE_VALUETYPE) + { + _ASSERTE(!m_argTypeHandle.IsNull()); + return ((m_argSize > ENREGISTERED_PARAMTYPE_MAXSIZE) && (!m_argTypeHandle.IsHFA() || this->IsVarArg())); + } + return FALSE; #else PORTABILITY_ASSERT("ArgIteratorTemplate::IsArgPassedByRef"); return FALSE; @@ -627,7 +655,7 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE ArgLocDesc* GetArgLocDescForStructInRegs() { -#if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined (TARGET_RISCV64) return m_hasArgLocDescForStructInRegs ? &m_argLocDescForStructInRegs : NULL; #else return NULL; @@ -876,6 +904,13 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE #endif // TARGET_LOONGARCH64 +#ifdef TARGET_RISCV64 + // Get layout information for the argument that the ArgIterator is currently visiting. 
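+    // A likely shape for this, assuming the ARM64/LOONGARCH64 pattern carries
+    // over (a sketch built on the iterator members this patch adds --
+    // m_idxGenReg, m_idxFPReg, m_ofsStack -- not the final port):
+    //
+    //   pLoc->Init();
+    //   if (m_hasArgLocDescForStructInRegs) { *pLoc = m_argLocDescForStructInRegs; return; }
+    //   // ...otherwise derive a register (m_idxGenReg / m_idxFPReg) or stack
+    //   // (m_ofsStack) location from argOffset.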
+ void GetArgLoc(int argOffset, ArgLocDesc *pLoc) + { + // TODO RISCV64 + } +#endif // TARGET_RISCV64 protected: DWORD m_dwFlags; // Cached flags int m_nSizeOfArgStack; // Cached value of SizeOfArgStack @@ -886,10 +921,10 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE CorElementType m_argType; int m_argSize; TypeHandle m_argTypeHandle; -#if (defined(TARGET_AMD64) && defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if (defined(TARGET_AMD64) && defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) ArgLocDesc m_argLocDescForStructInRegs; bool m_hasArgLocDescForStructInRegs; -#endif // (TARGET_AMD64 && UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64 +#endif // (TARGET_AMD64 && UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 int m_ofsStack; // Current position of the stack iterator, in bytes @@ -923,6 +958,11 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE int m_idxFPReg; // Next FP register to be assigned a value #endif +#ifdef TARGET_RISCV64 + int m_idxGenReg; // Next general register to be assigned a value + int m_idxFPReg; // Next FP register to be assigned a value +#endif + enum { ITERATION_STARTED = 0x0001, // Started iterating over arguments SIZE_OF_ARG_STACK_COMPUTED = 0x0002, @@ -1169,6 +1209,11 @@ int ArgIteratorTemplate::GetNextOffset() #elif defined(TARGET_LOONGARCH64) m_idxGenReg = numRegistersUsed; m_ofsStack = 0; + m_idxFPReg = 0; +#elif defined(TARGET_RISCV64) + m_idxGenReg = numRegistersUsed; + m_ofsStack = 0; + m_idxFPReg = 0; #else PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset"); @@ -1199,7 +1244,7 @@ int ArgIteratorTemplate::GetNextOffset() m_argSize = argSize; m_argTypeHandle = thValueType; -#if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) || defined (TARGET_LOONGARCH64) +#if defined(UNIX_AMD64_ABI) || defined (TARGET_ARM64) || defined (TARGET_LOONGARCH64) || defined (TARGET_RISCV64) m_hasArgLocDescForStructInRegs = false; #endif @@ -1751,6 +1796,9 @@ int ArgIteratorTemplate::GetNextOffset() m_ofsStack += ALIGN_UP(cbArg, TARGET_POINTER_SIZE); return argOfs; +#elif defined(TARGET_RISCV64) +// #error TODO RISCV64 + return TransitionBlock::InvalidOffset; #else PORTABILITY_ASSERT("ArgIteratorTemplate::GetNextOffset"); return TransitionBlock::InvalidOffset; diff --git a/src/coreclr/vm/ceeload.h b/src/coreclr/vm/ceeload.h index f1cf9000fea411..de2ad6bdcc9b59 100644 --- a/src/coreclr/vm/ceeload.h +++ b/src/coreclr/vm/ceeload.h @@ -87,6 +87,8 @@ class JITInlineTrackingMap; #define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm64.dll") #elif defined(HOST_LOONGARCH64) #define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.loongarch64.dll") +#elif defined(HOST_RISCV64) +#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.riscv64.dll") #endif typedef DPTR(JITInlineTrackingMap) PTR_JITInlineTrackingMap; diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 471bd1628d32a3..ce5f0ca2afd841 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -805,7 +805,7 @@ ExecutionManager::DeleteRangeHelper //----------------------------------------------------------------------------- -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) #define EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS #endif @@ -910,6 +910,9 @@ BOOL IsFunctionFragment(TADDR 
baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry) } return ((*pUnwindCodes & 0xFF) == 0xE5); +#elif defined(TARGET_RISCV64) +// #error TODO RISCV64 + return 0; #else PORTABILITY_ASSERT("IsFunctionFragnent - NYI on this platform"); #endif @@ -1113,7 +1116,9 @@ PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFuncti *pSize = size; return xdata; - +#elif defined(TARGET_RISCV64) +// #error TODO RISCV64 + return NULL; #else PORTABILITY_ASSERT("GetUnwindDataBlob"); return NULL; @@ -2235,6 +2240,8 @@ BOOL EEJitManager::LoadJIT() altJitName = MAKEDLLNAME_W(W("clrjit_unix_x64_x64")); #elif defined(TARGET_LOONGARCH64) altJitName = MAKEDLLNAME_W(W("clrjit_unix_loongarch64_loongarch64")); +#elif defined(TARGET_RISCV64) + altJitName = MAKEDLLNAME_W(W("clrjit_unix_riscv64_riscv64")); #endif #endif // TARGET_WINDOWS @@ -2623,7 +2630,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize) { LIMITED_METHOD_CONTRACT; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce // chance that we won't be able allocate jump stub because of lack of suitable address space. @@ -2675,7 +2682,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap bool fAllocatedFromEmergencyJumpStubReserve = false; size_t allocationSize = pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(initialRequestSize); -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE); #endif pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)allocationSize, &dwSizeAcquiredFromInitialBlock); @@ -2722,7 +2729,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap // this first allocation is critical as it sets up correctly the loader heap info HeapList *pHp = new HeapList; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) pHp->CLRPersonalityRoutine = (BYTE *)pCodeHeap->m_LoaderHeap.AllocMem(JUMP_ALLOCATE_SIZE); #else // Ensure that the heap has a reserved block of memory and so the GetReservedBytesFree() @@ -2875,7 +2882,7 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi size_t reserveSize = initialRequestSize; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) reserveSize += JUMP_ALLOCATE_SIZE; #endif @@ -4422,7 +4429,7 @@ PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo) if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress)) { -#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) && (defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)) +#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) && (defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)) // If we might have fragmented unwind, and we're on ARM64/LoongArch64, // make sure to 
returning the root record, // as the trailing records don't have prolog unwind codes. diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 8e432e6e5209ba..a277674e5a3644 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -478,13 +478,13 @@ struct HeapList size_t maxCodeHeapSize;// Size of the entire contiguous block of memory size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) BYTE* CLRPersonalityRoutine; // jump thunk to personality routine #endif TADDR GetModuleBase() { -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) return (TADDR)CLRPersonalityRoutine; #else return (TADDR)mapBase; diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index 5a140d6ed0a13d..897adc463c82fd 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -437,7 +437,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) TrackAllocation *pTracker = NULL; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0); if (pTracker == NULL) diff --git a/src/coreclr/vm/eetwain.cpp b/src/coreclr/vm/eetwain.cpp index 640591c7d9b71f..1e16cb811bc6f9 100644 --- a/src/coreclr/vm/eetwain.cpp +++ b/src/coreclr/vm/eetwain.cpp @@ -1486,7 +1486,7 @@ bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo, return gcInfoDecoder.IsInterruptible(); } -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool EECodeManager::HasTailCalls( EECodeInfo *pCodeInfo) { CONTRACTL { @@ -1504,7 +1504,7 @@ bool EECodeManager::HasTailCalls( EECodeInfo *pCodeInfo) return gcInfoDecoder.HasTailCalls(); } -#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 +#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 #if defined(TARGET_AMD64) && defined(_DEBUG) diff --git a/src/coreclr/vm/encee.cpp b/src/coreclr/vm/encee.cpp index 37625bbce4bbf4..19c777431981ff 100644 --- a/src/coreclr/vm/encee.cpp +++ b/src/coreclr/vm/encee.cpp @@ -608,7 +608,7 @@ HRESULT EditAndContinueModule::ResumeInUpdatedFunction( SIZE_T newILOffset, CONTEXT *pOrigContext) { -#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) return E_NOTIMPL; #else LOG((LF_ENC, LL_INFO100, "EnCModule::ResumeInUpdatedFunction for %s at IL offset 0x%x, ", diff --git a/src/coreclr/vm/excep.cpp b/src/coreclr/vm/excep.cpp index 1a9666d7d9f7e3..44900ba17d2ff1 100644 --- a/src/coreclr/vm/excep.cpp +++ b/src/coreclr/vm/excep.cpp @@ -6278,9 +6278,9 @@ IsDebuggerFault(EXCEPTION_RECORD *pExceptionRecord, #endif // TARGET_UNIX -#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) +#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) EXTERN_C void JIT_StackProbe_End(); -#endif // TARGET_ARM64 +#endif // !TARGET_ARM64 && !TARGET_LOONGARCH64 && 
!TARGET_RISCV64

 #ifdef FEATURE_EH_FUNCLETS
@@ -6345,9 +6345,9 @@ bool IsIPInMarkedJitHelper(UINT_PTR uControlPc)
     CHECK_RANGE(JIT_WriteBarrier)
     CHECK_RANGE(JIT_CheckedWriteBarrier)
     CHECK_RANGE(JIT_ByRefWriteBarrier)
-#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64)
+#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
     CHECK_RANGE(JIT_StackProbe)
-#endif // !TARGET_ARM64
+#endif // !TARGET_ARM64 && !TARGET_LOONGARCH64 && !TARGET_RISCV64
 #else
 #ifdef TARGET_UNIX
     CHECK_RANGE(JIT_WriteBarrierGroup)
@@ -6469,7 +6469,7 @@ AdjustContextForJITHelpers(
         Thread::VirtualUnwindToFirstManagedCallFrame(pContext);

-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
         // We had an AV in the writebarrier that needs to be treated
         // as originating in managed code. At this point, the stack (growing
         // from left->right) looks like this:
@@ -6493,7 +6493,7 @@ AdjustContextForJITHelpers(
         // Now we save the address back into the context so that it gets used
         // as the faulting address.
         SetIP(pContext, ControlPCPostAdjustment);
-#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64

     // Unwind the frame chain - On Win64, this is required since we may handle the managed fault and to do so,
     // we will replace the exception context with the managed context and "continue execution" there. Thus, we do not
diff --git a/src/coreclr/vm/frames.h b/src/coreclr/vm/frames.h
index 2261fe4431be1c..0241b4005f8d93 100644
--- a/src/coreclr/vm/frames.h
+++ b/src/coreclr/vm/frames.h
@@ -863,6 +863,9 @@ class RedirectedThreadFrame : public ResumableFrame
 #elif defined(TARGET_LOONGARCH64)
             Object** firstIntReg = (Object**)&this->GetContext()->Tp;
             Object** lastIntReg  = (Object**)&this->GetContext()->S8;
+#elif defined(TARGET_RISCV64)
+            Object** firstIntReg = (Object**)&this->GetContext()->Gp;
+            Object** lastIntReg  = (Object**)&this->GetContext()->T6;
 #else
             _ASSERTE(!"nyi for platform");
 #endif
diff --git a/src/coreclr/vm/gccover.cpp b/src/coreclr/vm/gccover.cpp
index 71a49b63be540a..99b0849d4ab233 100644
--- a/src/coreclr/vm/gccover.cpp
+++ b/src/coreclr/vm/gccover.cpp
@@ -36,7 +36,7 @@ MethodDesc* AsMethodDesc(size_t addr);
 static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE*nextInstr);

-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
 static void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID codeStart);
 static bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 stopOffset, LPVOID codeStart);
 #endif
@@ -97,7 +97,7 @@ bool IsGcCoverageInterruptInstruction(PBYTE instrPtr)
 {
     UINT32 instrVal;

-#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
     instrVal = *reinterpret_cast<UINT32*>(instrPtr);
 #elif defined(TARGET_ARM)
     size_t instrLen = GetARMInstructionLength(instrPtr);
@@ -118,7 +118,7 @@ bool IsGcCoverageInterruptInstruction(PBYTE instrPtr)

 bool IsOriginalInstruction(PBYTE instrPtr, GCCoverageInfo* gcCover, DWORD offset)
 {
-#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
     UINT32 instrVal = 
*reinterpret_cast<UINT32*>(instrPtr);
     UINT32 origInstrVal = *reinterpret_cast<UINT32*>(gcCover->savedCode + offset);
     return (instrVal == origInstrVal);
@@ -174,7 +174,7 @@ void SetupAndSprinkleBreakpoints(
                                        fZapped);

     // This is not required for ARM* as the above call does the work for both hot & cold regions
-#if !defined(TARGET_ARM) && !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64)
+#if !defined(TARGET_ARM) && !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
     if (gcCover->methodRegion.coldSize != 0)
     {
         gcCover->SprinkleBreakpoints(gcCover->savedCode + gcCover->methodRegion.hotSize,
@@ -341,6 +341,8 @@ void ReplaceInstrAfterCall(PBYTE instrToReplace, MethodDesc* callMD)
         *(DWORD*)instrToReplace = INTERRUPT_INSTR_PROTECT_RET;
     else
         *(DWORD*)instrToReplace = INTERRUPT_INSTR;
+#elif defined(TARGET_RISCV64)
+// #error TODO RISCV64
 #else
     _ASSERTE(!"not implemented for platform");
 #endif
@@ -624,7 +626,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
     if ((regionOffsetAdj==0) && (*codeStart != INTERRUPT_INSTR))
         doingEpilogChecks = false;

-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
     //Save the method code from hotRegion
     memcpy(saveAddr, (BYTE*)methodRegion.hotStartAddress, methodRegion.hotSize);

@@ -668,7 +670,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
 #endif // TARGET_X86
 }

-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)

 #ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED

@@ -769,6 +771,8 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
     {
         instructionIsACallThroughRegister = TRUE;
     }
+#elif defined(TARGET_RISCV64)
+// #error TODO RISCV64
 #endif // _TARGET_XXXX_

     // safe point must always be after a call instruction
@@ -792,7 +796,7 @@ void replaceSafePointInstructionWithGcStressInstr(UINT32 safePointOffset, LPVOID
         // safe point will be replaced with appropriate illegal instruction at execution time when reg value is known
 #if defined(TARGET_ARM)
         *((WORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL;
-#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
         *((DWORD*)instrPtrWriterHolder.GetRW()) = INTERRUPT_INSTR_CALL;
 #endif // _TARGET_XXXX_
     }
@@ -913,7 +917,7 @@ bool replaceInterruptibleRangesWithGcStressInstr (UINT32 startOffset, UINT32 sto
             }
             instrPtrRW += instrLen;
-#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
             *((DWORD*)instrPtrRW) = INTERRUPT_INSTR;
             instrPtrRW += 4;
 #endif // TARGET_XXXX_
@@ -1015,6 +1019,8 @@ static PBYTE getTargetOfCall(PBYTE instrPtr, PCONTEXT regs, PBYTE* nextInstr) {
     {
         return 0; // Fail
     }
+#elif defined(TARGET_RISCV64)
+// #error TODO RISCV64
 #endif

 #ifdef TARGET_AMD64
@@ -1256,6 +1262,8 @@ void RemoveGcCoverageInterrupt(TADDR instrPtr, BYTE * savedInstrPtr, GCCoverageI
         *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr;
 #elif defined(TARGET_LOONGARCH64)
         *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr;
+#elif defined(TARGET_RISCV64)
+        *(DWORD *)instrPtrWriterHolder.GetRW() = *(DWORD *)savedInstrPtr;
 #else
         *(BYTE *)instrPtrWriterHolder.GetRW() = *savedInstrPtr;
 #endif
@@ -1478,6 +1486,8 @@ void DoGcStress (PCONTEXT regs, 
NativeCodeVersion nativeCodeVersion) atCall = (instrVal == INTERRUPT_INSTR_CALL); afterCallProtect[0] = (instrVal == INTERRUPT_INSTR_PROTECT_RET); +#elif defined(TARGET_RISCV64) +// #error TODO RISCV64 #endif // _TARGET_* if (!IsGcCoverageInterruptInstruction(instrPtr)) @@ -1596,7 +1606,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) } #endif // TARGET_X86 -#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) /* In non-fully interruptible code, if the EIP is just after a call instr means something different because it expects that we are IN the @@ -1652,6 +1662,8 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; #elif defined(TARGET_LOONGARCH64) *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; +#elif defined(TARGET_RISCV64) + *(DWORD*)nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; #else *nextInstrWriterHolder.GetRW() = INTERRUPT_INSTR; #endif @@ -1734,6 +1746,8 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) retValRegs[numberOfRegs++] = regs->X0; #elif defined(TARGET_LOONGARCH64) retValRegs[numberOfRegs++] = regs->A0; +#elif defined(TARGET_RISCV64) + retValRegs[numberOfRegs++] = regs->A0; #endif // TARGET_ARM64 } @@ -1787,6 +1801,8 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion) regs->X[0] = retValRegs[0]; #elif defined(TARGET_LOONGARCH64) regs->A0 = retValRegs[0]; +#elif defined(TARGET_RISCV64) + regs->A0 = retValRegs[0]; #else PORTABILITY_ASSERT("DoGCStress - return register"); #endif diff --git a/src/coreclr/vm/gcinfodecoder.cpp b/src/coreclr/vm/gcinfodecoder.cpp index 67fbb3b25e2548..7a518d3caa0654 100644 --- a/src/coreclr/vm/gcinfodecoder.cpp +++ b/src/coreclr/vm/gcinfodecoder.cpp @@ -133,7 +133,7 @@ GcInfoDecoder::GcInfoDecoder( int hasStackBaseRegister = headerFlags & GC_INFO_HAS_STACK_BASE_REGISTER; #ifdef TARGET_AMD64 m_WantsReportOnlyLeaf = ((headerFlags & GC_INFO_WANTS_REPORT_ONLY_LEAF) != 0); -#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) m_HasTailCalls = ((headerFlags & GC_INFO_HAS_TAILCALLS) != 0); #endif // TARGET_AMD64 int hasEncInfo = headerFlags & GC_INFO_HAS_EDIT_AND_CONTINUE_INFO; @@ -144,7 +144,7 @@ GcInfoDecoder::GcInfoDecoder( (ReturnKind)((UINT32)m_Reader.Read(returnKindBits)); remainingFlags &= ~(DECODE_RETURN_KIND | DECODE_VARARG); -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) remainingFlags &= ~DECODE_HAS_TAILCALLS; #endif if (remainingFlags == 0) @@ -383,7 +383,7 @@ bool GcInfoDecoder::IsSafePoint(UINT32 codeOffset) if(m_NumSafePoints == 0) return false; -#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // Safepoints are encoded with a -1 adjustment codeOffset--; #endif @@ -403,7 +403,7 @@ UINT32 GcInfoDecoder::FindSafePoint(UINT32 breakOffset) const UINT32 numBitsPerOffset = 
CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength)); UINT32 result = m_NumSafePoints; -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // Safepoints are encoded with a -1 adjustment // but normalizing them masks off the low order bit // Thus only bother looking if the address is odd @@ -450,7 +450,7 @@ void GcInfoDecoder::EnumerateSafePoints(EnumerateSafePointsCallback *pCallback, UINT32 normOffset = (UINT32)m_Reader.Read(numBitsPerOffset); UINT32 offset = DENORMALIZE_CODE_OFFSET(normOffset) + 2; -#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) // Safepoints are encoded with a -1 adjustment offset--; #endif @@ -536,13 +536,13 @@ bool GcInfoDecoder::GetIsVarArg() return m_IsVarArg; } -#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) bool GcInfoDecoder::HasTailCalls() { _ASSERTE( m_Flags & DECODE_HAS_TAILCALLS ); return m_HasTailCalls; } -#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 +#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 bool GcInfoDecoder::WantsReportOnlyLeaf() { @@ -1893,6 +1893,141 @@ void GcInfoDecoder::ReportRegisterToGC( pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false))); } +#elif defined(TARGET_RISCV64) + +#if defined(TARGET_UNIX) && !defined(FEATURE_NATIVEAOT) +OBJECTREF* GcInfoDecoder::GetCapturedRegister( + int regNum, + PREGDISPLAY pRD + ) +{ + _ASSERTE(regNum >= 1 && regNum <= 31); + + // The fields of CONTEXT are in the same order as + // the processor encoding numbers. 
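+    // As a reference sketch (assuming the standard RV64I numbering that the
+    // Linux CONTEXT layout follows): x0=zero, x1=ra, x2=sp, x3=gp, x4=tp,
+    // x5-x7=t0-t2, x8=s0/fp, x9=s1, x10-x17=a0-a7, x18-x27=s2-s11,
+    // x28-x31=t3-t6. Indexing R0 by the raw encoding number therefore lands
+    // on the captured value, e.g. regNum 10 yields &pRD->pCurrentContext->A0.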
+
+    DWORD64 *pR0 = &pRD->pCurrentContext->R0;
+
+    return (OBJECTREF*)(pR0 + regNum);
+}
+#endif // TARGET_UNIX && !FEATURE_NATIVEAOT
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+                        int             regNum,
+                        PREGDISPLAY     pRD
+                        )
+{
+    _ASSERTE((regNum == 1) || (regNum >= 5 && regNum <= 31));
+
+#ifdef FEATURE_NATIVEAOT
+    PTR_UIntNative* ppReg = &pRD->pR0;
+
+    return (OBJECTREF*)*(ppReg + regNum);
+#else
+    if(regNum == 1)
+    {
+        return (OBJECTREF*) pRD->pCurrentContextPointers->Ra;
+    }
+    else if(regNum == 8)
+    {
+        return (OBJECTREF*) pRD->pCurrentContextPointers->Fp;
+    }
+    else if (regNum == 9)
+    {
+        return (OBJECTREF*) pRD->pCurrentContextPointers->S1;
+    }
+    else if (regNum < 8)
+    {
+        return (OBJECTREF*)*(DWORD64**)(&pRD->volatileCurrContextPointers.T0 + (regNum - 5));
+    }
+    else if (regNum < 18)
+    {
+        return (OBJECTREF*)*(DWORD64**)(&pRD->volatileCurrContextPointers.A0 + (regNum - 10));
+    }
+
+    return (OBJECTREF*)*(DWORD64**)(&pRD->pCurrentContextPointers->S2 + (regNum - 18));
+#endif
+}
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+    _ASSERTE(regNum >= 0 && regNum <= 31);
+
+    return (regNum >= 6 && regNum <= 8) || (regNum >= 10 && regNum <= 17) || regNum >= 28;
+}
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+    _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+    TADDR pSlot = (TADDR) GetStackSlot(spOffset, spBase, pRD);
+    _ASSERTE(pSlot >= pRD->SP);
+
+    return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
+#else
+    return FALSE;
+#endif
+}
+
+void GcInfoDecoder::ReportRegisterToGC(
+                                int             regNum,
+                                unsigned        gcFlags,
+                                PREGDISPLAY     pRD,
+                                unsigned        flags,
+                                GCEnumCallback  pCallBack,
+                                void *          hCallBack)
+{
+    GCINFODECODER_CONTRACT;
+
+    _ASSERTE(regNum > 0 && regNum <= 31);
+
+    LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
+
+    OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
+#if defined(TARGET_UNIX) && !defined(FEATURE_NATIVEAOT) && !defined(SOS_TARGET_RISCV64)
+
+    // On PAL, we don't always have the context pointers available due to
+    // a limitation of an unwinding library. In such case, the context
+    // pointers for some nonvolatile registers are NULL.
+    // In such case, we let the pObjRef point to the captured register
+    // value in the context and pin the object itself.
+    if (pObjRef == NULL)
+    {
+        // Report a pinned object to GC only in the promotion phase when the
+        // GC is scanning roots.
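+        // (If the object were not pinned, the GC could relocate it and the
+        // captured value in the context - which has no home location we can
+        // write back to - would go stale.)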
+        GCCONTEXT* pGCCtx = (GCCONTEXT*)(hCallBack);
+        if (!pGCCtx->sc->promotion)
+        {
+            return;
+        }
+
+        pObjRef = GetCapturedRegister(regNum, pRD);
+
+        gcFlags |= GC_CALL_PINNED;
+    }
+#endif // TARGET_UNIX && !FEATURE_NATIVEAOT && !SOS_TARGET_RISCV64
+
+#ifdef _DEBUG
+    if(IsScratchRegister(regNum, pRD))
+    {
+        // Scratch registers cannot be reported for non-leaf frames
+        _ASSERTE(flags & ActiveStackFrame);
+    }
+
+    LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+         "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+    VALIDATE_ROOT((gcFlags & GC_CALL_INTERIOR), hCallBack, pObjRef);
+
+    LOG_PIPTR(pObjRef, gcFlags, hCallBack);
+#endif //_DEBUG
+
+    gcFlags |= CHECK_APP_DOMAIN;
+
+    pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false)));
+}
+
 #else // Unknown platform

 OBJECTREF* GcInfoDecoder::GetRegisterSlot(
@@ -1980,6 +2115,8 @@ int GcInfoDecoder::GetStackReg(int spBase)
     int esp = 31;
 #elif defined(TARGET_LOONGARCH64)
     int esp = 3;
+#elif defined(TARGET_RISCV64)
+    int esp = 2;  // sp is x2 on RISC-V
 #endif

     if( GC_SP_REL == spBase )
diff --git a/src/coreclr/vm/interpreter.cpp b/src/coreclr/vm/interpreter.cpp
index 3eafbc0e7557e3..f151d5fa921163 100644
--- a/src/coreclr/vm/interpreter.cpp
+++ b/src/coreclr/vm/interpreter.cpp
@@ -91,7 +91,7 @@ InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO*
     }
 #endif

-#if defined(UNIX_AMD64_ABI) || defined(HOST_LOONGARCH64)
+#if defined(UNIX_AMD64_ABI) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
     // ...or it fits into two registers.
     if (hasRetBuff && getClassSize(methInfo->args.retTypeClass) <= 2 * sizeof(void*))
     {
@@ -537,6 +537,10 @@ void Interpreter::ArgState::AddArg(unsigned canonIndex, short numSlots, bool noR
 #elif defined(HOST_LOONGARCH64)
     callerArgStackSlots += numSlots;
     ClrSafeInt<short> offset(-callerArgStackSlots);
+#elif defined(HOST_RISCV64)
+    callerArgStackSlots += numSlots;
+    ClrSafeInt<short> offset(-callerArgStackSlots);
+    assert(!"Unimplemented on RISCV64 yet");
 #endif
     offset *= static_cast<short>(sizeof(void*));
     _ASSERTE(!offset.IsOverflow());
@@ -700,7 +704,8 @@ void Interpreter::ArgState::AddFPArg(unsigned canonIndex, unsigned short numSlot
         fpArgsUsed |= (0x1 << (numFPRegArgSlots + i));
     }
     numFPRegArgSlots += numSlots;
-
+#elif defined(HOST_RISCV64)
+    assert(!"Unimplemented on RISCV64 yet");
 #else
 #error "Unsupported architecture"
 #endif
@@ -1125,7 +1130,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
 #elif defined(HOST_ARM)
                 // LONGS have 2-reg alignment; inc reg if necessary.
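                 // (On 32-bit ARM a 64-bit value starts in an even-numbered
                 // core register and consumes two slots, hence twoSlotAlign.)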
argState.AddArg(k, 2, /*noReg*/false, /*twoSlotAlign*/true); -#elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) +#elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64) argState.AddArg(k); #else #error unknown platform @@ -1138,7 +1143,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, argState.AddArg(k, 1, /*noReg*/true); #elif defined(HOST_ARM) argState.AddFPArg(k, 1, /*twoSlotAlign*/false); -#elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) +#elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64) argState.AddFPArg(k, 1, false); #else #error unknown platform @@ -1151,7 +1156,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, argState.AddArg(k, 2, /*noReg*/true); #elif defined(HOST_ARM) argState.AddFPArg(k, 2, /*twoSlotAlign*/true); -#elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) +#elif defined(HOST_AMD64) || defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64) // TODO RISCV64 argState.AddFPArg(k, 1, false); #else #error unknown platform @@ -1194,6 +1199,8 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, } #elif defined(HOST_LOONGARCH64) argState.AddArg(k, static_cast(szSlots)); +#elif defined(HOST_RISCV64) + assert(!"Unimplemented on RISCV64 yet"); #else #error unknown platform #endif @@ -1250,6 +1257,9 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, // See StubLinkerCPU::EmitProlog for the layout of the stack unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*); unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*)); +#elif defined(HOST_RISCV64) + unsigned intRegArgBaseOffset = (argState.numFPRegArgSlots) * sizeof(void*); + unsigned short stackArgBaseOffset = (unsigned short) ((argState.numRegArgs + argState.numFPRegArgSlots) * sizeof(void*)); #else #error unsupported platform #endif @@ -1300,6 +1310,8 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, argState.argOffsets[k] = (regArgsFound - 1) * sizeof(void*); #elif defined(HOST_LOONGARCH64) argState.argOffsets[k] += intRegArgBaseOffset; +#elif defined(HOST_RISCV64) + assert(!"Unimplemented on RISCV64 yet"); #else #error unsupported platform #endif @@ -1614,7 +1626,8 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp, #elif defined(HOST_LOONGARCH64) assert(!"unimplemented on LOONGARCH yet"); - +#elif defined(HOST_RISCV64) + assert(!"Unimplemented on RISCV64 yet"); #else #error unsupported platform #endif @@ -6308,6 +6321,9 @@ void Interpreter::MkRefany() #elif defined(HOST_LOONGARCH64) tbr = NULL; NYI_INTERP("Unimplemented code: MkRefAny on LOONGARCH"); +#elif defined(HOST_RISCV64) + tbr = NULL; + NYI_INTERP("Unimplemented code: MkRefAny on RISCV64"); #else #error "unsupported platform" #endif @@ -9446,6 +9462,8 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T unsigned totalArgSlots = nSlots; #elif defined(HOST_LOONGARCH64) unsigned totalArgSlots = nSlots; +#elif defined(HOST_RISCV64) + unsigned totalArgSlots = nSlots; #else #error "unsupported platform" #endif diff --git a/src/coreclr/vm/interpreter.h b/src/coreclr/vm/interpreter.h index c76c4160c80cd4..f4eb3566a778b1 100644 --- a/src/coreclr/vm/interpreter.h +++ b/src/coreclr/vm/interpreter.h @@ -2056,6 +2056,8 @@ unsigned short 
Interpreter::NumberOfIntegerRegArgs() { return 4; }
 unsigned short Interpreter::NumberOfIntegerRegArgs() { return 8; }
 #elif defined(HOST_LOONGARCH64)
 unsigned short Interpreter::NumberOfIntegerRegArgs() { return 8; }
+#elif defined(HOST_RISCV64)
+unsigned short Interpreter::NumberOfIntegerRegArgs() { return 8; }
 #else
 #error Unsupported architecture.
 #endif
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index cd0161a4aec634..1cb285f3048c60 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -11024,6 +11024,8 @@ void reservePersonalityRoutineSpace(uint32_t &unwindSize)
     // Add space for personality routine, it must be 4-byte aligned.
     unwindSize += sizeof(ULONG);
+#elif defined(TARGET_RISCV64)
+    _ASSERTE(!"TODO RISCV64 NYI");
 #else
     PORTABILITY_ASSERT("reservePersonalityRoutineSpace");
 #endif // !defined(TARGET_AMD64)
@@ -11246,6 +11248,9 @@ void CEEJitInfo::allocUnwindInfo (
     ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG)));
     *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue();
+#elif defined(TARGET_RISCV64)
+    _ASSERTE(!"TODO RISCV64 NYI");
+
 #endif

     EE_TO_JIT_TRANSITION();
diff --git a/src/coreclr/vm/jitinterface.h b/src/coreclr/vm/jitinterface.h
index cc2cccca72a096..dd9de6b0be98bb 100644
--- a/src/coreclr/vm/jitinterface.h
+++ b/src/coreclr/vm/jitinterface.h
@@ -401,7 +401,7 @@ extern "C" void STDCALL JIT_MemCpy(void *dest, const void *src, SIZE_T count);

 void STDMETHODCALLTYPE JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle);

-#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64)
+#if !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
     void STDCALL JIT_StackProbe();
 #endif // TARGET_ARM64
 };
diff --git a/src/coreclr/vm/precode.h b/src/coreclr/vm/precode.h
index 4822ccfec7054d..b51e6e95a5c8fc 100644
--- a/src/coreclr/vm/precode.h
+++ b/src/coreclr/vm/precode.h
@@ -44,6 +44,11 @@ EXTERN_C VOID STDCALL PrecodeRemotingThunk();
 #define SIZEOF_PRECODE_BASE    CODE_SIZE_ALIGN
 #define OFFSETOF_PRECODE_TYPE  0

+#elif defined(HOST_RISCV64)
+
+#define SIZEOF_PRECODE_BASE    CODE_SIZE_ALIGN
+#define OFFSETOF_PRECODE_TYPE  0
+
 #endif // HOST_AMD64

 #ifndef DACCESS_COMPILE
@@ -63,6 +68,8 @@ struct InvalidPrecode
     static const int Type = 0;
 #elif defined(HOST_LOONGARCH64)
     static const int Type = 0xff;
+#elif defined(HOST_RISCV64)
+    static const int Type = 0x50;
 #endif
 };

@@ -98,6 +105,9 @@ struct StubPrecode
 #elif defined(HOST_LOONGARCH64)
     static const int Type = 0x4;
     static const int CodeSize = 24;
+#elif defined(HOST_RISCV64)
+    static const int Type = 0x51;
+    static const int CodeSize = 24;  // TODO RISCV64
 #endif // HOST_AMD64

     BYTE m_code[CodeSize];
@@ -234,6 +244,10 @@ struct FixupPrecode
     static const int Type = 0x3;
     static const int CodeSize = 32;
     static const int FixupCodeOffset = 12;
+#elif defined(HOST_RISCV64)
+    static const int Type = 0x52;
+    static const int CodeSize = 24;  // TODO RISCV64
+    static const int FixupCodeOffset = 8;
 #endif // HOST_AMD64

     BYTE m_code[CodeSize];
diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp
index 4b4373ac40e818..afc658961517bf 100644
--- a/src/coreclr/vm/prestub.cpp
+++ b/src/coreclr/vm/prestub.cpp
@@ -2303,7 +2303,7 @@ PCODE TheVarargNDirectStub(BOOL hasRetBuffArg)
 {
     LIMITED_METHOD_CONTRACT;

-#if !defined(TARGET_X86) && !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64)
+#if !defined(TARGET_X86) && !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64) && 
!defined(TARGET_RISCV64) if (hasRetBuffArg) { return GetEEFuncEntryPoint(VarargPInvokeStub_RetBuffArg); diff --git a/src/coreclr/vm/riscv64/asmconstants.h b/src/coreclr/vm/riscv64/asmconstants.h index 9a0cdd4e406d37..0952361c330726 100644 --- a/src/coreclr/vm/riscv64/asmconstants.h +++ b/src/coreclr/vm/riscv64/asmconstants.h @@ -1,4 +1,235 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +// asmconstants.h - +// +// This header defines field offsets and constants used by assembly code +// Be sure to rebuild clr/src/vm/ceemain.cpp after changing this file, to +// ensure that the constants match the expected C/C++ values -#error "TODO-RISCV64: missing implementation" +// #ifndef HOST_RISCV64 +// #error this file should only be used on an ARM platform +// #endif // HOST_RISCV64 + +#include "../../inc/switches.h" + +//----------------------------------------------------------------------------- + +#ifndef ASMCONSTANTS_C_ASSERT +#define ASMCONSTANTS_C_ASSERT(cond) +#endif + +#ifndef ASMCONSTANTS_RUNTIME_ASSERT +#define ASMCONSTANTS_RUNTIME_ASSERT(cond) +#endif + +// Some constants are different in _DEBUG builds. This macro factors out ifdefs from below. +#ifdef _DEBUG +#define DBG_FRE(dbg,fre) dbg +#else +#define DBG_FRE(dbg,fre) fre +#endif + +#define DynamicHelperFrameFlags_Default 0 +#define DynamicHelperFrameFlags_ObjectArg 1 +#define DynamicHelperFrameFlags_ObjectArg2 2 + +#define Thread__m_fPreemptiveGCDisabled 0x0C +#define Thread__m_pFrame 0x10 + +ASMCONSTANTS_C_ASSERT(Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled)); +ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame)); + +#define Thread_m_pFrame Thread__m_pFrame +#define Thread_m_fPreemptiveGCDisabled Thread__m_fPreemptiveGCDisabled + +#define METHODDESC_REGISTER t2 // TODO RISCV64 + +#define SIZEOF__ArgumentRegisters 0x40 +ASMCONSTANTS_C_ASSERT(SIZEOF__ArgumentRegisters == sizeof(ArgumentRegisters)) + +// 8*8=0x40, fa0-fa7 +#define SIZEOF__FloatArgumentRegisters 0x40 +ASMCONSTANTS_C_ASSERT(SIZEOF__FloatArgumentRegisters == sizeof(FloatArgumentRegisters)) + +#define ASM_ENREGISTERED_RETURNTYPE_MAXSIZE 0x10 +ASMCONSTANTS_C_ASSERT(ASM_ENREGISTERED_RETURNTYPE_MAXSIZE == ENREGISTERED_RETURNTYPE_MAXSIZE) + +#define CallDescrData__pSrc 0x00 +#define CallDescrData__numStackSlots 0x08 +#define CallDescrData__pArgumentRegisters 0x10 +#define CallDescrData__pFloatArgumentRegisters 0x18 +#define CallDescrData__fpReturnSize 0x20 +#define CallDescrData__pTarget 0x28 +#define CallDescrData__returnValue 0x30 + +ASMCONSTANTS_C_ASSERT(CallDescrData__pSrc == offsetof(CallDescrData, pSrc)) +ASMCONSTANTS_C_ASSERT(CallDescrData__numStackSlots == offsetof(CallDescrData, numStackSlots)) +ASMCONSTANTS_C_ASSERT(CallDescrData__pArgumentRegisters == offsetof(CallDescrData, pArgumentRegisters)) +ASMCONSTANTS_C_ASSERT(CallDescrData__pFloatArgumentRegisters == offsetof(CallDescrData, pFloatArgumentRegisters)) +ASMCONSTANTS_C_ASSERT(CallDescrData__fpReturnSize == offsetof(CallDescrData, fpReturnSize)) +ASMCONSTANTS_C_ASSERT(CallDescrData__pTarget == offsetof(CallDescrData, pTarget)) +ASMCONSTANTS_C_ASSERT(CallDescrData__returnValue == offsetof(CallDescrData, returnValue)) + +#define CORINFO_NullReferenceException_ASM 0 +ASMCONSTANTS_C_ASSERT( CORINFO_NullReferenceException_ASM + == CORINFO_NullReferenceException); + + +#define CORINFO_IndexOutOfRangeException_ASM 3 +ASMCONSTANTS_C_ASSERT( CORINFO_IndexOutOfRangeException_ASM + == 
CORINFO_IndexOutOfRangeException); + + +// Offset of the array containing the address of captured registers in MachState +#define MachState__captureS0_S11 0x0 +ASMCONSTANTS_C_ASSERT(MachState__captureS0_S11 == offsetof(MachState, captureS0_S11)) + +// Offset of the array containing the address of preserved registers in MachState +#define MachState__ptrS0_S11 0x60 +ASMCONSTANTS_C_ASSERT(MachState__ptrS0_S11 == offsetof(MachState, ptrS0_S11)) + +#define MachState__isValid 0xd0 +ASMCONSTANTS_C_ASSERT(MachState__isValid == offsetof(MachState, _isValid)) + +#define LazyMachState_captureS0_S11 MachState__captureS0_S11 +ASMCONSTANTS_C_ASSERT(LazyMachState_captureS0_S11 == offsetof(LazyMachState, captureS0_S11)) + +#define LazyMachState_captureSp (MachState__isValid+8) // padding for alignment +ASMCONSTANTS_C_ASSERT(LazyMachState_captureSp == offsetof(LazyMachState, captureSp)) + +#define LazyMachState_captureIp (LazyMachState_captureSp+8) +ASMCONSTANTS_C_ASSERT(LazyMachState_captureIp == offsetof(LazyMachState, captureIp)) + +#define VASigCookie__pNDirectILStub 0x8 +ASMCONSTANTS_C_ASSERT(VASigCookie__pNDirectILStub == offsetof(VASigCookie, pNDirectILStub)) + +#define DelegateObject___methodPtr 0x18 +ASMCONSTANTS_C_ASSERT(DelegateObject___methodPtr == offsetof(DelegateObject, _methodPtr)); + +#define DelegateObject___target 0x08 +ASMCONSTANTS_C_ASSERT(DelegateObject___target == offsetof(DelegateObject, _target)); + +#define SIZEOF__GSCookie 0x8 +ASMCONSTANTS_C_ASSERT(SIZEOF__GSCookie == sizeof(GSCookie)); + +#define SIZEOF__Frame 0x10 +ASMCONSTANTS_C_ASSERT(SIZEOF__Frame == sizeof(Frame)); + +#define SIZEOF__CONTEXT 0x220 +ASMCONSTANTS_C_ASSERT(SIZEOF__CONTEXT == sizeof(T_CONTEXT)); + + +//========================================= +#define MethodTable__m_dwFlags 0x0 +ASMCONSTANTS_C_ASSERT(MethodTable__m_dwFlags == offsetof(MethodTable, m_dwFlags)); + +#define MethodTable__m_BaseSize 0x04 +ASMCONSTANTS_C_ASSERT(MethodTable__m_BaseSize == offsetof(MethodTable, m_BaseSize)); + +#define MethodTable__m_ElementType DBG_FRE(0x38, 0x30) +ASMCONSTANTS_C_ASSERT(MethodTable__m_ElementType == offsetof(MethodTable, m_pMultipurposeSlot1)); + +#define ArrayBase__m_NumComponents 0x8 +ASMCONSTANTS_C_ASSERT(ArrayBase__m_NumComponents == offsetof(ArrayBase, m_NumComponents)); + +#define PtrArray__m_Array 0x10 +ASMCONSTANTS_C_ASSERT(PtrArray__m_Array == offsetof(PtrArray, m_Array)); + +#define TypeHandle_CanCast 0x1 // TypeHandle::CanCast + +//========================================= + + + +#ifdef FEATURE_COMINTEROP + +#define SIZEOF__ComMethodFrame 0x70 +ASMCONSTANTS_C_ASSERT(SIZEOF__ComMethodFrame == sizeof(ComMethodFrame)); + +#define UnmanagedToManagedFrame__m_pvDatum 0x10 +ASMCONSTANTS_C_ASSERT(UnmanagedToManagedFrame__m_pvDatum == offsetof(UnmanagedToManagedFrame, m_pvDatum)); + +#endif // FEATURE_COMINTEROP + + +#define REDIRECTSTUB_SP_OFFSET_CONTEXT 0 + +#define CONTEXT_Pc 0x108 +ASMCONSTANTS_C_ASSERT(CONTEXT_Pc == offsetof(T_CONTEXT,Pc)) + +#define SIZEOF__FaultingExceptionFrame (SIZEOF__Frame + 0x10 + SIZEOF__CONTEXT) +#define FaultingExceptionFrame__m_fFilterExecuted SIZEOF__Frame +ASMCONSTANTS_C_ASSERT(SIZEOF__FaultingExceptionFrame == sizeof(FaultingExceptionFrame)); +ASMCONSTANTS_C_ASSERT(FaultingExceptionFrame__m_fFilterExecuted == offsetof(FaultingExceptionFrame, m_fFilterExecuted)); + +#define SIZEOF__FixupPrecode 0 // TODO RISCV64 +//#define Offset_PrecodeChunkIndex 15 +//#define Offset_MethodDescChunkIndex 14 +#define MethodDesc_ALIGNMENT_SHIFT 3 +//#define FixupPrecode_ALIGNMENT_SHIFT_1 3 
+//#define FixupPrecode_ALIGNMENT_SHIFT_2 4 + +// ASMCONSTANTS_C_ASSERT(SIZEOF__FixupPrecode == sizeof(FixupPrecode)); // TODO RISCV64 +//ASMCONSTANTS_C_ASSERT(Offset_PrecodeChunkIndex == offsetof(FixupPrecode, m_PrecodeChunkIndex)); +//ASMCONSTANTS_C_ASSERT(Offset_MethodDescChunkIndex == offsetof(FixupPrecode, m_MethodDescChunkIndex)); +ASMCONSTANTS_C_ASSERT(MethodDesc_ALIGNMENT_SHIFT == MethodDesc::ALIGNMENT_SHIFT); +//ASMCONSTANTS_C_ASSERT((1< + +#ifndef TARGET_UNIX +#define USE_REDIRECT_FOR_GCSTRESS +#endif // TARGET_UNIX + +EXTERN_C void getFPReturn(int fpSize, INT64 *pRetVal); +EXTERN_C void setFPReturn(int fpSize, INT64 retVal); + + +class ComCallMethodDesc; + +extern PCODE GetPreStubEntryPoint(); + +#define COMMETHOD_PREPAD 24 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc) +#ifdef FEATURE_COMINTEROP +#define COMMETHOD_CALL_PRESTUB_SIZE 24 +#define COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET 16 // the offset of the call target address inside the prestub +#endif // FEATURE_COMINTEROP + +#define STACK_ALIGN_SIZE 16 + +#define JUMP_ALLOCATE_SIZE 16 // # bytes to allocate for a jump instruction +#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 16 // # bytes to allocate for a back to back jump instruction + +#define HAS_NDIRECT_IMPORT_PRECODE 1 + +#define USE_INDIRECT_CODEHEADER + +#define HAS_FIXUP_PRECODE 1 + +// ThisPtrRetBufPrecode one is necessary for closed delegates over static methods with return buffer +#define HAS_THISPTR_RETBUF_PRECODE 1 + +#define CODE_SIZE_ALIGN 8 +#define CACHE_LINE_SIZE 64 +#define LOG2SLOT LOG2_PTRSIZE + +#define ENREGISTERED_RETURNTYPE_MAXSIZE 16 // bytes (two FP registers: f10 and f11 +#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 16 // bytes (two int registers: a0 and a1) +#define ENREGISTERED_PARAMTYPE_MAXSIZE 16 // bytes (max value type size that can be passed by value) + +#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter +#define CALLDESCR_FPARGREGS 1 // CallDescrWorker has FloatArgumentRegisters parameter +// #define CALLDESCR_RETBUFFARGREG 1 // CallDescrWorker has RetBuffArg parameter that's separate from arg regs // TODO RISCV64 + +#define FLOAT_REGISTER_SIZE 16 // each register in FloatArgumentRegisters is 16 bytes. + +// Given a return address retrieved during stackwalk, +// this is the offset by which it should be decremented to arrive at the callsite. +#define STACKWALK_CONTROLPC_ADJUST_OFFSET 4 + +//********************************************************************** +// Parameter size +//********************************************************************** + +inline unsigned StackElemSize(unsigned parmSize, bool isValueType, bool isFloatHfa) +{ + const unsigned stackSlotSize = 8; + return ALIGN_UP(parmSize, stackSlotSize); +} + +// +// JIT HELPERS. 
+// +// Create alias for optimized implementations of helpers provided on this platform +// +#define JIT_GetSharedGCStaticBase JIT_GetSharedGCStaticBase_SingleAppDomain +#define JIT_GetSharedNonGCStaticBase JIT_GetSharedNonGCStaticBase_SingleAppDomain +#define JIT_GetSharedGCStaticBaseNoCtor JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain +#define JIT_GetSharedNonGCStaticBaseNoCtor JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain + +//********************************************************************** +// Frames +//********************************************************************** + +//-------------------------------------------------------------------- +// This represents the callee saved (non-volatile) integer registers saved as +// of a FramedMethodFrame. +//-------------------------------------------------------------------- +typedef DPTR(struct CalleeSavedRegisters) PTR_CalleeSavedRegisters; +struct CalleeSavedRegisters { + INT64 sp; // stack pointer + INT64 fp; // frame pointer + INT64 s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11; +}; + +//-------------------------------------------------------------------- +// This represents the arguments that are stored in volatile integer registers. +// This should not overlap the CalleeSavedRegisters since those are already +// saved separately and it would be wasteful to save the same register twice. +// If we do use a non-volatile register as an argument, then the ArgIterator +// will probably have to communicate this back to the PromoteCallerStack +// routine to avoid a double promotion. +//-------------------------------------------------------------------- +#define NUM_ARGUMENT_REGISTERS 8 +typedef DPTR(struct ArgumentRegisters) PTR_ArgumentRegisters; +struct ArgumentRegisters { + INT64 a[8]; // a0 ....a7 +}; + +#define ARGUMENTREGISTERS_SIZE sizeof(ArgumentRegisters) + + +//-------------------------------------------------------------------- +// This represents the floating point argument registers which are saved +// as part of the NegInfo for a FramedMethodFrame. Note that these +// might not be saved by all stubs: typically only those that call into +// C++ helpers will need to preserve the values in these volatile +// registers. +//-------------------------------------------------------------------- +#define NUM_FLOAT_ARGUMENT_REGISTERS 8 +typedef DPTR(struct FloatArgumentRegisters) PTR_FloatArgumentRegisters; +struct FloatArgumentRegisters { + //TODO: not supports RISCV64-SIMD. 
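+    // Presumably these are the eight FP argument registers fa0-fa7
+    // (f10-f17 in the hardware encoding), captured as their low 64 bits
+    // given the D extension.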
+ double f[8]; // f0-f7 +}; + + +//********************************************************************** +// Exception handling +//********************************************************************** + +inline PCODE GetIP(const T_CONTEXT * context) { + LIMITED_METHOD_DAC_CONTRACT; + return context->Pc; +} + +inline void SetIP(T_CONTEXT *context, PCODE ip) { + LIMITED_METHOD_DAC_CONTRACT; + context->Pc = ip; +} + +inline TADDR GetSP(const T_CONTEXT * context) { + LIMITED_METHOD_DAC_CONTRACT; + return TADDR(context->Sp); +} + +inline TADDR GetRA(const T_CONTEXT * context) { + LIMITED_METHOD_DAC_CONTRACT; + return context->Ra; +} + +inline void SetRA( T_CONTEXT * context, TADDR ip) { + LIMITED_METHOD_DAC_CONTRACT; + context->Ra = ip; +} + +inline TADDR GetReg(T_CONTEXT * context, int Regnum) +{ + LIMITED_METHOD_DAC_CONTRACT; + _ASSERTE(Regnum >= 0 && Regnum < 32 ); + return (TADDR)(&context->R0 + Regnum); +} + +inline void SetReg(T_CONTEXT * context, int Regnum, PCODE RegContent) +{ + LIMITED_METHOD_DAC_CONTRACT; + _ASSERTE(Regnum >= 0 && Regnum <=28 ); + *(&context->R0 + Regnum) = RegContent; +} + +extern "C" LPVOID __stdcall GetCurrentSP(); + +inline void SetSP(T_CONTEXT *context, TADDR sp) { + LIMITED_METHOD_DAC_CONTRACT; + context->Sp = DWORD64(sp); +} + +inline void SetFP(T_CONTEXT *context, TADDR fp) { + LIMITED_METHOD_DAC_CONTRACT; + context->Fp = DWORD64(fp); +} + +inline TADDR GetFP(const T_CONTEXT * context) +{ + LIMITED_METHOD_DAC_CONTRACT; + return (TADDR)(context->Fp); +} + + +inline TADDR GetMem(PCODE address, SIZE_T size, bool signExtend) +{ + TADDR mem; + LIMITED_METHOD_DAC_CONTRACT; + EX_TRY + { + switch (size) + { + case 4: + if (signExtend) + mem = *(int32_t*)address; + else + mem = *(uint32_t*)address; + break; + case 8: + mem = *(uint64_t*)address; + break; + default: + UNREACHABLE(); + } + } + EX_CATCH + { + mem = NULL; + _ASSERTE(!"Memory read within jitted Code Failed, this should not happen!!!!"); + } + EX_END_CATCH(SwallowAllExceptions); + return mem; +} + +#ifdef FEATURE_COMINTEROP +void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target); +#endif // FEATURE_COMINTEROP + +inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode, bool hasCodeExecutedBefore = false) +{ + return FlushInstructionCache(GetCurrentProcess(), pCodeAddr, sizeOfCode); +} + +//------------------------------------------------------------------------ +inline void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target) +{ + LIMITED_METHOD_CONTRACT; + UINT32* pCode = (UINT32*)pBufferRW; + +#if 0 // TODO RISCV64 + // We require 8-byte alignment so the LDR instruction is aligned properly + _ASSERTE(((UINT_PTR)pCode & 7) == 0); + + // +0: ldr x16, [pc, #8] + // +4: br x16 + // +8: [target address] + + pCode[0] = 0x58000050UL; // ldr x16, [pc, #8] + pCode[1] = 0xD61F0200UL; // br x16 + + // Ensure that the updated instructions get updated in the I-Cache + ClrFlushInstructionCache(pBufferRX, 8); + + *((LPVOID *)(pCode + 2)) = target; // 64-bit target address +#endif + +} + +//------------------------------------------------------------------------ +// Given the same pBuffer that was used by emitJump this method +// decodes the instructions and returns the jump target +inline PCODE decodeJump(PCODE pCode) +{ + LIMITED_METHOD_CONTRACT; + + TADDR pInstr = PCODEToPINSTR(pCode); + + return *dac_cast(pInstr + 2*sizeof(DWORD)); +} + +//------------------------------------------------------------------------ +inline BOOL isJump(PCODE pCode) 
+{ + LIMITED_METHOD_DAC_CONTRACT; + + TADDR pInstr = PCODEToPINSTR(pCode); + + return *dac_cast(pInstr) == 0x58000050; +} + +//------------------------------------------------------------------------ +inline BOOL isBackToBackJump(PCODE pBuffer) +{ + WRAPPER_NO_CONTRACT; + SUPPORTS_DAC; + return isJump(pBuffer); +} + +//------------------------------------------------------------------------ +inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target) +{ + WRAPPER_NO_CONTRACT; + emitJump(pBufferRX, pBufferRW, target); +} + +//------------------------------------------------------------------------ +inline PCODE decodeBackToBackJump(PCODE pBuffer) +{ + WRAPPER_NO_CONTRACT; + return decodeJump(pBuffer); +} + +//---------------------------------------------------------------------- + +struct IntReg +{ + int reg; + IntReg(int reg):reg(reg) + { + _ASSERTE(0 <= reg && reg < 32); + } + + operator int () { return reg; } + operator int () const { return reg; } + int operator == (IntReg other) { return reg == other.reg; } + int operator != (IntReg other) { return reg != other.reg; } + WORD Mask() const { return 1 << reg; } +}; + +struct VecReg +{ + int reg; + VecReg(int reg):reg(reg) + { + _ASSERTE(0 <= reg && reg < 32); + } + + operator int() { return reg; } + int operator == (VecReg other) { return reg == other.reg; } + int operator != (VecReg other) { return reg != other.reg; } + WORD Mask() const { return 1 << reg; } +}; + +struct CondCode +{ + int cond; + CondCode(int cond):cond(cond) + { + _ASSERTE(0 <= cond && cond < 16); + } +}; + +const IntReg RegSp = IntReg(2); +const IntReg RegFp = IntReg(8); +const IntReg RegRa = IntReg(1); + +#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn) + +#if 1 // TODO RISCV64 +class StubLinkerCPU : public StubLinker +{ + +private: + void EmitLoadStoreRegPairImm(DWORD flags, int regNum1, int regNum2, IntReg Xn, int offset, BOOL isVec); + void EmitLoadStoreRegImm(DWORD flags, int regNum, IntReg Xn, int offset, BOOL isVec, int log2Size = 3); +public: + +#if 0 + // BitFlags for EmitLoadStoreReg(Pair)Imm methods + enum { + eSTORE = 0x0, + eLOAD = 0x1, + eWRITEBACK = 0x2, + ePOSTINDEX = 0x4, + eFLAGMASK = 0x7 + }; + + // BitFlags for Register offsetted loads/stores + // Bits(1-3) indicate the encoding, while the bits(0) indicate the shift + enum { + eSHIFT = 0x1, // 0y0001 + eUXTW = 0x4, // 0y0100 + eSXTW = 0xC, // 0y1100 + eLSL = 0x7, // 0y0111 + eSXTX = 0xD, // 0y1110 + }; +#endif + + + static void Init(); + + void EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall); + void EmitCallLabel(CodeLabel *target, BOOL fTailCall, BOOL fIndirect); + + void EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray); + +#if defined(FEATURE_SHARE_GENERIC_CODE) + void EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg); +#endif // FEATURE_SHARE_GENERIC_CODE + +#ifdef _DEBUG + void EmitNop() { _ASSERTE(!"RISCV64: not implementation on riscv64!!!"); } +#endif + void EmitBreakPoint() { _ASSERTE(!"RISCV64: not implementation on riscv64!!!"); } + void EmitMovConstant(IntReg target, UINT64 constant); + void EmitCmpImm(IntReg reg, int imm); + void EmitCmpReg(IntReg Xn, IntReg Xm); + void EmitCondFlagJump(CodeLabel * target, UINT cond); + void EmitJumpRegister(IntReg regTarget); + void EmitMovReg(IntReg dest, IntReg source); + + void EmitSubImm(IntReg Xd, IntReg Xn, unsigned int value); + void EmitAddImm(IntReg Xd, IntReg Xn, unsigned int value); + + void EmitLoadStoreRegPairImm(DWORD flags, IntReg Xt1, 
IntReg Xt2, IntReg Xn, int offset=0); + void EmitLoadStoreRegPairImm(DWORD flags, VecReg Vt1, VecReg Vt2, IntReg Xn, int offset=0); + + void EmitLoadStoreRegImm(DWORD flags, IntReg Xt, IntReg Xn, int offset=0, int log2Size = 3); + void EmitLoadStoreRegImm(DWORD flags, VecReg Vt, IntReg Xn, int offset=0); + + void EmitLoadRegReg(IntReg Xt, IntReg Xn, IntReg Xm, DWORD option); + + void EmitCallRegister(IntReg reg); + void EmitProlog(unsigned short cIntRegArgs, + unsigned short cVecRegArgs, + unsigned short cCalleeSavedRegs, + unsigned short cbStackSpace = 0); + + void EmitEpilog(); + + void EmitRet(IntReg reg); + + +}; +#endif + +extern "C" void SinglecastDelegateInvokeStub(); + + +// preferred alignment for data +#define DATA_ALIGNMENT 8 + +// TODO RISCV64 +struct DECLSPEC_ALIGN(16) UMEntryThunkCode +{ + DWORD m_code[4]; + + TADDR m_pTargetCode; + TADDR m_pvSecretParam; + + void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam); + void Poison(); + + LPCBYTE GetEntryPoint() const + { + LIMITED_METHOD_CONTRACT; + + return (LPCBYTE)this; + } + + static int GetEntryPointOffset() + { + LIMITED_METHOD_CONTRACT; + + return 0; + } +}; + +struct HijackArgs +{ + union + { + DWORD64 Ra; + size_t ReturnAddress; + }; + union + { + struct { + DWORD64 A0; + DWORD64 A1; + }; + size_t ReturnValue[2]; + }; + DWORD64 Fp; // frame pointer + DWORD64 Tp, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11; + }; + +// Precode to shuffle this and retbuf for closed delegates over static methods with return buffer +struct ThisPtrRetBufPrecode { + + static const int Type = 0x53; + + UINT32 m_rgCode[6]; + TADDR m_pTarget; + TADDR m_pMethodDesc; + + void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator); + + TADDR GetMethodDesc() + { + LIMITED_METHOD_DAC_CONTRACT; + + return m_pMethodDesc; + } + + PCODE GetTarget() + { + LIMITED_METHOD_DAC_CONTRACT; + return m_pTarget; + } + +#ifndef DACCESS_COMPILE + BOOL SetTargetInterlocked(TADDR target, TADDR expected) + { + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + } + CONTRACTL_END; + + ExecutableWriterHolder precodeWriterHolder(this, sizeof(ThisPtrRetBufPrecode)); + return (TADDR)InterlockedCompareExchange64( + (LONGLONG*)&precodeWriterHolder.GetRW()->m_pTarget, (TADDR)target, (TADDR)expected) == expected; + } +#endif // !DACCESS_COMPILE +}; +typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode; + +#endif // __cgencpu_h__ diff --git a/src/coreclr/vm/riscv64/crthelpers.S b/src/coreclr/vm/riscv64/crthelpers.S index f6c1fb2c96ce46..de720f87a37f57 100644 --- a/src/coreclr/vm/riscv64/crthelpers.S +++ b/src/coreclr/vm/riscv64/crthelpers.S @@ -3,4 +3,12 @@ #include "unixasmmacros.inc" -#error "TODO-RISCV64: missing implementation" +// #error "TODO-RISCV64: missing implementation" + +LEAF_ENTRY JIT_MemSet, _TEXT +// TODO RISCV NYI +LEAF_END_MARKED JIT_MemSet, _TEXT + +LEAF_ENTRY JIT_MemCpy, _TEXT +// TODO RISCV NYI +LEAF_END_MARKED JIT_MemCpy, _TEXT diff --git a/src/coreclr/vm/riscv64/excepcpu.h b/src/coreclr/vm/riscv64/excepcpu.h index 4800154434a53a..eb575235af8feb 100644 --- a/src/coreclr/vm/riscv64/excepcpu.h +++ b/src/coreclr/vm/riscv64/excepcpu.h @@ -1,7 +1,50 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
+//
+//
+
 #ifndef __excepcpu_h__
 #define __excepcpu_h__

-#error "TODO-RISCV64: missing implementation"
+
+#define THROW_CONTROL_FOR_THREAD_FUNCTION  RedirectForThreadAbort
+EXTERN_C void RedirectForThreadAbort();
+
+
+#define STATUS_CLR_GCCOVER_CODE         STATUS_ILLEGAL_INSTRUCTION
+
+class Thread;
+class FaultingExceptionFrame;
+
+#define INSTALL_EXCEPTION_HANDLING_RECORD(record)
+#define UNINSTALL_EXCEPTION_HANDLING_RECORD(record)
+//
+// On RISCV64, the COMPlusFrameHandler's work is done by our personality routine.
+//
+#define DECLARE_CPFH_EH_RECORD(pCurThread)
+
+//
+// Retrieves the redirected CONTEXT* from the stack frame of one of the
+// RedirectedHandledJITCaseForXXX_Stub's.
+//
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext);
+PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext);
+
+//
+// Retrieves the FaultingExceptionFrame* from the stack frame of
+// RedirectForThrowControl.
+//
+FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (T_DISPATCHER_CONTEXT *pDispatcherContext);
+
+inline
+PCODE GetAdjustedCallAddress(PCODE returnAddress)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return returnAddress - 4;
+}
+
+BOOL AdjustContextForVirtualStub(EXCEPTION_RECORD *pExceptionRecord, T_CONTEXT *pContext);
+
+#endif // __excepcpu_h__
diff --git a/src/coreclr/vm/riscv64/gmscpu.h b/src/coreclr/vm/riscv64/gmscpu.h
index 9a0cdd4e406d37..121ae87469c593 100644
--- a/src/coreclr/vm/riscv64/gmscpu.h
+++ b/src/coreclr/vm/riscv64/gmscpu.h
@@ -1,4 +1,102 @@
 // Licensed to the .NET Foundation under one or more agreements.
 // The .NET Foundation licenses this file to you under the MIT license.

-#error "TODO-RISCV64: missing implementation"
+/**************************************************************/
+/*                       gmscpu.h                             */
+/**************************************************************/
+/* HelperFrame defines the 'GET_STATE(machState)' macro, which
+   figures out what the state of the machine will be when the
+   current method returns.  It then stores the state in the
+   JIT_machState structure.  */
+
+/**************************************************************/
+
+#ifndef __gmscpu_h__
+#define __gmscpu_h__
+
+// S0 - S11
+#define NUM_NONVOLATILE_CONTEXT_POINTERS 12
+
+struct MachState {
+    ULONG64        captureS0_S11[NUM_NONVOLATILE_CONTEXT_POINTERS]; // preserved registers
+    PTR_ULONG64    ptrS0_S11[NUM_NONVOLATILE_CONTEXT_POINTERS]; // pointers to preserved registers
+    TADDR          _pc; // program counter after the function returns
+    TADDR          _sp; // stack pointer after the function returns
+    BOOL           _isValid;
+
+    BOOL   isValid()    { LIMITED_METHOD_DAC_CONTRACT; return _isValid; }
+    TADDR  GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; return _pc; }
+};
+
+struct LazyMachState : public MachState {
+
+    TADDR          captureSp;         // Stack pointer at the time of capture
+    TADDR          captureIp;         // Instruction pointer at the time of capture
+
+    void setLazyStateFromUnwind(MachState* copy);
+    static void unwindLazyState(LazyMachState* baseState,
+                                MachState* lazyState,
+                                DWORD threadId,
+                                int funCallDepth = 1,
+                                HostCallPreference hostCallPreference = AllowHostCalls);
+};
+
+inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
+{
+#if defined(DACCESS_COMPILE)
+    // This function cannot be called in DAC because DAC cannot update target memory.
+ DacError(E_FAIL); + return; + +#else // !DACCESS_COMPILE + + _sp = copy->_sp; + _pc = copy->_pc; + + // Capture* has already been set, so there is no need to touch it + + // loop over the nonvolatile context pointers and make + // sure to properly copy interior pointers into the + // new struct + + PULONG64* pSrc = (PULONG64 *)©->ptrS0_S11; + PULONG64* pDst = (PULONG64 *)&this->ptrS0_S11; + + const PULONG64 LowerBoundDst = (PULONG64) this; + const PULONG64 LowerBoundSrc = (PULONG64) copy; + + const PULONG64 UpperBoundSrc = (PULONG64) ((BYTE*)LowerBoundSrc + sizeof(*copy)); + + for (int i = 0; i < NUM_NONVOLATILE_CONTEXT_POINTERS; i++) + { + PULONG64 valueSrc = *pSrc++; + + if ((LowerBoundSrc <= valueSrc) && (valueSrc < UpperBoundSrc)) + { + // make any pointer interior to 'src' interior to 'dst' + valueSrc = (PULONG64)((BYTE*)valueSrc - (BYTE*)LowerBoundSrc + (BYTE*)LowerBoundDst); + } + + *pDst++ = valueSrc; + captureS0_S11[i] = copy->captureS0_S11[i]; + } + + + // this has to be last because we depend on write ordering to + // synchronize the race implicit in updating this struct + VolatileStore(&_isValid, TRUE); +#endif // DACCESS_COMPILE +} + +// Do the initial capture of the machine state. This is meant to be +// as light weight as possible, as we may never need the state that +// we capture. +EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState); + +#define CAPTURE_STATE(machState, ret) \ + LazyMachStateCaptureState(machState) + + +#endif diff --git a/src/coreclr/vm/riscv64/pinvokestubs.S b/src/coreclr/vm/riscv64/pinvokestubs.S index 3515f38c8120d7..3b48f2f62cf2ab 100644 --- a/src/coreclr/vm/riscv64/pinvokestubs.S +++ b/src/coreclr/vm/riscv64/pinvokestubs.S @@ -4,4 +4,64 @@ #include "asmconstants.h" #include "unixasmmacros.inc" -#error "TODO-RISCV64: missing implementation" +// #error "TODO-RISCV64: missing implementation" + +// ------------------------------------------------------------------ +// Macro to generate PInvoke Stubs. +// $__PInvokeStubFuncName : function which calls the actual stub obtained from VASigCookie +// $__PInvokeGenStubFuncName : function which generates the IL stubs for PInvoke +// +// Params :- +// $FuncPrefix : prefix of the function name for the stub +// Eg. VarargPinvoke, GenericPInvokeCalli +// $VASigCookieReg : register which contains the VASigCookie +// $SaveFPArgs : "Yes" or "No" . 
For varidic functions FP Args are not present in FP regs +// So need not save FP Args registers for vararg Pinvoke +.macro PINVOKE_STUB __PInvokeStubFuncName,__PInvokeGenStubFuncName,__PInvokeStubWorkerName,VASigCookieReg,HiddenArg,SaveFPArgs,ShiftLeftAndOrSecret=0 + + + NESTED_ENTRY \__PInvokeStubFuncName, _TEXT, NoHandler + // TODO RISCV64 NYI + NESTED_END \__PInvokeStubFuncName, _TEXT + + + NESTED_ENTRY \__PInvokeGenStubFuncName, _TEXT, NoHandler + // TODO RISCV64 NYI + NESTED_END \__PInvokeGenStubFuncName, _TEXT +.endm + +// ------------------------------------------------------------------ +// IN: +// InlinedCallFrame (x0) = pointer to the InlinedCallFrame data, including the GS cookie slot (GS cookie right +// before actual InlinedCallFrame data) +// +// + NESTED_ENTRY JIT_PInvokeBegin, _TEXT, NoHandler + // TODO RISCV64 NYI + NESTED_END JIT_PInvokeBegin, _TEXT + +// ------------------------------------------------------------------ +// IN: +// InlinedCallFrame (x0) = pointer to the InlinedCallFrame data, including the GS cookie slot (GS cookie right +// before actual InlinedCallFrame data) +// +// + LEAF_ENTRY JIT_PInvokeEnd, _TEXT + // TODO RISCV64 NYI + LEAF_END JIT_PInvokeEnd, _TEXT + +// ------------------------------------------------------------------ +// VarargPInvokeStub & VarargPInvokeGenILStub +// +PINVOKE_STUB VarargPInvokeStub, VarargPInvokeGenILStub, VarargPInvokeStubWorker, a0, t2, 1, 1 + + +// ------------------------------------------------------------------ +// GenericPInvokeCalliHelper & GenericPInvokeCalliGenILStub +// Helper for generic pinvoke calli instruction +// +// in: +// x15 = VASigCookie* +// x12 = Unmanaged target +// +PINVOKE_STUB GenericPInvokeCalliHelper, GenericPInvokeCalliGenILStub, GenericPInvokeCalliStubWorker, t3, t2, 1, 1 diff --git a/src/coreclr/vm/riscv64/profiler.cpp b/src/coreclr/vm/riscv64/profiler.cpp index 8d7dc92a1a59e7..e8e0d11ca159c2 100644 --- a/src/coreclr/vm/riscv64/profiler.cpp +++ b/src/coreclr/vm/riscv64/profiler.cpp @@ -3,4 +3,310 @@ #include "common.h" -#error "TODO-RISCV64: missing implementation" +// #error "TODO-RISCV64: missing implementation" +#ifdef PROFILING_SUPPORTED +#include "proftoeeinterfaceimpl.h" + +#define PROFILE_ENTER 1 +#define PROFILE_LEAVE 2 +#define PROFILE_TAILCALL 4 + +// Scratch space to store HFA return values (max 16 bytes) +#define PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE 16 + +typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA +{ + void* Fp; + void* Pc; + void* x8; + ArgumentRegisters argumentRegisters; + FunctionID functionId; + FloatArgumentRegisters floatArgumentRegisters; + void* probeSp; + void* profiledSp; + void* hiddenArg; + UINT32 flags; + UINT32 unused; + BYTE buffer[PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE]; +} PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA; + +UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void* pPlatformSpecificHandle) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + LIMITED_METHOD_CONTRACT; + + PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast(pPlatformSpecificHandle); + return (UINT_PTR)pData->Pc; +} + +void ProfileSetFunctionIDInPlatformSpecificHandle(void* pPlatformSpecificHandle, FunctionID functionId) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + LIMITED_METHOD_CONTRACT; + + _ASSERTE(pPlatformSpecificHandle != nullptr); + _ASSERTE(functionId != 0); + + PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast(pPlatformSpecificHandle); + pData->functionId = functionId; +} + +ProfileArgIterator::ProfileArgIterator(MetaSig* pSig, void* pPlatformSpecificHandle) + 
: m_argIterator(pSig) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + WRAPPER_NO_CONTRACT; + + _ASSERTE(pSig != nullptr); + _ASSERTE(pPlatformSpecificHandle != nullptr); + + m_handle = pPlatformSpecificHandle; + + PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast(pPlatformSpecificHandle); +#ifdef _DEBUG + // Unwind a frame and get the SP for the profiled method to make sure it matches + // what the JIT gave us + + // Setup the context to represent the frame that called ProfileEnterNaked + CONTEXT ctx; + memset(&ctx, 0, sizeof(CONTEXT)); + + ctx.Sp = (DWORD64)pData->probeSp; + ctx.Fp = (DWORD64)pData->Fp; + ctx.Pc = (DWORD64)pData->Pc; + + // Walk up a frame to the caller frame (called the managed method which called ProfileEnterNaked) + Thread::VirtualUnwindCallFrame(&ctx); + + _ASSERTE(pData->profiledSp == (void*)ctx.Sp); +#endif + + // Get the hidden arg if there is one + MethodDesc* pMD = FunctionIdToMethodDesc(pData->functionId); + + if ((pData->hiddenArg == nullptr) && (pMD->RequiresInstArg() || pMD->AcquiresInstMethodTableFromThis())) + { + if ((pData->flags & PROFILE_ENTER) != 0) + { + if (pMD->AcquiresInstMethodTableFromThis()) + { + pData->hiddenArg = GetThis(); + } + else + { + // On ARM64 the generic instantiation parameter comes after the optional "this" pointer. + if (m_argIterator.HasThis()) + { + pData->hiddenArg = (void*)pData->argumentRegisters.a[1]; + } + else + { + pData->hiddenArg = (void*)pData->argumentRegisters.a[0]; + } + } + } + else + { + EECodeInfo codeInfo((PCODE)pData->Pc); + + // We want to pass the caller SP here. + pData->hiddenArg = EECodeManager::GetExactGenericsToken((SIZE_T)(pData->profiledSp), &codeInfo); + } + } +} + +ProfileArgIterator::~ProfileArgIterator() +{ + _ASSERTE(!"TODO RISCV64 NYI"); + LIMITED_METHOD_CONTRACT; + + m_handle = nullptr; +} + +LPVOID ProfileArgIterator::GetNextArgAddr() +{ + _ASSERTE(!"TODO RISCV64 NYI"); + WRAPPER_NO_CONTRACT; + + _ASSERTE(m_handle != nullptr); + + PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast(m_handle); + + if ((pData->flags & (PROFILE_LEAVE | PROFILE_TAILCALL)) != 0) + { + _ASSERTE(!"GetNextArgAddr() - arguments are not available in leave and tailcall probes"); + return nullptr; + } + + int argOffset = m_argIterator.GetNextOffset(); + + if (argOffset == TransitionBlock::InvalidOffset) + { + return nullptr; + } + + if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset)) + { + return (LPBYTE)&pData->floatArgumentRegisters + (argOffset - TransitionBlock::GetOffsetOfFloatArgumentRegisters()); + } + + LPVOID pArg = nullptr; + + if (TransitionBlock::IsArgumentRegisterOffset(argOffset)) + { + pArg = (LPBYTE)&pData->argumentRegisters + (argOffset - TransitionBlock::GetOffsetOfArgumentRegisters()); + } + else + { + _ASSERTE(TransitionBlock::IsStackArgumentOffset(argOffset)); + + pArg = (LPBYTE)pData->profiledSp + (argOffset - TransitionBlock::GetOffsetOfArgs()); + } + + if (m_argIterator.IsArgPassedByRef()) + { + pArg = *(LPVOID*)pArg; + } + + return pArg; +} + +LPVOID ProfileArgIterator::GetHiddenArgValue(void) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + LIMITED_METHOD_CONTRACT; + + PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast(m_handle); + + return pData->hiddenArg; +} + +LPVOID ProfileArgIterator::GetThis(void) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + } + CONTRACTL_END; + + PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle; + MethodDesc* pMD = FunctionIdToMethodDesc(pData->functionId); + + // We guarantee to return the 
correct "this" pointer in the enter probe. + // For the leave and tailcall probes, we only return a valid "this" pointer if it is the generics token. + if (pData->hiddenArg != nullptr) + { + if (pMD->AcquiresInstMethodTableFromThis()) + { + return pData->hiddenArg; + } + } + + if ((pData->flags & PROFILE_ENTER) != 0) + { + if (m_argIterator.HasThis()) + { + return (LPVOID)pData->argumentRegisters.a[0]; + } + } + + return nullptr; +} + +LPVOID ProfileArgIterator::GetReturnBufferAddr(void) +{ + _ASSERTE(!"TODO RISCV64 NYI"); + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + } + CONTRACTL_END; + + PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast(m_handle); + + if ((pData->flags & PROFILE_TAILCALL) != 0) + { + _ASSERTE(!"GetReturnBufferAddr() - return buffer address is not available in tailcall probe"); + return nullptr; + } + + if (m_argIterator.HasRetBuffArg()) + { + if ((pData->flags & PROFILE_ENTER) != 0) + { + return (LPVOID)pData->x8; + } + else + { + // On ARM64 there is no requirement for the method to preserve the value stored in x8. + // In order to workaround this JIT will explicitly return the return buffer address in x0. + _ASSERTE((pData->flags & PROFILE_LEAVE) != 0); + return (LPVOID)pData->argumentRegisters.a[0]; + } + } + + UINT fpReturnSize = m_argIterator.GetFPReturnSize(); + if (fpReturnSize != 0) + { + TypeHandle thReturnValueType; + m_argIterator.GetSig()->GetReturnTypeNormalized(&thReturnValueType); + if (!thReturnValueType.IsNull() && thReturnValueType.IsHFA()) + { + UINT hfaFieldSize = fpReturnSize / 4; + UINT totalSize = m_argIterator.GetSig()->GetReturnTypeSize(); + _ASSERTE(totalSize % hfaFieldSize == 0); + _ASSERTE(totalSize <= 16); + + BYTE *dest = pData->buffer; + for (UINT floatRegIdx = 0; floatRegIdx < totalSize / hfaFieldSize; ++floatRegIdx) + { + if (hfaFieldSize == 4) + { + *(UINT32*)dest = *(UINT32*)&pData->floatArgumentRegisters.f[floatRegIdx]; + dest += 4; + } + else if (hfaFieldSize == 8) + { + *(UINT64*)dest = *(UINT64*)&pData->floatArgumentRegisters.f[floatRegIdx]; + dest += 8; + } + else + { + _ASSERTE(!"unimplemented on RISCV64 yet!"); +#if 0 + _ASSERTE(hfaFieldSize == 16); + *(NEON128*)dest = pData->floatArgumentRegisters.f[floatRegIdx]; + dest += 16; +#endif + } + + if (floatRegIdx > 8) + { + // There's only space for 8 arguments in buffer + _ASSERTE(FALSE); + break; + } + } + + return pData->buffer; + } + + return &pData->floatArgumentRegisters.f[0]; + } + + if (!m_argIterator.GetSig()->IsReturnTypeVoid()) + { + return &pData->argumentRegisters.a[0]; + } + + return nullptr; +} + +#undef PROFILE_ENTER +#undef PROFILE_LEAVE +#undef PROFILE_TAILCALL + +#endif // PROFILING_SUPPORTED diff --git a/src/coreclr/vm/riscv64/stubs.cpp b/src/coreclr/vm/riscv64/stubs.cpp index 9a0cdd4e406d37..ce1299a3c7a93e 100644 --- a/src/coreclr/vm/riscv64/stubs.cpp +++ b/src/coreclr/vm/riscv64/stubs.cpp @@ -1,4 +1,720 @@ +// #error "TODO-RISCV64: missing implementation" + // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +// +// File: stubs.cpp +// +// This file contains stub functions for unimplemented features need to +// run on the ARM64 platform. 
+
+#include "common.h"
+#include "dllimportcallback.h"
+#include "comdelegate.h"
+#include "asmconstants.h"
+#include "virtualcallstub.h"
+#include "jitinterface.h"
+#include "ecall.h"
+
+
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------------------
+// InstructionFormat for B.cond
+//-----------------------------------------------------------------------
+class ConditionalBranchInstructionFormat : public InstructionFormat
+{
+
+  public:
+    ConditionalBranchInstructionFormat() : InstructionFormat(InstructionFormat::k32)
+    {
+        LIMITED_METHOD_CONTRACT;
+    }
+
+    virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
+    {
+        LIMITED_METHOD_CONTRACT;
+
+        _ASSERTE(!"RISCV64: not implemented on riscv64 yet!");
+        _ASSERTE(refsize == InstructionFormat::k32);
+
+        return 4;
+    }
+
+    virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
+    {
+        WRAPPER_NO_CONTRACT;
+        return 0;
+    }
+
+
+    virtual BOOL CanReach(UINT refSize, UINT variationCode, BOOL fExternal, INT_PTR offset)
+    {
+        _ASSERTE(!fExternal || "RISCV64:NYI - CompareAndBranchInstructionFormat::CanReach external");
+        if (fExternal)
+            return false;
+
+        if (offset < -1048576 || offset > 1048572)
+            return false;
+        return true;
+    }
+    // TODO: add for RISCV64. Unused now!
+    // B.
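+    // For reference, the [-1048576, 1048572] window tested by CanReach above
+    // corresponds to a 21-bit signed, 4-byte-aligned displacement - the reach
+    // of a RISC-V jal, not the +/-4 KiB reach of a bare B-type conditional
+    // branch. A minimal sketch of that check (an illustration, not part of
+    // this patch):
+    //
+    //   static bool FitsJalReach(INT_PTR offset)
+    //   {
+    //       // 21-bit signed immediate; low two bits must be clear
+    //       return ((offset & 3) == 0) &&
+    //              (offset >= -(INT_PTR)(1 << 20)) &&
+    //              (offset <= (INT_PTR)(1 << 20) - 4);
+    //   }
+    //
+    // A conditional branch needing this reach is conventionally materialized
+    // as an inverted B-type branch over a jal.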