We’ve been asked the same question numerous times: “When will IDA support RISC-V decompilation?”. We are delighted to say that IDA 9.0 includes a RISC-V decompiler, and on top of that, the disassembler supports the T-Head instruction set for the XUANTIE-RV architecture.

Why is it so important to have RISC-V in IDA? The answer is simple: the architecture keeps gaining popularity. RISC-V has established itself as a revolutionary architecture, offering an open-source, modular design built on the principles of reduced instruction set computing (RISC). Unlike proprietary architectures, RISC-V is free to use, modify, and implement, making it a popular choice for hardware designers and engineers looking for flexibility. Its streamlined instruction set and load-store architecture keep implementations simple and efficient, while optional extensions allow customization for specific application needs.

For reverse engineers, RISC-V presents a unique opportunity: with a fixed-length 32-bit base instruction encoding, 31 general-purpose registers (plus the hardwired-zero register x0), and a program counter, it offers a clean and predictable structure for analysis.
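To see just how regular that base encoding is, here is a minimal sketch (ours, not part of IDA) that extracts the standard fields from a 32-bit R-type instruction word; the same field positions recur across the base instruction formats, which is precisely what makes RISC-V pleasant to analyze programmatically:

#include <stdint.h>
#include <stdio.h>

/* Field layout of a base 32-bit RISC-V instruction (R-type):
   bits [6:0] opcode, [11:7] rd, [14:12] funct3,
   [19:15] rs1, [24:20] rs2, [31:25] funct7. */
static void decode_rtype(uint32_t insn)
{
    uint32_t opcode = insn & 0x7F;
    uint32_t rd     = (insn >> 7)  & 0x1F;
    uint32_t funct3 = (insn >> 12) & 0x07;
    uint32_t rs1    = (insn >> 15) & 0x1F;
    uint32_t rs2    = (insn >> 20) & 0x1F;
    uint32_t funct7 = (insn >> 25) & 0x7F;
    printf("opcode=%02x rd=x%u funct3=%u rs1=x%u rs2=x%u funct7=%02x\n",
           opcode, rd, funct3, rs1, rs2, funct7);
}

int main(void)
{
    decode_rtype(0x00B50533); /* add a0, a0, a1 */
    return 0;
}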
It is also worth highlighting that Linux has a mature RISC-V port covering both RV32 and RV64, allowing seamless integration with open-source development environments. The essential components are all in place: the Linux kernel itself, the U-Boot bootloader, and the GCC/LLVM toolchains all support RISC-V, enabling full-system functionality on RISC-V hardware.
There are many occasions when you may need to analyze such a binary, from embedded firmware to full Linux userland applications.
Now, take a moment to explore the examples below. With the RISC-V decompiler introduced in IDA 9.0, the analysis process becomes much more streamlined, intuitive, and accurate.
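One reading aid before diving in: the listing uses a number of XuanTie-specific “th.” instructions. The following is our rough paraphrase of their semantics, based on the published T-Head extension descriptions (XTheadBa, XTheadBb, XTheadMac, XTheadMemIdx); treat it as an informal cheat sheet rather than an authoritative reference:

/* Approximate semantics of the T-Head instructions appearing below
   (our paraphrase, not IDA output):
   th.addsl rd, rs1, rs2, imm   rd = rs1 + (rs2 << imm)                        add with shifted operand
   th.extu  rd, rs1, msb, lsb   rd = (rs1 >> lsb) & ((2 << (msb - lsb)) - 1)   zero-extended bit-field extract
   th.mula  rd, rs1, rs2        rd = rd + rs1 * rs2                            multiply-accumulate
   th.lbuia rd, (rs1), imm      rd = zext(mem8[rs1]); rs1 += imm               byte load with post-increment
   th.lrbu  rd, rs1, rs2, imm   rd = zext(mem8[rs1 + (rs2 << imm)])            indexed byte load
   th.lrw   rd, rs1, rs2, imm   rd = sext(mem32[rs1 + (rs2 << imm)])           indexed word load
   th.lurw  rd, rs1, rs2, imm   like th.lrw, but rs2 is first zero-extended from 32 bits
*/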
.text:00000000000029D6
.text:00000000000029D6 # =============== S U B R O U T I N E =======================================
.text:00000000000029D6
.text:00000000000029D6
.text:00000000000029D6 # __int64 __fastcall crc32_z(int, char *, unsigned __int64)
.text:00000000000029D6 # public crc32_z
.text:00000000000029D6 crc32_z: # CODE XREF: j_crc32_z+8↑p
.text:00000000000029D6 # DATA XREF: LOAD:0000000000000A50↑o ...
.text:00000000000029D6 beqz a1, loc_2CCA
.text:00000000000029DA sext.w a0, a0
.text:00000000000029DC not a0, a0
.text:00000000000029E0 la a4, dword_A850
.text:00000000000029E8
.text:00000000000029E8 loc_29E8: # CODE XREF: crc32_z+AE↓j
.text:00000000000029E8 beqz a2, loc_2A62
.text:00000000000029EA andi a5, a1, 3
.text:00000000000029EE bnez a5, loc_2A6C
.text:00000000000029F0 mv a4, a1
.text:00000000000029F2 add a7, a1, a2
.text:00000000000029F6 li a6, 1Fh
.text:00000000000029F8 la a5, dword_A850
.text:0000000000002A00
.text:0000000000002A00 loc_2A00: # CODE XREF: crc32_z+2B0↓j
.text:0000000000002A00 sub a3, a7, a4
.text:0000000000002A04 bltu a6, a3, loc_2A86
.text:0000000000002A08 srli a5, a2, 5
.text:0000000000002A0C slli a4, a5, 5
.text:0000000000002A10 add a1, a1, a4
.text:0000000000002A12 li a4, -20h
.text:0000000000002A14 th.mula a2, a5, a4
.text:0000000000002A18 li a7, 3
.text:0000000000002A1A li a4, 0
.text:0000000000002A1C la a3, dword_A850
.text:0000000000002A24
.text:0000000000002A24 loc_2A24: # CODE XREF: crc32_z+2F2↓j
.text:0000000000002A24 sub a5, a2, a4
.text:0000000000002A28 bltu a7, a5, loc_2C88
.text:0000000000002A2C srli a5, a2, 2
.text:0000000000002A30 li a4, -4
.text:0000000000002A32 th.mula a2, a5, a4
.text:0000000000002A36 th.addsl a1, a1, a5, 2
.text:0000000000002A3A beqz a2, loc_2A62
.text:0000000000002A3C li a4, 0
.text:0000000000002A3E la a3, dword_A850
.text:0000000000002A46
.text:0000000000002A46 loc_2A46: # CODE XREF: crc32_z+88↓j
.text:0000000000002A46 th.lrbu a5, a1, a4, 0
.text:0000000000002A4A addi a4, a4, 1
.text:0000000000002A4C xor a5, a5, a0
.text:0000000000002A4E andi a5, a5, 0FFh
.text:0000000000002A52 th.lurw a5, a3, a5, 2
.text:0000000000002A56 srliw a0, a0, 8
.text:0000000000002A5A xor a0, a0, a5
.text:0000000000002A5C sext.w a0, a0
.text:0000000000002A5E bne a2, a4, loc_2A46
.text:0000000000002A62
.text:0000000000002A62 loc_2A62: # CODE XREF: crc32_z:loc_29E8↑j
.text:0000000000002A62 # crc32_z+64↑j
.text:0000000000002A62 not a0, a0
.text:0000000000002A66 th.extu a0, a0, 1Fh, 0
.text:0000000000002A6A ret
.text:0000000000002A6C # ---------------------------------------------------------------------------
.text:0000000000002A6C
.text:0000000000002A6C loc_2A6C: # CODE XREF: crc32_z+18↑j
.text:0000000000002A6C th.lbuia a5, (a1), 1
.text:0000000000002A70 addi a2, a2, -1
.text:0000000000002A72 xor a5, a5, a0
.text:0000000000002A74 andi a5, a5, 0FFh
.text:0000000000002A78 th.lurw a5, a4, a5, 2
.text:0000000000002A7C srliw a0, a0, 8
.text:0000000000002A80 xor a0, a0, a5
.text:0000000000002A82 sext.w a0, a0
.text:0000000000002A84 j loc_29E8
.text:0000000000002A86 # ---------------------------------------------------------------------------
.text:0000000000002A86
.text:0000000000002A86 loc_2A86: # CODE XREF: crc32_z+2E↑j
.text:0000000000002A86 lw a3, 0(a4)
.text:0000000000002A88 addi a4, a4, 20h # ' '
.text:0000000000002A8C xor a0, a0, a3
.text:0000000000002A8E andi t1, a0, 0FFh
.text:0000000000002A92 srliw t3, a0, 18h
.text:0000000000002A96 addi t1, t1, 300h
.text:0000000000002A9A th.lurw t3, a5, t3, 2
.text:0000000000002A9E th.lrw t1, a5, t1, 2
.text:0000000000002AA2 th.extu a3, a0, 17h, 10h
.text:0000000000002AA6 th.extu a0, a0, 0Fh, 8
.text:0000000000002AAA xor t1, t1, t3
.text:0000000000002AAE addi a0, a0, 200h
.text:0000000000002AB2 lw t3, -1Ch(a4)
.text:0000000000002AB6 th.lrw a0, a5, a0, 2
.text:0000000000002ABA addi a3, a3, 100h
.text:0000000000002ABE xor t1, t1, t3
.text:0000000000002AC2 th.lrw a3, a5, a3, 2
.text:0000000000002AC6 xor a0, t1, a0
.text:0000000000002ACA sext.w a0, a0
.text:0000000000002ACC xor a3, a3, a0
.text:0000000000002ACE andi t1, a3, 0FFh
.text:0000000000002AD2 srliw t3, a3, 18h
.text:0000000000002AD6 addi t1, t1, 300h
.text:0000000000002ADA th.lurw t3, a5, t3, 2
.text:0000000000002ADE th.lrw t1, a5, t1, 2
.text:0000000000002AE2 th.extu a0, a3, 17h, 10h
.text:0000000000002AE6 th.extu a3, a3, 0Fh, 8
.text:0000000000002AEA xor t1, t1, t3
.text:0000000000002AEE addi a3, a3, 200h
.text:0000000000002AF2 lw t3, -18h(a4)
.text:0000000000002AF6 th.lrw a3, a5, a3, 2
.text:0000000000002AFA addi a0, a0, 100h
.text:0000000000002AFE xor t1, t1, t3
.text:0000000000002B02 th.lrw a0, a5, a0, 2
.text:0000000000002B06 xor a3, t1, a3
.text:0000000000002B0A sext.w a3, a3
.text:0000000000002B0C xor a0, a0, a3
.text:0000000000002B0E andi t1, a0, 0FFh
.text:0000000000002B12 srliw t3, a0, 18h
.text:0000000000002B16 addi t1, t1, 300h
.text:0000000000002B1A th.lurw t3, a5, t3, 2
.text:0000000000002B1E th.lrw t1, a5, t1, 2
.text:0000000000002B22 th.extu a3, a0, 17h, 10h
.text:0000000000002B26 th.extu a0, a0, 0Fh, 8
.text:0000000000002B2A xor t1, t1, t3
.text:0000000000002B2E addi a0, a0, 200h
.text:0000000000002B32 lw t3, -14h(a4)
.text:0000000000002B36 th.lrw a0, a5, a0, 2
.text:0000000000002B3A addi a3, a3, 100h
.text:0000000000002B3E xor t1, t1, t3
.text:0000000000002B42 th.lrw a3, a5, a3, 2
.text:0000000000002B46 xor a0, t1, a0
.text:0000000000002B4A sext.w a0, a0
.text:0000000000002B4C xor a3, a3, a0
.text:0000000000002B4E andi t1, a3, 0FFh
.text:0000000000002B52 srliw t3, a3, 18h
.text:0000000000002B56 addi t1, t1, 300h
.text:0000000000002B5A th.lurw t3, a5, t3, 2
.text:0000000000002B5E th.lrw t1, a5, t1, 2
.text:0000000000002B62 th.extu a0, a3, 17h, 10h
.text:0000000000002B66 th.extu a3, a3, 0Fh, 8
.text:0000000000002B6A xor t1, t1, t3
.text:0000000000002B6E addi a3, a3, 200h
.text:0000000000002B72 lw t3, -10h(a4)
.text:0000000000002B76 th.lrw a3, a5, a3, 2
.text:0000000000002B7A addi a0, a0, 100h
.text:0000000000002B7E xor t1, t1, t3
.text:0000000000002B82 th.lrw a0, a5, a0, 2
.text:0000000000002B86 xor a3, t1, a3
.text:0000000000002B8A sext.w a3, a3
.text:0000000000002B8C xor a0, a0, a3
.text:0000000000002B8E andi t1, a0, 0FFh
.text:0000000000002B92 srliw t3, a0, 18h
.text:0000000000002B96 addi t1, t1, 300h
.text:0000000000002B9A th.lurw t3, a5, t3, 2
.text:0000000000002B9E th.lrw t1, a5, t1, 2
.text:0000000000002BA2 th.extu a3, a0, 17h, 10h
.text:0000000000002BA6 th.extu a0, a0, 0Fh, 8
.text:0000000000002BAA xor t1, t1, t3
.text:0000000000002BAE addi a0, a0, 200h
.text:0000000000002BB2 lw t3, -0Ch(a4)
.text:0000000000002BB6 th.lrw a0, a5, a0, 2
.text:0000000000002BBA addi a3, a3, 100h
.text:0000000000002BBE xor t1, t1, t3
.text:0000000000002BC2 th.lrw a3, a5, a3, 2
.text:0000000000002BC6 xor a0, t1, a0
.text:0000000000002BCA sext.w a0, a0
.text:0000000000002BCC xor a3, a3, a0
.text:0000000000002BCE andi t1, a3, 0FFh
.text:0000000000002BD2 srliw t3, a3, 18h
.text:0000000000002BD6 addi t1, t1, 300h
.text:0000000000002BDA th.lurw t3, a5, t3, 2
.text:0000000000002BDE th.lrw t1, a5, t1, 2
.text:0000000000002BE2 th.extu a0, a3, 17h, 10h
.text:0000000000002BE6 th.extu a3, a3, 0Fh, 8
.text:0000000000002BEA xor t1, t1, t3
.text:0000000000002BEE addi a3, a3, 200h
.text:0000000000002BF2 lw t3, -8(a4)
.text:0000000000002BF6 th.lrw a3, a5, a3, 2
.text:0000000000002BFA addi a0, a0, 100h
.text:0000000000002BFE xor t1, t1, t3
.text:0000000000002C02 th.lrw a0, a5, a0, 2
.text:0000000000002C06 xor a3, t1, a3
.text:0000000000002C0A sext.w a3, a3
.text:0000000000002C0C xor a0, a0, a3
.text:0000000000002C0E th.extu a3, a0, 17h, 10h
.text:0000000000002C12 andi t1, a0, 0FFh
.text:0000000000002C16 srliw t3, a0, 18h
.text:0000000000002C1A addi a3, a3, 100h
.text:0000000000002C1E addi t1, t1, 300h
.text:0000000000002C22 th.lrw a3, a5, a3, 2
.text:0000000000002C26 th.lrw t1, a5, t1, 2
.text:0000000000002C2A th.lurw t3, a5, t3, 2
.text:0000000000002C2E th.extu a0, a0, 0Fh, 8
.text:0000000000002C32 addi a0, a0, 200h
.text:0000000000002C36 xor t1, t1, t3
.text:0000000000002C3A lw t3, -4(a4)
.text:0000000000002C3E th.lrw a0, a5, a0, 2
.text:0000000000002C42 xor t1, t1, t3
.text:0000000000002C46 xor a0, t1, a0
.text:0000000000002C4A sext.w a0, a0
.text:0000000000002C4C xor a3, a3, a0
.text:0000000000002C4E th.extu a0, a3, 17h, 10h
.text:0000000000002C52 andi t1, a3, 0FFh
.text:0000000000002C56 addi a0, a0, 100h
.text:0000000000002C5A addi t1, t1, 300h
.text:0000000000002C5E srliw t4, a3, 18h
.text:0000000000002C62 th.extu a3, a3, 0Fh, 8
.text:0000000000002C66 th.lrw t3, a5, a0, 2
.text:0000000000002C6A addi a3, a3, 200h
.text:0000000000002C6E th.lrw a0, a5, t1, 2
.text:0000000000002C72 th.lurw t1, a5, t4, 2
.text:0000000000002C76 th.lrw a3, a5, a3, 2
.text:0000000000002C7A xor a0, a0, t1
.text:0000000000002C7E xor a0, a0, a3
.text:0000000000002C80 sext.w a0, a0
.text:0000000000002C82 xor a0, t3, a0
.text:0000000000002C86 j loc_2A00
.text:0000000000002C88 # ---------------------------------------------------------------------------
.text:0000000000002C88
.text:0000000000002C88 loc_2C88: # CODE XREF: crc32_z+52↑j
.text:0000000000002C88 th.lrw a5, a1, a4, 0
.text:0000000000002C8C addi a4, a4, 4
.text:0000000000002C8E xor a5, a5, a0
.text:0000000000002C90 th.extu a0, a5, 17h, 10h
.text:0000000000002C94 andi a6, a5, 0FFh
.text:0000000000002C98 addi a0, a0, 100h
.text:0000000000002C9C addi a6, a6, 300h
.text:0000000000002CA0 srliw t3, a5, 18h
.text:0000000000002CA4 th.extu a5, a5, 0Fh, 8
.text:0000000000002CA8 th.lrw t1, a3, a0, 2
.text:0000000000002CAC addi a5, a5, 200h
.text:0000000000002CB0 th.lrw a0, a3, a6, 2
.text:0000000000002CB4 th.lurw a6, a3, t3, 2
.text:0000000000002CB8 th.lrw a5, a3, a5, 2
.text:0000000000002CBC xor a0, a0, a6
.text:0000000000002CC0 xor a0, a0, a5
.text:0000000000002CC2 sext.w a0, a0
.text:0000000000002CC4 xor a0, t1, a0
.text:0000000000002CC8 j loc_2A24
.text:0000000000002CCA # ---------------------------------------------------------------------------
.text:0000000000002CCA
.text:0000000000002CCA loc_2CCA: # CODE XREF: crc32_z↑j
.text:0000000000002CCA li a0, 0
.text:0000000000002CCC ret
.text:0000000000002CCC # End of function crc32_z
.text:0000000000002CCC
__int64 __fastcall crc32_z(int a1, char *a2, unsigned __int64 a3)
{
  unsigned int v3; // a0
  char *i; // a4
  char *v5; // a1
  unsigned __int64 v6; // a2
  __int64 j; // a4
  unsigned __int64 v8; // a5
  unsigned __int64 v9; // a2
  char *v10; // a1
  __int64 k; // a4
  char v12; // a5
  char v14; // a5
  int v15; // a3
  unsigned int v16; // a0
  unsigned int v17; // a3
  unsigned int v18; // a0
  unsigned int v19; // a3
  unsigned int v20; // a0
  unsigned int v21; // a3
  unsigned int v22; // a0
  unsigned int v23; // a3
  int v24; // a5

  if ( !a2 )
    return 0LL;
  v3 = ~a1;
  while ( a3 )
  {
    if ( ((unsigned __int8)a2 & 3) == 0 )
    {
      for ( i = a2;
            (unsigned __int64)(&a2[a3] - i) > 0x1F;
            v3 = dword_A850[BYTE2(v23) + 256] ^ dword_A850[(unsigned __int8)(LOBYTE(dword_A850[BYTE2(v22) + 256]) ^ LOBYTE(dword_A850[(unsigned __int8)v22 + 768]) ^ LOBYTE(dword_A850[HIBYTE(v22)]) ^ *(i - 4) ^ LOBYTE(dword_A850[BYTE1(v22) + 512]))
                            + 768] ^ dword_A850[HIBYTE(v23)] ^ dword_A850[BYTE1(v23) + 512] )
      {
        v15 = *(_DWORD *)i;
        i += 32;
        v16 = v3 ^ v15;
        v17 = dword_A850[BYTE2(v16) + 256] ^ dword_A850[(unsigned __int8)v16 + 768] ^ dword_A850[HIBYTE(v16)] ^ *((_DWORD *)i - 7) ^ dword_A850[BYTE1(v16) + 512];
        v18 = dword_A850[BYTE2(v17) + 256] ^ dword_A850[(unsigned __int8)(dword_A850[BYTE2(v16) + 256] ^ LOBYTE(dword_A850[(unsigned __int8)v16 + 768]) ^ LOBYTE(dword_A850[HIBYTE(v16)]) ^ *(i - 28) ^ LOBYTE(dword_A850[BYTE1(v16) + 512]))
                             + 768] ^ dword_A850[HIBYTE(v17)] ^ *((_DWORD *)i - 6) ^ dword_A850[BYTE1(v17) + 512];
        v19 = dword_A850[BYTE2(v18) + 256] ^ dword_A850[(unsigned __int8)v18 + 768] ^ dword_A850[HIBYTE(v18)] ^ *((_DWORD *)i - 5) ^ dword_A850[BYTE1(v18) + 512];
        v20 = dword_A850[BYTE2(v19) + 256] ^ dword_A850[(unsigned __int8)(dword_A850[BYTE2(v18) + 256] ^ LOBYTE(dword_A850[(unsigned __int8)v18 + 768]) ^ LOBYTE(dword_A850[HIBYTE(v18)]) ^ *(i - 20) ^ LOBYTE(dword_A850[BYTE1(v18) + 512]))
                             + 768] ^ dword_A850[HIBYTE(v19)] ^ *((_DWORD *)i - 4) ^ dword_A850[BYTE1(v19) + 512];
        v21 = dword_A850[BYTE2(v20) + 256] ^ dword_A850[(unsigned __int8)v20 + 768] ^ dword_A850[HIBYTE(v20)] ^ *((_DWORD *)i - 3) ^ dword_A850[BYTE1(v20) + 512];
        v22 = dword_A850[BYTE2(v21) + 256] ^ dword_A850[(unsigned __int8)(dword_A850[BYTE2(v20) + 256] ^ LOBYTE(dword_A850[(unsigned __int8)v20 + 768]) ^ LOBYTE(dword_A850[HIBYTE(v20)]) ^ *(i - 12) ^ LOBYTE(dword_A850[BYTE1(v20) + 512]))
                             + 768] ^ dword_A850[HIBYTE(v21)] ^ *((_DWORD *)i - 2) ^ dword_A850[BYTE1(v21) + 512];
        v23 = dword_A850[BYTE2(v22) + 256] ^ dword_A850[(unsigned __int8)v22 + 768] ^ dword_A850[HIBYTE(v22)] ^ *((_DWORD *)i - 1) ^ dword_A850[BYTE1(v22) + 512];
      }
      v5 = &a2[32 * (a3 >> 5)];
      v6 = a3 - 32 * (a3 >> 5);
      for ( j = 0LL; v6 - j > 3; j += 4LL )
      {
        v24 = *(_DWORD *)&v5[j];
        v3 = dword_A850[(unsigned __int8)((v24 ^ v3) >> 16) + 256] ^ dword_A850[(unsigned __int8)(v24 ^ v3) + 768] ^ dword_A850[(v24 ^ v3) >> 24] ^ dword_A850[(unsigned __int8)((unsigned __int16)(v24 ^ v3) >> 8) + 512];
      }
      v8 = v6 >> 2;
      v9 = v6 - 4 * (v6 >> 2);
      v10 = &v5[4 * v8];
      if ( v9 )
      {
        for ( k = 0LL; k != v9; ++k )
        {
          v12 = v10[k];
          v3 = (v3 >> 8) ^ dword_A850[(unsigned __int8)(v12 ^ v3)];
        }
      }
      return ~v3;
    }
    v14 = *a2++;
    --a3;
    v3 = (v3 >> 8) ^ dword_A850[(unsigned __int8)(v14 ^ v3)];
  }
  return ~v3;
}
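Even with automatically generated names, the structure is easy to recognize: dword_A850 appears to hold the CRC-32 lookup tables (the +256, +512, and +768 offsets suggest four 256-entry tables, i.e. a slicing-by-four scheme), the large unrolled block consumes 32 bytes per iteration, and the tail loops fall back to 4 bytes and then 1 byte at a time. Here is a minimal sketch of what the byte-at-a-time tail computes, assuming table stands in for the first 256 entries of dword_A850:

#include <stddef.h>
#include <stdint.h>

/* Classic table-driven (reflected) CRC-32 byte loop; the decompiled
   statement v3 = (v3 >> 8) ^ dword_A850[(unsigned __int8)(v14 ^ v3)]
   is exactly this update. `table` is assumed to hold the usual
   256-entry CRC-32 table. */
static uint32_t crc32_sketch(uint32_t crc, const uint8_t *buf, size_t len,
                             const uint32_t table[256])
{
    crc = ~crc;                                  /* matches `not a0, a0` on entry */
    while (len--)
        crc = (crc >> 8) ^ table[(crc ^ *buf++) & 0xFF];
    return ~crc;                                 /* matches the final `not a0, a0` */
}

The wider variants in the unrolled blocks simply XOR four table lookups per 32-bit word, trading table size for fewer iterations, and the recovered pseudocode makes that optimization visible at a glance.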
This enhancement, introduced with the September 30th release of IDA Pro 9.0, sets the stage for even more sophisticated features as IDA continues to evolve alongside the growing complexity of modern software.