#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <assert.h>
// For mmap()
#include <sys/mman.h>
#include "ujit_asm.h"
// TODO: give ujit_examples.h a more meaningful file name
#include "ujit_examples.h"
// Dummy none/null operand
const x86opnd_t NO_OPND = { OPND_NONE, 0, .imm = 0 };
// 64-bit GP registers
const x86opnd_t RAX = { OPND_REG, 64, .reg = { REG_GP, 0 }};
const x86opnd_t RCX = { OPND_REG, 64, .reg = { REG_GP, 1 }};
const x86opnd_t RDX = { OPND_REG, 64, .reg = { REG_GP, 2 }};
const x86opnd_t RBX = { OPND_REG, 64, .reg = { REG_GP, 3 }};
const x86opnd_t RSP = { OPND_REG, 64, .reg = { REG_GP, 4 }};
const x86opnd_t RBP = { OPND_REG, 64, .reg = { REG_GP, 5 }};
const x86opnd_t RSI = { OPND_REG, 64, .reg = { REG_GP, 6 }};
const x86opnd_t RDI = { OPND_REG, 64, .reg = { REG_GP, 7 }};
const x86opnd_t R8 = { OPND_REG, 64, .reg = { REG_GP, 8 }};
const x86opnd_t R9 = { OPND_REG, 64, .reg = { REG_GP, 9 }};
const x86opnd_t R10 = { OPND_REG, 64, .reg = { REG_GP, 10 }};
const x86opnd_t R11 = { OPND_REG, 64, .reg = { REG_GP, 11 }};
const x86opnd_t R12 = { OPND_REG, 64, .reg = { REG_GP, 12 }};
const x86opnd_t R13 = { OPND_REG, 64, .reg = { REG_GP, 13 }};
const x86opnd_t R14 = { OPND_REG, 64, .reg = { REG_GP, 14 }};
const x86opnd_t R15 = { OPND_REG, 64, .reg = { REG_GP, 15 }};
// 32-bit GP registers
const x86opnd_t EAX = { OPND_REG, 32, .reg = { REG_GP, 0 }};
const x86opnd_t ECX = { OPND_REG, 32, .reg = { REG_GP, 1 }};
const x86opnd_t EDX = { OPND_REG, 32, .reg = { REG_GP, 2 }};
const x86opnd_t EBX = { OPND_REG, 32, .reg = { REG_GP, 3 }};
const x86opnd_t ESP = { OPND_REG, 32, .reg = { REG_GP, 4 }};
const x86opnd_t EBP = { OPND_REG, 32, .reg = { REG_GP, 5 }};
const x86opnd_t ESI = { OPND_REG, 32, .reg = { REG_GP, 6 }};
const x86opnd_t EDI = { OPND_REG, 32, .reg = { REG_GP, 7 }};
const x86opnd_t R8D = { OPND_REG, 32, .reg = { REG_GP, 8 }};
const x86opnd_t R9D = { OPND_REG, 32, .reg = { REG_GP, 9 }};
const x86opnd_t R10D = { OPND_REG, 32, .reg = { REG_GP, 10 }};
const x86opnd_t R11D = { OPND_REG, 32, .reg = { REG_GP, 11 }};
const x86opnd_t R12D = { OPND_REG, 32, .reg = { REG_GP, 12 }};
const x86opnd_t R13D = { OPND_REG, 32, .reg = { REG_GP, 13 }};
const x86opnd_t R14D = { OPND_REG, 32, .reg = { REG_GP, 14 }};
const x86opnd_t R15D = { OPND_REG, 32, .reg = { REG_GP, 15 }};
// Compute the number of bits needed to encode a signed value
size_t sig_imm_size(int64_t imm)
{
// Compute the smallest size this immediate fits in
if (imm >= -128 && imm <= 127)
return 8;
if (imm >= -32768 && imm <= 32767)
return 16;
if (imm >= -2147483648 && imm <= 2147483647)
return 32;
return 64;
}
// Compute the number of bits needed to encode an unsigned value
size_t unsig_imm_size(uint64_t imm)
{
// Compute the smallest size this immediate fits in
if (imm <= 255)
return 8;
else if (imm <= 65535)
return 16;
else if (imm <= 4294967295)
return 32;
return 64;
}
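// Make a memory operand of the form [base_reg + disp] with the given size in bits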
x86opnd_t mem_opnd(size_t num_bits, x86opnd_t base_reg, int32_t disp)
{
x86opnd_t opnd = {
OPND_MEM,
num_bits,
.mem = { base_reg.reg.reg_no, 0, 0, false, false, disp }
};
return opnd;
}
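// Make a signed immediate operand, sized to the smallest encoding that fits the value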
x86opnd_t imm_opnd(int64_t imm)
{
x86opnd_t opnd = {
OPND_IMM,
sig_imm_size(imm),
.imm = imm
};
return opnd;
}
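// Initialize a code block: map a read/write/executable memory region of the given size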
void cb_init(codeblock_t* cb, size_t mem_size)
{
// Map the memory as executable
cb->mem_block = (uint8_t*)mmap(
NULL,
mem_size,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANON,
-1,
0
);
// Check that the memory mapping was successful
if (cb->mem_block == MAP_FAILED)
{
fprintf(stderr, "mmap call failed\n");
exit(-1);
}
cb->mem_size = mem_size;
cb->write_pos = 0;
cb->num_labels = 0;
cb->num_refs = 0;
}
/**
Set the current write position
*/
void cb_set_pos(codeblock_t* cb, size_t pos)
{
assert (pos < cb->mem_size);
cb->write_pos = pos;
}
// Get a direct pointer into the executable memory block
uint8_t* cb_get_ptr(codeblock_t* cb, size_t index)
{
assert (index < cb->mem_size);
return &cb->mem_block[index];
}
// Write a byte at the current position
void cb_write_byte(codeblock_t* cb, uint8_t byte)
{
assert (cb->mem_block);
assert (cb->write_pos + 1 <= cb->mem_size);
cb->mem_block[cb->write_pos++] = byte;
}
// Write multiple bytes starting from the current position
void cb_write_bytes(codeblock_t* cb, size_t num_bytes, ...)
{
va_list va;
va_start(va, num_bytes);
for (size_t i = 0; i < num_bytes; ++i)
{
uint8_t byte = va_arg(va, int);
cb_write_byte(cb, byte);
}
va_end(va);
}
// Write a signed integer over a given number of bits at the current position
void cb_write_int(codeblock_t* cb, uint64_t val, size_t num_bits)
{
assert (num_bits > 0);
assert (num_bits % 8 == 0);
// Switch on the number of bits
switch (num_bits)
{
case 8:
cb_write_byte(cb, (uint8_t)val);
break;
case 16:
cb_write_bytes(
cb,
2,
(uint8_t)((val >> 0) & 0xFF),
(uint8_t)((val >> 8) & 0xFF)
);
break;
case 32:
cb_write_bytes(
cb,
4,
(uint8_t)((val >> 0) & 0xFF),
(uint8_t)((val >> 8) & 0xFF),
(uint8_t)((val >> 16) & 0xFF),
(uint8_t)((val >> 24) & 0xFF)
);
break;
default:
{
// Compute the size in bytes
size_t num_bytes = num_bits / 8;
// Write out the bytes
for (size_t i = 0; i < num_bytes; ++i)
{
uint8_t byte_val = (uint8_t)(val & 0xFF);
cb_write_byte(cb, byte_val);
val >>= 8;
}
}
}
}
// Ruby instruction prologue and epilogue functions
void cb_write_prologue(codeblock_t* cb)
{
for (size_t i = 0; i < sizeof(ujit_pre_call_bytes); ++i)
cb_write_byte(cb, ujit_pre_call_bytes[i]);
}
void cb_write_epilogue(codeblock_t* cb)
{
for (size_t i = 0; i < sizeof(ujit_post_call_bytes); ++i)
cb_write_byte(cb, ujit_post_call_bytes[i]);
}
// Allocate a new label with a given name
size_t cb_new_label(codeblock_t* cb, const char* name)
{
//if (hasASM)
// writeString(to!string(label) ~ ":");
assert (cb->num_labels < MAX_LABELS);
// Allocate the new label
size_t label_idx = cb->num_labels++;
// This label doesn't have an address yet
cb->label_addrs[label_idx] = 0;
cb->label_names[label_idx] = name;
return label_idx;
}
// Write a label at the current address
void cb_write_label(codeblock_t* cb, size_t label_idx)
{
assert (label_idx < MAX_LABELS);
cb->label_addrs[label_idx] = cb->write_pos;
}
// Add a label reference at the current write position
void cb_label_ref(codeblock_t* cb, size_t label_idx)
{
assert (label_idx < MAX_LABELS);
assert (cb->num_refs < MAX_LABEL_REFS);
// Keep track of the reference
cb->label_refs[cb->num_refs] = (labelref_t){ cb->write_pos, label_idx };
cb->num_refs++;
}
// Link internal label references
void cb_link_labels(codeblock_t* cb)
{
size_t orig_pos = cb->write_pos;
// For each label reference
for (size_t i = 0; i < cb->num_refs; ++i)
{
size_t ref_pos = cb->label_refs[i].pos;
size_t label_idx = cb->label_refs[i].label_idx;
assert (ref_pos < cb->mem_size);
assert (label_idx < MAX_LABELS);
size_t label_addr = cb->label_addrs[label_idx];
assert (label_addr < cb->mem_size);
// Compute the offset from the reference's end to the label
int64_t offset = (int64_t)label_addr - (int64_t)(ref_pos + 4);
cb_set_pos(cb, ref_pos);
cb_write_int(cb, offset, 32);
}
cb->write_pos = orig_pos;
// Clear the label positions and references
cb->num_labels = 0;
cb->num_refs = 0;
}
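// Typical label workflow: allocate a label with cb_new_label(), emit forward
// references to it with the jump helpers below (each writes a placeholder
// 32-bit offset via cb_label_ref()), mark its address with cb_write_label(),
// and finally call cb_link_labels() to patch every recorded offset.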
// Check if an operand needs a REX byte to be encoded
bool rex_needed(x86opnd_t opnd)
{
if (opnd.type == OPND_NONE || opnd.type == OPND_IMM)
{
return false;
}
if (opnd.type == OPND_REG)
{
return (
opnd.reg.reg_no > 7 ||
(opnd.num_bits == 8 && opnd.reg.reg_no >= 4 && opnd.reg.reg_no <= 7)
);
}
if (opnd.type == OPND_MEM)
{
return (opnd.mem.base_reg_no > 7) || (opnd.mem.has_idx && opnd.mem.idx_reg_no > 7);
}
assert (false);
return false; // unreachable; avoids falling off the end of a non-void function
}
// Check if an SIB byte is needed to encode this operand
bool sib_needed(x86opnd_t opnd)
{
if (opnd.type != OPND_MEM)
return false;
return (
opnd.mem.has_idx ||
opnd.mem.base_reg_no == RSP.reg.reg_no ||
opnd.mem.base_reg_no == R12.reg.reg_no
);
}
// Compute the size of the displacement field needed for a memory operand
size_t disp_size(x86opnd_t opnd)
{
assert (opnd.type == OPND_MEM);
// If using RIP as the base, use disp32
if (opnd.mem.is_iprel)
{
return 32;
}
// Compute the required displacement size
if (opnd.mem.disp != 0)
{
size_t num_bits = sig_imm_size(opnd.mem.disp);
assert (num_bits <= 32 && "displacement does not fit in 32 bits");
// x86 can only encode 8-bit and 32-bit displacements
if (num_bits == 16)
num_bits = 32;
return num_bits;
}
// If EBP or RBP or R13 is used as the base, displacement must be encoded
if (opnd.mem.base_reg_no == RBP.reg.reg_no ||
opnd.mem.base_reg_no == R13.reg.reg_no)
{
return 8;
}
return 0;
}
// Write the REX byte
static void cb_write_rex(
codeblock_t* cb,
bool w_flag,
uint8_t reg_no,
uint8_t idx_reg_no,
uint8_t rm_reg_no
)
{
// 0 1 0 0 w r x b
// w - 64-bit operand size flag
// r - MODRM.reg extension
// x - SIB.index extension
// b - MODRM.rm or SIB.base extension
uint8_t w = w_flag? 1:0;
uint8_t r = (reg_no & 8)? 1:0;
uint8_t x = (idx_reg_no & 8)? 1:0;
uint8_t b = (rm_reg_no & 8)? 1:0;
// Encode and write the REX byte
uint8_t rexByte = 0x40 + (w << 3) + (r << 2) + (x << 1) + (b);
cb_write_byte(cb, rexByte);
}
// Write an opcode byte with an embedded register operand
static void cb_write_opcode(codeblock_t* cb, uint8_t opcode, x86opnd_t reg)
{
// Write the reg field into the opcode byte
uint8_t op_byte = opcode | (reg.reg.reg_no & 7);
cb_write_byte(cb, op_byte);
}
// Encode an RM instruction
void cb_write_rm(
codeblock_t* cb,
bool szPref,
bool rexW,
x86opnd_t r_opnd,
x86opnd_t rm_opnd,
uint8_t opExt,
size_t op_len,
...)
{
assert (op_len > 0 && op_len <= 3);
assert (r_opnd.type == OPND_REG || r_opnd.type == OPND_NONE);
// Flag to indicate the REX prefix is needed
bool need_rex = rexW || rex_needed(r_opnd) || rex_needed(rm_opnd);
// Flag to indicate SIB byte is needed
bool need_sib = sib_needed(r_opnd) || sib_needed(rm_opnd);
// Add the operand-size prefix, if needed
if (szPref == true)
cb_write_byte(cb, 0x66);
// Add the REX prefix, if needed
if (need_rex)
{
// 0 1 0 0 w r x b
// w - 64-bit operand size flag
// r - MODRM.reg extension
// x - SIB.index extension
// b - MODRM.rm or SIB.base extension
uint8_t w = rexW? 1:0;
uint8_t r;
if (r_opnd.type != OPND_NONE)
r = (r_opnd.reg.reg_no & 8)? 1:0;
else
r = 0;
uint8_t x;
if (need_sib && rm_opnd.mem.has_idx)
x = (rm_opnd.mem.idx_reg_no & 8)? 1:0;
else
x = 0;
uint8_t b;
if (rm_opnd.type == OPND_REG)
b = (rm_opnd.reg.reg_no & 8)? 1:0;
else if (rm_opnd.type == OPND_MEM)
b = (rm_opnd.mem.base_reg_no & 8)? 1:0;
else
b = 0;
// Encode and write the REX byte
uint8_t rex_byte = 0x40 + (w << 3) + (r << 2) + (x << 1) + (b);
cb_write_byte(cb, rex_byte);
}
// Write the opcode bytes to the code block
va_list va;
va_start(va, op_len);
for (size_t i = 0; i < op_len; ++i)
{
uint8_t byte = va_arg(va, int);
cb_write_byte(cb, byte);
}
va_end(va);
// MODRM.mod (2 bits)
// MODRM.reg (3 bits)
// MODRM.rm (3 bits)
assert (
!(opExt != 0xFF && r_opnd.type != OPND_NONE) &&
"opcode extension and register operand present"
);
// Encode the mod field
uint8_t mod;
if (rm_opnd.type == OPND_REG)
{
mod = 3;
}
else
{
size_t dsize = disp_size(rm_opnd);
if (dsize == 0 || rm_opnd.mem.is_iprel)
mod = 0;
else if (dsize == 8)
mod = 1;
else if (dsize == 32)
mod = 2;
else
assert (false);
}
// Encode the reg field
uint8_t reg;
if (opExt != 0xFF)
reg = opExt;
else if (r_opnd.type == OPND_REG)
reg = r_opnd.reg.reg_no & 7;
else
reg = 0;
// Encode the rm field
uint8_t rm;
if (rm_opnd.type == OPND_REG)
{
rm = rm_opnd.reg.reg_no & 7;
}
else
{
if (need_sib)
rm = 4;
else
rm = rm_opnd.mem.base_reg_no & 7;
}
// Encode and write the ModR/M byte
uint8_t rm_byte = (mod << 6) + (reg << 3) + (rm);
cb_write_byte(cb, rm_byte);
// Add the SIB byte, if needed
if (need_sib)
{
// SIB.scale (2 bits)
// SIB.index (3 bits)
// SIB.base (3 bits)
assert (rm_opnd.type == OPND_MEM);
// Encode the scale value
uint8_t scale = rm_opnd.mem.scale_exp;
// Encode the index value
uint8_t index;
if (!rm_opnd.mem.has_idx)
index = 4;
else
index = rm_opnd.mem.idx_reg_no & 7;
// Encode the base register
uint8_t base = rm_opnd.mem.base_reg_no & 7;
// Encode and write the SIB byte
uint8_t sib_byte = (scale << 6) + (index << 3) + (base);
cb_write_byte(cb, sib_byte);
}
// Add the displacement, if needed
if (rm_opnd.type == OPND_MEM)
{
size_t dsize = disp_size(rm_opnd);
// Note: a zero disp8 must still be written when RBP or R13 is the base,
// since the mod field already promises a displacement in that case
if (dsize > 0)
cb_write_int(cb, rm_opnd.mem.disp, dsize);
}
}
// Encode an add-like RM instruction with multiple possible encodings
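// The op* parameters give the opcode byte for each supported encoding form:
//   opMemReg8    r/m8, r8             opMemRegPref r/m16..64, r16..64
//   opRegMem8    r8, r/m8             opRegMemPref r16..64, r/m16..64
//   opMemImm8    r/m8, imm8           opMemImmSml  r/m16..64, imm8 (sign-extended)
//   opMemImmLrg  r/m16..64, imm32     opExtImm     ModRM.reg opcode extension for the immediate forms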
void cb_write_rm_multi(
codeblock_t* cb,
const char* mnem,
uint8_t opMemReg8,
uint8_t opMemRegPref,
uint8_t opRegMem8,
uint8_t opRegMemPref,
uint8_t opMemImm8,
uint8_t opMemImmSml,
uint8_t opMemImmLrg,
uint8_t opExtImm,
x86opnd_t opnd0,
x86opnd_t opnd1)
{
assert (opnd0.type == OPND_REG || opnd0.type == OPND_MEM);
/*
// Write disassembly string
if (!opnd1.isNone)
cb.writeASM(mnem, opnd0, opnd1);
else
cb.writeASM(mnem, opnd0);
*/
// Check the size of opnd0
size_t opndSize = opnd0.num_bits;
// Check the size of opnd1
if (opnd1.type == OPND_REG || opnd1.type == OPND_MEM)
{
assert (opnd1.num_bits == opndSize && "operand size mismatch");
}
else if (opnd1.type == OPND_IMM)
{
assert (opnd1.num_bits <= opndSize);
}
assert (opndSize == 8 || opndSize == 16 || opndSize == 32 || opndSize == 64);
bool szPref = opndSize == 16;
bool rexW = opndSize == 64;
// R/M + Reg
if ((opnd0.type == OPND_MEM && opnd1.type == OPND_REG) ||
(opnd0.type == OPND_REG && opnd1.type == OPND_REG))
{
// R/M is opnd0
if (opndSize == 8)
cb_write_rm(cb, false, false, opnd1, opnd0, 0xFF, 1, opMemReg8);
else
cb_write_rm(cb, szPref, rexW, opnd1, opnd0, 0xFF, 1, opMemRegPref);
}
// Reg + R/M
else if (opnd0.type == OPND_REG && opnd1.type == OPND_MEM)
{
// R/M is opnd1
if (opndSize == 8)
cb_write_rm(cb, false, false, opnd0, opnd1, 0xFF, 1, opRegMem8);
else
cb_write_rm(cb, szPref, rexW, opnd0, opnd1, 0xFF, 1, opRegMemPref);
}
// R/M + Imm
else if (opnd1.type == OPND_IMM)
{
// 8-bit immediate
if (opnd1.num_bits <= 8)
{
if (opndSize == 8)
cb_write_rm(cb, false, false, NO_OPND, opnd0, opExtImm, 1, opMemImm8);
else
cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExtImm, 1, opMemImmSml);
cb_write_int(cb, opnd1.imm, 8);
}
// 32-bit immediate
else if (opnd1.num_bits <= 32)
{
assert (opnd1.num_bits <= opndSize && "immediate too large for dst");
cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExtImm, 1, opMemImmLrg);
cb_write_int(cb, opnd1.imm, (opndSize > 32)? 32:opndSize);
}
// Immediate too large
else
{
assert (false && "immediate value too large");
}
}
// Invalid operands
else
{
assert (false && "invalid operand combination");
}
}
// Encode a single-operand shift instruction
void cb_write_shift(
codeblock_t* cb,
const char* mnem,
uint8_t opMemOnePref,
uint8_t opMemClPref,
uint8_t opMemImmPref,
uint8_t opExt,
x86opnd_t opnd0,
x86opnd_t opnd1)
{
// Write a disassembly string
//cb.writeASM(mnem, opnd0, opnd1);
// Check the size of opnd0
size_t opndSize;
if (opnd0.type == OPND_REG || opnd0.type == OPND_MEM)
opndSize = opnd0.num_bits;
else
assert (false && "shift: invalid first operand");
assert (opndSize == 16 || opndSize == 32 || opndSize == 64);
bool szPref = opndSize == 16;
bool rexW = opndSize == 64;
if (opnd1.type == OPND_IMM)
{
if (opnd1.imm == 1)
{
cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExt, 1, opMemOnePref);
}
else
{
assert (opnd1.num_bits <= 8);
cb_write_rm(cb, szPref, rexW, NO_OPND, opnd0, opExt, 1, opMemImmPref);
cb_write_byte(cb, (uint8_t)opnd1.imm);
}
}
/*
else if (opnd1.isReg && opnd1.reg == CL)
{
cb.writeRMInstr!('l', opExt, opMemClPref)(szPref, rexW, opnd0, X86Opnd.NONE);
}
*/
else
{
assert (false);
}
}
// add - Integer addition
void add(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
{
cb_write_rm_multi(
cb,
"add",
0x00, // opMemReg8
0x01, // opMemRegPref
0x02, // opRegMem8
0x03, // opRegMemPref
0x80, // opMemImm8
0x83, // opMemImmSml
0x81, // opMemImmLrg
0x00, // opExtImm
opnd0,
opnd1
);
}
/*
/// call - Call to label with 32-bit offset
void call(CodeBlock cb, Label label)
{
cb.writeASM("call", label);
// Write the opcode
cb.writeByte(0xE8);
// Add a reference to the label
cb.addLabelRef(label);
// Relative 32-bit offset to be patched
cb.writeInt(0, 32);
}
*/
/// call - Indirect call with an R/M operand
void call(codeblock_t* cb, x86opnd_t opnd)
{
//cb.writeASM("call", opnd);
cb_write_rm(cb, false, false, NO_OPND, opnd, 2, 1, 0xFF);
}
// Encode a relative jump to a label (direct or conditional)
// Note: this always encodes a 32-bit offset
void cb_write_jcc(codeblock_t* cb, const char* mnem, uint8_t op0, uint8_t op1, size_t label_idx)
{
//cb.writeASM(mnem, label);
// Write the opcode
cb_write_byte(cb, op0);
cb_write_byte(cb, op1);
// Add a reference to the label
cb_label_ref(cb, label_idx);
// Relative 32-bit offset to be patched
cb_write_int(cb, 0, 32);
}
/// jcc - Conditional relative jump to a label
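/// Each helper emits the two-byte 0x0F-prefixed opcode followed by a rel32
/// placeholder that cb_link_labels() patches once the label address is known.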
void ja (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "ja" , 0x0F, 0x87, label_idx); }
void jae (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jae" , 0x0F, 0x83, label_idx); }
void jb (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jb" , 0x0F, 0x82, label_idx); }
void jbe (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jbe" , 0x0F, 0x86, label_idx); }
void jc (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jc" , 0x0F, 0x82, label_idx); }
void je (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "je" , 0x0F, 0x84, label_idx); }
void jg (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jg" , 0x0F, 0x8F, label_idx); }
void jge (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jge" , 0x0F, 0x8D, label_idx); }
void jl (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jl" , 0x0F, 0x8C, label_idx); }
void jle (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jle" , 0x0F, 0x8E, label_idx); }
void jna (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jna" , 0x0F, 0x86, label_idx); }
void jnae(codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnae", 0x0F, 0x82, label_idx); }
void jnb (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnb" , 0x0F, 0x83, label_idx); }
void jnbe(codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnbe", 0x0F, 0x87, label_idx); }
void jnc (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnc" , 0x0F, 0x83, label_idx); }
void jne (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jne" , 0x0F, 0x85, label_idx); }
void jng (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jng" , 0x0F, 0x8E, label_idx); }
void jnge(codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnge", 0x0F, 0x8C, label_idx); }
void jnl (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnl" , 0x0F, 0x8D, label_idx); }
void jnle(codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnle", 0x0F, 0x8F, label_idx); }
void jno (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jno" , 0x0F, 0x81, label_idx); }
void jnp (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnp" , 0x0F, 0x8B, label_idx); }
void jns (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jns" , 0x0F, 0x89, label_idx); }
void jnz (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jnz" , 0x0F, 0x85, label_idx); }
void jo (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jo" , 0x0F, 0x80, label_idx); }
void jp (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jp" , 0x0F, 0x8A, label_idx); }
void jpe (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jpe" , 0x0F, 0x8A, label_idx); }
void jpo (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jpo" , 0x0F, 0x8B, label_idx); }
void js (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "js" , 0x0F, 0x88, label_idx); }
void jz (codeblock_t* cb, size_t label_idx) { cb_write_jcc(cb, "jz" , 0x0F, 0x84, label_idx); }
/// jmp - Direct relative jump to label
void jmp(codeblock_t* cb, size_t label_idx)
{
//cb.writeASM(mnem, label);
/// Opcode for direct jump with relative 32-bit offset
cb_write_byte(cb, 0xE9);
// Add a reference to the label
cb_label_ref(cb, label_idx);
// Relative 32-bit offset to be patched
cb_write_int(cb, 0, 32);
}
/// jmp - Indirect jump near to an R/M operand
void jmp_rm(codeblock_t* cb, x86opnd_t opnd)
{
//cb.writeASM("jmp", opnd);
cb_write_rm(cb, false, false, NO_OPND, opnd, 4, 1, 0xFF);
}
/*
/// Opcode for direct jump with relative 8-bit offset
const ubyte JMP_REL8_OPCODE = 0xEB;
*/
/*
/// jmp - Jump with relative 8-bit offset
void jmp8(CodeBlock cb, int8_t offset)
{
cb.writeASM("jmp", ((offset > 0)? "+":"-") ~ to!string(offset));
cb.writeByte(JMP_REL8_OPCODE);
cb.writeByte(offset);
}
*/
/*
/// jmp - Jump with relative 32-bit offset
void jmp32(CodeBlock cb, int32_t offset)
{
cb.writeASM("jmp", ((offset > 0)? "+":"-") ~ to!string(offset));
cb.writeByte(JMP_REL32_OPCODE);
cb.writeInt(offset, 32);
}
*/
/// lea - Load Effective Address
void lea(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
{
//cb.writeASM("lea", dst, src);
assert (dst.num_bits == 64);
cb_write_rm(cb, false, true, dst, src, 0xFF, 1, 0x8D);
}
/// mov - Data move operation
void mov(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
{
// R/M + Imm
if (src.type == OPND_IMM)
{
//cb.writeASM("mov", dst, src);
// R + Imm
if (dst.type == OPND_REG)
{
assert (
src.num_bits <= dst.num_bits ||
unsig_imm_size(src.imm) <= dst.num_bits
);
if (dst.num_bits == 16)
cb_write_byte(cb, 0x66);
if (rex_needed(dst) || dst.num_bits == 64)
cb_write_rex(cb, dst.num_bits == 64, 0, 0, dst.reg.reg_no);
cb_write_opcode(cb, (dst.num_bits == 8)? 0xB0:0xB8, dst);
cb_write_int(cb, src.imm, dst.num_bits);
}
// M + Imm
else if (dst.type == OPND_MEM)
{
assert (src.num_bits <= dst.num_bits);
if (dst.num_bits == 8)
cb_write_rm(cb, false, false, NO_OPND, dst, 0xFF, 1, 0xC6);
else
cb_write_rm(cb, dst.num_bits == 16, dst.num_bits == 64, NO_OPND, dst, 0, 1, 0xC7);
cb_write_int(cb, src.imm, (dst.num_bits > 32)? 32:dst.num_bits);
}
else
{
assert (false);
}
}
else
{
cb_write_rm_multi(
cb,
"mov",
0x88, // opMemReg8
0x89, // opMemRegPref
0x8A, // opRegMem8
0x8B, // opRegMemPref
0xC6, // opMemImm8
0xFF, // opMemImmSml (not available)
0xFF, // opMemImmLrg
0xFF, // opExtImm
dst,
src
);
}
}
// nop - Noop, one or multiple bytes long
void nop(codeblock_t* cb, size_t length)
{
switch (length)
{
case 0:
break;
case 1:
//cb.writeASM("nop1");
cb_write_byte(cb, 0x90);
break;
case 2:
//cb.writeASM("nop2");
cb_write_bytes(cb, 2, 0x66,0x90);
break;
case 3:
//cb.writeASM("nop3");
cb_write_bytes(cb, 3, 0x0F,0x1F,0x00);
break;
case 4:
//cb.writeASM("nop4");
cb_write_bytes(cb, 4, 0x0F,0x1F,0x40,0x00);
break;
case 5:
//cb.writeASM("nop5");
cb_write_bytes(cb, 5, 0x0F,0x1F,0x44,0x00,0x00);
break;
case 6:
//cb.writeASM("nop6");
cb_write_bytes(cb, 6, 0x66,0x0F,0x1F,0x44,0x00,0x00);
break;
case 7:
//cb.writeASM("nop7");
cb_write_bytes(cb, 7, 0x0F,0x1F,0x80,0x00,0x00,0x00,0x00);
break;
case 8:
//cb.writeASM("nop8");
cb_write_bytes(cb, 8, 0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00);
break;
case 9:
//cb.writeASM("nop9");
cb_write_bytes(cb, 9, 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00);
break;
default:
{
size_t written = 0;
while (written + 9 <= length)
{
nop(cb, 9);
written += 9;
}
nop(cb, length - written);
}
break;
}
}
/// push - Push a register on the stack
void push(codeblock_t* cb, x86opnd_t reg)
{
assert (reg.num_bits == 64);
//cb.writeASM("push", reg);
if (rex_needed(reg))
cb_write_rex(cb, false, 0, 0, reg.reg.reg_no);
cb_write_opcode(cb, 0x50, reg);
}
/// pop - Pop a register off the stack
void pop(codeblock_t* cb, x86opnd_t reg)
{
assert (reg.num_bits == 64);
//cb.writeASM("pop", reg);
if (rex_needed(reg))
cb_write_rex(cb, false, 0, 0, reg.reg.reg_no);
cb_write_opcode(cb, 0x58, reg);
}
/// ret - Return from call, popping only the return address
void ret(codeblock_t* cb)
{
//cb.writeASM("ret");
cb_write_byte(cb, 0xC3);
}
// sal - Shift arithmetic left
void sal(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
{
cb_write_shift(
cb,
"sal",
0xD1, // opMemOnePref,
0xD3, // opMemClPref,
0xC1, // opMemImmPref,
0x04,
opnd0,
opnd1
);
}
/// sar - Shift arithmetic right (signed)
void sar(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
{
cb_write_shift(
cb,
"sar",
0xD1, // opMemOnePref,
0xD3, // opMemClPref,
0xC1, // opMemImmPref,
0x07,
opnd0,
opnd1
);
}
// shl - Shift logical left
void shl(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
{
cb_write_shift(
cb,
"shl",
0xD1, // opMemOnePref,
0xD3, // opMemClPref,
0xC1, // opMemImmPref,
0x04,
opnd0,
opnd1
);
}
/// shr - Shift logical right (unsigned)
void shr(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
{
cb_write_shift(
cb,
"shr",
0xD1, // opMemOnePref,
0xD3, // opMemClPref,
0xC1, // opMemImmPref,
0x05,
opnd0,
opnd1
);
}
/// sub - Integer subtraction
void sub(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
{
cb_write_rm_multi(
cb,
"sub",
0x28, // opMemReg8
0x29, // opMemRegPref
0x2A, // opRegMem8
0x2B, // opRegMemPref
0x80, // opMemImm8
0x83, // opMemImmSml
0x81, // opMemImmLrg
0x05, // opExtImm
opnd0,
opnd1
);
}
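/*
Illustrative usage sketch (not part of the assembler itself): emit a tiny
function that adds 3 to its first argument, then call it through the
executable buffer. This assumes the System V AMD64 calling convention
(first argument in RDI, return value in RAX) and a large enough code block.

    codeblock_t cb;
    cb_init(&cb, 4096);
    mov(&cb, RAX, RDI);            // result = arg0
    add(&cb, RAX, imm_opnd(3));    // result += 3
    ret(&cb);
    int64_t (*fn)(int64_t) = (int64_t (*)(int64_t))cb_get_ptr(&cb, 0);
    int64_t r = fn(4);             // r == 7
*/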