blob: ff98b7db20c6793f6dca2d338959264f23692c54 [file] [log] [blame]
Thomas Gleixner4317cf952019-05-31 01:09:38 -07001// SPDX-License-Identifier: GPL-2.0-only
David Daneya79f2482012-04-19 14:59:55 -07002/*
Shile Zhang10916702019-12-04 08:46:31 +08003 * sorttable.c: Sort the kernel's table
David Daneya79f2482012-04-19 14:59:55 -07004 *
David Daneyd59a1682012-04-24 11:23:14 -07005 * Copyright 2011 - 2012 Cavium, Inc.
David Daneya79f2482012-04-19 14:59:55 -07006 *
7 * Based on code taken from recortmcount.c which is:
8 *
9 * Copyright 2009 John F. Reiser <[email protected]>. All rights reserved.
David Daneya79f2482012-04-19 14:59:55 -070010 *
11 * Restructured to fit Linux format, as well as other updates:
12 * Copyright 2010 Steven Rostedt <[email protected]>, Red Hat Inc.
13 */
14
15/*
16 * Strategy: alter the vmlinux file in-place.
17 */
18
19#include <sys/types.h>
20#include <sys/mman.h>
21#include <sys/stat.h>
22#include <getopt.h>
23#include <elf.h>
24#include <fcntl.h>
David Daneya79f2482012-04-19 14:59:55 -070025#include <stdio.h>
26#include <stdlib.h>
27#include <string.h>
28#include <unistd.h>
29
David Daneyd59a1682012-04-24 11:23:14 -070030#include <tools/be_byteshift.h>
31#include <tools/le_byteshift.h>
32
Vineet Guptaf06d19e2013-11-15 12:08:05 +053033#ifndef EM_ARCOMPACT
34#define EM_ARCOMPACT 93
35#endif
36
Max Filippov25df8192014-02-18 15:29:11 +040037#ifndef EM_XTENSA
38#define EM_XTENSA 94
39#endif
40
Will Deaconadace892013-05-08 17:29:24 +010041#ifndef EM_AARCH64
42#define EM_AARCH64 183
43#endif
44
Michal Simek372c7202014-01-23 15:52:46 -080045#ifndef EM_MICROBLAZE
46#define EM_MICROBLAZE 189
47#endif
48
Vineet Guptab3210d142013-11-22 13:05:58 +053049#ifndef EM_ARCV2
50#define EM_ARCV2 195
51#endif
52
/*
 * Endian-aware accessors, installed once in do_file() from the ELF
 * header's EI_DATA field: r/w handle 32-bit values, r2/w2 16-bit,
 * r8/w8 64-bit.  All tolerate unaligned pointers.
 */
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);

/* Arch-specific in-place table sort: (section image, size in bytes). */
typedef void (*table_sort_t)(char *, int);
60
/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces. If successful, then mmap
 * avoids copying unused pieces; else just read the whole file.
 * Open for both read and write.
 *
 * Returns the mapped address on success, or NULL on any failure;
 * *size is set only on success.
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		/*
		 * Callers test "if (!addr)"; mmap's error value is
		 * MAP_FAILED ((void *)-1), NOT NULL.  Without this
		 * normalization the failure went undetected and the
		 * caller dereferenced a garbage pointer.
		 */
		addr = NULL;
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		goto out;
	}

	*size = sb.st_size;

out:
	close(fd);
	return addr;
}
99
/*
 * Unaligned-safe readers; the big-endian or little-endian set is
 * installed into r/r2/r8 by do_file() to match the ELF image.
 */
static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}
129
/*
 * Unaligned-safe writers, the counterparts of the readers; installed
 * into w/w2/w8 by do_file() to match the ELF image's endianness.
 */
static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}
David Daneya79f2482012-04-19 14:59:55 -0700159
/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))

/* Reserved section index that needs remapping (everything in the
 * reserved range except SHN_XINDEX, which is handled separately). */
static inline int is_shndx_special(unsigned int i)
{
	return (i >= SHN_LORESERVE) && (i <= SHN_HIRESERVE) && (i != SHN_XINDEX);
}
171
/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	/* SHN_XINDEX: the real index lives in the extended shndx table. */
	if (shndx == SHN_XINDEX)
		return r(&symtab_shndx_start[sym_offs]);
	/* Other reserved indices are remapped to -256..-1. */
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	return shndx;
}
183
David Daneya79f2482012-04-19 14:59:55 -0700184/* 32 bit and 64 bit are very similar */
Shile Zhang10916702019-12-04 08:46:31 +0800185#include "sorttable.h"
186#define SORTTABLE_64
187#include "sorttable.h"
David Daneya79f2482012-04-19 14:59:55 -0700188
/* qsort() comparator: signed compare of two 32-bit table offsets. */
static int compare_relative_table(const void *a, const void *b)
{
	int32_t lhs = (int32_t)r(a);
	int32_t rhs = (int32_t)r(b);

	/* Yields -1, 0 or 1 without risking subtraction overflow. */
	return (lhs > rhs) - (lhs < rhs);
}
200
Shile Zhang6402e142019-12-04 08:46:28 +0800201static void sort_relative_table(char *extab_image, int image_size)
202{
203 int i = 0;
204
205 /*
206 * Do the same thing the runtime sort does, first normalize to
207 * being relative to the start of the section.
208 */
209 while (i < image_size) {
210 uint32_t *loc = (uint32_t *)(extab_image + i);
211 w(r(loc) + i, loc);
212 i += 4;
213 }
214
215 qsort(extab_image, image_size / 8, 8, compare_relative_table);
216
217 /* Now denormalize. */
218 i = 0;
219 while (i < image_size) {
220 uint32_t *loc = (uint32_t *)(extab_image + i);
221 w(r(loc) - i, loc);
222 i += 4;
223 }
224}
225
Tony Luck548acf12016-02-17 10:20:12 -0800226static void x86_sort_relative_table(char *extab_image, int image_size)
227{
Shile Zhang6402e142019-12-04 08:46:28 +0800228 int i = 0;
Tony Luck548acf12016-02-17 10:20:12 -0800229
Tony Luck548acf12016-02-17 10:20:12 -0800230 while (i < image_size) {
231 uint32_t *loc = (uint32_t *)(extab_image + i);
232
233 w(r(loc) + i, loc);
234 w(r(loc + 1) + i + 4, loc + 1);
235 w(r(loc + 2) + i + 8, loc + 2);
236
237 i += sizeof(uint32_t) * 3;
238 }
239
240 qsort(extab_image, image_size / 12, 12, compare_relative_table);
241
242 i = 0;
243 while (i < image_size) {
244 uint32_t *loc = (uint32_t *)(extab_image + i);
245
246 w(r(loc) - i, loc);
247 w(r(loc + 1) - (i + 4), loc + 1);
248 w(r(loc + 2) - (i + 8), loc + 2);
249
250 i += sizeof(uint32_t) * 3;
251 }
252}
253
/*
 * Validate the mapped ELF image, install the endian accessors that
 * match it, choose an optional arch-specific sort routine, and
 * dispatch to do_sort_32()/do_sort_64() (generated by the double
 * inclusion of sorttable.h above).
 *
 * Returns 0 on success, -1 on any validation or sort failure.
 */
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	/* e_ident layout is class-independent, so peek via the 32-bit header. */
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	/* Must run first: every r*()/w*() call below depends on these. */
	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	/* Only executables and shared objects (e.g. vmlinux) are accepted. */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	/*
	 * Per-architecture handling: x86 and a few others use a custom
	 * relative-offset sort; the remaining listed machines sort with
	 * the generic path (custom_sort stays NULL); anything else is
	 * rejected.
	 */
	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_X86_64:
		custom_sort = x86_sort_relative_table;
		break;
	case EM_S390:
	case EM_AARCH64:
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	/* Sanity-check header sizes for the class, then sort. */
	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
	{
		/* Re-read the header with the 64-bit layout. */
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
	}
		break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}
346
/*
 * Sort the tables of each vmlinux image named on the command line,
 * in place.  Exits 1 if any file failed, 0 otherwise.
 */
int main(int argc, char *argv[])
{
	int argn;
	int n_error = 0;	/* gcc-4.3.0 false positive complaint */

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Handle each file independently; one bad input doesn't stop the rest. */
	for (argn = 1; argn < argc; argn++) {
		size_t size = 0;
		void *addr = mmap_file(argv[argn], &size);

		if (!addr) {
			n_error++;
			continue;
		}

		if (do_file(argv[argn], addr))
			n_error++;

		munmap(addr, size);
	}

	return n_error ? 1 : 0;
}